1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2016 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.2
\r
43 #include "RtAudio.h"
\r
49 #include <algorithm>
\r
51 // Static variable definitions.
\r
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
53 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-appropriate mutex macros for the stream lock: a Windows
// CRITICAL_SECTION, a pthread mutex on unix-like APIs, or no-op
// dummies for the dummy (no-API) build.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Device-name helpers: produce a UTF-8 std::string from either a
  // narrow or a wide (UTF-16) C string.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // First call computes the required buffer size (includes the
    // terminating null, hence the length-1 below).
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
  #define MUTEX_LOCK(A)       abs(*A) // dummy definitions
  #define MUTEX_UNLOCK(A)     abs(*A) // dummy definitions
#endif
\r
90 // *************************************************** //
\r
92 // RtAudio definitions.
\r
94 // *************************************************** //
\r
96 std::string RtAudio :: getVersion( void )
\r
98 return RTAUDIO_VERSION;
\r
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
\r
105 // The order here will control the order of RtAudio's API search in
\r
106 // the constructor.
\r
107 #if defined(__UNIX_JACK__)
\r
108 apis.push_back( UNIX_JACK );
\r
110 #if defined(__LINUX_ALSA__)
\r
111 apis.push_back( LINUX_ALSA );
\r
113 #if defined(__LINUX_PULSE__)
\r
114 apis.push_back( LINUX_PULSE );
\r
116 #if defined(__LINUX_OSS__)
\r
117 apis.push_back( LINUX_OSS );
\r
119 #if defined(__WINDOWS_ASIO__)
\r
120 apis.push_back( WINDOWS_ASIO );
\r
122 #if defined(__WINDOWS_WASAPI__)
\r
123 apis.push_back( WINDOWS_WASAPI );
\r
125 #if defined(__WINDOWS_DS__)
\r
126 apis.push_back( WINDOWS_DS );
\r
128 #if defined(__MACOSX_CORE__)
\r
129 apis.push_back( MACOSX_CORE );
\r
131 #if defined(__RTAUDIO_DUMMY__)
\r
132 apis.push_back( RTAUDIO_DUMMY );
\r
136 void RtAudio :: openRtApi( RtAudio::Api api )
\r
142 #if defined(__UNIX_JACK__)
\r
143 if ( api == UNIX_JACK )
\r
144 rtapi_ = new RtApiJack();
\r
146 #if defined(__LINUX_ALSA__)
\r
147 if ( api == LINUX_ALSA )
\r
148 rtapi_ = new RtApiAlsa();
\r
150 #if defined(__LINUX_PULSE__)
\r
151 if ( api == LINUX_PULSE )
\r
152 rtapi_ = new RtApiPulse();
\r
154 #if defined(__LINUX_OSS__)
\r
155 if ( api == LINUX_OSS )
\r
156 rtapi_ = new RtApiOss();
\r
158 #if defined(__WINDOWS_ASIO__)
\r
159 if ( api == WINDOWS_ASIO )
\r
160 rtapi_ = new RtApiAsio();
\r
162 #if defined(__WINDOWS_WASAPI__)
\r
163 if ( api == WINDOWS_WASAPI )
\r
164 rtapi_ = new RtApiWasapi();
\r
166 #if defined(__WINDOWS_DS__)
\r
167 if ( api == WINDOWS_DS )
\r
168 rtapi_ = new RtApiDs();
\r
170 #if defined(__MACOSX_CORE__)
\r
171 if ( api == MACOSX_CORE )
\r
172 rtapi_ = new RtApiCore();
\r
174 #if defined(__RTAUDIO_DUMMY__)
\r
175 if ( api == RTAUDIO_DUMMY )
\r
176 rtapi_ = new RtApiDummy();
\r
180 RtAudio :: RtAudio( RtAudio::Api api )
\r
184 if ( api != UNSPECIFIED ) {
\r
185 // Attempt to open the specified API.
\r
187 if ( rtapi_ ) return;
\r
189 // No compiled support for specified API value. Issue a debug
\r
190 // warning and continue as if no API was specified.
\r
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
194 // Iterate through the compiled APIs and return as soon as we find
\r
195 // one with at least one device or we reach the end of the list.
\r
196 std::vector< RtAudio::Api > apis;
\r
197 getCompiledApi( apis );
\r
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
199 openRtApi( apis[i] );
\r
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
203 if ( rtapi_ ) return;
\r
205 // It should not be possible to get here because the preprocessor
\r
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
207 // API-specific definitions are passed to the compiler. But just in
\r
208 // case something weird happens, we'll thow an error.
\r
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
213 RtAudio :: ~RtAudio()
\r
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
220 RtAudio::StreamParameters *inputParameters,
\r
221 RtAudioFormat format, unsigned int sampleRate,
\r
222 unsigned int *bufferFrames,
\r
223 RtAudioCallback callback, void *userData,
\r
224 RtAudio::StreamOptions *options,
\r
225 RtAudioErrorCallback errorCallback )
\r
227 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
228 sampleRate, bufferFrames, callback,
\r
229 userData, options, errorCallback );
\r
232 // *************************************************** //
\r
234 // Public RtApi definitions (see end of file for
\r
235 // private or protected utility functions).
\r
237 // *************************************************** //
\r
241 stream_.state = STREAM_CLOSED;
\r
242 stream_.mode = UNINITIALIZED;
\r
243 stream_.apiHandle = 0;
\r
244 stream_.userBuffer[0] = 0;
\r
245 stream_.userBuffer[1] = 0;
\r
246 MUTEX_INITIALIZE( &stream_.mutex );
\r
247 showWarnings_ = true;
\r
248 firstErrorOccurred_ = false;
\r
253 MUTEX_DESTROY( &stream_.mutex );
\r
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
257 RtAudio::StreamParameters *iParams,
\r
258 RtAudioFormat format, unsigned int sampleRate,
\r
259 unsigned int *bufferFrames,
\r
260 RtAudioCallback callback, void *userData,
\r
261 RtAudio::StreamOptions *options,
\r
262 RtAudioErrorCallback errorCallback )
\r
264 if ( stream_.state != STREAM_CLOSED ) {
\r
265 errorText_ = "RtApi::openStream: a stream is already open!";
\r
266 error( RtAudioError::INVALID_USE );
\r
270 // Clear stream information potentially left from a previously open stream.
\r
273 if ( oParams && oParams->nChannels < 1 ) {
\r
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 if ( iParams && iParams->nChannels < 1 ) {
\r
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
281 error( RtAudioError::INVALID_USE );
\r
285 if ( oParams == NULL && iParams == NULL ) {
\r
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
287 error( RtAudioError::INVALID_USE );
\r
291 if ( formatBytes(format) == 0 ) {
\r
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
293 error( RtAudioError::INVALID_USE );
\r
297 unsigned int nDevices = getDeviceCount();
\r
298 unsigned int oChannels = 0;
\r
300 oChannels = oParams->nChannels;
\r
301 if ( oParams->deviceId >= nDevices ) {
\r
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
303 error( RtAudioError::INVALID_USE );
\r
308 unsigned int iChannels = 0;
\r
310 iChannels = iParams->nChannels;
\r
311 if ( iParams->deviceId >= nDevices ) {
\r
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
313 error( RtAudioError::INVALID_USE );
\r
320 if ( oChannels > 0 ) {
\r
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
323 sampleRate, format, bufferFrames, options );
\r
324 if ( result == false ) {
\r
325 error( RtAudioError::SYSTEM_ERROR );
\r
330 if ( iChannels > 0 ) {
\r
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
333 sampleRate, format, bufferFrames, options );
\r
334 if ( result == false ) {
\r
335 if ( oChannels > 0 ) closeStream();
\r
336 error( RtAudioError::SYSTEM_ERROR );
\r
341 stream_.callbackInfo.callback = (void *) callback;
\r
342 stream_.callbackInfo.userData = userData;
\r
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
346 stream_.state = STREAM_STOPPED;
\r
349 unsigned int RtApi :: getDefaultInputDevice( void )
\r
351 // Should be implemented in subclasses if possible.
\r
355 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
357 // Should be implemented in subclasses if possible.
\r
361 void RtApi :: closeStream( void )
\r
363 // MUST be implemented in subclasses!
\r
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
370 RtAudio::StreamOptions * /*options*/ )
\r
372 // MUST be implemented in subclasses!
\r
376 void RtApi :: tickStreamTime( void )
\r
378 // Subclasses that do not provide their own implementation of
\r
379 // getStreamTime should call this function once per buffer I/O to
\r
380 // provide basic stream time support.
\r
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
384 #if defined( HAVE_GETTIMEOFDAY )
\r
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
389 long RtApi :: getStreamLatency( void )
\r
393 long totalLatency = 0;
\r
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
395 totalLatency = stream_.latency[0];
\r
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
397 totalLatency += stream_.latency[1];
\r
399 return totalLatency;
\r
402 double RtApi :: getStreamTime( void )
\r
406 #if defined( HAVE_GETTIMEOFDAY )
\r
407 // Return a very accurate estimate of the stream time by
\r
408 // adding in the elapsed time since the last tick.
\r
409 struct timeval then;
\r
410 struct timeval now;
\r
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
413 return stream_.streamTime;
\r
415 gettimeofday( &now, NULL );
\r
416 then = stream_.lastTickTimestamp;
\r
417 return stream_.streamTime +
\r
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
419 (then.tv_sec + 0.000001 * then.tv_usec));
\r
421 return stream_.streamTime;
\r
425 void RtApi :: setStreamTime( double time )
\r
430 stream_.streamTime = time;
\r
431 #if defined( HAVE_GETTIMEOFDAY )
\r
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
436 unsigned int RtApi :: getStreamSampleRate( void )
\r
440 return stream_.sampleRate;
\r
444 // *************************************************** //
\r
446 // OS/API-specific methods.
\r
448 // *************************************************** //
\r
450 #if defined(__MACOSX_CORE__)
\r
452 // The OS X CoreAudio API is designed to use a separate callback
\r
453 // procedure for each of its audio devices. A single RtAudio duplex
\r
454 // stream using two different devices is supported here, though it
\r
455 // cannot be guaranteed to always behave correctly because we cannot
\r
456 // synchronize these two callbacks.
\r
458 // A property listener is installed for over/underrun information.
\r
459 // However, no functionality is currently provided to allow property
\r
460 // listeners to trigger user handlers because it is unclear what could
\r
461 // be done if a critical stream parameter (buffer size, sample rate,
\r
462 // device disconnect) notification arrived. The listeners entail
\r
463 // quite a bit of extra code and most likely, a user program wouldn't
\r
464 // be prepared for the result anyway. However, we do provide a flag
\r
465 // to the client callback function to inform of an over/underrun.
\r
467 // A structure to hold various information related to the CoreAudio API
\r
469 struct CoreHandle {
\r
470 AudioDeviceID id[2]; // device ids
\r
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
472 AudioDeviceIOProcID procId[2];
\r
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
475 UInt32 nStreams[2]; // number of streams to use
\r
477 char *deviceBuffer;
\r
478 pthread_cond_t condition;
\r
479 int drainCounter; // Tracks callback counts when draining
\r
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
486 RtApiCore:: RtApiCore()
\r
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
489 // This is a largely undocumented but absolutely necessary
\r
490 // requirement starting with OS-X 10.6. If not called, queries and
\r
491 // updates to various audio device properties are not handled
\r
493 CFRunLoopRef theRunLoop = NULL;
\r
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
495 kAudioObjectPropertyScopeGlobal,
\r
496 kAudioObjectPropertyElementMaster };
\r
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
498 if ( result != noErr ) {
\r
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
500 error( RtAudioError::WARNING );
\r
505 RtApiCore :: ~RtApiCore()
\r
507 // The subclass destructor gets called before the base class
\r
508 // destructor, so close an existing stream before deallocating
\r
509 // apiDeviceId memory.
\r
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
513 unsigned int RtApiCore :: getDeviceCount( void )
\r
515 // Find out how many audio devices there are, if any.
\r
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
519 if ( result != noErr ) {
\r
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
521 error( RtAudioError::WARNING );
\r
525 return dataSize / sizeof( AudioDeviceID );
\r
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
530 unsigned int nDevices = getDeviceCount();
\r
531 if ( nDevices <= 1 ) return 0;
\r
534 UInt32 dataSize = sizeof( AudioDeviceID );
\r
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
537 if ( result != noErr ) {
\r
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
539 error( RtAudioError::WARNING );
\r
543 dataSize *= nDevices;
\r
544 AudioDeviceID deviceList[ nDevices ];
\r
545 property.mSelector = kAudioHardwarePropertyDevices;
\r
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
547 if ( result != noErr ) {
\r
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
549 error( RtAudioError::WARNING );
\r
553 for ( unsigned int i=0; i<nDevices; i++ )
\r
554 if ( id == deviceList[i] ) return i;
\r
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
557 error( RtAudioError::WARNING );
\r
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
563 unsigned int nDevices = getDeviceCount();
\r
564 if ( nDevices <= 1 ) return 0;
\r
567 UInt32 dataSize = sizeof( AudioDeviceID );
\r
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
570 if ( result != noErr ) {
\r
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
572 error( RtAudioError::WARNING );
\r
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
577 AudioDeviceID deviceList[ nDevices ];
\r
578 property.mSelector = kAudioHardwarePropertyDevices;
\r
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
580 if ( result != noErr ) {
\r
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
582 error( RtAudioError::WARNING );
\r
586 for ( unsigned int i=0; i<nDevices; i++ )
\r
587 if ( id == deviceList[i] ) return i;
\r
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
590 error( RtAudioError::WARNING );
\r
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
596 RtAudio::DeviceInfo info;
\r
597 info.probed = false;
\r
600 unsigned int nDevices = getDeviceCount();
\r
601 if ( nDevices == 0 ) {
\r
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
603 error( RtAudioError::INVALID_USE );
\r
607 if ( device >= nDevices ) {
\r
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
609 error( RtAudioError::INVALID_USE );
\r
613 AudioDeviceID deviceList[ nDevices ];
\r
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
616 kAudioObjectPropertyScopeGlobal,
\r
617 kAudioObjectPropertyElementMaster };
\r
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
619 0, NULL, &dataSize, (void *) &deviceList );
\r
620 if ( result != noErr ) {
\r
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
622 error( RtAudioError::WARNING );
\r
626 AudioDeviceID id = deviceList[ device ];
\r
628 // Get the device name.
\r
630 CFStringRef cfname;
\r
631 dataSize = sizeof( CFStringRef );
\r
632 property.mSelector = kAudioObjectPropertyManufacturer;
\r
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
634 if ( result != noErr ) {
\r
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
636 errorText_ = errorStream_.str();
\r
637 error( RtAudioError::WARNING );
\r
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
642 int length = CFStringGetLength(cfname);
\r
643 char *mname = (char *)malloc(length * 3 + 1);
\r
644 #if defined( UNICODE ) || defined( _UNICODE )
\r
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
649 info.name.append( (const char *)mname, strlen(mname) );
\r
650 info.name.append( ": " );
\r
651 CFRelease( cfname );
\r
654 property.mSelector = kAudioObjectPropertyName;
\r
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
656 if ( result != noErr ) {
\r
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
658 errorText_ = errorStream_.str();
\r
659 error( RtAudioError::WARNING );
\r
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
664 length = CFStringGetLength(cfname);
\r
665 char *name = (char *)malloc(length * 3 + 1);
\r
666 #if defined( UNICODE ) || defined( _UNICODE )
\r
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
671 info.name.append( (const char *)name, strlen(name) );
\r
672 CFRelease( cfname );
\r
675 // Get the output stream "configuration".
\r
676 AudioBufferList *bufferList = nil;
\r
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
678 property.mScope = kAudioDevicePropertyScopeOutput;
\r
679 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
682 if ( result != noErr || dataSize == 0 ) {
\r
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
684 errorText_ = errorStream_.str();
\r
685 error( RtAudioError::WARNING );
\r
689 // Allocate the AudioBufferList.
\r
690 bufferList = (AudioBufferList *) malloc( dataSize );
\r
691 if ( bufferList == NULL ) {
\r
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
693 error( RtAudioError::WARNING );
\r
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
698 if ( result != noErr || dataSize == 0 ) {
\r
699 free( bufferList );
\r
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
701 errorText_ = errorStream_.str();
\r
702 error( RtAudioError::WARNING );
\r
706 // Get output channel information.
\r
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
708 for ( i=0; i<nStreams; i++ )
\r
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
710 free( bufferList );
\r
712 // Get the input stream "configuration".
\r
713 property.mScope = kAudioDevicePropertyScopeInput;
\r
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
715 if ( result != noErr || dataSize == 0 ) {
\r
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
717 errorText_ = errorStream_.str();
\r
718 error( RtAudioError::WARNING );
\r
722 // Allocate the AudioBufferList.
\r
723 bufferList = (AudioBufferList *) malloc( dataSize );
\r
724 if ( bufferList == NULL ) {
\r
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
726 error( RtAudioError::WARNING );
\r
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
731 if (result != noErr || dataSize == 0) {
\r
732 free( bufferList );
\r
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
734 errorText_ = errorStream_.str();
\r
735 error( RtAudioError::WARNING );
\r
739 // Get input channel information.
\r
740 nStreams = bufferList->mNumberBuffers;
\r
741 for ( i=0; i<nStreams; i++ )
\r
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
743 free( bufferList );
\r
745 // If device opens for both playback and capture, we determine the channels.
\r
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
749 // Probe the device sample rates.
\r
750 bool isInput = false;
\r
751 if ( info.outputChannels == 0 ) isInput = true;
\r
753 // Determine the supported sample rates.
\r
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
759 errorText_ = errorStream_.str();
\r
760 error( RtAudioError::WARNING );
\r
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
765 AudioValueRange rangeList[ nRanges ];
\r
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
767 if ( result != kAudioHardwareNoError ) {
\r
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
769 errorText_ = errorStream_.str();
\r
770 error( RtAudioError::WARNING );
\r
774 // The sample rate reporting mechanism is a bit of a mystery. It
\r
775 // seems that it can either return individual rates or a range of
\r
776 // rates. I assume that if the min / max range values are the same,
\r
777 // then that represents a single supported rate and if the min / max
\r
778 // range values are different, the device supports an arbitrary
\r
779 // range of values (though there might be multiple ranges, so we'll
\r
780 // use the most conservative range).
\r
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
782 bool haveValueRange = false;
\r
783 info.sampleRates.clear();
\r
784 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
787 info.sampleRates.push_back( tmpSr );
\r
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
790 info.preferredSampleRate = tmpSr;
\r
793 haveValueRange = true;
\r
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
799 if ( haveValueRange ) {
\r
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
805 info.preferredSampleRate = SAMPLE_RATES[k];
\r
810 // Sort and remove any redundant values
\r
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
814 if ( info.sampleRates.size() == 0 ) {
\r
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
816 errorText_ = errorStream_.str();
\r
817 error( RtAudioError::WARNING );
\r
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
822 // Thus, any other "physical" formats supported by the device are of
\r
823 // no interest to the client.
\r
824 info.nativeFormats = RTAUDIO_FLOAT32;
\r
826 if ( info.outputChannels > 0 )
\r
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
828 if ( info.inputChannels > 0 )
\r
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
831 info.probed = true;
\r
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
836 const AudioTimeStamp* /*inNow*/,
\r
837 const AudioBufferList* inInputData,
\r
838 const AudioTimeStamp* /*inInputTime*/,
\r
839 AudioBufferList* outOutputData,
\r
840 const AudioTimeStamp* /*inOutputTime*/,
\r
841 void* infoPointer )
\r
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
845 RtApiCore *object = (RtApiCore *) info->object;
\r
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
847 return kAudioHardwareUnspecifiedError;
\r
849 return kAudioHardwareNoError;
\r
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
854 const AudioObjectPropertyAddress properties[],
\r
855 void* handlePointer )
\r
857 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
861 handle->xrun[1] = true;
\r
863 handle->xrun[0] = true;
\r
867 return kAudioHardwareNoError;
\r
870 static OSStatus rateListener( AudioObjectID inDevice,
\r
871 UInt32 /*nAddresses*/,
\r
872 const AudioObjectPropertyAddress /*properties*/[],
\r
873 void* ratePointer )
\r
875 Float64 *rate = (Float64 *) ratePointer;
\r
876 UInt32 dataSize = sizeof( Float64 );
\r
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
878 kAudioObjectPropertyScopeGlobal,
\r
879 kAudioObjectPropertyElementMaster };
\r
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
881 return kAudioHardwareNoError;
\r
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
885 unsigned int firstChannel, unsigned int sampleRate,
\r
886 RtAudioFormat format, unsigned int *bufferSize,
\r
887 RtAudio::StreamOptions *options )
\r
890 unsigned int nDevices = getDeviceCount();
\r
891 if ( nDevices == 0 ) {
\r
892 // This should not happen because a check is made before this function is called.
\r
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
897 if ( device >= nDevices ) {
\r
898 // This should not happen because a check is made before this function is called.
\r
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
903 AudioDeviceID deviceList[ nDevices ];
\r
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
906 kAudioObjectPropertyScopeGlobal,
\r
907 kAudioObjectPropertyElementMaster };
\r
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
909 0, NULL, &dataSize, (void *) &deviceList );
\r
910 if ( result != noErr ) {
\r
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
915 AudioDeviceID id = deviceList[ device ];
\r
917 // Setup for stream mode.
\r
918 bool isInput = false;
\r
919 if ( mode == INPUT ) {
\r
921 property.mScope = kAudioDevicePropertyScopeInput;
\r
924 property.mScope = kAudioDevicePropertyScopeOutput;
\r
926 // Get the stream "configuration".
\r
927 AudioBufferList *bufferList = nil;
\r
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
931 if ( result != noErr || dataSize == 0 ) {
\r
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
933 errorText_ = errorStream_.str();
\r
937 // Allocate the AudioBufferList.
\r
938 bufferList = (AudioBufferList *) malloc( dataSize );
\r
939 if ( bufferList == NULL ) {
\r
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
945 if (result != noErr || dataSize == 0) {
\r
946 free( bufferList );
\r
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
948 errorText_ = errorStream_.str();
\r
952 // Search for one or more streams that contain the desired number of
\r
953 // channels. CoreAudio devices can have an arbitrary number of
\r
954 // streams and each stream can have an arbitrary number of channels.
\r
955 // For each stream, a single buffer of interleaved samples is
\r
956 // provided. RtAudio prefers the use of one stream of interleaved
\r
957 // data or multiple consecutive single-channel streams. However, we
\r
958 // now support multiple consecutive multi-channel streams of
\r
959 // interleaved data as well.
\r
960 UInt32 iStream, offsetCounter = firstChannel;
\r
961 UInt32 nStreams = bufferList->mNumberBuffers;
\r
962 bool monoMode = false;
\r
963 bool foundStream = false;
\r
965 // First check that the device supports the requested number of
\r
967 UInt32 deviceChannels = 0;
\r
968 for ( iStream=0; iStream<nStreams; iStream++ )
\r
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
971 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
972 free( bufferList );
\r
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
974 errorText_ = errorStream_.str();
\r
978 // Look for a single stream meeting our needs.
\r
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
982 if ( streamChannels >= channels + offsetCounter ) {
\r
983 firstStream = iStream;
\r
984 channelOffset = offsetCounter;
\r
985 foundStream = true;
\r
988 if ( streamChannels > offsetCounter ) break;
\r
989 offsetCounter -= streamChannels;
\r
992 // If we didn't find a single stream above, then we should be able
\r
993 // to meet the channel specification with multiple streams.
\r
994 if ( foundStream == false ) {
\r
996 offsetCounter = firstChannel;
\r
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
999 if ( streamChannels > offsetCounter ) break;
\r
1000 offsetCounter -= streamChannels;
\r
1003 firstStream = iStream;
\r
1004 channelOffset = offsetCounter;
\r
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1007 if ( streamChannels > 1 ) monoMode = false;
\r
1008 while ( channelCounter > 0 ) {
\r
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1010 if ( streamChannels > 1 ) monoMode = false;
\r
1011 channelCounter -= streamChannels;
\r
1016 free( bufferList );
\r
1018 // Determine the buffer size.
\r
1019 AudioValueRange bufferRange;
\r
1020 dataSize = sizeof( AudioValueRange );
\r
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1024 if ( result != noErr ) {
\r
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1026 errorText_ = errorStream_.str();
\r
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1034 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1035 // need to make this setting for the master channel.
\r
1036 UInt32 theSize = (UInt32) *bufferSize;
\r
1037 dataSize = sizeof( UInt32 );
\r
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1041 if ( result != noErr ) {
\r
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1043 errorText_ = errorStream_.str();
\r
1047 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1048 // MUST be the same in both directions!
\r
1049 *bufferSize = theSize;
\r
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1052 errorText_ = errorStream_.str();
\r
1056 stream_.bufferSize = *bufferSize;
\r
1057 stream_.nBuffers = 1;
\r
1059 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1062 dataSize = sizeof( hog_pid );
\r
1063 property.mSelector = kAudioDevicePropertyHogMode;
\r
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1065 if ( result != noErr ) {
\r
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1067 errorText_ = errorStream_.str();
\r
1071 if ( hog_pid != getpid() ) {
\r
1072 hog_pid = getpid();
\r
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1074 if ( result != noErr ) {
\r
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1076 errorText_ = errorStream_.str();
\r
1082 // Check and if necessary, change the sample rate for the device.
\r
1083 Float64 nominalRate;
\r
1084 dataSize = sizeof( Float64 );
\r
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1087 if ( result != noErr ) {
\r
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1089 errorText_ = errorStream_.str();
\r
1093 // Only change the sample rate if off by more than 1 Hz.
\r
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1096 // Set a property listener for the sample rate change
\r
1097 Float64 reportedRate = 0.0;
\r
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1100 if ( result != noErr ) {
\r
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1102 errorText_ = errorStream_.str();
\r
1106 nominalRate = (Float64) sampleRate;
\r
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1108 if ( result != noErr ) {
\r
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1111 errorText_ = errorStream_.str();
\r
1115 // Now wait until the reported nominal rate is what we just set.
\r
1116 UInt32 microCounter = 0;
\r
1117 while ( reportedRate != nominalRate ) {
\r
1118 microCounter += 5000;
\r
1119 if ( microCounter > 5000000 ) break;
\r
1123 // Remove the property listener.
\r
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1126 if ( microCounter > 5000000 ) {
\r
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1128 errorText_ = errorStream_.str();
\r
1133 // Now set the stream format for all streams. Also, check the
\r
1134 // physical format of the device and change that if necessary.
\r
1135 AudioStreamBasicDescription description;
\r
1136 dataSize = sizeof( AudioStreamBasicDescription );
\r
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1139 if ( result != noErr ) {
\r
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1141 errorText_ = errorStream_.str();
\r
1145 // Set the sample rate and data format id. However, only make the
\r
1146 // change if the sample rate is not within 1.0 of the desired
\r
1147 // rate and the format is not linear pcm.
\r
1148 bool updateFormat = false;
\r
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1150 description.mSampleRate = (Float64) sampleRate;
\r
1151 updateFormat = true;
\r
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1155 description.mFormatID = kAudioFormatLinearPCM;
\r
1156 updateFormat = true;
\r
1159 if ( updateFormat ) {
\r
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1161 if ( result != noErr ) {
\r
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1163 errorText_ = errorStream_.str();
\r
1168 // Now check the physical format.
\r
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1171 if ( result != noErr ) {
\r
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1173 errorText_ = errorStream_.str();
\r
1177 //std::cout << "Current physical stream format:" << std::endl;
\r
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1184 description.mFormatID = kAudioFormatLinearPCM;
\r
1185 //description.mSampleRate = (Float64) sampleRate;
\r
1186 AudioStreamBasicDescription testDescription = description;
\r
1187 UInt32 formatFlags;
\r
1189 // We'll try higher bit rates first and then work our way down.
\r
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1204 bool setPhysicalFormat = false;
\r
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1206 testDescription = description;
\r
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1208 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1215 if ( result == noErr ) {
\r
1216 setPhysicalFormat = true;
\r
1217 //std::cout << "Updated physical stream format:" << std::endl;
\r
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1226 if ( !setPhysicalFormat ) {
\r
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1228 errorText_ = errorStream_.str();
\r
1231 } // done setting virtual/physical formats.
\r
1233 // Get the stream / device latency.
\r
1235 dataSize = sizeof( UInt32 );
\r
1236 property.mSelector = kAudioDevicePropertyLatency;
\r
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1242 errorText_ = errorStream_.str();
\r
1243 error( RtAudioError::WARNING );
\r
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1248 // always be presented in native-endian format, so we should never
\r
1249 // need to byte swap.
\r
1250 stream_.doByteSwap[mode] = false;
\r
1252 // From the CoreAudio documentation, PCM data must be supplied as
\r
1254 stream_.userFormat = format;
\r
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1257 if ( streamCount == 1 )
\r
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1259 else // multiple streams
\r
1260 stream_.nDeviceChannels[mode] = channels;
\r
1261 stream_.nUserChannels[mode] = channels;
\r
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1264 else stream_.userInterleaved = true;
\r
1265 stream_.deviceInterleaved[mode] = true;
\r
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1268 // Set flags for buffer conversion.
\r
1269 stream_.doConvertBuffer[mode] = false;
\r
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1271 stream_.doConvertBuffer[mode] = true;
\r
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1273 stream_.doConvertBuffer[mode] = true;
\r
1274 if ( streamCount == 1 ) {
\r
1275 if ( stream_.nUserChannels[mode] > 1 &&
\r
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1277 stream_.doConvertBuffer[mode] = true;
\r
1279 else if ( monoMode && stream_.userInterleaved )
\r
1280 stream_.doConvertBuffer[mode] = true;
\r
1282 // Allocate our CoreHandle structure for the stream.
\r
1283 CoreHandle *handle = 0;
\r
1284 if ( stream_.apiHandle == 0 ) {
\r
1286 handle = new CoreHandle;
\r
1288 catch ( std::bad_alloc& ) {
\r
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1297 stream_.apiHandle = (void *) handle;
\r
1300 handle = (CoreHandle *) stream_.apiHandle;
\r
1301 handle->iStream[mode] = firstStream;
\r
1302 handle->nStreams[mode] = streamCount;
\r
1303 handle->id[mode] = id;
\r
1305 // Allocate necessary internal buffers.
\r
1306 unsigned long bufferBytes;
\r
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1311 if ( stream_.userBuffer[mode] == NULL ) {
\r
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1316 // If possible, we will make use of the CoreAudio stream buffers as
\r
1317 // "device buffers". However, we can't do this if using multiple
\r
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1321 bool makeBuffer = true;
\r
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1323 if ( mode == INPUT ) {
\r
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1330 if ( makeBuffer ) {
\r
1331 bufferBytes *= *bufferSize;
\r
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1334 if ( stream_.deviceBuffer == NULL ) {
\r
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1341 stream_.sampleRate = sampleRate;
\r
1342 stream_.device[mode] = device;
\r
1343 stream_.state = STREAM_STOPPED;
\r
1344 stream_.callbackInfo.object = (void *) this;
\r
1346 // Setup the buffer conversion information structure.
\r
1347 if ( stream_.doConvertBuffer[mode] ) {
\r
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1349 else setConvertInfo( mode, channelOffset );
\r
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1353 // Only one callback procedure per device.
\r
1354 stream_.mode = DUPLEX;
\r
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1362 if ( result != noErr ) {
\r
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1364 errorText_ = errorStream_.str();
\r
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1368 stream_.mode = DUPLEX;
\r
1370 stream_.mode = mode;
\r
1373 // Setup the device property listener for over/underload.
\r
1374 property.mSelector = kAudioDeviceProcessorOverload;
\r
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1382 pthread_cond_destroy( &handle->condition );
\r
1384 stream_.apiHandle = 0;
\r
1387 for ( int i=0; i<2; i++ ) {
\r
1388 if ( stream_.userBuffer[i] ) {
\r
1389 free( stream_.userBuffer[i] );
\r
1390 stream_.userBuffer[i] = 0;
\r
1394 if ( stream_.deviceBuffer ) {
\r
1395 free( stream_.deviceBuffer );
\r
1396 stream_.deviceBuffer = 0;
\r
1399 stream_.state = STREAM_CLOSED;
\r
1403 void RtApiCore :: closeStream( void )
\r
1405 if ( stream_.state == STREAM_CLOSED ) {
\r
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1407 error( RtAudioError::WARNING );
\r
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1415 kAudioObjectPropertyScopeGlobal,
\r
1416 kAudioObjectPropertyElementMaster };
\r
1418 property.mSelector = kAudioDeviceProcessorOverload;
\r
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
\r
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1422 error( RtAudioError::WARNING );
\r
1425 if ( stream_.state == STREAM_RUNNING )
\r
1426 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1438 kAudioObjectPropertyScopeGlobal,
\r
1439 kAudioObjectPropertyElementMaster };
\r
1441 property.mSelector = kAudioDeviceProcessorOverload;
\r
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
\r
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1445 error( RtAudioError::WARNING );
\r
1448 if ( stream_.state == STREAM_RUNNING )
\r
1449 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1458 for ( int i=0; i<2; i++ ) {
\r
1459 if ( stream_.userBuffer[i] ) {
\r
1460 free( stream_.userBuffer[i] );
\r
1461 stream_.userBuffer[i] = 0;
\r
1465 if ( stream_.deviceBuffer ) {
\r
1466 free( stream_.deviceBuffer );
\r
1467 stream_.deviceBuffer = 0;
\r
1470 // Destroy pthread condition variable.
\r
1471 pthread_cond_destroy( &handle->condition );
\r
1473 stream_.apiHandle = 0;
\r
1475 stream_.mode = UNINITIALIZED;
\r
1476 stream_.state = STREAM_CLOSED;
\r
1479 void RtApiCore :: startStream( void )
\r
1482 if ( stream_.state == STREAM_RUNNING ) {
\r
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1484 error( RtAudioError::WARNING );
\r
1488 OSStatus result = noErr;
\r
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1493 if ( result != noErr ) {
\r
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1495 errorText_ = errorStream_.str();
\r
1500 if ( stream_.mode == INPUT ||
\r
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1504 if ( result != noErr ) {
\r
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1506 errorText_ = errorStream_.str();
\r
1511 handle->drainCounter = 0;
\r
1512 handle->internalDrain = false;
\r
1513 stream_.state = STREAM_RUNNING;
\r
1516 if ( result == noErr ) return;
\r
1517 error( RtAudioError::SYSTEM_ERROR );
\r
1520 void RtApiCore :: stopStream( void )
\r
1523 if ( stream_.state == STREAM_STOPPED ) {
\r
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1525 error( RtAudioError::WARNING );
\r
1529 OSStatus result = noErr;
\r
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1533 if ( handle->drainCounter == 0 ) {
\r
1534 handle->drainCounter = 2;
\r
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1539 if ( result != noErr ) {
\r
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1541 errorText_ = errorStream_.str();
\r
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1549 if ( result != noErr ) {
\r
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1551 errorText_ = errorStream_.str();
\r
1556 stream_.state = STREAM_STOPPED;
\r
1559 if ( result == noErr ) return;
\r
1560 error( RtAudioError::SYSTEM_ERROR );
\r
1563 void RtApiCore :: abortStream( void )
\r
1566 if ( stream_.state == STREAM_STOPPED ) {
\r
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1568 error( RtAudioError::WARNING );
\r
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1573 handle->drainCounter = 2;
\r
1578 // This function will be called by a spawned thread when the user
\r
1579 // callback function signals that the stream should be stopped or
\r
1580 // aborted. It is better to handle it this way because the
\r
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1582 // function is called.
\r
1583 static void *coreStopStream( void *ptr )
\r
1585 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1586 RtApiCore *object = (RtApiCore *) info->object;
\r
1588 object->stopStream();
\r
1589 pthread_exit( NULL );
\r
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1593 const AudioBufferList *inBufferList,
\r
1594 const AudioBufferList *outBufferList )
\r
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1597 if ( stream_.state == STREAM_CLOSED ) {
\r
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1599 error( RtAudioError::WARNING );
\r
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1606 // Check if we were draining the stream and signal is finished.
\r
1607 if ( handle->drainCounter > 3 ) {
\r
1608 ThreadHandle threadId;
\r
1610 stream_.state = STREAM_STOPPING;
\r
1611 if ( handle->internalDrain == true )
\r
1612 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1613 else // external call to stopStream()
\r
1614 pthread_cond_signal( &handle->condition );
\r
1618 AudioDeviceID outputDevice = handle->id[0];
\r
1620 // Invoke user callback to get fresh output data UNLESS we are
\r
1621 // draining stream or duplex mode AND the input/output devices are
\r
1622 // different AND this function is called for the input device.
\r
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1625 double streamTime = getStreamTime();
\r
1626 RtAudioStreamStatus status = 0;
\r
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1629 handle->xrun[0] = false;
\r
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1632 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1633 handle->xrun[1] = false;
\r
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1637 stream_.bufferSize, streamTime, status, info->userData );
\r
1638 if ( cbReturnValue == 2 ) {
\r
1639 stream_.state = STREAM_STOPPING;
\r
1640 handle->drainCounter = 2;
\r
1644 else if ( cbReturnValue == 1 ) {
\r
1645 handle->drainCounter = 1;
\r
1646 handle->internalDrain = true;
\r
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1654 if ( handle->nStreams[0] == 1 ) {
\r
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1659 else { // fill multiple streams with zeros
\r
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1667 else if ( handle->nStreams[0] == 1 ) {
\r
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1672 else { // copy from user buffer
\r
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1674 stream_.userBuffer[0],
\r
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1678 else { // fill multiple streams
\r
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1680 if ( stream_.doConvertBuffer[0] ) {
\r
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1692 else { // fill multiple multi-channel streams with interleaved data
\r
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1694 Float32 *out, *in;
\r
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1697 UInt32 inChannels = stream_.nUserChannels[0];
\r
1698 if ( stream_.doConvertBuffer[0] ) {
\r
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1700 inChannels = stream_.nDeviceChannels[0];
\r
1703 if ( inInterleaved ) inOffset = 1;
\r
1704 else inOffset = stream_.bufferSize;
\r
1706 channelsLeft = inChannels;
\r
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1713 // Account for possible channel offset in first stream
\r
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1715 streamChannels -= stream_.channelOffset[0];
\r
1716 outJump = stream_.channelOffset[0];
\r
1720 // Account for possible unfilled channels at end of the last stream
\r
1721 if ( streamChannels > channelsLeft ) {
\r
1722 outJump = streamChannels - channelsLeft;
\r
1723 streamChannels = channelsLeft;
\r
1726 // Determine input buffer offsets and skips
\r
1727 if ( inInterleaved ) {
\r
1728 inJump = inChannels;
\r
1729 in += inChannels - channelsLeft;
\r
1733 in += (inChannels - channelsLeft) * inOffset;
\r
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1738 *out++ = in[j*inOffset];
\r
1743 channelsLeft -= streamChannels;
\r
1749 // Don't bother draining input
\r
1750 if ( handle->drainCounter ) {
\r
1751 handle->drainCounter++;
\r
1755 AudioDeviceID inputDevice;
\r
1756 inputDevice = handle->id[1];
\r
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1759 if ( handle->nStreams[1] == 1 ) {
\r
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1761 convertBuffer( stream_.userBuffer[1],
\r
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1763 stream_.convertInfo[1] );
\r
1765 else { // copy to user buffer
\r
1766 memcpy( stream_.userBuffer[1],
\r
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1771 else { // read from multiple streams
\r
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1782 else { // read from multiple multi-channel streams
\r
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1784 Float32 *out, *in;
\r
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1787 UInt32 outChannels = stream_.nUserChannels[1];
\r
1788 if ( stream_.doConvertBuffer[1] ) {
\r
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1790 outChannels = stream_.nDeviceChannels[1];
\r
1793 if ( outInterleaved ) outOffset = 1;
\r
1794 else outOffset = stream_.bufferSize;
\r
1796 channelsLeft = outChannels;
\r
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1803 // Account for possible channel offset in first stream
\r
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1805 streamChannels -= stream_.channelOffset[1];
\r
1806 inJump = stream_.channelOffset[1];
\r
1810 // Account for possible unread channels at end of the last stream
\r
1811 if ( streamChannels > channelsLeft ) {
\r
1812 inJump = streamChannels - channelsLeft;
\r
1813 streamChannels = channelsLeft;
\r
1816 // Determine output buffer offsets and skips
\r
1817 if ( outInterleaved ) {
\r
1818 outJump = outChannels;
\r
1819 out += outChannels - channelsLeft;
\r
1823 out += (outChannels - channelsLeft) * outOffset;
\r
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1828 out[j*outOffset] = *in++;
\r
1833 channelsLeft -= streamChannels;
\r
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1838 convertBuffer( stream_.userBuffer[1],
\r
1839 stream_.deviceBuffer,
\r
1840 stream_.convertInfo[1] );
\r
1846 //MUTEX_UNLOCK( &stream_.mutex );
\r
1848 RtApi::tickStreamTime();
\r
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1856 case kAudioHardwareNotRunningError:
\r
1857 return "kAudioHardwareNotRunningError";
\r
1859 case kAudioHardwareUnspecifiedError:
\r
1860 return "kAudioHardwareUnspecifiedError";
\r
1862 case kAudioHardwareUnknownPropertyError:
\r
1863 return "kAudioHardwareUnknownPropertyError";
\r
1865 case kAudioHardwareBadPropertySizeError:
\r
1866 return "kAudioHardwareBadPropertySizeError";
\r
1868 case kAudioHardwareIllegalOperationError:
\r
1869 return "kAudioHardwareIllegalOperationError";
\r
1871 case kAudioHardwareBadObjectError:
\r
1872 return "kAudioHardwareBadObjectError";
\r
1874 case kAudioHardwareBadDeviceError:
\r
1875 return "kAudioHardwareBadDeviceError";
\r
1877 case kAudioHardwareBadStreamError:
\r
1878 return "kAudioHardwareBadStreamError";
\r
1880 case kAudioHardwareUnsupportedOperationError:
\r
1881 return "kAudioHardwareUnsupportedOperationError";
\r
1883 case kAudioDeviceUnsupportedFormatError:
\r
1884 return "kAudioDeviceUnsupportedFormatError";
\r
1886 case kAudioDevicePermissionsError:
\r
1887 return "kAudioDevicePermissionsError";
\r
1890 return "CoreAudio unknown error";
\r
1894 //******************** End of __MACOSX_CORE__ *********************//
\r
1897 #if defined(__UNIX_JACK__)
\r
1899 // JACK is a low-latency audio server, originally written for the
\r
1900 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1901 // connect a number of different applications to an audio device, as
\r
1902 // well as allowing them to share audio between themselves.
\r
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1905 // have ports connected to the server. The JACK server is typically
\r
1906 // started in a terminal as follows:
\r
1908 // .jackd -d alsa -d hw:0
\r
1910 // or through an interface program such as qjackctl. Many of the
\r
1911 // parameters normally set for a stream are fixed by the JACK server
\r
1912 // and can be specified when the JACK server is started. In
\r
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1918 // frames, and number of buffers = 4. Once the server is running, it
\r
1919 // is not possible to override these values. If the values are not
\r
1920 // specified in the command-line, the JACK server uses default values.
\r
1922 // The JACK server does not have to be running when an instance of
\r
1923 // RtApiJack is created, though the function getDeviceCount() will
\r
1924 // report 0 devices found until JACK has been started. When no
\r
1925 // devices are available (i.e., the JACK server is not running), a
\r
1926 // stream cannot be opened.
\r
1928 #include <jack/jack.h>
\r
1929 #include <unistd.h>
\r
1932 // A structure to hold various information related to the Jack API
\r
1933 // implementation.
\r
1934 struct JackHandle {
\r
1935 jack_client_t *client;
\r
1936 jack_port_t **ports[2];
\r
1937 std::string deviceName[2];
\r
1939 pthread_cond_t condition;
\r
1940 int drainCounter; // Tracks callback counts when draining
\r
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error handler installed via jack_set_error_function() to
// suppress JACK's internal stderr chatter in non-debug builds.
static void jackSilentError( const char * ) {}
\r
1949 RtApiJack :: RtApiJack()
\r
1951 // Nothing to do here.
\r
1952 #if !defined(__RTAUDIO_DEBUG__)
\r
1953 // Turn off Jack's internal error reporting.
\r
1954 jack_set_error_function( &jackSilentError );
\r
1958 RtApiJack :: ~RtApiJack()
\r
1960 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1963 unsigned int RtApiJack :: getDeviceCount( void )
\r
1965 // See if we can become a jack client.
\r
1966 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1967 jack_status_t *status = NULL;
\r
1968 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1969 if ( client == 0 ) return 0;
\r
1971 const char **ports;
\r
1972 std::string port, previousPort;
\r
1973 unsigned int nChannels = 0, nDevices = 0;
\r
1974 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1976 // Parse the port names up to the first colon (:).
\r
1977 size_t iColon = 0;
\r
1979 port = (char *) ports[ nChannels ];
\r
1980 iColon = port.find(":");
\r
1981 if ( iColon != std::string::npos ) {
\r
1982 port = port.substr( 0, iColon + 1 );
\r
1983 if ( port != previousPort ) {
\r
1985 previousPort = port;
\r
1988 } while ( ports[++nChannels] );
\r
1992 jack_client_close( client );
\r
1996 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1998 RtAudio::DeviceInfo info;
\r
1999 info.probed = false;
\r
2001 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
2002 jack_status_t *status = NULL;
\r
2003 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2004 if ( client == 0 ) {
\r
2005 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2006 error( RtAudioError::WARNING );
\r
2010 const char **ports;
\r
2011 std::string port, previousPort;
\r
2012 unsigned int nPorts = 0, nDevices = 0;
\r
2013 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2015 // Parse the port names up to the first colon (:).
\r
2016 size_t iColon = 0;
\r
2018 port = (char *) ports[ nPorts ];
\r
2019 iColon = port.find(":");
\r
2020 if ( iColon != std::string::npos ) {
\r
2021 port = port.substr( 0, iColon );
\r
2022 if ( port != previousPort ) {
\r
2023 if ( nDevices == device ) info.name = port;
\r
2025 previousPort = port;
\r
2028 } while ( ports[++nPorts] );
\r
2032 if ( device >= nDevices ) {
\r
2033 jack_client_close( client );
\r
2034 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2035 error( RtAudioError::INVALID_USE );
\r
2039 // Get the current jack server sample rate.
\r
2040 info.sampleRates.clear();
\r
2042 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2043 info.sampleRates.push_back( info.preferredSampleRate );
\r
2045 // Count the available ports containing the client name as device
\r
2046 // channels. Jack "input ports" equal RtAudio output channels.
\r
2047 unsigned int nChannels = 0;
\r
2048 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2050 while ( ports[ nChannels ] ) nChannels++;
\r
2052 info.outputChannels = nChannels;
\r
2055 // Jack "output ports" equal RtAudio input channels.
\r
2057 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2059 while ( ports[ nChannels ] ) nChannels++;
\r
2061 info.inputChannels = nChannels;
\r
2064 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2065 jack_client_close(client);
\r
2066 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2067 error( RtAudioError::WARNING );
\r
2071 // If device opens for both playback and capture, we determine the channels.
\r
2072 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2073 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2075 // Jack always uses 32-bit floats.
\r
2076 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2078 // Jack doesn't provide default devices so we'll use the first available one.
\r
2079 if ( device == 0 && info.outputChannels > 0 )
\r
2080 info.isDefaultOutput = true;
\r
2081 if ( device == 0 && info.inputChannels > 0 )
\r
2082 info.isDefaultInput = true;
\r
2084 jack_client_close(client);
\r
2085 info.probed = true;
\r
2089 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2091 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2093 RtApiJack *object = (RtApiJack *) info->object;
\r
2094 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2099 // This function will be called by a spawned thread when the Jack
\r
2100 // server signals that it is shutting down. It is necessary to handle
\r
2101 // it this way because the jackShutdown() function must return before
\r
2102 // the jack_deactivate() function (in closeStream()) will return.
\r
2103 static void *jackCloseStream( void *ptr )
\r
2105 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2106 RtApiJack *object = (RtApiJack *) info->object;
\r
2108 object->closeStream();
\r
2110 pthread_exit( NULL );
\r
2112 static void jackShutdown( void *infoPointer )
\r
2114 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2115 RtApiJack *object = (RtApiJack *) info->object;
\r
2117 // Check current stream state. If stopped, then we'll assume this
\r
2118 // was called as a result of a call to RtApiJack::stopStream (the
\r
2119 // deactivation of a client handle causes this function to be called).
\r
2120 // If not, we'll assume the Jack server is shutting down or some
\r
2121 // other problem occurred and we should close the stream.
\r
2122 if ( object->isStreamRunning() == false ) return;
\r
2124 ThreadHandle threadId;
\r
2125 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2126 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2129 static int jackXrun( void *infoPointer )
\r
2131 JackHandle *handle = (JackHandle *) infoPointer;
\r
2133 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2134 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2139 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2140 unsigned int firstChannel, unsigned int sampleRate,
\r
2141 RtAudioFormat format, unsigned int *bufferSize,
\r
2142 RtAudio::StreamOptions *options )
\r
2144 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2146 // Look for jack server and try to become a client (only do once per stream).
\r
2147 jack_client_t *client = 0;
\r
2148 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2149 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2150 jack_status_t *status = NULL;
\r
2151 if ( options && !options->streamName.empty() )
\r
2152 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2154 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2155 if ( client == 0 ) {
\r
2156 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2157 error( RtAudioError::WARNING );
\r
2162 // The handle must have been created on an earlier pass.
\r
2163 client = handle->client;
\r
2166 const char **ports;
\r
2167 std::string port, previousPort, deviceName;
\r
2168 unsigned int nPorts = 0, nDevices = 0;
\r
2169 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2171 // Parse the port names up to the first colon (:).
\r
2172 size_t iColon = 0;
\r
2174 port = (char *) ports[ nPorts ];
\r
2175 iColon = port.find(":");
\r
2176 if ( iColon != std::string::npos ) {
\r
2177 port = port.substr( 0, iColon );
\r
2178 if ( port != previousPort ) {
\r
2179 if ( nDevices == device ) deviceName = port;
\r
2181 previousPort = port;
\r
2184 } while ( ports[++nPorts] );
\r
2188 if ( device >= nDevices ) {
\r
2189 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2193 // Count the available ports containing the client name as device
\r
2194 // channels. Jack "input ports" equal RtAudio output channels.
\r
2195 unsigned int nChannels = 0;
\r
2196 unsigned long flag = JackPortIsInput;
\r
2197 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2198 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2200 while ( ports[ nChannels ] ) nChannels++;
\r
2204 // Compare the jack ports for specified client to the requested number of channels.
\r
2205 if ( nChannels < (channels + firstChannel) ) {
\r
2206 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2207 errorText_ = errorStream_.str();
\r
2211 // Check the jack server sample rate.
\r
2212 unsigned int jackRate = jack_get_sample_rate( client );
\r
2213 if ( sampleRate != jackRate ) {
\r
2214 jack_client_close( client );
\r
2215 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2216 errorText_ = errorStream_.str();
\r
2219 stream_.sampleRate = jackRate;
\r
2221 // Get the latency of the JACK port.
\r
2222 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2223 if ( ports[ firstChannel ] ) {
\r
2224 // Added by Ge Wang
\r
2225 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2226 // the range (usually the min and max are equal)
\r
2227 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2228 // get the latency range
\r
2229 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2230 // be optimistic, use the min!
\r
2231 stream_.latency[mode] = latrange.min;
\r
2232 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2236 // The jack server always uses 32-bit floating-point data.
\r
2237 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2238 stream_.userFormat = format;
\r
2240 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2241 else stream_.userInterleaved = true;
\r
2243 // Jack always uses non-interleaved buffers.
\r
2244 stream_.deviceInterleaved[mode] = false;
\r
2246 // Jack always provides host byte-ordered data.
\r
2247 stream_.doByteSwap[mode] = false;
\r
2249 // Get the buffer size. The buffer size and number of buffers
\r
2250 // (periods) is set when the jack server is started.
\r
2251 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2252 *bufferSize = stream_.bufferSize;
\r
2254 stream_.nDeviceChannels[mode] = channels;
\r
2255 stream_.nUserChannels[mode] = channels;
\r
2257 // Set flags for buffer conversion.
\r
2258 stream_.doConvertBuffer[mode] = false;
\r
2259 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2260 stream_.doConvertBuffer[mode] = true;
\r
2261 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2262 stream_.nUserChannels[mode] > 1 )
\r
2263 stream_.doConvertBuffer[mode] = true;
\r
2265 // Allocate our JackHandle structure for the stream.
\r
2266 if ( handle == 0 ) {
\r
2268 handle = new JackHandle;
\r
2270 catch ( std::bad_alloc& ) {
\r
2271 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2275 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2276 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2279 stream_.apiHandle = (void *) handle;
\r
2280 handle->client = client;
\r
2282 handle->deviceName[mode] = deviceName;
\r
2284 // Allocate necessary internal buffers.
\r
2285 unsigned long bufferBytes;
\r
2286 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2287 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2288 if ( stream_.userBuffer[mode] == NULL ) {
\r
2289 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2293 if ( stream_.doConvertBuffer[mode] ) {
\r
2295 bool makeBuffer = true;
\r
2296 if ( mode == OUTPUT )
\r
2297 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2298 else { // mode == INPUT
\r
2299 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2300 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2301 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2302 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2306 if ( makeBuffer ) {
\r
2307 bufferBytes *= *bufferSize;
\r
2308 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2309 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2310 if ( stream_.deviceBuffer == NULL ) {
\r
2311 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2317 // Allocate memory for the Jack ports (channels) identifiers.
\r
2318 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2319 if ( handle->ports[mode] == NULL ) {
\r
2320 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2324 stream_.device[mode] = device;
\r
2325 stream_.channelOffset[mode] = firstChannel;
\r
2326 stream_.state = STREAM_STOPPED;
\r
2327 stream_.callbackInfo.object = (void *) this;
\r
2329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2330 // We had already set up the stream for output.
\r
2331 stream_.mode = DUPLEX;
\r
2333 stream_.mode = mode;
\r
2334 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2335 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2336 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2339 // Register our ports.
\r
2341 if ( mode == OUTPUT ) {
\r
2342 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2343 snprintf( label, 64, "outport %d", i );
\r
2344 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2345 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2349 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2350 snprintf( label, 64, "inport %d", i );
\r
2351 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2352 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2356 // Setup the buffer conversion information structure. We don't use
\r
2357 // buffers to do channel offsets, so we override that parameter
\r
2359 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2365 pthread_cond_destroy( &handle->condition );
\r
2366 jack_client_close( handle->client );
\r
2368 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2369 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2372 stream_.apiHandle = 0;
\r
2375 for ( int i=0; i<2; i++ ) {
\r
2376 if ( stream_.userBuffer[i] ) {
\r
2377 free( stream_.userBuffer[i] );
\r
2378 stream_.userBuffer[i] = 0;
\r
2382 if ( stream_.deviceBuffer ) {
\r
2383 free( stream_.deviceBuffer );
\r
2384 stream_.deviceBuffer = 0;
\r
2390 void RtApiJack :: closeStream( void )
\r
2392 if ( stream_.state == STREAM_CLOSED ) {
\r
2393 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2394 error( RtAudioError::WARNING );
\r
2398 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2401 if ( stream_.state == STREAM_RUNNING )
\r
2402 jack_deactivate( handle->client );
\r
2404 jack_client_close( handle->client );
\r
2408 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2409 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2410 pthread_cond_destroy( &handle->condition );
\r
2412 stream_.apiHandle = 0;
\r
2415 for ( int i=0; i<2; i++ ) {
\r
2416 if ( stream_.userBuffer[i] ) {
\r
2417 free( stream_.userBuffer[i] );
\r
2418 stream_.userBuffer[i] = 0;
\r
2422 if ( stream_.deviceBuffer ) {
\r
2423 free( stream_.deviceBuffer );
\r
2424 stream_.deviceBuffer = 0;
\r
2427 stream_.mode = UNINITIALIZED;
\r
2428 stream_.state = STREAM_CLOSED;
\r
2431 void RtApiJack :: startStream( void )
\r
2434 if ( stream_.state == STREAM_RUNNING ) {
\r
2435 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2436 error( RtAudioError::WARNING );
\r
2440 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2441 int result = jack_activate( handle->client );
\r
2443 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2447 const char **ports;
\r
2449 // Get the list of available ports.
\r
2450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2452 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2453 if ( ports == NULL) {
\r
2454 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2458 // Now make the port connections. Since RtAudio wasn't designed to
\r
2459 // allow the user to select particular channels of a device, we'll
\r
2460 // just open the first "nChannels" ports with offset.
\r
2461 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2463 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2464 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2467 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2474 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2476 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2477 if ( ports == NULL) {
\r
2478 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2482 // Now make the port connections. See note above.
\r
2483 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2485 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2486 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2489 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2496 handle->drainCounter = 0;
\r
2497 handle->internalDrain = false;
\r
2498 stream_.state = STREAM_RUNNING;
\r
2501 if ( result == 0 ) return;
\r
2502 error( RtAudioError::SYSTEM_ERROR );
\r
2505 void RtApiJack :: stopStream( void )
\r
2508 if ( stream_.state == STREAM_STOPPED ) {
\r
2509 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2510 error( RtAudioError::WARNING );
\r
2514 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2515 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2517 if ( handle->drainCounter == 0 ) {
\r
2518 handle->drainCounter = 2;
\r
2519 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2523 jack_deactivate( handle->client );
\r
2524 stream_.state = STREAM_STOPPED;
\r
2527 void RtApiJack :: abortStream( void )
\r
2530 if ( stream_.state == STREAM_STOPPED ) {
\r
2531 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2532 error( RtAudioError::WARNING );
\r
2536 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2537 handle->drainCounter = 2;
\r
2542 // This function will be called by a spawned thread when the user
\r
2543 // callback function signals that the stream should be stopped or
\r
2544 // aborted. It is necessary to handle it this way because the
\r
2545 // callbackEvent() function must return before the jack_deactivate()
\r
2546 // function will return.
\r
2547 static void *jackStopStream( void *ptr )
\r
2549 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2550 RtApiJack *object = (RtApiJack *) info->object;
\r
2552 object->stopStream();
\r
2553 pthread_exit( NULL );
\r
2556 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2558 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2559 if ( stream_.state == STREAM_CLOSED ) {
\r
2560 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2561 error( RtAudioError::WARNING );
\r
2564 if ( stream_.bufferSize != nframes ) {
\r
2565 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2566 error( RtAudioError::WARNING );
\r
2570 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2571 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2573 // Check if we were draining the stream and signal is finished.
\r
2574 if ( handle->drainCounter > 3 ) {
\r
2575 ThreadHandle threadId;
\r
2577 stream_.state = STREAM_STOPPING;
\r
2578 if ( handle->internalDrain == true )
\r
2579 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2581 pthread_cond_signal( &handle->condition );
\r
2585 // Invoke user callback first, to get fresh output data.
\r
2586 if ( handle->drainCounter == 0 ) {
\r
2587 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2588 double streamTime = getStreamTime();
\r
2589 RtAudioStreamStatus status = 0;
\r
2590 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2591 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2592 handle->xrun[0] = false;
\r
2594 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2595 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2596 handle->xrun[1] = false;
\r
2598 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2599 stream_.bufferSize, streamTime, status, info->userData );
\r
2600 if ( cbReturnValue == 2 ) {
\r
2601 stream_.state = STREAM_STOPPING;
\r
2602 handle->drainCounter = 2;
\r
2604 pthread_create( &id, NULL, jackStopStream, info );
\r
2607 else if ( cbReturnValue == 1 ) {
\r
2608 handle->drainCounter = 1;
\r
2609 handle->internalDrain = true;
\r
2613 jack_default_audio_sample_t *jackbuffer;
\r
2614 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2615 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2617 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2619 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2620 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2621 memset( jackbuffer, 0, bufferBytes );
\r
2625 else if ( stream_.doConvertBuffer[0] ) {
\r
2627 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2629 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2630 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2631 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2634 else { // no buffer conversion
\r
2635 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2636 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2637 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2642 // Don't bother draining input
\r
2643 if ( handle->drainCounter ) {
\r
2644 handle->drainCounter++;
\r
2648 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2650 if ( stream_.doConvertBuffer[1] ) {
\r
2651 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2652 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2653 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2655 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2657 else { // no buffer conversion
\r
2658 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2659 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2660 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2666 RtApi::tickStreamTime();
\r
2669 //******************** End of __UNIX_JACK__ *********************//
\r
2672 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
// The ASIO API is designed around a callback scheme, so this
// implementation is similar to that used for OS-X CoreAudio and Linux
// Jack.  The primary constraint with ASIO is that it only allows
// access to a single driver at a time.  Thus, it is not possible to
// have more than one simultaneous RtAudio stream.
//
// This implementation also requires a number of external ASIO files
// and a few global variables.  The ASIO callback scheme does not
// allow for the passing of user data, so we must create a global
// pointer to our callbackInfo structure.
//
// On unix systems, we make use of a pthread condition variable.
// Since there is no equivalent in Windows, I hacked something based
// on information found in
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
#include "asiosys.h"
#include "asio.h"
#include "iasiothiscallresolver.h"
#include "asiodrivers.h"
#include <cmath>
\r
2696 static AsioDrivers drivers;
\r
2697 static ASIOCallbacks asioCallbacks;
\r
2698 static ASIODriverInfo driverInfo;
\r
2699 static CallbackInfo *asioCallbackInfo;
\r
2700 static bool asioXRun;
\r
2702 struct AsioHandle {
\r
2703 int drainCounter; // Tracks callback counts when draining
\r
2704 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2705 ASIOBufferInfo *bufferInfos;
\r
2709 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2712 // Function declarations (definitions at end of section)
\r
2713 static const char* getAsioErrorString( ASIOError result );
\r
2714 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2715 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2717 RtApiAsio :: RtApiAsio()
\r
2719 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2720 // CoInitialize beforehand, but it must be for appartment threading
\r
2721 // (in which case, CoInitilialize will return S_FALSE here).
\r
2722 coInitialized_ = false;
\r
2723 HRESULT hr = CoInitialize( NULL );
\r
2724 if ( FAILED(hr) ) {
\r
2725 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2726 error( RtAudioError::WARNING );
\r
2728 coInitialized_ = true;
\r
2730 drivers.removeCurrentDriver();
\r
2731 driverInfo.asioVersion = 2;
\r
2733 // See note in DirectSound implementation about GetDesktopWindow().
\r
2734 driverInfo.sysRef = GetForegroundWindow();
\r
2737 RtApiAsio :: ~RtApiAsio()
\r
2739 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2740 if ( coInitialized_ ) CoUninitialize();
\r
2743 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2745 return (unsigned int) drivers.asioGetNumDev();
\r
2748 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2750 RtAudio::DeviceInfo info;
\r
2751 info.probed = false;
\r
2754 unsigned int nDevices = getDeviceCount();
\r
2755 if ( nDevices == 0 ) {
\r
2756 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2757 error( RtAudioError::INVALID_USE );
\r
2761 if ( device >= nDevices ) {
\r
2762 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2763 error( RtAudioError::INVALID_USE );
\r
2767 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2768 if ( stream_.state != STREAM_CLOSED ) {
\r
2769 if ( device >= devices_.size() ) {
\r
2770 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2771 error( RtAudioError::WARNING );
\r
2774 return devices_[ device ];
\r
2777 char driverName[32];
\r
2778 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2779 if ( result != ASE_OK ) {
\r
2780 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2781 errorText_ = errorStream_.str();
\r
2782 error( RtAudioError::WARNING );
\r
2786 info.name = driverName;
\r
2788 if ( !drivers.loadDriver( driverName ) ) {
\r
2789 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2790 errorText_ = errorStream_.str();
\r
2791 error( RtAudioError::WARNING );
\r
2795 result = ASIOInit( &driverInfo );
\r
2796 if ( result != ASE_OK ) {
\r
2797 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2798 errorText_ = errorStream_.str();
\r
2799 error( RtAudioError::WARNING );
\r
2803 // Determine the device channel information.
\r
2804 long inputChannels, outputChannels;
\r
2805 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2806 if ( result != ASE_OK ) {
\r
2807 drivers.removeCurrentDriver();
\r
2808 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2809 errorText_ = errorStream_.str();
\r
2810 error( RtAudioError::WARNING );
\r
2814 info.outputChannels = outputChannels;
\r
2815 info.inputChannels = inputChannels;
\r
2816 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2817 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2819 // Determine the supported sample rates.
\r
2820 info.sampleRates.clear();
\r
2821 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2822 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2823 if ( result == ASE_OK ) {
\r
2824 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2826 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2827 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2831 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2832 ASIOChannelInfo channelInfo;
\r
2833 channelInfo.channel = 0;
\r
2834 channelInfo.isInput = true;
\r
2835 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2836 result = ASIOGetChannelInfo( &channelInfo );
\r
2837 if ( result != ASE_OK ) {
\r
2838 drivers.removeCurrentDriver();
\r
2839 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2840 errorText_ = errorStream_.str();
\r
2841 error( RtAudioError::WARNING );
\r
2845 info.nativeFormats = 0;
\r
2846 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2847 info.nativeFormats |= RTAUDIO_SINT16;
\r
2848 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2849 info.nativeFormats |= RTAUDIO_SINT32;
\r
2850 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2851 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2852 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2853 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2854 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2855 info.nativeFormats |= RTAUDIO_SINT24;
\r
2857 if ( info.outputChannels > 0 )
\r
2858 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2859 if ( info.inputChannels > 0 )
\r
2860 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2862 info.probed = true;
\r
2863 drivers.removeCurrentDriver();
\r
2867 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2869 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2870 object->callbackEvent( index );
\r
2873 void RtApiAsio :: saveDeviceInfo( void )
\r
2877 unsigned int nDevices = getDeviceCount();
\r
2878 devices_.resize( nDevices );
\r
2879 for ( unsigned int i=0; i<nDevices; i++ )
\r
2880 devices_[i] = getDeviceInfo( i );
\r
2883 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2884 unsigned int firstChannel, unsigned int sampleRate,
\r
2885 RtAudioFormat format, unsigned int *bufferSize,
\r
2886 RtAudio::StreamOptions *options )
\r
2887 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2889 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2891 // For ASIO, a duplex stream MUST use the same driver.
\r
2892 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2893 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2897 char driverName[32];
\r
2898 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2899 if ( result != ASE_OK ) {
\r
2900 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2901 errorText_ = errorStream_.str();
\r
2905 // Only load the driver once for duplex stream.
\r
2906 if ( !isDuplexInput ) {
\r
2907 // The getDeviceInfo() function will not work when a stream is open
\r
2908 // because ASIO does not allow multiple devices to run at the same
\r
2909 // time. Thus, we'll probe the system before opening a stream and
\r
2910 // save the results for use by getDeviceInfo().
\r
2911 this->saveDeviceInfo();
\r
2913 if ( !drivers.loadDriver( driverName ) ) {
\r
2914 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2915 errorText_ = errorStream_.str();
\r
2919 result = ASIOInit( &driverInfo );
\r
2920 if ( result != ASE_OK ) {
\r
2921 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2922 errorText_ = errorStream_.str();
\r
2927 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2928 bool buffersAllocated = false;
\r
2929 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2930 unsigned int nChannels;
\r
2933 // Check the device channel count.
\r
2934 long inputChannels, outputChannels;
\r
2935 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2936 if ( result != ASE_OK ) {
\r
2937 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2938 errorText_ = errorStream_.str();
\r
2942 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2943 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2944 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2945 errorText_ = errorStream_.str();
\r
2948 stream_.nDeviceChannels[mode] = channels;
\r
2949 stream_.nUserChannels[mode] = channels;
\r
2950 stream_.channelOffset[mode] = firstChannel;
\r
2952 // Verify the sample rate is supported.
\r
2953 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2954 if ( result != ASE_OK ) {
\r
2955 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2956 errorText_ = errorStream_.str();
\r
2960 // Get the current sample rate
\r
2961 ASIOSampleRate currentRate;
\r
2962 result = ASIOGetSampleRate( ¤tRate );
\r
2963 if ( result != ASE_OK ) {
\r
2964 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2965 errorText_ = errorStream_.str();
\r
2969 // Set the sample rate only if necessary
\r
2970 if ( currentRate != sampleRate ) {
\r
2971 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2972 if ( result != ASE_OK ) {
\r
2973 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2974 errorText_ = errorStream_.str();
\r
2979 // Determine the driver data type.
\r
2980 ASIOChannelInfo channelInfo;
\r
2981 channelInfo.channel = 0;
\r
2982 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2983 else channelInfo.isInput = true;
\r
2984 result = ASIOGetChannelInfo( &channelInfo );
\r
2985 if ( result != ASE_OK ) {
\r
2986 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2987 errorText_ = errorStream_.str();
\r
2991 // Assuming WINDOWS host is always little-endian.
\r
2992 stream_.doByteSwap[mode] = false;
\r
2993 stream_.userFormat = format;
\r
2994 stream_.deviceFormat[mode] = 0;
\r
2995 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2996 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2997 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2999 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
3000 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
3001 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
3003 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3004 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3005 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3007 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3008 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3009 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3011 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3012 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3013 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3016 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3017 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3018 errorText_ = errorStream_.str();
\r
3022 // Set the buffer size. For a duplex stream, this will end up
\r
3023 // setting the buffer size based on the input constraints, which
\r
3025 long minSize, maxSize, preferSize, granularity;
\r
3026 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3027 if ( result != ASE_OK ) {
\r
3028 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3029 errorText_ = errorStream_.str();
\r
3033 if ( isDuplexInput ) {
\r
3034 // When this is the duplex input (output was opened before), then we have to use the same
\r
3035 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3036 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3037 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3038 // to the "bufferSize" param as usual to set up processing buffers.
\r
3040 *bufferSize = stream_.bufferSize;
\r
3043 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3044 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3045 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3046 else if ( granularity == -1 ) {
\r
3047 // Make sure bufferSize is a power of two.
\r
3048 int log2_of_min_size = 0;
\r
3049 int log2_of_max_size = 0;
\r
3051 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3052 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3053 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3056 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3057 int min_delta_num = log2_of_min_size;
\r
3059 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3060 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3061 if (current_delta < min_delta) {
\r
3062 min_delta = current_delta;
\r
3063 min_delta_num = i;
\r
3067 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3068 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3069 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3071 else if ( granularity != 0 ) {
\r
3072 // Set to an even multiple of granularity, rounding up.
\r
3073 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3078 // we don't use it anymore, see above!
\r
3079 // Just left it here for the case...
\r
3080 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3081 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3086 stream_.bufferSize = *bufferSize;
\r
3087 stream_.nBuffers = 2;
\r
3089 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3090 else stream_.userInterleaved = true;
\r
3092 // ASIO always uses non-interleaved buffers.
\r
3093 stream_.deviceInterleaved[mode] = false;
\r
3095 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3096 if ( handle == 0 ) {
\r
3098 handle = new AsioHandle;
\r
3100 catch ( std::bad_alloc& ) {
\r
3101 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3104 handle->bufferInfos = 0;
\r
3106 // Create a manual-reset event.
\r
3107 handle->condition = CreateEvent( NULL, // no security
\r
3108 TRUE, // manual-reset
\r
3109 FALSE, // non-signaled initially
\r
3110 NULL ); // unnamed
\r
3111 stream_.apiHandle = (void *) handle;
\r
3114 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3115 // and output separately, we'll have to dispose of previously
\r
3116 // created output buffers for a duplex stream.
\r
3117 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3118 ASIODisposeBuffers();
\r
3119 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3122 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3124 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3125 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3126 if ( handle->bufferInfos == NULL ) {
\r
3127 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3128 errorText_ = errorStream_.str();
\r
3132 ASIOBufferInfo *infos;
\r
3133 infos = handle->bufferInfos;
\r
3134 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3135 infos->isInput = ASIOFalse;
\r
3136 infos->channelNum = i + stream_.channelOffset[0];
\r
3137 infos->buffers[0] = infos->buffers[1] = 0;
\r
3139 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3140 infos->isInput = ASIOTrue;
\r
3141 infos->channelNum = i + stream_.channelOffset[1];
\r
3142 infos->buffers[0] = infos->buffers[1] = 0;
\r
3145 // prepare for callbacks
\r
3146 stream_.sampleRate = sampleRate;
\r
3147 stream_.device[mode] = device;
\r
3148 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3150 // store this class instance before registering callbacks, that are going to use it
\r
3151 asioCallbackInfo = &stream_.callbackInfo;
\r
3152 stream_.callbackInfo.object = (void *) this;
\r
3154 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3155 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3156 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3157 asioCallbacks.asioMessage = &asioMessages;
\r
3158 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3159 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3160 if ( result != ASE_OK ) {
\r
3161 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3162 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3163 // in that case, let's be naïve and try that instead
\r
3164 *bufferSize = preferSize;
\r
3165 stream_.bufferSize = *bufferSize;
\r
3166 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3169 if ( result != ASE_OK ) {
\r
3170 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3171 errorText_ = errorStream_.str();
\r
3174 buffersAllocated = true;
\r
3175 stream_.state = STREAM_STOPPED;
\r
3177 // Set flags for buffer conversion.
\r
3178 stream_.doConvertBuffer[mode] = false;
\r
3179 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3180 stream_.doConvertBuffer[mode] = true;
\r
3181 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3182 stream_.nUserChannels[mode] > 1 )
\r
3183 stream_.doConvertBuffer[mode] = true;
\r
3185 // Allocate necessary internal buffers
\r
3186 unsigned long bufferBytes;
\r
3187 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3188 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3189 if ( stream_.userBuffer[mode] == NULL ) {
\r
3190 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3194 if ( stream_.doConvertBuffer[mode] ) {
\r
3196 bool makeBuffer = true;
\r
3197 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3198 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3199 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3200 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3203 if ( makeBuffer ) {
\r
3204 bufferBytes *= *bufferSize;
\r
3205 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3206 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3207 if ( stream_.deviceBuffer == NULL ) {
\r
3208 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3214 // Determine device latencies
\r
3215 long inputLatency, outputLatency;
\r
3216 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3217 if ( result != ASE_OK ) {
\r
3218 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3219 errorText_ = errorStream_.str();
\r
3220 error( RtAudioError::WARNING); // warn but don't fail
\r
3223 stream_.latency[0] = outputLatency;
\r
3224 stream_.latency[1] = inputLatency;
\r
3227 // Setup the buffer conversion information structure. We don't use
\r
3228 // buffers to do channel offsets, so we override that parameter
\r
3230 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3235 if ( !isDuplexInput ) {
\r
3236 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3237 // So we clean up for single channel only
\r
3239 if ( buffersAllocated )
\r
3240 ASIODisposeBuffers();
\r
3242 drivers.removeCurrentDriver();
\r
3245 CloseHandle( handle->condition );
\r
3246 if ( handle->bufferInfos )
\r
3247 free( handle->bufferInfos );
\r
3250 stream_.apiHandle = 0;
\r
3254 if ( stream_.userBuffer[mode] ) {
\r
3255 free( stream_.userBuffer[mode] );
\r
3256 stream_.userBuffer[mode] = 0;
\r
3259 if ( stream_.deviceBuffer ) {
\r
3260 free( stream_.deviceBuffer );
\r
3261 stream_.deviceBuffer = 0;
\r
3266 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3268 void RtApiAsio :: closeStream()
\r
3270 if ( stream_.state == STREAM_CLOSED ) {
\r
3271 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3272 error( RtAudioError::WARNING );
\r
3276 if ( stream_.state == STREAM_RUNNING ) {
\r
3277 stream_.state = STREAM_STOPPED;
\r
3280 ASIODisposeBuffers();
\r
3281 drivers.removeCurrentDriver();
\r
3283 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3285 CloseHandle( handle->condition );
\r
3286 if ( handle->bufferInfos )
\r
3287 free( handle->bufferInfos );
\r
3289 stream_.apiHandle = 0;
\r
3292 for ( int i=0; i<2; i++ ) {
\r
3293 if ( stream_.userBuffer[i] ) {
\r
3294 free( stream_.userBuffer[i] );
\r
3295 stream_.userBuffer[i] = 0;
\r
3299 if ( stream_.deviceBuffer ) {
\r
3300 free( stream_.deviceBuffer );
\r
3301 stream_.deviceBuffer = 0;
\r
3304 stream_.mode = UNINITIALIZED;
\r
3305 stream_.state = STREAM_CLOSED;
\r
// Set when a stop has been initiated from the callback thread; reset
// by startStream().
bool stopThreadCalled = false;
\r
3310 void RtApiAsio :: startStream()
\r
3313 if ( stream_.state == STREAM_RUNNING ) {
\r
3314 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3315 error( RtAudioError::WARNING );
\r
3319 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3320 ASIOError result = ASIOStart();
\r
3321 if ( result != ASE_OK ) {
\r
3322 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3323 errorText_ = errorStream_.str();
\r
3327 handle->drainCounter = 0;
\r
3328 handle->internalDrain = false;
\r
3329 ResetEvent( handle->condition );
\r
3330 stream_.state = STREAM_RUNNING;
\r
3334 stopThreadCalled = false;
\r
3336 if ( result == ASE_OK ) return;
\r
3337 error( RtAudioError::SYSTEM_ERROR );
\r
3340 void RtApiAsio :: stopStream()
\r
3343 if ( stream_.state == STREAM_STOPPED ) {
\r
3344 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3345 error( RtAudioError::WARNING );
\r
3349 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3350 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3351 if ( handle->drainCounter == 0 ) {
\r
3352 handle->drainCounter = 2;
\r
3353 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3357 stream_.state = STREAM_STOPPED;
\r
3359 ASIOError result = ASIOStop();
\r
3360 if ( result != ASE_OK ) {
\r
3361 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3362 errorText_ = errorStream_.str();
\r
3365 if ( result == ASE_OK ) return;
\r
3366 error( RtAudioError::SYSTEM_ERROR );
\r
3369 void RtApiAsio :: abortStream()
\r
3372 if ( stream_.state == STREAM_STOPPED ) {
\r
3373 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3374 error( RtAudioError::WARNING );
\r
3378 // The following lines were commented-out because some behavior was
\r
3379 // noted where the device buffers need to be zeroed to avoid
\r
3380 // continuing sound, even when the device buffers are completely
\r
3381 // disposed. So now, calling abort is the same as calling stop.
\r
3382 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3383 // handle->drainCounter = 2;
\r
3387 // This function will be called by a spawned thread when the user
\r
3388 // callback function signals that the stream should be stopped or
\r
3389 // aborted. It is necessary to handle it this way because the
\r
3390 // callbackEvent() function must return before the ASIOStop()
\r
3391 // function will return.
\r
3392 static unsigned __stdcall asioStopStream( void *ptr )
\r
3394 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3395 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3397 object->stopStream();
\r
3398 _endthreadex( 0 );
\r
3402 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3404 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3405 if ( stream_.state == STREAM_CLOSED ) {
\r
3406 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3407 error( RtAudioError::WARNING );
\r
3411 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3412 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3414 // Check if we were draining the stream and signal if finished.
\r
3415 if ( handle->drainCounter > 3 ) {
\r
3417 stream_.state = STREAM_STOPPING;
\r
3418 if ( handle->internalDrain == false )
\r
3419 SetEvent( handle->condition );
\r
3420 else { // spawn a thread to stop the stream
\r
3421 unsigned threadId;
\r
3422 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3423 &stream_.callbackInfo, 0, &threadId );
\r
3428 // Invoke user callback to get fresh output data UNLESS we are
\r
3429 // draining stream.
\r
3430 if ( handle->drainCounter == 0 ) {
\r
3431 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3432 double streamTime = getStreamTime();
\r
3433 RtAudioStreamStatus status = 0;
\r
3434 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3435 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3438 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3439 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3442 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3443 stream_.bufferSize, streamTime, status, info->userData );
\r
3444 if ( cbReturnValue == 2 ) {
\r
3445 stream_.state = STREAM_STOPPING;
\r
3446 handle->drainCounter = 2;
\r
3447 unsigned threadId;
\r
3448 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3449 &stream_.callbackInfo, 0, &threadId );
\r
3452 else if ( cbReturnValue == 1 ) {
\r
3453 handle->drainCounter = 1;
\r
3454 handle->internalDrain = true;
\r
3458 unsigned int nChannels, bufferBytes, i, j;
\r
3459 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3460 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3462 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3464 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3466 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3467 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3468 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3472 else if ( stream_.doConvertBuffer[0] ) {
\r
3474 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3475 if ( stream_.doByteSwap[0] )
\r
3476 byteSwapBuffer( stream_.deviceBuffer,
\r
3477 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3478 stream_.deviceFormat[0] );
\r
3480 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3481 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3482 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3483 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3489 if ( stream_.doByteSwap[0] )
\r
3490 byteSwapBuffer( stream_.userBuffer[0],
\r
3491 stream_.bufferSize * stream_.nUserChannels[0],
\r
3492 stream_.userFormat );
\r
3494 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3495 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3496 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3497 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3503 // Don't bother draining input
\r
3504 if ( handle->drainCounter ) {
\r
3505 handle->drainCounter++;
\r
3509 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3511 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3513 if (stream_.doConvertBuffer[1]) {
\r
3515 // Always interleave ASIO input data.
\r
3516 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3517 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3518 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3519 handle->bufferInfos[i].buffers[bufferIndex],
\r
3523 if ( stream_.doByteSwap[1] )
\r
3524 byteSwapBuffer( stream_.deviceBuffer,
\r
3525 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3526 stream_.deviceFormat[1] );
\r
3527 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3531 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3532 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3533 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3534 handle->bufferInfos[i].buffers[bufferIndex],
\r
3539 if ( stream_.doByteSwap[1] )
\r
3540 byteSwapBuffer( stream_.userBuffer[1],
\r
3541 stream_.bufferSize * stream_.nUserChannels[1],
\r
3542 stream_.userFormat );
\r
3547 // The following call was suggested by Malte Clasen. While the API
\r
3548 // documentation indicates it should not be required, some device
\r
3549 // drivers apparently do not function correctly without it.
\r
3550 ASIOOutputReady();
\r
3552 RtApi::tickStreamTime();
\r
3556 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3558 // The ASIO documentation says that this usually only happens during
\r
3559 // external sync. Audio processing is not stopped by the driver,
\r
3560 // actual sample rate might not have even changed, maybe only the
\r
3561 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3564 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3566 object->stopStream();
\r
3568 catch ( RtAudioError &exception ) {
\r
3569 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3573 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3576 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3580 switch( selector ) {
\r
3581 case kAsioSelectorSupported:
\r
3582 if ( value == kAsioResetRequest
\r
3583 || value == kAsioEngineVersion
\r
3584 || value == kAsioResyncRequest
\r
3585 || value == kAsioLatenciesChanged
\r
3586 // The following three were added for ASIO 2.0, you don't
\r
3587 // necessarily have to support them.
\r
3588 || value == kAsioSupportsTimeInfo
\r
3589 || value == kAsioSupportsTimeCode
\r
3590 || value == kAsioSupportsInputMonitor)
\r
3593 case kAsioResetRequest:
\r
3594 // Defer the task and perform the reset of the driver during the
\r
3595 // next "safe" situation. You cannot reset the driver right now,
\r
3596 // as this code is called from the driver. Reset the driver is
\r
3597 // done by completely destruct is. I.e. ASIOStop(),
\r
3598 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3600 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3603 case kAsioResyncRequest:
\r
3604 // This informs the application that the driver encountered some
\r
3605 // non-fatal data loss. It is used for synchronization purposes
\r
3606 // of different media. Added mainly to work around the Win16Mutex
\r
3607 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3608 // which could lose data because the Mutex was held too long by
\r
3609 // another thread. However a driver can issue it in other
\r
3610 // situations, too.
\r
3611 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3615 case kAsioLatenciesChanged:
\r
3616 // This will inform the host application that the drivers were
\r
3617 // latencies changed. Beware, it this does not mean that the
\r
3618 // buffer sizes have changed! You might need to update internal
\r
3620 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3623 case kAsioEngineVersion:
\r
3624 // Return the supported ASIO version of the host application. If
\r
3625 // a host application does not implement this selector, ASIO 1.0
\r
3626 // is assumed by the driver.
\r
3629 case kAsioSupportsTimeInfo:
\r
3630 // Informs the driver whether the
\r
3631 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3632 // For compatibility with ASIO 1.0 drivers the host application
\r
3633 // should always support the "old" bufferSwitch method, too.
\r
3636 case kAsioSupportsTimeCode:
\r
3637 // Informs the driver whether application is interested in time
\r
3638 // code info. If an application does not need to know about time
\r
3639 // code, the driver has less work to do.
\r
3646 static const char* getAsioErrorString( ASIOError result )
\r
3651 const char*message;
\r
3654 static const Messages m[] =
\r
3656 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3657 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3658 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3659 { ASE_InvalidMode, "Invalid mode." },
\r
3660 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3661 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3662 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3665 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3666 if ( m[i].value == result ) return m[i].message;
\r
3668 return "Unknown error.";
\r
3671 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3675 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3677 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3678 // - Introduces support for the Windows WASAPI API
\r
3679 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3680 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3681 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3686 #include <audioclient.h>
\r
3688 #include <mmdeviceapi.h>
\r
3689 #include <functiondiscoverykeys_devpkey.h>
\r
3691 //=============================================================================
\r
3693 #define SAFE_RELEASE( objectPtr )\
\r
3696 objectPtr->Release();\
\r
3697 objectPtr = NULL;\
\r
3700 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3702 //-----------------------------------------------------------------------------
\r
3704 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3705 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3706 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3707 // provide intermediate storage for read / write synchronization.
\r
3708 class WasapiBuffer
\r
3712 : buffer_( NULL ),
\r
3721 // sets the length of the internal ring buffer
\r
3722 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3725 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3727 bufferSize_ = bufferSize;
\r
3732 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3733 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3735 if ( !buffer || // incoming buffer is NULL
\r
3736 bufferSize == 0 || // incoming buffer has no data
\r
3737 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3742 unsigned int relOutIndex = outIndex_;
\r
3743 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3744 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3745 relOutIndex += bufferSize_;
\r
3748 // "in" index can end on the "out" index but cannot begin at it
\r
3749 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3750 return false; // not enough space between "in" index and "out" index
\r
3753 // copy buffer from external to internal
\r
3754 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3755 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3756 int fromInSize = bufferSize - fromZeroSize;
\r
3760 case RTAUDIO_SINT8:
\r
3761 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3762 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3764 case RTAUDIO_SINT16:
\r
3765 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3766 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3768 case RTAUDIO_SINT24:
\r
3769 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3770 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3772 case RTAUDIO_SINT32:
\r
3773 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3774 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3776 case RTAUDIO_FLOAT32:
\r
3777 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3778 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3780 case RTAUDIO_FLOAT64:
\r
3781 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3782 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3786 // update "in" index
\r
3787 inIndex_ += bufferSize;
\r
3788 inIndex_ %= bufferSize_;
\r
3793 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3794 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3796 if ( !buffer || // incoming buffer is NULL
\r
3797 bufferSize == 0 || // incoming buffer has no data
\r
3798 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3803 unsigned int relInIndex = inIndex_;
\r
3804 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3805 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3806 relInIndex += bufferSize_;
\r
3809 // "out" index can begin at and end on the "in" index
\r
3810 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3811 return false; // not enough space between "out" index and "in" index
\r
3814 // copy buffer from internal to external
\r
3815 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3816 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3817 int fromOutSize = bufferSize - fromZeroSize;
\r
3821 case RTAUDIO_SINT8:
\r
3822 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3823 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3825 case RTAUDIO_SINT16:
\r
3826 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3827 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3829 case RTAUDIO_SINT24:
\r
3830 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3831 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3833 case RTAUDIO_SINT32:
\r
3834 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3835 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3837 case RTAUDIO_FLOAT32:
\r
3838 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3839 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3841 case RTAUDIO_FLOAT64:
\r
3842 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3843 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3847 // update "out" index
\r
3848 outIndex_ += bufferSize;
\r
3849 outIndex_ %= bufferSize_;
\r
3856 unsigned int bufferSize_;
\r
3857 unsigned int inIndex_;
\r
3858 unsigned int outIndex_;
\r
3861 //-----------------------------------------------------------------------------
\r
3863 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3864 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3865 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3866 // This sample rate converter works best with conversions between one rate and its multiple.
\r
3867 void convertBufferWasapi( char* outBuffer,
\r
3868 const char* inBuffer,
\r
3869 const unsigned int& channelCount,
\r
3870 const unsigned int& inSampleRate,
\r
3871 const unsigned int& outSampleRate,
\r
3872 const unsigned int& inSampleCount,
\r
3873 unsigned int& outSampleCount,
\r
3874 const RtAudioFormat& format )
\r
3876 // calculate the new outSampleCount and relative sampleStep
\r
3877 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3878 float sampleRatioInv = ( float ) 1 / sampleRatio;
\r
3879 float sampleStep = 1.0f / sampleRatio;
\r
3880 float inSampleFraction = 0.0f;
\r
3882 outSampleCount = ( unsigned int ) std::roundf( inSampleCount * sampleRatio );
\r
3884 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
\r
3885 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
\r
3887 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3888 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3890 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3894 case RTAUDIO_SINT8:
\r
3895 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3897 case RTAUDIO_SINT16:
\r
3898 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3900 case RTAUDIO_SINT24:
\r
3901 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3903 case RTAUDIO_SINT32:
\r
3904 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3906 case RTAUDIO_FLOAT32:
\r
3907 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3909 case RTAUDIO_FLOAT64:
\r
3910 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3914 // jump to next in sample
\r
3915 inSampleFraction += sampleStep;
\r
3918 else // else interpolate
\r
3920 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3921 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3923 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3924 float inSampleDec = inSampleFraction - inSample;
\r
3925 unsigned int frameInSample = inSample * channelCount;
\r
3926 unsigned int frameOutSample = outSample * channelCount;
\r
3930 case RTAUDIO_SINT8:
\r
3932 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3934 char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
\r
3935 char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3936 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
\r
3937 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3941 case RTAUDIO_SINT16:
\r
3943 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3945 short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
\r
3946 short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3947 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
\r
3948 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3952 case RTAUDIO_SINT24:
\r
3954 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3956 int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
\r
3957 int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
\r
3958 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3959 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3963 case RTAUDIO_SINT32:
\r
3965 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3967 int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
\r
3968 int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3969 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3970 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3974 case RTAUDIO_FLOAT32:
\r
3976 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3978 float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
\r
3979 float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3980 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3981 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3985 case RTAUDIO_FLOAT64:
\r
3987 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3989 double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
\r
3990 double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3991 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3992 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3998 // jump to next in sample
\r
3999 inSampleFraction += sampleStep;
\r
4004 //-----------------------------------------------------------------------------
\r
4006 // A structure to hold various information related to the WASAPI implementation.
\r
4007 struct WasapiHandle
\r
4009 IAudioClient* captureAudioClient;
\r
4010 IAudioClient* renderAudioClient;
\r
4011 IAudioCaptureClient* captureClient;
\r
4012 IAudioRenderClient* renderClient;
\r
4013 HANDLE captureEvent;
\r
4014 HANDLE renderEvent;
\r
4017 : captureAudioClient( NULL ),
\r
4018 renderAudioClient( NULL ),
\r
4019 captureClient( NULL ),
\r
4020 renderClient( NULL ),
\r
4021 captureEvent( NULL ),
\r
4022 renderEvent( NULL ) {}
\r
4025 //=============================================================================
\r
4027 RtApiWasapi::RtApiWasapi()
\r
4028 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
4030 // WASAPI can run either apartment or multi-threaded
\r
4031 HRESULT hr = CoInitialize( NULL );
\r
4032 if ( !FAILED( hr ) )
\r
4033 coInitialized_ = true;
\r
4035 // Instantiate device enumerator
\r
4036 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
4037 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
4038 ( void** ) &deviceEnumerator_ );
\r
4040 if ( FAILED( hr ) ) {
\r
4041 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
4042 error( RtAudioError::DRIVER_ERROR );
\r
4046 //-----------------------------------------------------------------------------
\r
4048 RtApiWasapi::~RtApiWasapi()
\r
4050 if ( stream_.state != STREAM_CLOSED )
\r
4053 SAFE_RELEASE( deviceEnumerator_ );
\r
4055 // If this object previously called CoInitialize()
\r
4056 if ( coInitialized_ )
\r
4060 //=============================================================================
\r
4062 unsigned int RtApiWasapi::getDeviceCount( void )
\r
4064 unsigned int captureDeviceCount = 0;
\r
4065 unsigned int renderDeviceCount = 0;
\r
4067 IMMDeviceCollection* captureDevices = NULL;
\r
4068 IMMDeviceCollection* renderDevices = NULL;
\r
4070 // Count capture devices
\r
4071 errorText_.clear();
\r
4072 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4073 if ( FAILED( hr ) ) {
\r
4074 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
4078 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4079 if ( FAILED( hr ) ) {
\r
4080 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
4084 // Count render devices
\r
4085 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4086 if ( FAILED( hr ) ) {
\r
4087 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4091 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4092 if ( FAILED( hr ) ) {
\r
4093 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4098 // release all references
\r
4099 SAFE_RELEASE( captureDevices );
\r
4100 SAFE_RELEASE( renderDevices );
\r
4102 if ( errorText_.empty() )
\r
4103 return captureDeviceCount + renderDeviceCount;
\r
4105 error( RtAudioError::DRIVER_ERROR );
\r
4109 //-----------------------------------------------------------------------------
\r
4111 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4113 RtAudio::DeviceInfo info;
\r
4114 unsigned int captureDeviceCount = 0;
\r
4115 unsigned int renderDeviceCount = 0;
\r
4116 std::string defaultDeviceName;
\r
4117 bool isCaptureDevice = false;
\r
4119 PROPVARIANT deviceNameProp;
\r
4120 PROPVARIANT defaultDeviceNameProp;
\r
4122 IMMDeviceCollection* captureDevices = NULL;
\r
4123 IMMDeviceCollection* renderDevices = NULL;
\r
4124 IMMDevice* devicePtr = NULL;
\r
4125 IMMDevice* defaultDevicePtr = NULL;
\r
4126 IAudioClient* audioClient = NULL;
\r
4127 IPropertyStore* devicePropStore = NULL;
\r
4128 IPropertyStore* defaultDevicePropStore = NULL;
\r
4130 WAVEFORMATEX* deviceFormat = NULL;
\r
4131 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4134 info.probed = false;
\r
4136 // Count capture devices
\r
4137 errorText_.clear();
\r
4138 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4139 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4140 if ( FAILED( hr ) ) {
\r
4141 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4145 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4146 if ( FAILED( hr ) ) {
\r
4147 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4151 // Count render devices
\r
4152 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4153 if ( FAILED( hr ) ) {
\r
4154 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4158 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4159 if ( FAILED( hr ) ) {
\r
4160 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4164 // validate device index
\r
4165 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4166 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4167 errorType = RtAudioError::INVALID_USE;
\r
4171 // determine whether index falls within capture or render devices
\r
4172 if ( device >= renderDeviceCount ) {
\r
4173 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4174 if ( FAILED( hr ) ) {
\r
4175 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4178 isCaptureDevice = true;
\r
4181 hr = renderDevices->Item( device, &devicePtr );
\r
4182 if ( FAILED( hr ) ) {
\r
4183 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4186 isCaptureDevice = false;
\r
4189 // get default device name
\r
4190 if ( isCaptureDevice ) {
\r
4191 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4192 if ( FAILED( hr ) ) {
\r
4193 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4198 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4199 if ( FAILED( hr ) ) {
\r
4200 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4205 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4206 if ( FAILED( hr ) ) {
\r
4207 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4210 PropVariantInit( &defaultDeviceNameProp );
\r
4212 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4213 if ( FAILED( hr ) ) {
\r
4214 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4218 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4221 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4222 if ( FAILED( hr ) ) {
\r
4223 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4227 PropVariantInit( &deviceNameProp );
\r
4229 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4230 if ( FAILED( hr ) ) {
\r
4231 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4235 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4238 if ( isCaptureDevice ) {
\r
4239 info.isDefaultInput = info.name == defaultDeviceName;
\r
4240 info.isDefaultOutput = false;
\r
4243 info.isDefaultInput = false;
\r
4244 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4248 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4249 if ( FAILED( hr ) ) {
\r
4250 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4254 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4255 if ( FAILED( hr ) ) {
\r
4256 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4260 if ( isCaptureDevice ) {
\r
4261 info.inputChannels = deviceFormat->nChannels;
\r
4262 info.outputChannels = 0;
\r
4263 info.duplexChannels = 0;
\r
4266 info.inputChannels = 0;
\r
4267 info.outputChannels = deviceFormat->nChannels;
\r
4268 info.duplexChannels = 0;
\r
4272 info.sampleRates.clear();
\r
4274 // allow support for all sample rates as we have a built-in sample rate converter
\r
4275 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4276 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4278 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4281 info.nativeFormats = 0;
\r
4283 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4284 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4285 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4287 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4288 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4290 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4291 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4294 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4295 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4296 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4298 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4299 info.nativeFormats |= RTAUDIO_SINT8;
\r
4301 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4302 info.nativeFormats |= RTAUDIO_SINT16;
\r
4304 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4305 info.nativeFormats |= RTAUDIO_SINT24;
\r
4307 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4308 info.nativeFormats |= RTAUDIO_SINT32;
\r
4313 info.probed = true;
\r
4316 // release all references
\r
4317 PropVariantClear( &deviceNameProp );
\r
4318 PropVariantClear( &defaultDeviceNameProp );
\r
4320 SAFE_RELEASE( captureDevices );
\r
4321 SAFE_RELEASE( renderDevices );
\r
4322 SAFE_RELEASE( devicePtr );
\r
4323 SAFE_RELEASE( defaultDevicePtr );
\r
4324 SAFE_RELEASE( audioClient );
\r
4325 SAFE_RELEASE( devicePropStore );
\r
4326 SAFE_RELEASE( defaultDevicePropStore );
\r
4328 CoTaskMemFree( deviceFormat );
\r
4329 CoTaskMemFree( closestMatchFormat );
\r
4331 if ( !errorText_.empty() )
\r
4332 error( errorType );
\r
4336 //-----------------------------------------------------------------------------
\r
4338 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4340 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4341 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4349 //-----------------------------------------------------------------------------
\r
4351 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4353 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4354 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4362 //-----------------------------------------------------------------------------
\r
4364 void RtApiWasapi::closeStream( void )
\r
4366 if ( stream_.state == STREAM_CLOSED ) {
\r
4367 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4368 error( RtAudioError::WARNING );
\r
4372 if ( stream_.state != STREAM_STOPPED )
\r
4375 // clean up stream memory
\r
4376 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4377 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4379 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4380 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4382 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4383 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4385 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4386 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4388 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4389 stream_.apiHandle = NULL;
\r
4391 for ( int i = 0; i < 2; i++ ) {
\r
4392 if ( stream_.userBuffer[i] ) {
\r
4393 free( stream_.userBuffer[i] );
\r
4394 stream_.userBuffer[i] = 0;
\r
4398 if ( stream_.deviceBuffer ) {
\r
4399 free( stream_.deviceBuffer );
\r
4400 stream_.deviceBuffer = 0;
\r
4403 // update stream state
\r
4404 stream_.state = STREAM_CLOSED;
\r
4407 //-----------------------------------------------------------------------------
\r
4409 void RtApiWasapi::startStream( void )
\r
4413 if ( stream_.state == STREAM_RUNNING ) {
\r
4414 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4415 error( RtAudioError::WARNING );
\r
4419 // update stream state
\r
4420 stream_.state = STREAM_RUNNING;
\r
4422 // create WASAPI stream thread
\r
4423 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4425 if ( !stream_.callbackInfo.thread ) {
\r
4426 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4427 error( RtAudioError::THREAD_ERROR );
\r
4430 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4431 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4435 //-----------------------------------------------------------------------------
\r
4437 void RtApiWasapi::stopStream( void )
\r
4441 if ( stream_.state == STREAM_STOPPED ) {
\r
4442 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4443 error( RtAudioError::WARNING );
\r
4447 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4448 stream_.state = STREAM_STOPPING;
\r
4450 // wait until stream thread is stopped
\r
4451 while( stream_.state != STREAM_STOPPED ) {
\r
4455 // Wait for the last buffer to play before stopping.
\r
4456 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4458 // stop capture client if applicable
\r
4459 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4460 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4461 if ( FAILED( hr ) ) {
\r
4462 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4463 error( RtAudioError::DRIVER_ERROR );
\r
4468 // stop render client if applicable
\r
4469 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4470 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4471 if ( FAILED( hr ) ) {
\r
4472 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4473 error( RtAudioError::DRIVER_ERROR );
\r
4478 // close thread handle
\r
4479 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4480 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4481 error( RtAudioError::THREAD_ERROR );
\r
4485 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4488 //-----------------------------------------------------------------------------
\r
4490 void RtApiWasapi::abortStream( void )
\r
4494 if ( stream_.state == STREAM_STOPPED ) {
\r
4495 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4496 error( RtAudioError::WARNING );
\r
4500 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4501 stream_.state = STREAM_STOPPING;
\r
4503 // wait until stream thread is stopped
\r
4504 while ( stream_.state != STREAM_STOPPED ) {
\r
4508 // stop capture client if applicable
\r
4509 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4510 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4511 if ( FAILED( hr ) ) {
\r
4512 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4513 error( RtAudioError::DRIVER_ERROR );
\r
4518 // stop render client if applicable
\r
4519 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4520 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4521 if ( FAILED( hr ) ) {
\r
4522 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4523 error( RtAudioError::DRIVER_ERROR );
\r
4528 // close thread handle
\r
4529 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4530 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4531 error( RtAudioError::THREAD_ERROR );
\r
4535 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4538 //-----------------------------------------------------------------------------
\r
4540 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4541 unsigned int firstChannel, unsigned int sampleRate,
\r
4542 RtAudioFormat format, unsigned int* bufferSize,
\r
4543 RtAudio::StreamOptions* options )
\r
4545 bool methodResult = FAILURE;
\r
4546 unsigned int captureDeviceCount = 0;
\r
4547 unsigned int renderDeviceCount = 0;
\r
4549 IMMDeviceCollection* captureDevices = NULL;
\r
4550 IMMDeviceCollection* renderDevices = NULL;
\r
4551 IMMDevice* devicePtr = NULL;
\r
4552 WAVEFORMATEX* deviceFormat = NULL;
\r
4553 unsigned int bufferBytes;
\r
4554 stream_.state = STREAM_STOPPED;
\r
4556 // create API Handle if not already created
\r
4557 if ( !stream_.apiHandle )
\r
4558 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4560 // Count capture devices
\r
4561 errorText_.clear();
\r
4562 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4563 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4564 if ( FAILED( hr ) ) {
\r
4565 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4569 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4570 if ( FAILED( hr ) ) {
\r
4571 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4575 // Count render devices
\r
4576 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4577 if ( FAILED( hr ) ) {
\r
4578 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4582 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4583 if ( FAILED( hr ) ) {
\r
4584 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4588 // validate device index
\r
4589 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4590 errorType = RtAudioError::INVALID_USE;
\r
4591 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4595 // determine whether index falls within capture or render devices
\r
4596 if ( device >= renderDeviceCount ) {
\r
4597 if ( mode != INPUT ) {
\r
4598 errorType = RtAudioError::INVALID_USE;
\r
4599 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4603 // retrieve captureAudioClient from devicePtr
\r
4604 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4606 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4607 if ( FAILED( hr ) ) {
\r
4608 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4612 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4613 NULL, ( void** ) &captureAudioClient );
\r
4614 if ( FAILED( hr ) ) {
\r
4615 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4619 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4620 if ( FAILED( hr ) ) {
\r
4621 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4625 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4626 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4629 if ( mode != OUTPUT ) {
\r
4630 errorType = RtAudioError::INVALID_USE;
\r
4631 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4635 // retrieve renderAudioClient from devicePtr
\r
4636 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4638 hr = renderDevices->Item( device, &devicePtr );
\r
4639 if ( FAILED( hr ) ) {
\r
4640 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4644 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4645 NULL, ( void** ) &renderAudioClient );
\r
4646 if ( FAILED( hr ) ) {
\r
4647 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4651 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4652 if ( FAILED( hr ) ) {
\r
4653 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4657 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4658 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4661 // fill stream data
\r
4662 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4663 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4664 stream_.mode = DUPLEX;
\r
4667 stream_.mode = mode;
\r
4670 stream_.device[mode] = device;
\r
4671 stream_.doByteSwap[mode] = false;
\r
4672 stream_.sampleRate = sampleRate;
\r
4673 stream_.bufferSize = *bufferSize;
\r
4674 stream_.nBuffers = 1;
\r
4675 stream_.nUserChannels[mode] = channels;
\r
4676 stream_.channelOffset[mode] = firstChannel;
\r
4677 stream_.userFormat = format;
\r
4678 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4680 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4681 stream_.userInterleaved = false;
\r
4683 stream_.userInterleaved = true;
\r
4684 stream_.deviceInterleaved[mode] = true;
\r
4686 // Set flags for buffer conversion.
\r
4687 stream_.doConvertBuffer[mode] = false;
\r
4688 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4689 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4690 stream_.doConvertBuffer[mode] = true;
\r
4691 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4692 stream_.nUserChannels[mode] > 1 )
\r
4693 stream_.doConvertBuffer[mode] = true;
\r
4695 if ( stream_.doConvertBuffer[mode] )
\r
4696 setConvertInfo( mode, 0 );
\r
4698 // Allocate necessary internal buffers
\r
4699 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4701 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4702 if ( !stream_.userBuffer[mode] ) {
\r
4703 errorType = RtAudioError::MEMORY_ERROR;
\r
4704 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4708 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4709 stream_.callbackInfo.priority = 15;
\r
4711 stream_.callbackInfo.priority = 0;
\r
4713 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4714 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4716 methodResult = SUCCESS;
\r
4720 SAFE_RELEASE( captureDevices );
\r
4721 SAFE_RELEASE( renderDevices );
\r
4722 SAFE_RELEASE( devicePtr );
\r
4723 CoTaskMemFree( deviceFormat );
\r
4725 // if method failed, close the stream
\r
4726 if ( methodResult == FAILURE )
\r
4729 if ( !errorText_.empty() )
\r
4730 error( errorType );
\r
4731 return methodResult;
\r
4734 //=============================================================================
\r
4736 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4739 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4744 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4747 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4752 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4755 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4760 //-----------------------------------------------------------------------------
\r
4762 void RtApiWasapi::wasapiThread()
\r
4764 // as this is a new thread, we must CoInitialize it
\r
4765 CoInitialize( NULL );
\r
4769 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4770 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4771 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4772 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4773 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4774 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4776 WAVEFORMATEX* captureFormat = NULL;
\r
4777 WAVEFORMATEX* renderFormat = NULL;
\r
4778 float captureSrRatio = 0.0f;
\r
4779 float renderSrRatio = 0.0f;
\r
4780 WasapiBuffer captureBuffer;
\r
4781 WasapiBuffer renderBuffer;
\r
4783 // declare local stream variables
\r
4784 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4785 BYTE* streamBuffer = NULL;
\r
4786 unsigned long captureFlags = 0;
\r
4787 unsigned int bufferFrameCount = 0;
\r
4788 unsigned int numFramesPadding = 0;
\r
4789 unsigned int convBufferSize = 0;
\r
4790 bool callbackPushed = false;
\r
4791 bool callbackPulled = false;
\r
4792 bool callbackStopped = false;
\r
4793 int callbackResult = 0;
\r
4795 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4796 char* convBuffer = NULL;
\r
4797 unsigned int convBuffSize = 0;
\r
4798 unsigned int deviceBuffSize = 0;
\r
4800 errorText_.clear();
\r
4801 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4803 // Attempt to assign "Pro Audio" characteristic to thread
\r
4804 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4806 DWORD taskIndex = 0;
\r
4807 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4808 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4809 FreeLibrary( AvrtDll );
\r
4812 // start capture stream if applicable
\r
4813 if ( captureAudioClient ) {
\r
4814 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4815 if ( FAILED( hr ) ) {
\r
4816 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4820 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4822 // initialize capture stream according to desire buffer size
\r
4823 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4824 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4826 if ( !captureClient ) {
\r
4827 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4828 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4829 desiredBufferPeriod,
\r
4830 desiredBufferPeriod,
\r
4833 if ( FAILED( hr ) ) {
\r
4834 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4838 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4839 ( void** ) &captureClient );
\r
4840 if ( FAILED( hr ) ) {
\r
4841 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4845 // configure captureEvent to trigger on every available capture buffer
\r
4846 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4847 if ( !captureEvent ) {
\r
4848 errorType = RtAudioError::SYSTEM_ERROR;
\r
4849 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4853 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4854 if ( FAILED( hr ) ) {
\r
4855 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4859 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4860 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4863 unsigned int inBufferSize = 0;
\r
4864 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4865 if ( FAILED( hr ) ) {
\r
4866 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4870 // scale outBufferSize according to stream->user sample rate ratio
\r
4871 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4872 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4874 // set captureBuffer size
\r
4875 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4877 // reset the capture stream
\r
4878 hr = captureAudioClient->Reset();
\r
4879 if ( FAILED( hr ) ) {
\r
4880 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4884 // start the capture stream
\r
4885 hr = captureAudioClient->Start();
\r
4886 if ( FAILED( hr ) ) {
\r
4887 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4892 // start render stream if applicable
\r
4893 if ( renderAudioClient ) {
\r
4894 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4895 if ( FAILED( hr ) ) {
\r
4896 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4900 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4902 // initialize render stream according to desire buffer size
\r
4903 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4904 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4906 if ( !renderClient ) {
\r
4907 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4908 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4909 desiredBufferPeriod,
\r
4910 desiredBufferPeriod,
\r
4913 if ( FAILED( hr ) ) {
\r
4914 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4918 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4919 ( void** ) &renderClient );
\r
4920 if ( FAILED( hr ) ) {
\r
4921 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4925 // configure renderEvent to trigger on every available render buffer
\r
4926 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4927 if ( !renderEvent ) {
\r
4928 errorType = RtAudioError::SYSTEM_ERROR;
\r
4929 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4933 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4934 if ( FAILED( hr ) ) {
\r
4935 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4939 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4940 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4943 unsigned int outBufferSize = 0;
\r
4944 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4945 if ( FAILED( hr ) ) {
\r
4946 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4950 // scale inBufferSize according to user->stream sample rate ratio
\r
4951 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4952 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4954 // set renderBuffer size
\r
4955 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4957 // reset the render stream
\r
4958 hr = renderAudioClient->Reset();
\r
4959 if ( FAILED( hr ) ) {
\r
4960 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4964 // start the render stream
\r
4965 hr = renderAudioClient->Start();
\r
4966 if ( FAILED( hr ) ) {
\r
4967 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4972 if ( stream_.mode == INPUT ) {
\r
4973 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4974 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4976 else if ( stream_.mode == OUTPUT ) {
\r
4977 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4978 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4980 else if ( stream_.mode == DUPLEX ) {
\r
4981 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4982 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4983 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4984 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4987 convBuffer = ( char* ) malloc( convBuffSize );
\r
4988 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4989 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4990 errorType = RtAudioError::MEMORY_ERROR;
\r
4991 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4995 // stream process loop
\r
4996 while ( stream_.state != STREAM_STOPPING ) {
\r
4997 if ( !callbackPulled ) {
\r
5000 // 1. Pull callback buffer from inputBuffer
\r
5001 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
5002 // Convert callback buffer to user format
\r
5004 if ( captureAudioClient ) {
\r
5005 // Pull callback buffer from inputBuffer
\r
5006 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
5007 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
5008 stream_.deviceFormat[INPUT] );
\r
5010 if ( callbackPulled ) {
\r
5011 // Convert callback buffer to user sample rate
\r
5012 convertBufferWasapi( stream_.deviceBuffer,
\r
5014 stream_.nDeviceChannels[INPUT],
\r
5015 captureFormat->nSamplesPerSec,
\r
5016 stream_.sampleRate,
\r
5017 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
5019 stream_.deviceFormat[INPUT] );
\r
5021 if ( stream_.doConvertBuffer[INPUT] ) {
\r
5022 // Convert callback buffer to user format
\r
5023 convertBuffer( stream_.userBuffer[INPUT],
\r
5024 stream_.deviceBuffer,
\r
5025 stream_.convertInfo[INPUT] );
\r
5028 // no further conversion, simple copy deviceBuffer to userBuffer
\r
5029 memcpy( stream_.userBuffer[INPUT],
\r
5030 stream_.deviceBuffer,
\r
5031 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
5036 // if there is no capture stream, set callbackPulled flag
\r
5037 callbackPulled = true;
\r
5040 // Execute Callback
\r
5041 // ================
\r
5042 // 1. Execute user callback method
\r
5043 // 2. Handle return value from callback
\r
5045 // if callback has not requested the stream to stop
\r
5046 if ( callbackPulled && !callbackStopped ) {
\r
5047 // Execute user callback method
\r
5048 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
5049 stream_.userBuffer[INPUT],
\r
5050 stream_.bufferSize,
\r
5052 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
5053 stream_.callbackInfo.userData );
\r
5055 // Handle return value from callback
\r
5056 if ( callbackResult == 1 ) {
\r
5057 // instantiate a thread to stop this thread
\r
5058 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
5059 if ( !threadHandle ) {
\r
5060 errorType = RtAudioError::THREAD_ERROR;
\r
5061 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
5064 else if ( !CloseHandle( threadHandle ) ) {
\r
5065 errorType = RtAudioError::THREAD_ERROR;
\r
5066 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
5070 callbackStopped = true;
\r
5072 else if ( callbackResult == 2 ) {
\r
5073 // instantiate a thread to stop this thread
\r
5074 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
5075 if ( !threadHandle ) {
\r
5076 errorType = RtAudioError::THREAD_ERROR;
\r
5077 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
5080 else if ( !CloseHandle( threadHandle ) ) {
\r
5081 errorType = RtAudioError::THREAD_ERROR;
\r
5082 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
5086 callbackStopped = true;
\r
5091 // Callback Output
\r
5092 // ===============
\r
5093 // 1. Convert callback buffer to stream format
\r
5094 // 2. Convert callback buffer to stream sample rate and channel count
\r
5095 // 3. Push callback buffer into outputBuffer
\r
5097 if ( renderAudioClient && callbackPulled ) {
\r
5098 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
5099 // Convert callback buffer to stream format
\r
5100 convertBuffer( stream_.deviceBuffer,
\r
5101 stream_.userBuffer[OUTPUT],
\r
5102 stream_.convertInfo[OUTPUT] );
\r
5106 // Convert callback buffer to stream sample rate
\r
5107 convertBufferWasapi( convBuffer,
\r
5108 stream_.deviceBuffer,
\r
5109 stream_.nDeviceChannels[OUTPUT],
\r
5110 stream_.sampleRate,
\r
5111 renderFormat->nSamplesPerSec,
\r
5112 stream_.bufferSize,
\r
5114 stream_.deviceFormat[OUTPUT] );
\r
5116 // Push callback buffer into outputBuffer
\r
5117 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
5118 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
5119 stream_.deviceFormat[OUTPUT] );
\r
5122 // if there is no render stream, set callbackPushed flag
\r
5123 callbackPushed = true;
\r
5128 // 1. Get capture buffer from stream
\r
5129 // 2. Push capture buffer into inputBuffer
\r
5130 // 3. If 2. was successful: Release capture buffer
\r
5132 if ( captureAudioClient ) {
\r
5133 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
5134 if ( !callbackPulled ) {
\r
5135 WaitForSingleObject( captureEvent, INFINITE );
\r
5138 // Get capture buffer from stream
\r
5139 hr = captureClient->GetBuffer( &streamBuffer,
\r
5140 &bufferFrameCount,
\r
5141 &captureFlags, NULL, NULL );
\r
5142 if ( FAILED( hr ) ) {
\r
5143 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
5147 if ( bufferFrameCount != 0 ) {
\r
5148 // Push capture buffer into inputBuffer
\r
5149 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
5150 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
5151 stream_.deviceFormat[INPUT] ) )
\r
5153 // Release capture buffer
\r
5154 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
5155 if ( FAILED( hr ) ) {
\r
5156 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5162 // Inform WASAPI that capture was unsuccessful
\r
5163 hr = captureClient->ReleaseBuffer( 0 );
\r
5164 if ( FAILED( hr ) ) {
\r
5165 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5172 // Inform WASAPI that capture was unsuccessful
\r
5173 hr = captureClient->ReleaseBuffer( 0 );
\r
5174 if ( FAILED( hr ) ) {
\r
5175 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5183 // 1. Get render buffer from stream
\r
5184 // 2. Pull next buffer from outputBuffer
\r
5185 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5186 // Release render buffer
\r
5188 if ( renderAudioClient ) {
\r
5189 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5190 if ( callbackPulled && !callbackPushed ) {
\r
5191 WaitForSingleObject( renderEvent, INFINITE );
\r
5194 // Get render buffer from stream
\r
5195 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5196 if ( FAILED( hr ) ) {
\r
5197 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5201 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5202 if ( FAILED( hr ) ) {
\r
5203 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5207 bufferFrameCount -= numFramesPadding;
\r
5209 if ( bufferFrameCount != 0 ) {
\r
5210 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5211 if ( FAILED( hr ) ) {
\r
5212 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5216 // Pull next buffer from outputBuffer
\r
5217 // Fill render buffer with next buffer
\r
5218 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5219 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5220 stream_.deviceFormat[OUTPUT] ) )
\r
5222 // Release render buffer
\r
5223 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5224 if ( FAILED( hr ) ) {
\r
5225 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5231 // Inform WASAPI that render was unsuccessful
\r
5232 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5233 if ( FAILED( hr ) ) {
\r
5234 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5241 // Inform WASAPI that render was unsuccessful
\r
5242 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5243 if ( FAILED( hr ) ) {
\r
5244 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5250 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5251 if ( callbackPushed ) {
\r
5252 callbackPulled = false;
\r
5253 // tick stream time
\r
5254 RtApi::tickStreamTime();
\r
5261 CoTaskMemFree( captureFormat );
\r
5262 CoTaskMemFree( renderFormat );
\r
5264 free ( convBuffer );
\r
5268 // update stream state
\r
5269 stream_.state = STREAM_STOPPED;
\r
5271 if ( errorText_.empty() )
\r
5274 error( errorType );
\r
5277 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5281 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5283 // Modified by Robin Davies, October 2005
\r
5284 // - Improvements to DirectX pointer chasing.
\r
5285 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5286 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5287 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5288 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5290 #include <mmsystem.h>
\r
5291 #include <mmreg.h>
\r
5292 #include <dsound.h>
\r
5293 #include <assert.h>
\r
5294 #include <algorithm>
\r
5296 #if defined(__MINGW32__)
\r
5297 // missing from latest mingw winapi
\r
5298 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5299 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5300 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5301 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5304 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5306 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5307 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5310 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5312 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5313 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5314 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5315 return pointer >= earlierPointer && pointer < laterPointer;
\r
5318 // A structure to hold various information related to the DirectSound
\r
5319 // API implementation.
\r
5321 unsigned int drainCounter; // Tracks callback counts when draining
\r
5322 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5326 UINT bufferPointer[2];
\r
5327 DWORD dsBufferSize[2];
\r
5328 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5332 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5335 // Declarations for utility functions, callbacks, and structures
\r
5336 // specific to the DirectSound implementation.
\r
5337 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5338 LPCTSTR description,
\r
5340 LPVOID lpContext );
\r
5342 static const char* getErrorString( int code );
\r
5344 static unsigned __stdcall callbackHandler( void *ptr );
\r
5353 : found(false) { validId[0] = false; validId[1] = false; }
\r
// Context passed to the DirectSound enumeration callback: which direction
// is being enumerated and where to accumulate the discovered devices.
struct DsProbeData {
  bool isInput;                          // true while enumerating capture devices
  std::vector<struct DsDevice>* dsDevices;
};
\r
5361 RtApiDs :: RtApiDs()
\r
5363 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5364 // accept whatever the mainline chose for a threading model.
\r
5365 coInitialized_ = false;
\r
5366 HRESULT hr = CoInitialize( NULL );
\r
5367 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5370 RtApiDs :: ~RtApiDs()
\r
5372 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5373 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5376 // The DirectSound default output is always the first device.
\r
5377 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5382 // The DirectSound default input is always the first input device,
\r
5383 // which is the first capture device enumerated.
\r
5384 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5389 unsigned int RtApiDs :: getDeviceCount( void )
\r
5391 // Set query flag for previously found devices to false, so that we
\r
5392 // can check for any devices that have disappeared.
\r
5393 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5394 dsDevices[i].found = false;
\r
5396 // Query DirectSound devices.
\r
5397 struct DsProbeData probeInfo;
\r
5398 probeInfo.isInput = false;
\r
5399 probeInfo.dsDevices = &dsDevices;
\r
5400 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5401 if ( FAILED( result ) ) {
\r
5402 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5403 errorText_ = errorStream_.str();
\r
5404 error( RtAudioError::WARNING );
\r
5407 // Query DirectSoundCapture devices.
\r
5408 probeInfo.isInput = true;
\r
5409 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5410 if ( FAILED( result ) ) {
\r
5411 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5412 errorText_ = errorStream_.str();
\r
5413 error( RtAudioError::WARNING );
\r
5416 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
\r
5417 for ( unsigned int i=0; i<dsDevices.size(); ) {
\r
5418 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
\r
5422 return static_cast<unsigned int>(dsDevices.size());
\r
5425 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5427 RtAudio::DeviceInfo info;
\r
5428 info.probed = false;
\r
5430 if ( dsDevices.size() == 0 ) {
\r
5431 // Force a query of all devices
\r
5433 if ( dsDevices.size() == 0 ) {
\r
5434 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5435 error( RtAudioError::INVALID_USE );
\r
5440 if ( device >= dsDevices.size() ) {
\r
5441 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5442 error( RtAudioError::INVALID_USE );
\r
5447 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5449 LPDIRECTSOUND output;
\r
5451 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5452 if ( FAILED( result ) ) {
\r
5453 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5454 errorText_ = errorStream_.str();
\r
5455 error( RtAudioError::WARNING );
\r
5459 outCaps.dwSize = sizeof( outCaps );
\r
5460 result = output->GetCaps( &outCaps );
\r
5461 if ( FAILED( result ) ) {
\r
5462 output->Release();
\r
5463 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5464 errorText_ = errorStream_.str();
\r
5465 error( RtAudioError::WARNING );
\r
5469 // Get output channel information.
\r
5470 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5472 // Get sample rate information.
\r
5473 info.sampleRates.clear();
\r
5474 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5475 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5476 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5477 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5479 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5480 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5484 // Get format information.
\r
5485 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5486 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5488 output->Release();
\r
5490 if ( getDefaultOutputDevice() == device )
\r
5491 info.isDefaultOutput = true;
\r
5493 if ( dsDevices[ device ].validId[1] == false ) {
\r
5494 info.name = dsDevices[ device ].name;
\r
5495 info.probed = true;
\r
5501 LPDIRECTSOUNDCAPTURE input;
\r
5502 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5503 if ( FAILED( result ) ) {
\r
5504 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5505 errorText_ = errorStream_.str();
\r
5506 error( RtAudioError::WARNING );
\r
5511 inCaps.dwSize = sizeof( inCaps );
\r
5512 result = input->GetCaps( &inCaps );
\r
5513 if ( FAILED( result ) ) {
\r
5515 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5516 errorText_ = errorStream_.str();
\r
5517 error( RtAudioError::WARNING );
\r
5521 // Get input channel information.
\r
5522 info.inputChannels = inCaps.dwChannels;
\r
5524 // Get sample rate and format information.
\r
5525 std::vector<unsigned int> rates;
\r
5526 if ( inCaps.dwChannels >= 2 ) {
\r
5527 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5528 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5529 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5530 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5531 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5532 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5533 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5534 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5536 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5537 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5538 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5539 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5540 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5542 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5543 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5544 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5545 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5546 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5549 else if ( inCaps.dwChannels == 1 ) {
\r
5550 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5551 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5552 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5553 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5554 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5555 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5556 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5557 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5559 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5560 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5561 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5562 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5563 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5565 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5566 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5567 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5568 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5569 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5572 else info.inputChannels = 0; // technically, this would be an error
\r
5576 if ( info.inputChannels == 0 ) return info;
\r
5578 // Copy the supported rates to the info structure but avoid duplication.
\r
5580 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5582 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5583 if ( rates[i] == info.sampleRates[j] ) {
\r
5588 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5590 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5592 // If device opens for both playback and capture, we determine the channels.
\r
5593 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5594 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5596 if ( device == 0 ) info.isDefaultInput = true;
\r
5598 // Copy name and return.
\r
5599 info.name = dsDevices[ device ].name;
\r
5600 info.probed = true;
\r
5604 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5605 unsigned int firstChannel, unsigned int sampleRate,
\r
5606 RtAudioFormat format, unsigned int *bufferSize,
\r
5607 RtAudio::StreamOptions *options )
\r
5609 if ( channels + firstChannel > 2 ) {
\r
5610 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5614 size_t nDevices = dsDevices.size();
\r
5615 if ( nDevices == 0 ) {
\r
5616 // This should not happen because a check is made before this function is called.
\r
5617 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5621 if ( device >= nDevices ) {
\r
5622 // This should not happen because a check is made before this function is called.
\r
5623 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5627 if ( mode == OUTPUT ) {
\r
5628 if ( dsDevices[ device ].validId[0] == false ) {
\r
5629 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5630 errorText_ = errorStream_.str();
\r
5634 else { // mode == INPUT
\r
5635 if ( dsDevices[ device ].validId[1] == false ) {
\r
5636 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5637 errorText_ = errorStream_.str();
\r
5642 // According to a note in PortAudio, using GetDesktopWindow()
\r
5643 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5644 // that occur when the application's window is not the foreground
\r
5645 // window. Also, if the application window closes before the
\r
5646 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5647 // problems when using GetDesktopWindow() but it seems fine now
\r
5648 // (January 2010). I'll leave it commented here.
\r
5649 // HWND hWnd = GetForegroundWindow();
\r
5650 HWND hWnd = GetDesktopWindow();
\r
5652 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5653 // two. This is a judgement call and a value of two is probably too
\r
5654 // low for capture, but it should work for playback.
\r
5656 if ( options ) nBuffers = options->numberOfBuffers;
\r
5657 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5658 if ( nBuffers < 2 ) nBuffers = 3;
\r
5660 // Check the lower range of the user-specified buffer size and set
\r
5661 // (arbitrarily) to a lower bound of 32.
\r
5662 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5664 // Create the wave format structure. The data format setting will
\r
5665 // be determined later.
\r
5666 WAVEFORMATEX waveFormat;
\r
5667 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5668 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5669 waveFormat.nChannels = channels + firstChannel;
\r
5670 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5672 // Determine the device buffer size. By default, we'll use the value
\r
5673 // defined above (32K), but we will grow it to make allowances for
\r
5674 // very large software buffer sizes.
\r
5675 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5676 DWORD dsPointerLeadTime = 0;
\r
5678 void *ohandle = 0, *bhandle = 0;
\r
5680 if ( mode == OUTPUT ) {
\r
5682 LPDIRECTSOUND output;
\r
5683 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5684 if ( FAILED( result ) ) {
\r
5685 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5686 errorText_ = errorStream_.str();
\r
5691 outCaps.dwSize = sizeof( outCaps );
\r
5692 result = output->GetCaps( &outCaps );
\r
5693 if ( FAILED( result ) ) {
\r
5694 output->Release();
\r
5695 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5696 errorText_ = errorStream_.str();
\r
5700 // Check channel information.
\r
5701 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5702 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5703 errorText_ = errorStream_.str();
\r
5707 // Check format information. Use 16-bit format unless not
\r
5708 // supported or user requests 8-bit.
\r
5709 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5710 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5711 waveFormat.wBitsPerSample = 16;
\r
5712 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5715 waveFormat.wBitsPerSample = 8;
\r
5716 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5718 stream_.userFormat = format;
\r
5720 // Update wave format structure and buffer information.
\r
5721 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5722 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5723 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5725 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5726 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5727 dsBufferSize *= 2;
\r
5729 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5730 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5731 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5732 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5733 if ( FAILED( result ) ) {
\r
5734 output->Release();
\r
5735 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5736 errorText_ = errorStream_.str();
\r
5740 // Even though we will write to the secondary buffer, we need to
\r
5741 // access the primary buffer to set the correct output format
\r
5742 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5743 // buffer description.
\r
5744 DSBUFFERDESC bufferDescription;
\r
5745 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5746 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5747 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5749 // Obtain the primary buffer
\r
5750 LPDIRECTSOUNDBUFFER buffer;
\r
5751 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5752 if ( FAILED( result ) ) {
\r
5753 output->Release();
\r
5754 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5755 errorText_ = errorStream_.str();
\r
5759 // Set the primary DS buffer sound format.
\r
5760 result = buffer->SetFormat( &waveFormat );
\r
5761 if ( FAILED( result ) ) {
\r
5762 output->Release();
\r
5763 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5764 errorText_ = errorStream_.str();
\r
5768 // Setup the secondary DS buffer description.
\r
5769 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5770 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5771 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5772 DSBCAPS_GLOBALFOCUS |
\r
5773 DSBCAPS_GETCURRENTPOSITION2 |
\r
5774 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5775 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5776 bufferDescription.lpwfxFormat = &waveFormat;
\r
5778 // Try to create the secondary DS buffer. If that doesn't work,
\r
5779 // try to use software mixing. Otherwise, there's a problem.
\r
5780 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5781 if ( FAILED( result ) ) {
\r
5782 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5783 DSBCAPS_GLOBALFOCUS |
\r
5784 DSBCAPS_GETCURRENTPOSITION2 |
\r
5785 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5786 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5787 if ( FAILED( result ) ) {
\r
5788 output->Release();
\r
5789 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5790 errorText_ = errorStream_.str();
\r
5795 // Get the buffer size ... might be different from what we specified.
\r
5797 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5798 result = buffer->GetCaps( &dsbcaps );
\r
5799 if ( FAILED( result ) ) {
\r
5800 output->Release();
\r
5801 buffer->Release();
\r
5802 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5803 errorText_ = errorStream_.str();
\r
5807 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5809 // Lock the DS buffer
\r
5812 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5813 if ( FAILED( result ) ) {
\r
5814 output->Release();
\r
5815 buffer->Release();
\r
5816 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5817 errorText_ = errorStream_.str();
\r
5821 // Zero the DS buffer
\r
5822 ZeroMemory( audioPtr, dataLen );
\r
5824 // Unlock the DS buffer
\r
5825 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5826 if ( FAILED( result ) ) {
\r
5827 output->Release();
\r
5828 buffer->Release();
\r
5829 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5830 errorText_ = errorStream_.str();
\r
5834 ohandle = (void *) output;
\r
5835 bhandle = (void *) buffer;
\r
5838 if ( mode == INPUT ) {
\r
5840 LPDIRECTSOUNDCAPTURE input;
\r
5841 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5842 if ( FAILED( result ) ) {
\r
5843 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5844 errorText_ = errorStream_.str();
\r
5849 inCaps.dwSize = sizeof( inCaps );
\r
5850 result = input->GetCaps( &inCaps );
\r
5851 if ( FAILED( result ) ) {
\r
5853 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5854 errorText_ = errorStream_.str();
\r
5858 // Check channel information.
\r
5859 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5860 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5864 // Check format information. Use 16-bit format unless user
\r
5865 // requests 8-bit.
\r
5866 DWORD deviceFormats;
\r
5867 if ( channels + firstChannel == 2 ) {
\r
5868 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5869 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5870 waveFormat.wBitsPerSample = 8;
\r
5871 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5873 else { // assume 16-bit is supported
\r
5874 waveFormat.wBitsPerSample = 16;
\r
5875 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5878 else { // channel == 1
\r
5879 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5880 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5881 waveFormat.wBitsPerSample = 8;
\r
5882 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5884 else { // assume 16-bit is supported
\r
5885 waveFormat.wBitsPerSample = 16;
\r
5886 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5889 stream_.userFormat = format;
\r
5891 // Update wave format structure and buffer information.
\r
5892 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5893 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5894 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5896 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5897 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5898 dsBufferSize *= 2;
\r
5900 // Setup the secondary DS buffer description.
\r
5901 DSCBUFFERDESC bufferDescription;
\r
5902 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5903 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5904 bufferDescription.dwFlags = 0;
\r
5905 bufferDescription.dwReserved = 0;
\r
5906 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5907 bufferDescription.lpwfxFormat = &waveFormat;
\r
5909 // Create the capture buffer.
\r
5910 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5911 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5912 if ( FAILED( result ) ) {
\r
5914 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5915 errorText_ = errorStream_.str();
\r
5919 // Get the buffer size ... might be different from what we specified.
\r
5920 DSCBCAPS dscbcaps;
\r
5921 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5922 result = buffer->GetCaps( &dscbcaps );
\r
5923 if ( FAILED( result ) ) {
\r
5925 buffer->Release();
\r
5926 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5927 errorText_ = errorStream_.str();
\r
5931 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5933 // NOTE: We could have a problem here if this is a duplex stream
\r
5934 // and the play and capture hardware buffer sizes are different
\r
5935 // (I'm actually not sure if that is a problem or not).
\r
5936 // Currently, we are not verifying that.
\r
5938 // Lock the capture buffer
\r
5941 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5942 if ( FAILED( result ) ) {
\r
5944 buffer->Release();
\r
5945 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5946 errorText_ = errorStream_.str();
\r
5950 // Zero the buffer
\r
5951 ZeroMemory( audioPtr, dataLen );
\r
5953 // Unlock the buffer
\r
5954 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5955 if ( FAILED( result ) ) {
\r
5957 buffer->Release();
\r
5958 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5959 errorText_ = errorStream_.str();
\r
5963 ohandle = (void *) input;
\r
5964 bhandle = (void *) buffer;
\r
5967 // Set various stream parameters
\r
5968 DsHandle *handle = 0;
\r
5969 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5970 stream_.nUserChannels[mode] = channels;
\r
5971 stream_.bufferSize = *bufferSize;
\r
5972 stream_.channelOffset[mode] = firstChannel;
\r
5973 stream_.deviceInterleaved[mode] = true;
\r
5974 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5975 else stream_.userInterleaved = true;
\r
5977 // Set flag for buffer conversion
\r
5978 stream_.doConvertBuffer[mode] = false;
\r
5979 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5980 stream_.doConvertBuffer[mode] = true;
\r
5981 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5982 stream_.doConvertBuffer[mode] = true;
\r
5983 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5984 stream_.nUserChannels[mode] > 1 )
\r
5985 stream_.doConvertBuffer[mode] = true;
\r
5987 // Allocate necessary internal buffers
\r
5988 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5989 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5990 if ( stream_.userBuffer[mode] == NULL ) {
\r
5991 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5995 if ( stream_.doConvertBuffer[mode] ) {
\r
5997 bool makeBuffer = true;
\r
5998 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5999 if ( mode == INPUT ) {
\r
6000 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6001 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6002 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
6006 if ( makeBuffer ) {
\r
6007 bufferBytes *= *bufferSize;
\r
6008 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6009 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6010 if ( stream_.deviceBuffer == NULL ) {
\r
6011 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
6017 // Allocate our DsHandle structures for the stream.
\r
6018 if ( stream_.apiHandle == 0 ) {
\r
6020 handle = new DsHandle;
\r
6022 catch ( std::bad_alloc& ) {
\r
6023 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
6027 // Create a manual-reset event.
\r
6028 handle->condition = CreateEvent( NULL, // no security
\r
6029 TRUE, // manual-reset
\r
6030 FALSE, // non-signaled initially
\r
6031 NULL ); // unnamed
\r
6032 stream_.apiHandle = (void *) handle;
\r
6035 handle = (DsHandle *) stream_.apiHandle;
\r
6036 handle->id[mode] = ohandle;
\r
6037 handle->buffer[mode] = bhandle;
\r
6038 handle->dsBufferSize[mode] = dsBufferSize;
\r
6039 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
6041 stream_.device[mode] = device;
\r
6042 stream_.state = STREAM_STOPPED;
\r
6043 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
6044 // We had already set up an output stream.
\r
6045 stream_.mode = DUPLEX;
\r
6047 stream_.mode = mode;
\r
6048 stream_.nBuffers = nBuffers;
\r
6049 stream_.sampleRate = sampleRate;
\r
6051 // Setup the buffer conversion information structure.
\r
6052 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6054 // Setup the callback thread.
\r
6055 if ( stream_.callbackInfo.isRunning == false ) {
\r
6056 unsigned threadId;
\r
6057 stream_.callbackInfo.isRunning = true;
\r
6058 stream_.callbackInfo.object = (void *) this;
\r
6059 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
6060 &stream_.callbackInfo, 0, &threadId );
\r
6061 if ( stream_.callbackInfo.thread == 0 ) {
\r
6062 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
6066 // Boost DS thread priority
\r
6067 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
6073 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6074 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6075 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6076 if ( buffer ) buffer->Release();
\r
6077 object->Release();
\r
6079 if ( handle->buffer[1] ) {
\r
6080 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6081 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6082 if ( buffer ) buffer->Release();
\r
6083 object->Release();
\r
6085 CloseHandle( handle->condition );
\r
6087 stream_.apiHandle = 0;
\r
6090 for ( int i=0; i<2; i++ ) {
\r
6091 if ( stream_.userBuffer[i] ) {
\r
6092 free( stream_.userBuffer[i] );
\r
6093 stream_.userBuffer[i] = 0;
\r
6097 if ( stream_.deviceBuffer ) {
\r
6098 free( stream_.deviceBuffer );
\r
6099 stream_.deviceBuffer = 0;
\r
6102 stream_.state = STREAM_CLOSED;
\r
6106 void RtApiDs :: closeStream()
\r
6108 if ( stream_.state == STREAM_CLOSED ) {
\r
6109 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
6110 error( RtAudioError::WARNING );
\r
6114 // Stop the callback thread.
\r
6115 stream_.callbackInfo.isRunning = false;
\r
6116 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
6117 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
6119 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6121 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6122 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6123 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6126 buffer->Release();
\r
6128 object->Release();
\r
6130 if ( handle->buffer[1] ) {
\r
6131 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6132 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6135 buffer->Release();
\r
6137 object->Release();
\r
6139 CloseHandle( handle->condition );
\r
6141 stream_.apiHandle = 0;
\r
6144 for ( int i=0; i<2; i++ ) {
\r
6145 if ( stream_.userBuffer[i] ) {
\r
6146 free( stream_.userBuffer[i] );
\r
6147 stream_.userBuffer[i] = 0;
\r
6151 if ( stream_.deviceBuffer ) {
\r
6152 free( stream_.deviceBuffer );
\r
6153 stream_.deviceBuffer = 0;
\r
6156 stream_.mode = UNINITIALIZED;
\r
6157 stream_.state = STREAM_CLOSED;
\r
6160 void RtApiDs :: startStream()
\r
6163 if ( stream_.state == STREAM_RUNNING ) {
\r
6164 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6165 error( RtAudioError::WARNING );
\r
6169 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6171 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6172 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6173 // this is already in effect.
\r
6174 timeBeginPeriod( 1 );
\r
6176 buffersRolling = false;
\r
6177 duplexPrerollBytes = 0;
\r
6179 if ( stream_.mode == DUPLEX ) {
\r
6180 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6181 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6184 HRESULT result = 0;
\r
6185 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6187 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6188 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6189 if ( FAILED( result ) ) {
\r
6190 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6191 errorText_ = errorStream_.str();
\r
6196 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6198 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6199 result = buffer->Start( DSCBSTART_LOOPING );
\r
6200 if ( FAILED( result ) ) {
\r
6201 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6202 errorText_ = errorStream_.str();
\r
6207 handle->drainCounter = 0;
\r
6208 handle->internalDrain = false;
\r
6209 ResetEvent( handle->condition );
\r
6210 stream_.state = STREAM_RUNNING;
\r
6213 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6216 void RtApiDs :: stopStream()
\r
6219 if ( stream_.state == STREAM_STOPPED ) {
\r
6220 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6221 error( RtAudioError::WARNING );
\r
6225 HRESULT result = 0;
\r
6228 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6229 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6230 if ( handle->drainCounter == 0 ) {
\r
6231 handle->drainCounter = 2;
\r
6232 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6235 stream_.state = STREAM_STOPPED;
\r
6237 MUTEX_LOCK( &stream_.mutex );
\r
6239 // Stop the buffer and clear memory
\r
6240 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6241 result = buffer->Stop();
\r
6242 if ( FAILED( result ) ) {
\r
6243 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6244 errorText_ = errorStream_.str();
\r
6248 // Lock the buffer and clear it so that if we start to play again,
\r
6249 // we won't have old data playing.
\r
6250 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6251 if ( FAILED( result ) ) {
\r
6252 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6253 errorText_ = errorStream_.str();
\r
6257 // Zero the DS buffer
\r
6258 ZeroMemory( audioPtr, dataLen );
\r
6260 // Unlock the DS buffer
\r
6261 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6262 if ( FAILED( result ) ) {
\r
6263 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6264 errorText_ = errorStream_.str();
\r
6268 // If we start playing again, we must begin at beginning of buffer.
\r
6269 handle->bufferPointer[0] = 0;
\r
6272 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6273 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6277 stream_.state = STREAM_STOPPED;
\r
6279 if ( stream_.mode != DUPLEX )
\r
6280 MUTEX_LOCK( &stream_.mutex );
\r
6282 result = buffer->Stop();
\r
6283 if ( FAILED( result ) ) {
\r
6284 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6285 errorText_ = errorStream_.str();
\r
6289 // Lock the buffer and clear it so that if we start to play again,
\r
6290 // we won't have old data playing.
\r
6291 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6292 if ( FAILED( result ) ) {
\r
6293 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6294 errorText_ = errorStream_.str();
\r
6298 // Zero the DS buffer
\r
6299 ZeroMemory( audioPtr, dataLen );
\r
6301 // Unlock the DS buffer
\r
6302 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6303 if ( FAILED( result ) ) {
\r
6304 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6305 errorText_ = errorStream_.str();
\r
6309 // If we start recording again, we must begin at beginning of buffer.
\r
6310 handle->bufferPointer[1] = 0;
\r
6314 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6315 MUTEX_UNLOCK( &stream_.mutex );
\r
6317 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6320 void RtApiDs :: abortStream()
\r
6323 if ( stream_.state == STREAM_STOPPED ) {
\r
6324 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6325 error( RtAudioError::WARNING );
\r
6329 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6330 handle->drainCounter = 2;
\r
// RtApiDs::callbackEvent(): per-buffer engine of the DirectSound backend.
// Invokes the user callback (unless draining), then copies audio between the
// user/device buffers and the circular DirectSound playback/capture buffers,
// tracking "safe" read/write cursors to avoid the hardware's forbidden zone.
// NOTE(review): this chunk is a lossy extraction — original line numbers are
// baked into every line, stray CR lines are interleaved, and gaps in the
// numbering show missing source lines (braces, returns, declarations).
// Restore from the canonical RtAudio sources before compiling.
6335 void RtApiDs :: callbackEvent()
\r
6337 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6338 Sleep( 50 ); // sleep 50 milliseconds
\r
6342 if ( stream_.state == STREAM_CLOSED ) {
\r
6343 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6344 error( RtAudioError::WARNING );
\r
6348 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6349 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6351 // Check if we were draining the stream and signal is finished.
\r
6352 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6354 stream_.state = STREAM_STOPPING;
\r
6355 if ( handle->internalDrain == false )
\r
6356 SetEvent( handle->condition );
\r
6362 // Invoke user callback to get fresh output data UNLESS we are
\r
6363 // draining stream.
\r
6364 if ( handle->drainCounter == 0 ) {
\r
6365 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6366 double streamTime = getStreamTime();
\r
6367 RtAudioStreamStatus status = 0;
\r
6368 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6369 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6370 handle->xrun[0] = false;
\r
6372 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6373 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6374 handle->xrun[1] = false;
\r
6376 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6377 stream_.bufferSize, streamTime, status, info->userData );
\r
// Callback return 2 = abort (drain immediately); 1 = stop after draining.
6378 if ( cbReturnValue == 2 ) {
\r
6379 stream_.state = STREAM_STOPPING;
\r
6380 handle->drainCounter = 2;
\r
6384 else if ( cbReturnValue == 1 ) {
\r
6385 handle->drainCounter = 1;
\r
6386 handle->internalDrain = true;
\r
6391 DWORD currentWritePointer, safeWritePointer;
\r
6392 DWORD currentReadPointer, safeReadPointer;
\r
6393 UINT nextWritePointer;
\r
6395 LPVOID buffer1 = NULL;
\r
6396 LPVOID buffer2 = NULL;
\r
6397 DWORD bufferSize1 = 0;
\r
6398 DWORD bufferSize2 = 0;
\r
6403 MUTEX_LOCK( &stream_.mutex );
\r
6404 if ( stream_.state == STREAM_STOPPED ) {
\r
6405 MUTEX_UNLOCK( &stream_.mutex );
\r
6409 if ( buffersRolling == false ) {
\r
6410 if ( stream_.mode == DUPLEX ) {
\r
6411 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6413 // It takes a while for the devices to get rolling. As a result,
\r
6414 // there's no guarantee that the capture and write device pointers
\r
6415 // will move in lockstep. Wait here for both devices to start
\r
6416 // rolling, and then set our buffer pointers accordingly.
\r
6417 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6418 // bytes later than the write buffer.
\r
6420 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6421 // take place between the two GetCurrentPosition calls... but I'm
\r
6422 // really not sure how to solve the problem. Temporarily boost to
\r
6423 // Realtime priority, maybe; but I'm not sure what priority the
\r
6424 // DirectSound service threads run at. We *should* be roughly
\r
6425 // within a ms or so of correct.
\r
6427 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6428 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6430 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6432 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6433 if ( FAILED( result ) ) {
\r
6434 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6435 errorText_ = errorStream_.str();
\r
6436 MUTEX_UNLOCK( &stream_.mutex );
\r
6437 error( RtAudioError::SYSTEM_ERROR );
\r
6440 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6441 if ( FAILED( result ) ) {
\r
6442 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6443 errorText_ = errorStream_.str();
\r
6444 MUTEX_UNLOCK( &stream_.mutex );
\r
6445 error( RtAudioError::SYSTEM_ERROR );
\r
6449 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6450 if ( FAILED( result ) ) {
\r
6451 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6452 errorText_ = errorStream_.str();
\r
6453 MUTEX_UNLOCK( &stream_.mutex );
\r
6454 error( RtAudioError::SYSTEM_ERROR );
\r
6457 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6458 if ( FAILED( result ) ) {
\r
6459 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6460 errorText_ = errorStream_.str();
\r
6461 MUTEX_UNLOCK( &stream_.mutex );
\r
6462 error( RtAudioError::SYSTEM_ERROR );
\r
// Spin until both cursors have advanced from their startup positions.
6465 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6469 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6471 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6472 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6473 handle->bufferPointer[1] = safeReadPointer;
\r
6475 else if ( stream_.mode == OUTPUT ) {
\r
6477 // Set the proper nextWritePosition after initial startup.
\r
6478 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
// NOTE(review): "¤tWritePointer" below is mojibake — "&curr" was mangled
// into the currency sign; restore "&currentWritePointer" before compiling.
6479 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6480 if ( FAILED( result ) ) {
\r
6481 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6482 errorText_ = errorStream_.str();
\r
6483 MUTEX_UNLOCK( &stream_.mutex );
\r
6484 error( RtAudioError::SYSTEM_ERROR );
\r
6487 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6488 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6491 buffersRolling = true;
\r
6494 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6496 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6498 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6499 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6500 bufferBytes *= formatBytes( stream_.userFormat );
\r
6501 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6504 // Setup parameters and do buffer conversion if necessary.
\r
6505 if ( stream_.doConvertBuffer[0] ) {
\r
6506 buffer = stream_.deviceBuffer;
\r
6507 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6508 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6509 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6512 buffer = stream_.userBuffer[0];
\r
6513 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6514 bufferBytes *= formatBytes( stream_.userFormat );
\r
6517 // No byte swapping necessary in DirectSound implementation.
\r
6519 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6520 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6522 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6523 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6525 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6526 nextWritePointer = handle->bufferPointer[0];
\r
6528 DWORD endWrite, leadPointer;
\r
6530 // Find out where the read and "safe write" pointers are.
\r
// NOTE(review): "¤tWritePointer" is mojibake for "&currentWritePointer".
6531 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6532 if ( FAILED( result ) ) {
\r
6533 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6534 errorText_ = errorStream_.str();
\r
6535 MUTEX_UNLOCK( &stream_.mutex );
\r
6536 error( RtAudioError::SYSTEM_ERROR );
\r
6540 // We will copy our output buffer into the region between
\r
6541 // safeWritePointer and leadPointer. If leadPointer is not
\r
6542 // beyond the next endWrite position, wait until it is.
\r
6543 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6544 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6545 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6546 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6547 endWrite = nextWritePointer + bufferBytes;
\r
6549 // Check whether the entire write region is behind the play pointer.
\r
6550 if ( leadPointer >= endWrite ) break;
\r
6552 // If we are here, then we must wait until the leadPointer advances
\r
6553 // beyond the end of our next write region. We use the
\r
6554 // Sleep() function to suspend operation until that happens.
\r
6555 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6556 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6557 if ( millis < 1.0 ) millis = 1.0;
\r
6558 Sleep( (DWORD) millis );
\r
6561 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6562 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6563 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6564 handle->xrun[0] = true;
\r
6565 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6566 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6567 handle->bufferPointer[0] = nextWritePointer;
\r
6568 endWrite = nextWritePointer + bufferBytes;
\r
6571 // Lock free space in the buffer
\r
6572 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6573 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6574 if ( FAILED( result ) ) {
\r
6575 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6576 errorText_ = errorStream_.str();
\r
6577 MUTEX_UNLOCK( &stream_.mutex );
\r
6578 error( RtAudioError::SYSTEM_ERROR );
\r
6582 // Copy our buffer into the DS buffer
\r
6583 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6584 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6586 // Update our buffer offset and unlock sound buffer
\r
// NOTE(review): real upstream bug — Unlock()'s return value is discarded, so
// the FAILED(result) test below re-checks the earlier Lock() result.  Should
// be "result = dsBuffer->Unlock( ... );".
6587 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6588 if ( FAILED( result ) ) {
\r
6589 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6590 errorText_ = errorStream_.str();
\r
6591 MUTEX_UNLOCK( &stream_.mutex );
\r
6592 error( RtAudioError::SYSTEM_ERROR );
\r
6595 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6596 handle->bufferPointer[0] = nextWritePointer;
\r
6599 // Don't bother draining input
\r
6600 if ( handle->drainCounter ) {
\r
6601 handle->drainCounter++;
\r
6605 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6607 // Setup parameters.
\r
6608 if ( stream_.doConvertBuffer[1] ) {
\r
6609 buffer = stream_.deviceBuffer;
\r
6610 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6611 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6614 buffer = stream_.userBuffer[1];
\r
6615 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6616 bufferBytes *= formatBytes( stream_.userFormat );
\r
6619 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6620 long nextReadPointer = handle->bufferPointer[1];
\r
6621 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6623 // Find out where the write and "safe read" pointers are.
\r
// NOTE(review): "¤tReadPointer" is mojibake for "&currentReadPointer".
6624 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6625 if ( FAILED( result ) ) {
\r
6626 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6627 errorText_ = errorStream_.str();
\r
6628 MUTEX_UNLOCK( &stream_.mutex );
\r
6629 error( RtAudioError::SYSTEM_ERROR );
\r
6633 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6634 DWORD endRead = nextReadPointer + bufferBytes;
\r
6636 // Handling depends on whether we are INPUT or DUPLEX.
\r
6637 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6638 // then a wait here will drag the write pointers into the forbidden zone.
\r
6640 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6641 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6642 // practical way to sync up the read and write pointers reliably, given the
\r
6643 // very complex relationship between phase and increment of the read and write
\r
6646 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6647 // provide a pre-roll period of 0.5 seconds in which we return
\r
6648 // zeros from the read buffer while the pointers sync up.
\r
6650 if ( stream_.mode == DUPLEX ) {
\r
6651 if ( safeReadPointer < endRead ) {
\r
6652 if ( duplexPrerollBytes <= 0 ) {
\r
6653 // Pre-roll time over. Be more aggressive.
\r
6654 int adjustment = endRead-safeReadPointer;
\r
6656 handle->xrun[1] = true;
\r
6658 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6659 // and perform fine adjustments later.
\r
6660 // - small adjustments: back off by twice as much.
\r
6661 if ( adjustment >= 2*bufferBytes )
\r
6662 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6664 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6666 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6670 // In pre-roll time. Just do it.
\r
6671 nextReadPointer = safeReadPointer - bufferBytes;
\r
6672 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6674 endRead = nextReadPointer + bufferBytes;
\r
6677 else { // mode == INPUT
\r
6678 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6679 // See comments for playback.
\r
6680 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6681 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6682 if ( millis < 1.0 ) millis = 1.0;
\r
6683 Sleep( (DWORD) millis );
\r
6685 // Wake up and find out where we are now.
\r
// NOTE(review): "¤tReadPointer" is mojibake for "&currentReadPointer".
6686 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6687 if ( FAILED( result ) ) {
\r
6688 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6689 errorText_ = errorStream_.str();
\r
6690 MUTEX_UNLOCK( &stream_.mutex );
\r
6691 error( RtAudioError::SYSTEM_ERROR );
\r
6695 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6699 // Lock free space in the buffer
\r
6700 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6701 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6702 if ( FAILED( result ) ) {
\r
6703 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6704 errorText_ = errorStream_.str();
\r
6705 MUTEX_UNLOCK( &stream_.mutex );
\r
6706 error( RtAudioError::SYSTEM_ERROR );
\r
6710 if ( duplexPrerollBytes <= 0 ) {
\r
6711 // Copy our buffer into the DS buffer
\r
6712 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6713 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6716 memset( buffer, 0, bufferSize1 );
\r
6717 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6718 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6721 // Update our buffer offset and unlock sound buffer
\r
6722 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
// NOTE(review): same upstream bug as playback — Unlock()'s return value is
// discarded; FAILED(result) below re-tests the Lock() result.
6723 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6724 if ( FAILED( result ) ) {
\r
6725 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6726 errorText_ = errorStream_.str();
\r
6727 MUTEX_UNLOCK( &stream_.mutex );
\r
6728 error( RtAudioError::SYSTEM_ERROR );
\r
6731 handle->bufferPointer[1] = nextReadPointer;
\r
6733 // No byte swapping necessary in DirectSound implementation.
\r
6735 // If necessary, convert 8-bit data from unsigned to signed.
\r
6736 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6737 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6739 // Do buffer conversion if necessary.
\r
6740 if ( stream_.doConvertBuffer[1] )
\r
6741 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6745 MUTEX_UNLOCK( &stream_.mutex );
\r
6746 RtApi::tickStreamTime();
\r
6749 // Definitions for utility functions and callbacks
\r
6750 // specific to the DirectSound implementation.
\r
6752 static unsigned __stdcall callbackHandler( void *ptr )
\r
6754 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6755 RtApiDs *object = (RtApiDs *) info->object;
\r
6756 bool* isRunning = &info->isRunning;
\r
6758 while ( *isRunning == true ) {
\r
6759 object->callbackEvent();
\r
6762 _endthreadex( 0 );
\r
6766 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6767 LPCTSTR description,
\r
6768 LPCTSTR /*module*/,
\r
6769 LPVOID lpContext )
\r
6771 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6772 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6775 bool validDevice = false;
\r
6776 if ( probeInfo.isInput == true ) {
\r
6778 LPDIRECTSOUNDCAPTURE object;
\r
6780 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6781 if ( hr != DS_OK ) return TRUE;
\r
6783 caps.dwSize = sizeof(caps);
\r
6784 hr = object->GetCaps( &caps );
\r
6785 if ( hr == DS_OK ) {
\r
6786 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6787 validDevice = true;
\r
6789 object->Release();
\r
6793 LPDIRECTSOUND object;
\r
6794 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6795 if ( hr != DS_OK ) return TRUE;
\r
6797 caps.dwSize = sizeof(caps);
\r
6798 hr = object->GetCaps( &caps );
\r
6799 if ( hr == DS_OK ) {
\r
6800 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6801 validDevice = true;
\r
6803 object->Release();
\r
6806 // If good device, then save its name and guid.
\r
6807 std::string name = convertCharPointerToStdString( description );
\r
6808 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6809 if ( lpguid == NULL )
\r
6810 name = "Default Device";
\r
6811 if ( validDevice ) {
\r
6812 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6813 if ( dsDevices[i].name == name ) {
\r
6814 dsDevices[i].found = true;
\r
6815 if ( probeInfo.isInput ) {
\r
6816 dsDevices[i].id[1] = lpguid;
\r
6817 dsDevices[i].validId[1] = true;
\r
6820 dsDevices[i].id[0] = lpguid;
\r
6821 dsDevices[i].validId[0] = true;
\r
6828 device.name = name;
\r
6829 device.found = true;
\r
6830 if ( probeInfo.isInput ) {
\r
6831 device.id[1] = lpguid;
\r
6832 device.validId[1] = true;
\r
6835 device.id[0] = lpguid;
\r
6836 device.validId[0] = true;
\r
6838 dsDevices.push_back( device );
\r
6844 static const char* getErrorString( int code )
\r
6848 case DSERR_ALLOCATED:
\r
6849 return "Already allocated";
\r
6851 case DSERR_CONTROLUNAVAIL:
\r
6852 return "Control unavailable";
\r
6854 case DSERR_INVALIDPARAM:
\r
6855 return "Invalid parameter";
\r
6857 case DSERR_INVALIDCALL:
\r
6858 return "Invalid call";
\r
6860 case DSERR_GENERIC:
\r
6861 return "Generic error";
\r
6863 case DSERR_PRIOLEVELNEEDED:
\r
6864 return "Priority level needed";
\r
6866 case DSERR_OUTOFMEMORY:
\r
6867 return "Out of memory";
\r
6869 case DSERR_BADFORMAT:
\r
6870 return "The sample rate or the channel format is not supported";
\r
6872 case DSERR_UNSUPPORTED:
\r
6873 return "Not supported";
\r
6875 case DSERR_NODRIVER:
\r
6876 return "No driver";
\r
6878 case DSERR_ALREADYINITIALIZED:
\r
6879 return "Already initialized";
\r
6881 case DSERR_NOAGGREGATION:
\r
6882 return "No aggregation";
\r
6884 case DSERR_BUFFERLOST:
\r
6885 return "Buffer lost";
\r
6887 case DSERR_OTHERAPPHASPRIO:
\r
6888 return "Another application already has priority";
\r
6890 case DSERR_UNINITIALIZED:
\r
6891 return "Uninitialized";
\r
6894 return "DirectSound unknown error";
\r
6897 //******************** End of __WINDOWS_DS__ *********************//
\r
6901 #if defined(__LINUX_ALSA__)
\r
6903 #include <alsa/asoundlib.h>
\r
6904 #include <unistd.h>
\r
6906 // A structure to hold various information related to the ALSA API
\r
6907 // implementation.
\r
6908 struct AlsaHandle {
\r
6909 snd_pcm_t *handles[2];
\r
6910 bool synchronized;
\r
6912 pthread_cond_t runnable_cv;
\r
6916 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6919 static void *alsaCallbackHandler( void * ptr );
\r
6921 RtApiAlsa :: RtApiAlsa()
\r
6923 // Nothing to do here.
\r
6926 RtApiAlsa :: ~RtApiAlsa()
\r
6928 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6931 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6933 unsigned nDevices = 0;
\r
6934 int result, subdevice, card;
\r
6936 snd_ctl_t *handle;
\r
6938 // Count cards and devices
\r
6940 snd_card_next( &card );
\r
6941 while ( card >= 0 ) {
\r
6942 sprintf( name, "hw:%d", card );
\r
6943 result = snd_ctl_open( &handle, name, 0 );
\r
6944 if ( result < 0 ) {
\r
6945 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6946 errorText_ = errorStream_.str();
\r
6947 error( RtAudioError::WARNING );
\r
6952 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6953 if ( result < 0 ) {
\r
6954 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6955 errorText_ = errorStream_.str();
\r
6956 error( RtAudioError::WARNING );
\r
6959 if ( subdevice < 0 )
\r
6964 snd_ctl_close( handle );
\r
6965 snd_card_next( &card );
\r
6968 result = snd_ctl_open( &handle, "default", 0 );
\r
6969 if (result == 0) {
\r
6971 snd_ctl_close( handle );
\r
// RtApiAlsa::getDeviceInfo(): probe one ALSA device (by RtAudio index) and
// fill a DeviceInfo: output/input/duplex channel counts, supported sample
// rates, preferred rate, native formats, and a readable name.  Non-fatal
// probe failures are reported as WARNINGs and return the partial info.
// NOTE(review): lossy extraction — original line numbers are baked into
// every line, CR lines are interleaved, and gaps in the numbering show
// missing lines (braces, "char name[64];"/"char *cardname;" declarations,
// the "captureProbe:" and "probeParameters:" goto labels, "return info;").
// Restore from the canonical RtAudio sources before compiling.
6977 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6979 RtAudio::DeviceInfo info;
\r
6980 info.probed = false;
\r
6982 unsigned nDevices = 0;
\r
6983 int result, subdevice, card;
\r
6985 snd_ctl_t *chandle;
\r
6987 // Count cards and devices
\r
6990 snd_card_next( &card );
\r
6991 while ( card >= 0 ) {
\r
// "name" is a char buffer declared in a line the extraction dropped.
6992 sprintf( name, "hw:%d", card );
\r
6993 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6994 if ( result < 0 ) {
\r
6995 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6996 errorText_ = errorStream_.str();
\r
6997 error( RtAudioError::WARNING );
\r
7002 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7003 if ( result < 0 ) {
\r
7004 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7005 errorText_ = errorStream_.str();
\r
7006 error( RtAudioError::WARNING );
\r
7009 if ( subdevice < 0 ) break;
\r
// Stop scanning once the running count reaches the requested index.
7010 if ( nDevices == device ) {
\r
7011 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7017 snd_ctl_close( chandle );
\r
7018 snd_card_next( &card );
\r
7021 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7022 if ( result == 0 ) {
\r
7023 if ( nDevices == device ) {
\r
7024 strcpy( name, "default" );
\r
7030 if ( nDevices == 0 ) {
\r
7031 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
7032 error( RtAudioError::INVALID_USE );
\r
7036 if ( device >= nDevices ) {
\r
7037 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
7038 error( RtAudioError::INVALID_USE );
\r
7044 // If a stream is already open, we cannot probe the stream devices.
\r
7045 // Thus, use the saved results.
\r
7046 if ( stream_.state != STREAM_CLOSED &&
\r
7047 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
7048 snd_ctl_close( chandle );
\r
7049 if ( device >= devices_.size() ) {
\r
7050 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
7051 error( RtAudioError::WARNING );
\r
7054 return devices_[ device ];
\r
7057 int openMode = SND_PCM_ASYNC;
\r
7058 snd_pcm_stream_t stream;
\r
7059 snd_pcm_info_t *pcminfo;
\r
7060 snd_pcm_info_alloca( &pcminfo );
\r
7061 snd_pcm_t *phandle;
\r
7062 snd_pcm_hw_params_t *params;
\r
// NOTE(review): "¶ms" below is mojibake — "&para" was mangled into the
// pilcrow sign; restore "&params" before compiling.
7063 snd_pcm_hw_params_alloca( ¶ms );
\r
7065 // First try for playback unless default device (which has subdev -1)
\r
7066 stream = SND_PCM_STREAM_PLAYBACK;
\r
7067 snd_pcm_info_set_stream( pcminfo, stream );
\r
7068 if ( subdevice != -1 ) {
\r
7069 snd_pcm_info_set_device( pcminfo, subdevice );
\r
7070 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
7072 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7073 if ( result < 0 ) {
\r
7074 // Device probably doesn't support playback.
\r
// Target label "captureProbe:" sits in a line the extraction dropped.
7075 goto captureProbe;
\r
7079 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
7080 if ( result < 0 ) {
\r
7081 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7082 errorText_ = errorStream_.str();
\r
7083 error( RtAudioError::WARNING );
\r
7084 goto captureProbe;
\r
7087 // The device is open ... fill the parameter structure.
\r
7088 result = snd_pcm_hw_params_any( phandle, params );
\r
7089 if ( result < 0 ) {
\r
7090 snd_pcm_close( phandle );
\r
7091 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7092 errorText_ = errorStream_.str();
\r
7093 error( RtAudioError::WARNING );
\r
7094 goto captureProbe;
\r
7097 // Get output channel information.
\r
7098 unsigned int value;
\r
7099 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7100 if ( result < 0 ) {
\r
7101 snd_pcm_close( phandle );
\r
7102 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7103 errorText_ = errorStream_.str();
\r
7104 error( RtAudioError::WARNING );
\r
7105 goto captureProbe;
\r
7107 info.outputChannels = value;
\r
7108 snd_pcm_close( phandle );
\r
7111 stream = SND_PCM_STREAM_CAPTURE;
\r
7112 snd_pcm_info_set_stream( pcminfo, stream );
\r
7114 // Now try for capture unless default device (with subdev = -1)
\r
7115 if ( subdevice != -1 ) {
\r
7116 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7117 snd_ctl_close( chandle );
\r
7118 if ( result < 0 ) {
\r
7119 // Device probably doesn't support capture.
\r
7120 if ( info.outputChannels == 0 ) return info;
\r
// Target label "probeParameters:" sits in a line the extraction dropped.
7121 goto probeParameters;
\r
7125 snd_ctl_close( chandle );
\r
7127 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7128 if ( result < 0 ) {
\r
7129 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7130 errorText_ = errorStream_.str();
\r
7131 error( RtAudioError::WARNING );
\r
7132 if ( info.outputChannels == 0 ) return info;
\r
7133 goto probeParameters;
\r
7136 // The device is open ... fill the parameter structure.
\r
7137 result = snd_pcm_hw_params_any( phandle, params );
\r
7138 if ( result < 0 ) {
\r
7139 snd_pcm_close( phandle );
\r
7140 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7141 errorText_ = errorStream_.str();
\r
7142 error( RtAudioError::WARNING );
\r
7143 if ( info.outputChannels == 0 ) return info;
\r
7144 goto probeParameters;
\r
7147 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7148 if ( result < 0 ) {
\r
7149 snd_pcm_close( phandle );
\r
7150 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7151 errorText_ = errorStream_.str();
\r
7152 error( RtAudioError::WARNING );
\r
7153 if ( info.outputChannels == 0 ) return info;
\r
7154 goto probeParameters;
\r
7156 info.inputChannels = value;
\r
7157 snd_pcm_close( phandle );
\r
7159 // If device opens for both playback and capture, we determine the channels.
\r
7160 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7161 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7163 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7164 if ( device == 0 && info.outputChannels > 0 )
\r
7165 info.isDefaultOutput = true;
\r
7166 if ( device == 0 && info.inputChannels > 0 )
\r
7167 info.isDefaultInput = true;
\r
7170 // At this point, we just need to figure out the supported data
\r
7171 // formats and sample rates. We'll proceed by opening the device in
\r
7172 // the direction with the maximum number of channels, or playback if
\r
7173 // they are equal. This might limit our sample rate options, but so
\r
7176 if ( info.outputChannels >= info.inputChannels )
\r
7177 stream = SND_PCM_STREAM_PLAYBACK;
\r
7179 stream = SND_PCM_STREAM_CAPTURE;
\r
7180 snd_pcm_info_set_stream( pcminfo, stream );
\r
7182 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7183 if ( result < 0 ) {
\r
7184 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7185 errorText_ = errorStream_.str();
\r
7186 error( RtAudioError::WARNING );
\r
7190 // The device is open ... fill the parameter structure.
\r
7191 result = snd_pcm_hw_params_any( phandle, params );
\r
7192 if ( result < 0 ) {
\r
7193 snd_pcm_close( phandle );
\r
7194 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7195 errorText_ = errorStream_.str();
\r
7196 error( RtAudioError::WARNING );
\r
7200 // Test our discrete set of sample rate values.
\r
7201 info.sampleRates.clear();
\r
7202 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7203 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7204 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
// Prefer the highest supported rate not exceeding 48 kHz.
7206 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7207 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7210 if ( info.sampleRates.size() == 0 ) {
\r
7211 snd_pcm_close( phandle );
\r
7212 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7213 errorText_ = errorStream_.str();
\r
7214 error( RtAudioError::WARNING );
\r
7218 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7219 snd_pcm_format_t format;
\r
7220 info.nativeFormats = 0;
\r
7221 format = SND_PCM_FORMAT_S8;
\r
7222 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7223 info.nativeFormats |= RTAUDIO_SINT8;
\r
7224 format = SND_PCM_FORMAT_S16;
\r
7225 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7226 info.nativeFormats |= RTAUDIO_SINT16;
\r
7227 format = SND_PCM_FORMAT_S24;
\r
7228 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7229 info.nativeFormats |= RTAUDIO_SINT24;
\r
7230 format = SND_PCM_FORMAT_S32;
\r
7231 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7232 info.nativeFormats |= RTAUDIO_SINT32;
\r
7233 format = SND_PCM_FORMAT_FLOAT;
\r
7234 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7235 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7236 format = SND_PCM_FORMAT_FLOAT64;
\r
7237 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7238 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7240 // Check that we have at least one supported format
\r
7241 if ( info.nativeFormats == 0 ) {
\r
7242 snd_pcm_close( phandle );
\r
7243 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7244 errorText_ = errorStream_.str();
\r
7245 error( RtAudioError::WARNING );
\r
7249 // Get the device name
\r
// "cardname" (char*) is declared in a line the extraction dropped.
7251 result = snd_card_get_name( card, &cardname );
\r
7252 if ( result >= 0 ) {
\r
7253 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7258 // That's all ... close the device and return
\r
7259 snd_pcm_close( phandle );
\r
7260 info.probed = true;
\r
7264 void RtApiAlsa :: saveDeviceInfo( void )
\r
7268 unsigned int nDevices = getDeviceCount();
\r
7269 devices_.resize( nDevices );
\r
7270 for ( unsigned int i=0; i<nDevices; i++ )
\r
7271 devices_[i] = getDeviceInfo( i );
\r
7274 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7275 unsigned int firstChannel, unsigned int sampleRate,
\r
7276 RtAudioFormat format, unsigned int *bufferSize,
\r
7277 RtAudio::StreamOptions *options )
\r
7280 #if defined(__RTAUDIO_DEBUG__)
\r
7281 snd_output_t *out;
\r
7282 snd_output_stdio_attach(&out, stderr, 0);
\r
7285 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7287 unsigned nDevices = 0;
\r
7288 int result, subdevice, card;
\r
7290 snd_ctl_t *chandle;
\r
7292 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7293 snprintf(name, sizeof(name), "%s", "default");
\r
7295 // Count cards and devices
\r
7297 snd_card_next( &card );
\r
7298 while ( card >= 0 ) {
\r
7299 sprintf( name, "hw:%d", card );
\r
7300 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7301 if ( result < 0 ) {
\r
7302 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7303 errorText_ = errorStream_.str();
\r
7308 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7309 if ( result < 0 ) break;
\r
7310 if ( subdevice < 0 ) break;
\r
7311 if ( nDevices == device ) {
\r
7312 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7313 snd_ctl_close( chandle );
\r
7318 snd_ctl_close( chandle );
\r
7319 snd_card_next( &card );
\r
7322 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7323 if ( result == 0 ) {
\r
7324 if ( nDevices == device ) {
\r
7325 strcpy( name, "default" );
\r
7331 if ( nDevices == 0 ) {
\r
7332 // This should not happen because a check is made before this function is called.
\r
7333 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7337 if ( device >= nDevices ) {
\r
7338 // This should not happen because a check is made before this function is called.
\r
7339 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7346 // The getDeviceInfo() function will not work for a device that is
\r
7347 // already open. Thus, we'll probe the system before opening a
\r
7348 // stream and save the results for use by getDeviceInfo().
\r
7349 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7350 this->saveDeviceInfo();
\r
7352 snd_pcm_stream_t stream;
\r
7353 if ( mode == OUTPUT )
\r
7354 stream = SND_PCM_STREAM_PLAYBACK;
\r
7356 stream = SND_PCM_STREAM_CAPTURE;
\r
7358 snd_pcm_t *phandle;
\r
7359 int openMode = SND_PCM_ASYNC;
\r
7360 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7361 if ( result < 0 ) {
\r
7362 if ( mode == OUTPUT )
\r
7363 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7365 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7366 errorText_ = errorStream_.str();
\r
7370 // Fill the parameter structure.
\r
7371 snd_pcm_hw_params_t *hw_params;
\r
7372 snd_pcm_hw_params_alloca( &hw_params );
\r
7373 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7374 if ( result < 0 ) {
\r
7375 snd_pcm_close( phandle );
\r
7376 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7377 errorText_ = errorStream_.str();
\r
7381 #if defined(__RTAUDIO_DEBUG__)
\r
7382 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7383 snd_pcm_hw_params_dump( hw_params, out );
\r
7386 // Set access ... check user preference.
\r
7387 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7388 stream_.userInterleaved = false;
\r
7389 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7390 if ( result < 0 ) {
\r
7391 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7392 stream_.deviceInterleaved[mode] = true;
\r
7395 stream_.deviceInterleaved[mode] = false;
\r
7398 stream_.userInterleaved = true;
\r
7399 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7400 if ( result < 0 ) {
\r
7401 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7402 stream_.deviceInterleaved[mode] = false;
\r
7405 stream_.deviceInterleaved[mode] = true;
\r
7408 if ( result < 0 ) {
\r
7409 snd_pcm_close( phandle );
\r
7410 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7411 errorText_ = errorStream_.str();
\r
7415 // Determine how to set the device format.
\r
7416 stream_.userFormat = format;
\r
7417 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7419 if ( format == RTAUDIO_SINT8 )
\r
7420 deviceFormat = SND_PCM_FORMAT_S8;
\r
7421 else if ( format == RTAUDIO_SINT16 )
\r
7422 deviceFormat = SND_PCM_FORMAT_S16;
\r
7423 else if ( format == RTAUDIO_SINT24 )
\r
7424 deviceFormat = SND_PCM_FORMAT_S24;
\r
7425 else if ( format == RTAUDIO_SINT32 )
\r
7426 deviceFormat = SND_PCM_FORMAT_S32;
\r
7427 else if ( format == RTAUDIO_FLOAT32 )
\r
7428 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7429 else if ( format == RTAUDIO_FLOAT64 )
\r
7430 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7432 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7433 stream_.deviceFormat[mode] = format;
\r
7437 // The user requested format is not natively supported by the device.
\r
7438 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7439 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7440 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7444 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7445 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7446 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7450 deviceFormat = SND_PCM_FORMAT_S32;
\r
7451 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7452 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7456 deviceFormat = SND_PCM_FORMAT_S24;
\r
7457 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7458 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7462 deviceFormat = SND_PCM_FORMAT_S16;
\r
7463 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7464 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7468 deviceFormat = SND_PCM_FORMAT_S8;
\r
7469 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7470 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7474 // If we get here, no supported format was found.
\r
7475 snd_pcm_close( phandle );
\r
7476 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7477 errorText_ = errorStream_.str();
\r
7481 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7482 if ( result < 0 ) {
\r
7483 snd_pcm_close( phandle );
\r
7484 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7485 errorText_ = errorStream_.str();
\r
7489 // Determine whether byte-swaping is necessary.
\r
7490 stream_.doByteSwap[mode] = false;
\r
7491 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7492 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7493 if ( result == 0 )
\r
7494 stream_.doByteSwap[mode] = true;
\r
7495 else if (result < 0) {
\r
7496 snd_pcm_close( phandle );
\r
7497 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7498 errorText_ = errorStream_.str();
\r
7503 // Set the sample rate.
\r
7504 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7505 if ( result < 0 ) {
\r
7506 snd_pcm_close( phandle );
\r
7507 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7508 errorText_ = errorStream_.str();
\r
7512 // Determine the number of channels for this device. We support a possible
\r
7513 // minimum device channel number > than the value requested by the user.
\r
7514 stream_.nUserChannels[mode] = channels;
\r
7515 unsigned int value;
\r
7516 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7517 unsigned int deviceChannels = value;
\r
7518 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7519 snd_pcm_close( phandle );
\r
7520 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7521 errorText_ = errorStream_.str();
\r
7525 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7526 if ( result < 0 ) {
\r
7527 snd_pcm_close( phandle );
\r
7528 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7529 errorText_ = errorStream_.str();
\r
7532 deviceChannels = value;
\r
7533 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7534 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7536 // Set the device channels.
\r
7537 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7538 if ( result < 0 ) {
\r
7539 snd_pcm_close( phandle );
\r
7540 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7541 errorText_ = errorStream_.str();
\r
7545 // Set the buffer (or period) size.
\r
7547 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7548 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7549 if ( result < 0 ) {
\r
7550 snd_pcm_close( phandle );
\r
7551 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7552 errorText_ = errorStream_.str();
\r
7555 *bufferSize = periodSize;
\r
7557 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7558 unsigned int periods = 0;
\r
7559 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7560 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7561 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7562 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7563 if ( result < 0 ) {
\r
7564 snd_pcm_close( phandle );
\r
7565 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7566 errorText_ = errorStream_.str();
\r
7570 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7571 // MUST be the same in both directions!
\r
7572 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7573 snd_pcm_close( phandle );
\r
7574 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7575 errorText_ = errorStream_.str();
\r
7579 stream_.bufferSize = *bufferSize;
\r
7581 // Install the hardware configuration
\r
7582 result = snd_pcm_hw_params( phandle, hw_params );
\r
7583 if ( result < 0 ) {
\r
7584 snd_pcm_close( phandle );
\r
7585 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7586 errorText_ = errorStream_.str();
\r
7590 #if defined(__RTAUDIO_DEBUG__)
\r
7591 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7592 snd_pcm_hw_params_dump( hw_params, out );
\r
7595 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7596 snd_pcm_sw_params_t *sw_params = NULL;
\r
7597 snd_pcm_sw_params_alloca( &sw_params );
\r
7598 snd_pcm_sw_params_current( phandle, sw_params );
\r
7599 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7600 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7601 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7603 // The following two settings were suggested by Theo Veenker
\r
7604 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7605 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7607 // here are two options for a fix
\r
7608 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7609 snd_pcm_uframes_t val;
\r
7610 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7611 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7613 result = snd_pcm_sw_params( phandle, sw_params );
\r
7614 if ( result < 0 ) {
\r
7615 snd_pcm_close( phandle );
\r
7616 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7617 errorText_ = errorStream_.str();
\r
7621 #if defined(__RTAUDIO_DEBUG__)
\r
7622 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7623 snd_pcm_sw_params_dump( sw_params, out );
\r
7626 // Set flags for buffer conversion
\r
7627 stream_.doConvertBuffer[mode] = false;
\r
7628 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7629 stream_.doConvertBuffer[mode] = true;
\r
7630 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7631 stream_.doConvertBuffer[mode] = true;
\r
7632 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7633 stream_.nUserChannels[mode] > 1 )
\r
7634 stream_.doConvertBuffer[mode] = true;
\r
7636 // Allocate the ApiHandle if necessary and then save.
\r
7637 AlsaHandle *apiInfo = 0;
\r
7638 if ( stream_.apiHandle == 0 ) {
\r
7640 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7642 catch ( std::bad_alloc& ) {
\r
7643 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7647 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7648 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7652 stream_.apiHandle = (void *) apiInfo;
\r
7653 apiInfo->handles[0] = 0;
\r
7654 apiInfo->handles[1] = 0;
\r
7657 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7659 apiInfo->handles[mode] = phandle;
\r
7662 // Allocate necessary internal buffers.
\r
7663 unsigned long bufferBytes;
\r
7664 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7665 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7666 if ( stream_.userBuffer[mode] == NULL ) {
\r
7667 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7671 if ( stream_.doConvertBuffer[mode] ) {
\r
7673 bool makeBuffer = true;
\r
7674 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7675 if ( mode == INPUT ) {
\r
7676 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7677 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7678 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7682 if ( makeBuffer ) {
\r
7683 bufferBytes *= *bufferSize;
\r
7684 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7685 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7686 if ( stream_.deviceBuffer == NULL ) {
\r
7687 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7693 stream_.sampleRate = sampleRate;
\r
7694 stream_.nBuffers = periods;
\r
7695 stream_.device[mode] = device;
\r
7696 stream_.state = STREAM_STOPPED;
\r
7698 // Setup the buffer conversion information structure.
\r
7699 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7701 // Setup thread if necessary.
\r
7702 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7703 // We had already set up an output stream.
\r
7704 stream_.mode = DUPLEX;
\r
7705 // Link the streams if possible.
\r
7706 apiInfo->synchronized = false;
\r
7707 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7708 apiInfo->synchronized = true;
\r
7710 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7711 error( RtAudioError::WARNING );
\r
7715 stream_.mode = mode;
\r
7717 // Setup callback thread.
\r
7718 stream_.callbackInfo.object = (void *) this;
\r
7720 // Set the thread attributes for joinable and realtime scheduling
\r
7721 // priority (optional). The higher priority will only take affect
\r
7722 // if the program is run as root or suid. Note, under Linux
\r
7723 // processes with CAP_SYS_NICE privilege, a user can change
\r
7724 // scheduling policy and priority (thus need not be root). See
\r
7725 // POSIX "capabilities".
\r
7726 pthread_attr_t attr;
\r
7727 pthread_attr_init( &attr );
\r
7728 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7730 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7731 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7732 // We previously attempted to increase the audio callback priority
\r
7733 // to SCHED_RR here via the attributes. However, while no errors
\r
7734 // were reported in doing so, it did not work. So, now this is
\r
7735 // done in the alsaCallbackHandler function.
\r
7736 stream_.callbackInfo.doRealtime = true;
\r
7737 int priority = options->priority;
\r
7738 int min = sched_get_priority_min( SCHED_RR );
\r
7739 int max = sched_get_priority_max( SCHED_RR );
\r
7740 if ( priority < min ) priority = min;
\r
7741 else if ( priority > max ) priority = max;
\r
7742 stream_.callbackInfo.priority = priority;
\r
7746 stream_.callbackInfo.isRunning = true;
\r
7747 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7748 pthread_attr_destroy( &attr );
\r
7750 stream_.callbackInfo.isRunning = false;
\r
7751 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7760 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7761 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7762 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7764 stream_.apiHandle = 0;
\r
7767 if ( phandle) snd_pcm_close( phandle );
\r
7769 for ( int i=0; i<2; i++ ) {
\r
7770 if ( stream_.userBuffer[i] ) {
\r
7771 free( stream_.userBuffer[i] );
\r
7772 stream_.userBuffer[i] = 0;
\r
7776 if ( stream_.deviceBuffer ) {
\r
7777 free( stream_.deviceBuffer );
\r
7778 stream_.deviceBuffer = 0;
\r
7781 stream_.state = STREAM_CLOSED;
\r
7785 void RtApiAlsa :: closeStream()
\r
7787 if ( stream_.state == STREAM_CLOSED ) {
\r
7788 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7789 error( RtAudioError::WARNING );
\r
7793 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7794 stream_.callbackInfo.isRunning = false;
\r
7795 MUTEX_LOCK( &stream_.mutex );
\r
7796 if ( stream_.state == STREAM_STOPPED ) {
\r
7797 apiInfo->runnable = true;
\r
7798 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7800 MUTEX_UNLOCK( &stream_.mutex );
\r
7801 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7803 if ( stream_.state == STREAM_RUNNING ) {
\r
7804 stream_.state = STREAM_STOPPED;
\r
7805 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7806 snd_pcm_drop( apiInfo->handles[0] );
\r
7807 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7808 snd_pcm_drop( apiInfo->handles[1] );
\r
7812 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7813 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7814 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7816 stream_.apiHandle = 0;
\r
7819 for ( int i=0; i<2; i++ ) {
\r
7820 if ( stream_.userBuffer[i] ) {
\r
7821 free( stream_.userBuffer[i] );
\r
7822 stream_.userBuffer[i] = 0;
\r
7826 if ( stream_.deviceBuffer ) {
\r
7827 free( stream_.deviceBuffer );
\r
7828 stream_.deviceBuffer = 0;
\r
7831 stream_.mode = UNINITIALIZED;
\r
7832 stream_.state = STREAM_CLOSED;
\r
7835 void RtApiAlsa :: startStream()
\r
7837 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7840 if ( stream_.state == STREAM_RUNNING ) {
\r
7841 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7842 error( RtAudioError::WARNING );
\r
7846 MUTEX_LOCK( &stream_.mutex );
\r
7849 snd_pcm_state_t state;
\r
7850 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7851 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7852 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7853 state = snd_pcm_state( handle[0] );
\r
7854 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7855 result = snd_pcm_prepare( handle[0] );
\r
7856 if ( result < 0 ) {
\r
7857 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7858 errorText_ = errorStream_.str();
\r
7864 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7865 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7866 state = snd_pcm_state( handle[1] );
\r
7867 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7868 result = snd_pcm_prepare( handle[1] );
\r
7869 if ( result < 0 ) {
\r
7870 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7871 errorText_ = errorStream_.str();
\r
7877 stream_.state = STREAM_RUNNING;
\r
7880 apiInfo->runnable = true;
\r
7881 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7882 MUTEX_UNLOCK( &stream_.mutex );
\r
7884 if ( result >= 0 ) return;
\r
7885 error( RtAudioError::SYSTEM_ERROR );
\r
7888 void RtApiAlsa :: stopStream()
\r
7891 if ( stream_.state == STREAM_STOPPED ) {
\r
7892 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7893 error( RtAudioError::WARNING );
\r
7897 stream_.state = STREAM_STOPPED;
\r
7898 MUTEX_LOCK( &stream_.mutex );
\r
7901 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7902 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7903 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7904 if ( apiInfo->synchronized )
\r
7905 result = snd_pcm_drop( handle[0] );
\r
7907 result = snd_pcm_drain( handle[0] );
\r
7908 if ( result < 0 ) {
\r
7909 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7910 errorText_ = errorStream_.str();
\r
7915 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7916 result = snd_pcm_drop( handle[1] );
\r
7917 if ( result < 0 ) {
\r
7918 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7919 errorText_ = errorStream_.str();
\r
7925 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7926 MUTEX_UNLOCK( &stream_.mutex );
\r
7928 if ( result >= 0 ) return;
\r
7929 error( RtAudioError::SYSTEM_ERROR );
\r
7932 void RtApiAlsa :: abortStream()
\r
7935 if ( stream_.state == STREAM_STOPPED ) {
\r
7936 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7937 error( RtAudioError::WARNING );
\r
7941 stream_.state = STREAM_STOPPED;
\r
7942 MUTEX_LOCK( &stream_.mutex );
\r
7945 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7946 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7947 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7948 result = snd_pcm_drop( handle[0] );
\r
7949 if ( result < 0 ) {
\r
7950 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7951 errorText_ = errorStream_.str();
\r
7956 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7957 result = snd_pcm_drop( handle[1] );
\r
7958 if ( result < 0 ) {
\r
7959 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7960 errorText_ = errorStream_.str();
\r
7966 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7967 MUTEX_UNLOCK( &stream_.mutex );
\r
7969 if ( result >= 0 ) return;
\r
7970 error( RtAudioError::SYSTEM_ERROR );
\r
7973 void RtApiAlsa :: callbackEvent()
\r
7975 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7976 if ( stream_.state == STREAM_STOPPED ) {
\r
7977 MUTEX_LOCK( &stream_.mutex );
\r
7978 while ( !apiInfo->runnable )
\r
7979 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7981 if ( stream_.state != STREAM_RUNNING ) {
\r
7982 MUTEX_UNLOCK( &stream_.mutex );
\r
7985 MUTEX_UNLOCK( &stream_.mutex );
\r
7988 if ( stream_.state == STREAM_CLOSED ) {
\r
7989 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7990 error( RtAudioError::WARNING );
\r
7994 int doStopStream = 0;
\r
7995 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7996 double streamTime = getStreamTime();
\r
7997 RtAudioStreamStatus status = 0;
\r
7998 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7999 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
8000 apiInfo->xrun[0] = false;
\r
8002 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
8003 status |= RTAUDIO_INPUT_OVERFLOW;
\r
8004 apiInfo->xrun[1] = false;
\r
8006 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
8007 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
8009 if ( doStopStream == 2 ) {
\r
8014 MUTEX_LOCK( &stream_.mutex );
\r
8016 // The state might change while waiting on a mutex.
\r
8017 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
8022 snd_pcm_t **handle;
\r
8023 snd_pcm_sframes_t frames;
\r
8024 RtAudioFormat format;
\r
8025 handle = (snd_pcm_t **) apiInfo->handles;
\r
8027 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
8029 // Setup parameters.
\r
8030 if ( stream_.doConvertBuffer[1] ) {
\r
8031 buffer = stream_.deviceBuffer;
\r
8032 channels = stream_.nDeviceChannels[1];
\r
8033 format = stream_.deviceFormat[1];
\r
8036 buffer = stream_.userBuffer[1];
\r
8037 channels = stream_.nUserChannels[1];
\r
8038 format = stream_.userFormat;
\r
8041 // Read samples from device in interleaved/non-interleaved format.
\r
8042 if ( stream_.deviceInterleaved[1] )
\r
8043 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
8045 void *bufs[channels];
\r
8046 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8047 for ( int i=0; i<channels; i++ )
\r
8048 bufs[i] = (void *) (buffer + (i * offset));
\r
8049 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
8052 if ( result < (int) stream_.bufferSize ) {
\r
8053 // Either an error or overrun occured.
\r
8054 if ( result == -EPIPE ) {
\r
8055 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
8056 if ( state == SND_PCM_STATE_XRUN ) {
\r
8057 apiInfo->xrun[1] = true;
\r
8058 result = snd_pcm_prepare( handle[1] );
\r
8059 if ( result < 0 ) {
\r
8060 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
8061 errorText_ = errorStream_.str();
\r
8065 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8066 errorText_ = errorStream_.str();
\r
8070 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
8071 errorText_ = errorStream_.str();
\r
8073 error( RtAudioError::WARNING );
\r
8077 // Do byte swapping if necessary.
\r
8078 if ( stream_.doByteSwap[1] )
\r
8079 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
8081 // Do buffer conversion if necessary.
\r
8082 if ( stream_.doConvertBuffer[1] )
\r
8083 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
8085 // Check stream latency
\r
8086 result = snd_pcm_delay( handle[1], &frames );
\r
8087 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
8092 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8094 // Setup parameters and do buffer conversion if necessary.
\r
8095 if ( stream_.doConvertBuffer[0] ) {
\r
8096 buffer = stream_.deviceBuffer;
\r
8097 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8098 channels = stream_.nDeviceChannels[0];
\r
8099 format = stream_.deviceFormat[0];
\r
8102 buffer = stream_.userBuffer[0];
\r
8103 channels = stream_.nUserChannels[0];
\r
8104 format = stream_.userFormat;
\r
8107 // Do byte swapping if necessary.
\r
8108 if ( stream_.doByteSwap[0] )
\r
8109 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8111 // Write samples to device in interleaved/non-interleaved format.
\r
8112 if ( stream_.deviceInterleaved[0] )
\r
8113 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8115 void *bufs[channels];
\r
8116 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8117 for ( int i=0; i<channels; i++ )
\r
8118 bufs[i] = (void *) (buffer + (i * offset));
\r
8119 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8122 if ( result < (int) stream_.bufferSize ) {
\r
8123 // Either an error or underrun occured.
\r
8124 if ( result == -EPIPE ) {
\r
8125 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8126 if ( state == SND_PCM_STATE_XRUN ) {
\r
8127 apiInfo->xrun[0] = true;
\r
8128 result = snd_pcm_prepare( handle[0] );
\r
8129 if ( result < 0 ) {
\r
8130 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8131 errorText_ = errorStream_.str();
\r
8134 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8137 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8138 errorText_ = errorStream_.str();
\r
8142 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8143 errorText_ = errorStream_.str();
\r
8145 error( RtAudioError::WARNING );
\r
8149 // Check stream latency
\r
8150 result = snd_pcm_delay( handle[0], &frames );
\r
8151 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8155 MUTEX_UNLOCK( &stream_.mutex );
\r
8157 RtApi::tickStreamTime();
\r
8158 if ( doStopStream == 1 ) this->stopStream();
\r
8161 static void *alsaCallbackHandler( void *ptr )
\r
8163 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8164 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8165 bool *isRunning = &info->isRunning;
\r
8167 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8168 if ( info->doRealtime ) {
\r
8169 pthread_t tID = pthread_self(); // ID of this thread
\r
8170 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8171 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8175 while ( *isRunning == true ) {
\r
8176 pthread_testcancel();
\r
8177 object->callbackEvent();
\r
8180 pthread_exit( NULL );
\r
8183 //******************** End of __LINUX_ALSA__ *********************//
\r
8186 #if defined(__LINUX_PULSE__)
\r
8188 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8189 // and Tristan Matthews.
\r
8191 #include <pulse/error.h>
\r
8192 #include <pulse/simple.h>
\r
8195 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8196 44100, 48000, 96000, 0};
\r
8198 struct rtaudio_pa_format_mapping_t {
\r
8199 RtAudioFormat rtaudio_format;
\r
8200 pa_sample_format_t pa_format;
\r
8203 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8204 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8205 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8206 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8207 {0, PA_SAMPLE_INVALID}};
\r
8209 struct PulseAudioHandle {
\r
8210 pa_simple *s_play;
\r
8213 pthread_cond_t runnable_cv;
\r
8215 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8218 RtApiPulse::~RtApiPulse()
\r
8220 if ( stream_.state != STREAM_CLOSED )
\r
8224 unsigned int RtApiPulse::getDeviceCount( void )
\r
8229 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8231 RtAudio::DeviceInfo info;
\r
8232 info.probed = true;
\r
8233 info.name = "PulseAudio";
\r
8234 info.outputChannels = 2;
\r
8235 info.inputChannels = 2;
\r
8236 info.duplexChannels = 2;
\r
8237 info.isDefaultOutput = true;
\r
8238 info.isDefaultInput = true;
\r
8240 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8241 info.sampleRates.push_back( *sr );
\r
8243 info.preferredSampleRate = 48000;
\r
8244 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8249 static void *pulseaudio_callback( void * user )
\r
8251 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8252 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8253 volatile bool *isRunning = &cbi->isRunning;
\r
8255 while ( *isRunning ) {
\r
8256 pthread_testcancel();
\r
8257 context->callbackEvent();
\r
8260 pthread_exit( NULL );
\r
8263 void RtApiPulse::closeStream( void )
\r
8265 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8267 stream_.callbackInfo.isRunning = false;
\r
8269 MUTEX_LOCK( &stream_.mutex );
\r
8270 if ( stream_.state == STREAM_STOPPED ) {
\r
8271 pah->runnable = true;
\r
8272 pthread_cond_signal( &pah->runnable_cv );
\r
8274 MUTEX_UNLOCK( &stream_.mutex );
\r
8276 pthread_join( pah->thread, 0 );
\r
8277 if ( pah->s_play ) {
\r
8278 pa_simple_flush( pah->s_play, NULL );
\r
8279 pa_simple_free( pah->s_play );
\r
8282 pa_simple_free( pah->s_rec );
\r
8284 pthread_cond_destroy( &pah->runnable_cv );
\r
8286 stream_.apiHandle = 0;
\r
8289 if ( stream_.userBuffer[0] ) {
\r
8290 free( stream_.userBuffer[0] );
\r
8291 stream_.userBuffer[0] = 0;
\r
8293 if ( stream_.userBuffer[1] ) {
\r
8294 free( stream_.userBuffer[1] );
\r
8295 stream_.userBuffer[1] = 0;
\r
8298 stream_.state = STREAM_CLOSED;
\r
8299 stream_.mode = UNINITIALIZED;
\r
8302 void RtApiPulse::callbackEvent( void )
\r
8304 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8306 if ( stream_.state == STREAM_STOPPED ) {
\r
8307 MUTEX_LOCK( &stream_.mutex );
\r
8308 while ( !pah->runnable )
\r
8309 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8311 if ( stream_.state != STREAM_RUNNING ) {
\r
8312 MUTEX_UNLOCK( &stream_.mutex );
\r
8315 MUTEX_UNLOCK( &stream_.mutex );
\r
8318 if ( stream_.state == STREAM_CLOSED ) {
\r
8319 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8320 "this shouldn't happen!";
\r
8321 error( RtAudioError::WARNING );
\r
8325 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8326 double streamTime = getStreamTime();
\r
8327 RtAudioStreamStatus status = 0;
\r
8328 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8329 stream_.bufferSize, streamTime, status,
\r
8330 stream_.callbackInfo.userData );
\r
8332 if ( doStopStream == 2 ) {
\r
8337 MUTEX_LOCK( &stream_.mutex );
\r
8338 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8339 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8341 if ( stream_.state != STREAM_RUNNING )
\r
8346 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8347 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8348 convertBuffer( stream_.deviceBuffer,
\r
8349 stream_.userBuffer[OUTPUT],
\r
8350 stream_.convertInfo[OUTPUT] );
\r
8351 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8352 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8354 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8355 formatBytes( stream_.userFormat );
\r
8357 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8358 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8359 pa_strerror( pa_error ) << ".";
\r
8360 errorText_ = errorStream_.str();
\r
8361 error( RtAudioError::WARNING );
\r
8365 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8366 if ( stream_.doConvertBuffer[INPUT] )
\r
8367 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8368 formatBytes( stream_.deviceFormat[INPUT] );
\r
8370 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8371 formatBytes( stream_.userFormat );
\r
8373 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8374 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8375 pa_strerror( pa_error ) << ".";
\r
8376 errorText_ = errorStream_.str();
\r
8377 error( RtAudioError::WARNING );
\r
8379 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8380 convertBuffer( stream_.userBuffer[INPUT],
\r
8381 stream_.deviceBuffer,
\r
8382 stream_.convertInfo[INPUT] );
\r
8387 MUTEX_UNLOCK( &stream_.mutex );
\r
8388 RtApi::tickStreamTime();
\r
8390 if ( doStopStream == 1 )
\r
8394 void RtApiPulse::startStream( void )
\r
8396 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8398 if ( stream_.state == STREAM_CLOSED ) {
\r
8399 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8400 error( RtAudioError::INVALID_USE );
\r
8403 if ( stream_.state == STREAM_RUNNING ) {
\r
8404 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8405 error( RtAudioError::WARNING );
\r
8409 MUTEX_LOCK( &stream_.mutex );
\r
8411 stream_.state = STREAM_RUNNING;
\r
8413 pah->runnable = true;
\r
8414 pthread_cond_signal( &pah->runnable_cv );
\r
8415 MUTEX_UNLOCK( &stream_.mutex );
\r
8418 void RtApiPulse::stopStream( void )
\r
8420 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8422 if ( stream_.state == STREAM_CLOSED ) {
\r
8423 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8424 error( RtAudioError::INVALID_USE );
\r
8427 if ( stream_.state == STREAM_STOPPED ) {
\r
8428 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8429 error( RtAudioError::WARNING );
\r
8433 stream_.state = STREAM_STOPPED;
\r
8434 MUTEX_LOCK( &stream_.mutex );
\r
8436 if ( pah && pah->s_play ) {
\r
8438 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8439 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8440 pa_strerror( pa_error ) << ".";
\r
8441 errorText_ = errorStream_.str();
\r
8442 MUTEX_UNLOCK( &stream_.mutex );
\r
8443 error( RtAudioError::SYSTEM_ERROR );
\r
8448 stream_.state = STREAM_STOPPED;
\r
8449 MUTEX_UNLOCK( &stream_.mutex );
\r
8452 void RtApiPulse::abortStream( void )
\r
8454 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8456 if ( stream_.state == STREAM_CLOSED ) {
\r
8457 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8458 error( RtAudioError::INVALID_USE );
\r
8461 if ( stream_.state == STREAM_STOPPED ) {
\r
8462 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8463 error( RtAudioError::WARNING );
\r
8467 stream_.state = STREAM_STOPPED;
\r
8468 MUTEX_LOCK( &stream_.mutex );
\r
8470 if ( pah && pah->s_play ) {
\r
8472 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8473 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8474 pa_strerror( pa_error ) << ".";
\r
8475 errorText_ = errorStream_.str();
\r
8476 MUTEX_UNLOCK( &stream_.mutex );
\r
8477 error( RtAudioError::SYSTEM_ERROR );
\r
8482 stream_.state = STREAM_STOPPED;
\r
8483 MUTEX_UNLOCK( &stream_.mutex );
\r
8486 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8487 unsigned int channels, unsigned int firstChannel,
\r
8488 unsigned int sampleRate, RtAudioFormat format,
\r
8489 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8491 PulseAudioHandle *pah = 0;
\r
8492 unsigned long bufferBytes = 0;
\r
8493 pa_sample_spec ss;
\r
8495 if ( device != 0 ) return false;
\r
8496 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8497 if ( channels != 1 && channels != 2 ) {
\r
8498 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8501 ss.channels = channels;
\r
8503 if ( firstChannel != 0 ) return false;
\r
8505 bool sr_found = false;
\r
8506 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8507 if ( sampleRate == *sr ) {
\r
8509 stream_.sampleRate = sampleRate;
\r
8510 ss.rate = sampleRate;
\r
8514 if ( !sr_found ) {
\r
8515 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8519 bool sf_found = 0;
\r
8520 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8521 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8522 if ( format == sf->rtaudio_format ) {
\r
8524 stream_.userFormat = sf->rtaudio_format;
\r
8525 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8526 ss.format = sf->pa_format;
\r
8530 if ( !sf_found ) { // Use internal data format conversion.
\r
8531 stream_.userFormat = format;
\r
8532 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8533 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8536 // Set other stream parameters.
\r
8537 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8538 else stream_.userInterleaved = true;
\r
8539 stream_.deviceInterleaved[mode] = true;
\r
8540 stream_.nBuffers = 1;
\r
8541 stream_.doByteSwap[mode] = false;
\r
8542 stream_.nUserChannels[mode] = channels;
\r
8543 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8544 stream_.channelOffset[mode] = 0;
\r
8545 std::string streamName = "RtAudio";
\r
8547 // Set flags for buffer conversion.
\r
8548 stream_.doConvertBuffer[mode] = false;
\r
8549 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8550 stream_.doConvertBuffer[mode] = true;
\r
8551 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8552 stream_.doConvertBuffer[mode] = true;
\r
8554 // Allocate necessary internal buffers.
\r
8555 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8556 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8557 if ( stream_.userBuffer[mode] == NULL ) {
\r
8558 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8561 stream_.bufferSize = *bufferSize;
\r
8563 if ( stream_.doConvertBuffer[mode] ) {
\r
8565 bool makeBuffer = true;
\r
8566 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8567 if ( mode == INPUT ) {
\r
8568 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8569 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8570 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8574 if ( makeBuffer ) {
\r
8575 bufferBytes *= *bufferSize;
\r
8576 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8577 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8578 if ( stream_.deviceBuffer == NULL ) {
\r
8579 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8585 stream_.device[mode] = device;
\r
8587 // Setup the buffer conversion information structure.
\r
8588 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8590 if ( !stream_.apiHandle ) {
\r
8591 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8593 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8597 stream_.apiHandle = pah;
\r
8598 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8599 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8603 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8606 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8609 pa_buffer_attr buffer_attr;
\r
8610 buffer_attr.fragsize = bufferBytes;
\r
8611 buffer_attr.maxlength = -1;
\r
8613 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8614 if ( !pah->s_rec ) {
\r
8615 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8620 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8621 if ( !pah->s_play ) {
\r
8622 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8630 if ( stream_.mode == UNINITIALIZED )
\r
8631 stream_.mode = mode;
\r
8632 else if ( stream_.mode == mode )
\r
8635 stream_.mode = DUPLEX;
\r
8637 if ( !stream_.callbackInfo.isRunning ) {
\r
8638 stream_.callbackInfo.object = this;
\r
8639 stream_.callbackInfo.isRunning = true;
\r
8640 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8641 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8646 stream_.state = STREAM_STOPPED;
\r
8650 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8651 pthread_cond_destroy( &pah->runnable_cv );
\r
8653 stream_.apiHandle = 0;
\r
8656 for ( int i=0; i<2; i++ ) {
\r
8657 if ( stream_.userBuffer[i] ) {
\r
8658 free( stream_.userBuffer[i] );
\r
8659 stream_.userBuffer[i] = 0;
\r
8663 if ( stream_.deviceBuffer ) {
\r
8664 free( stream_.deviceBuffer );
\r
8665 stream_.deviceBuffer = 0;
\r
8671 //******************** End of __LINUX_PULSE__ *********************//
\r
8674 #if defined(__LINUX_OSS__)
\r
8676 #include <unistd.h>
\r
8677 #include <sys/ioctl.h>
\r
8678 #include <unistd.h>
\r
8679 #include <fcntl.h>
\r
8680 #include <sys/soundcard.h>
\r
8681 #include <errno.h>
\r
8684 static void *ossCallbackHandler(void * ptr);
\r
8686 // A structure to hold various information related to the OSS API
\r
8687 // implementation.
\r
8688 struct OssHandle {
\r
8689 int id[2]; // device ids
\r
8692 pthread_cond_t runnable;
\r
8695 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
8698 RtApiOss :: RtApiOss()
\r
8700 // Nothing to do here.
\r
8703 RtApiOss :: ~RtApiOss()
\r
8705 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8708 unsigned int RtApiOss :: getDeviceCount( void )
\r
8710 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8711 if ( mixerfd == -1 ) {
\r
8712 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8713 error( RtAudioError::WARNING );
\r
8717 oss_sysinfo sysinfo;
\r
8718 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8720 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8721 error( RtAudioError::WARNING );
\r
8726 return sysinfo.numaudios;
\r
8729 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8731 RtAudio::DeviceInfo info;
\r
8732 info.probed = false;
\r
8734 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8735 if ( mixerfd == -1 ) {
\r
8736 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8737 error( RtAudioError::WARNING );
\r
8741 oss_sysinfo sysinfo;
\r
8742 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8743 if ( result == -1 ) {
\r
8745 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8746 error( RtAudioError::WARNING );
\r
8750 unsigned nDevices = sysinfo.numaudios;
\r
8751 if ( nDevices == 0 ) {
\r
8753 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8754 error( RtAudioError::INVALID_USE );
\r
8758 if ( device >= nDevices ) {
\r
8760 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8761 error( RtAudioError::INVALID_USE );
\r
8765 oss_audioinfo ainfo;
\r
8766 ainfo.dev = device;
\r
8767 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8769 if ( result == -1 ) {
\r
8770 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8771 errorText_ = errorStream_.str();
\r
8772 error( RtAudioError::WARNING );
\r
8777 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8778 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8779 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8780 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8781 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8784 // Probe data formats ... do for input
\r
8785 unsigned long mask = ainfo.iformats;
\r
8786 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8787 info.nativeFormats |= RTAUDIO_SINT16;
\r
8788 if ( mask & AFMT_S8 )
\r
8789 info.nativeFormats |= RTAUDIO_SINT8;
\r
8790 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8791 info.nativeFormats |= RTAUDIO_SINT32;
\r
8793 if ( mask & AFMT_FLOAT )
\r
8794 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8796 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8797 info.nativeFormats |= RTAUDIO_SINT24;
\r
8799 // Check that we have at least one supported format
\r
8800 if ( info.nativeFormats == 0 ) {
\r
8801 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8802 errorText_ = errorStream_.str();
\r
8803 error( RtAudioError::WARNING );
\r
8807 // Probe the supported sample rates.
\r
8808 info.sampleRates.clear();
\r
8809 if ( ainfo.nrates ) {
\r
8810 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8811 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8812 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8813 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8815 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8816 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8824 // Check min and max rate values;
\r
8825 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8826 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8827 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8829 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8830 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8835 if ( info.sampleRates.size() == 0 ) {
\r
8836 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8837 errorText_ = errorStream_.str();
\r
8838 error( RtAudioError::WARNING );
\r
8841 info.probed = true;
\r
8842 info.name = ainfo.name;
\r
8849 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8850 unsigned int firstChannel, unsigned int sampleRate,
\r
8851 RtAudioFormat format, unsigned int *bufferSize,
\r
8852 RtAudio::StreamOptions *options )
\r
8854 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8855 if ( mixerfd == -1 ) {
\r
8856 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8860 oss_sysinfo sysinfo;
\r
8861 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8862 if ( result == -1 ) {
\r
8864 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8868 unsigned nDevices = sysinfo.numaudios;
\r
8869 if ( nDevices == 0 ) {
\r
8870 // This should not happen because a check is made before this function is called.
\r
8872 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8876 if ( device >= nDevices ) {
\r
8877 // This should not happen because a check is made before this function is called.
\r
8879 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8883 oss_audioinfo ainfo;
\r
8884 ainfo.dev = device;
\r
8885 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8887 if ( result == -1 ) {
\r
8888 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8889 errorText_ = errorStream_.str();
\r
8893 // Check if device supports input or output
\r
8894 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8895 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8896 if ( mode == OUTPUT )
\r
8897 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8899 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8900 errorText_ = errorStream_.str();
\r
8905 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8906 if ( mode == OUTPUT )
\r
8907 flags |= O_WRONLY;
\r
8908 else { // mode == INPUT
\r
8909 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8910 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8911 close( handle->id[0] );
\r
8912 handle->id[0] = 0;
\r
8913 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8914 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8915 errorText_ = errorStream_.str();
\r
8918 // Check that the number previously set channels is the same.
\r
8919 if ( stream_.nUserChannels[0] != channels ) {
\r
8920 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8921 errorText_ = errorStream_.str();
\r
8927 flags |= O_RDONLY;
\r
8930 // Set exclusive access if specified.
\r
8931 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8933 // Try to open the device.
\r
8935 fd = open( ainfo.devnode, flags, 0 );
\r
8937 if ( errno == EBUSY )
\r
8938 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8940 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8941 errorText_ = errorStream_.str();
\r
8945 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8947 if ( flags | O_RDWR ) {
\r
8948 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8949 if ( result == -1) {
\r
8950 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8951 errorText_ = errorStream_.str();
\r
8957 // Check the device channel support.
\r
8958 stream_.nUserChannels[mode] = channels;
\r
8959 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8961 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8962 errorText_ = errorStream_.str();
\r
8966 // Set the number of channels.
\r
8967 int deviceChannels = channels + firstChannel;
\r
8968 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8969 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8971 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8972 errorText_ = errorStream_.str();
\r
8975 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8977 // Get the data format mask
\r
8979 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8980 if ( result == -1 ) {
\r
8982 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8983 errorText_ = errorStream_.str();
\r
8987 // Determine how to set the device format.
\r
8988 stream_.userFormat = format;
\r
8989 int deviceFormat = -1;
\r
8990 stream_.doByteSwap[mode] = false;
\r
8991 if ( format == RTAUDIO_SINT8 ) {
\r
8992 if ( mask & AFMT_S8 ) {
\r
8993 deviceFormat = AFMT_S8;
\r
8994 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8997 else if ( format == RTAUDIO_SINT16 ) {
\r
8998 if ( mask & AFMT_S16_NE ) {
\r
8999 deviceFormat = AFMT_S16_NE;
\r
9000 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9002 else if ( mask & AFMT_S16_OE ) {
\r
9003 deviceFormat = AFMT_S16_OE;
\r
9004 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9005 stream_.doByteSwap[mode] = true;
\r
9008 else if ( format == RTAUDIO_SINT24 ) {
\r
9009 if ( mask & AFMT_S24_NE ) {
\r
9010 deviceFormat = AFMT_S24_NE;
\r
9011 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9013 else if ( mask & AFMT_S24_OE ) {
\r
9014 deviceFormat = AFMT_S24_OE;
\r
9015 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9016 stream_.doByteSwap[mode] = true;
\r
9019 else if ( format == RTAUDIO_SINT32 ) {
\r
9020 if ( mask & AFMT_S32_NE ) {
\r
9021 deviceFormat = AFMT_S32_NE;
\r
9022 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9024 else if ( mask & AFMT_S32_OE ) {
\r
9025 deviceFormat = AFMT_S32_OE;
\r
9026 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9027 stream_.doByteSwap[mode] = true;
\r
9031 if ( deviceFormat == -1 ) {
\r
9032 // The user requested format is not natively supported by the device.
\r
9033 if ( mask & AFMT_S16_NE ) {
\r
9034 deviceFormat = AFMT_S16_NE;
\r
9035 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9037 else if ( mask & AFMT_S32_NE ) {
\r
9038 deviceFormat = AFMT_S32_NE;
\r
9039 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9041 else if ( mask & AFMT_S24_NE ) {
\r
9042 deviceFormat = AFMT_S24_NE;
\r
9043 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9045 else if ( mask & AFMT_S16_OE ) {
\r
9046 deviceFormat = AFMT_S16_OE;
\r
9047 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9048 stream_.doByteSwap[mode] = true;
\r
9050 else if ( mask & AFMT_S32_OE ) {
\r
9051 deviceFormat = AFMT_S32_OE;
\r
9052 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9053 stream_.doByteSwap[mode] = true;
\r
9055 else if ( mask & AFMT_S24_OE ) {
\r
9056 deviceFormat = AFMT_S24_OE;
\r
9057 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9058 stream_.doByteSwap[mode] = true;
\r
9060 else if ( mask & AFMT_S8) {
\r
9061 deviceFormat = AFMT_S8;
\r
9062 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
9066 if ( stream_.deviceFormat[mode] == 0 ) {
\r
9067 // This really shouldn't happen ...
\r
9069 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
9070 errorText_ = errorStream_.str();
\r
9074 // Set the data format.
\r
9075 int temp = deviceFormat;
\r
9076 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
9077 if ( result == -1 || deviceFormat != temp ) {
\r
9079 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
9080 errorText_ = errorStream_.str();
\r
9084 // Attempt to set the buffer size. According to OSS, the minimum
\r
9085 // number of buffers is two. The supposed minimum buffer size is 16
\r
9086 // bytes, so that will be our lower bound. The argument to this
\r
9087 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
9088 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
9089 // We'll check the actual value used near the end of the setup
\r
9091 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
9092 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
9094 if ( options ) buffers = options->numberOfBuffers;
\r
9095 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
9096 if ( buffers < 2 ) buffers = 3;
\r
9097 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
9098 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
9099 if ( result == -1 ) {
\r
9101 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
9102 errorText_ = errorStream_.str();
\r
9105 stream_.nBuffers = buffers;
\r
9107 // Save buffer size (in sample frames).
\r
9108 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
9109 stream_.bufferSize = *bufferSize;
\r
9111 // Set the sample rate.
\r
9112 int srate = sampleRate;
\r
9113 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
9114 if ( result == -1 ) {
\r
9116 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
9117 errorText_ = errorStream_.str();
\r
9121 // Verify the sample rate setup worked.
\r
9122 if ( abs( srate - (int)sampleRate ) > 100 ) {
\r
9124 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9125 errorText_ = errorStream_.str();
\r
9128 stream_.sampleRate = sampleRate;
\r
9130 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9131 // We're doing duplex setup here.
\r
9132 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9133 stream_.nDeviceChannels[0] = deviceChannels;
\r
9136 // Set interleaving parameters.
\r
9137 stream_.userInterleaved = true;
\r
9138 stream_.deviceInterleaved[mode] = true;
\r
9139 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9140 stream_.userInterleaved = false;
\r
9142 // Set flags for buffer conversion
\r
9143 stream_.doConvertBuffer[mode] = false;
\r
9144 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9145 stream_.doConvertBuffer[mode] = true;
\r
9146 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9147 stream_.doConvertBuffer[mode] = true;
\r
9148 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9149 stream_.nUserChannels[mode] > 1 )
\r
9150 stream_.doConvertBuffer[mode] = true;
\r
9152 // Allocate the stream handles if necessary and then save.
\r
9153 if ( stream_.apiHandle == 0 ) {
\r
9155 handle = new OssHandle;
\r
9157 catch ( std::bad_alloc& ) {
\r
9158 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9162 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9163 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9167 stream_.apiHandle = (void *) handle;
\r
9170 handle = (OssHandle *) stream_.apiHandle;
\r
9172 handle->id[mode] = fd;
\r
9174 // Allocate necessary internal buffers.
\r
9175 unsigned long bufferBytes;
\r
9176 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9177 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9178 if ( stream_.userBuffer[mode] == NULL ) {
\r
9179 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9183 if ( stream_.doConvertBuffer[mode] ) {
\r
9185 bool makeBuffer = true;
\r
9186 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9187 if ( mode == INPUT ) {
\r
9188 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9189 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9190 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9194 if ( makeBuffer ) {
\r
9195 bufferBytes *= *bufferSize;
\r
9196 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9197 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9198 if ( stream_.deviceBuffer == NULL ) {
\r
9199 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9205 stream_.device[mode] = device;
\r
9206 stream_.state = STREAM_STOPPED;
\r
9208 // Setup the buffer conversion information structure.
\r
9209 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9211 // Setup thread if necessary.
\r
9212 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9213 // We had already set up an output stream.
\r
9214 stream_.mode = DUPLEX;
\r
9215 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9218 stream_.mode = mode;
\r
9220 // Setup callback thread.
\r
9221 stream_.callbackInfo.object = (void *) this;
\r
9223 // Set the thread attributes for joinable and realtime scheduling
\r
9224 // priority. The higher priority will only take affect if the
\r
9225 // program is run as root or suid.
\r
9226 pthread_attr_t attr;
\r
9227 pthread_attr_init( &attr );
\r
9228 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9229 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9230 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9231 struct sched_param param;
\r
9232 int priority = options->priority;
\r
9233 int min = sched_get_priority_min( SCHED_RR );
\r
9234 int max = sched_get_priority_max( SCHED_RR );
\r
9235 if ( priority < min ) priority = min;
\r
9236 else if ( priority > max ) priority = max;
\r
9237 param.sched_priority = priority;
\r
9238 pthread_attr_setschedparam( &attr, ¶m );
\r
9239 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9242 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9244 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9247 stream_.callbackInfo.isRunning = true;
\r
9248 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9249 pthread_attr_destroy( &attr );
\r
9251 stream_.callbackInfo.isRunning = false;
\r
9252 errorText_ = "RtApiOss::error creating callback thread!";
\r
9261 pthread_cond_destroy( &handle->runnable );
\r
9262 if ( handle->id[0] ) close( handle->id[0] );
\r
9263 if ( handle->id[1] ) close( handle->id[1] );
\r
9265 stream_.apiHandle = 0;
\r
9268 for ( int i=0; i<2; i++ ) {
\r
9269 if ( stream_.userBuffer[i] ) {
\r
9270 free( stream_.userBuffer[i] );
\r
9271 stream_.userBuffer[i] = 0;
\r
9275 if ( stream_.deviceBuffer ) {
\r
9276 free( stream_.deviceBuffer );
\r
9277 stream_.deviceBuffer = 0;
\r
9283 void RtApiOss :: closeStream()
\r
9285 if ( stream_.state == STREAM_CLOSED ) {
\r
9286 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9287 error( RtAudioError::WARNING );
\r
9291 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9292 stream_.callbackInfo.isRunning = false;
\r
9293 MUTEX_LOCK( &stream_.mutex );
\r
9294 if ( stream_.state == STREAM_STOPPED )
\r
9295 pthread_cond_signal( &handle->runnable );
\r
9296 MUTEX_UNLOCK( &stream_.mutex );
\r
9297 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9299 if ( stream_.state == STREAM_RUNNING ) {
\r
9300 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9301 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9303 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9304 stream_.state = STREAM_STOPPED;
\r
9308 pthread_cond_destroy( &handle->runnable );
\r
9309 if ( handle->id[0] ) close( handle->id[0] );
\r
9310 if ( handle->id[1] ) close( handle->id[1] );
\r
9312 stream_.apiHandle = 0;
\r
9315 for ( int i=0; i<2; i++ ) {
\r
9316 if ( stream_.userBuffer[i] ) {
\r
9317 free( stream_.userBuffer[i] );
\r
9318 stream_.userBuffer[i] = 0;
\r
9322 if ( stream_.deviceBuffer ) {
\r
9323 free( stream_.deviceBuffer );
\r
9324 stream_.deviceBuffer = 0;
\r
9327 stream_.mode = UNINITIALIZED;
\r
9328 stream_.state = STREAM_CLOSED;
\r
9331 void RtApiOss :: startStream()
\r
9334 if ( stream_.state == STREAM_RUNNING ) {
\r
9335 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9336 error( RtAudioError::WARNING );
\r
9340 MUTEX_LOCK( &stream_.mutex );
\r
9342 stream_.state = STREAM_RUNNING;
\r
9344 // No need to do anything else here ... OSS automatically starts
\r
9345 // when fed samples.
\r
9347 MUTEX_UNLOCK( &stream_.mutex );
\r
9349 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9350 pthread_cond_signal( &handle->runnable );
\r
9353 void RtApiOss :: stopStream()
\r
9356 if ( stream_.state == STREAM_STOPPED ) {
\r
9357 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9358 error( RtAudioError::WARNING );
\r
9362 MUTEX_LOCK( &stream_.mutex );
\r
9364 // The state might change while waiting on a mutex.
\r
9365 if ( stream_.state == STREAM_STOPPED ) {
\r
9366 MUTEX_UNLOCK( &stream_.mutex );
\r
9371 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9372 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9374 // Flush the output with zeros a few times.
\r
9377 RtAudioFormat format;
\r
9379 if ( stream_.doConvertBuffer[0] ) {
\r
9380 buffer = stream_.deviceBuffer;
\r
9381 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9382 format = stream_.deviceFormat[0];
\r
9385 buffer = stream_.userBuffer[0];
\r
9386 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9387 format = stream_.userFormat;
\r
9390 memset( buffer, 0, samples * formatBytes(format) );
\r
9391 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9392 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9393 if ( result == -1 ) {
\r
9394 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9395 error( RtAudioError::WARNING );
\r
9399 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9400 if ( result == -1 ) {
\r
9401 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9402 errorText_ = errorStream_.str();
\r
9405 handle->triggered = false;
\r
9408 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9409 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9410 if ( result == -1 ) {
\r
9411 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9412 errorText_ = errorStream_.str();
\r
9418 stream_.state = STREAM_STOPPED;
\r
9419 MUTEX_UNLOCK( &stream_.mutex );
\r
9421 if ( result != -1 ) return;
\r
9422 error( RtAudioError::SYSTEM_ERROR );
\r
9425 void RtApiOss :: abortStream()
\r
9428 if ( stream_.state == STREAM_STOPPED ) {
\r
9429 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9430 error( RtAudioError::WARNING );
\r
9434 MUTEX_LOCK( &stream_.mutex );
\r
9436 // The state might change while waiting on a mutex.
\r
9437 if ( stream_.state == STREAM_STOPPED ) {
\r
9438 MUTEX_UNLOCK( &stream_.mutex );
\r
9443 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9444 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9445 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9446 if ( result == -1 ) {
\r
9447 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9448 errorText_ = errorStream_.str();
\r
9451 handle->triggered = false;
\r
9454 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9455 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9456 if ( result == -1 ) {
\r
9457 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9458 errorText_ = errorStream_.str();
\r
9464 stream_.state = STREAM_STOPPED;
\r
9465 MUTEX_UNLOCK( &stream_.mutex );
\r
9467 if ( result != -1 ) return;
\r
9468 error( RtAudioError::SYSTEM_ERROR );
\r
9471 void RtApiOss :: callbackEvent()
\r
9473 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9474 if ( stream_.state == STREAM_STOPPED ) {
\r
9475 MUTEX_LOCK( &stream_.mutex );
\r
9476 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9477 if ( stream_.state != STREAM_RUNNING ) {
\r
9478 MUTEX_UNLOCK( &stream_.mutex );
\r
9481 MUTEX_UNLOCK( &stream_.mutex );
\r
9484 if ( stream_.state == STREAM_CLOSED ) {
\r
9485 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9486 error( RtAudioError::WARNING );
\r
9490 // Invoke user callback to get fresh output data.
\r
9491 int doStopStream = 0;
\r
9492 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9493 double streamTime = getStreamTime();
\r
9494 RtAudioStreamStatus status = 0;
\r
9495 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9496 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9497 handle->xrun[0] = false;
\r
9499 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9500 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9501 handle->xrun[1] = false;
\r
9503 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9504 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9505 if ( doStopStream == 2 ) {
\r
9506 this->abortStream();
\r
9510 MUTEX_LOCK( &stream_.mutex );
\r
9512 // The state might change while waiting on a mutex.
\r
9513 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9518 RtAudioFormat format;
\r
9520 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9522 // Setup parameters and do buffer conversion if necessary.
\r
9523 if ( stream_.doConvertBuffer[0] ) {
\r
9524 buffer = stream_.deviceBuffer;
\r
9525 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9526 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9527 format = stream_.deviceFormat[0];
\r
9530 buffer = stream_.userBuffer[0];
\r
9531 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9532 format = stream_.userFormat;
\r
9535 // Do byte swapping if necessary.
\r
9536 if ( stream_.doByteSwap[0] )
\r
9537 byteSwapBuffer( buffer, samples, format );
\r
9539 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9541 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9542 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9543 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9544 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9545 handle->triggered = true;
\r
9548 // Write samples to device.
\r
9549 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9551 if ( result == -1 ) {
\r
9552 // We'll assume this is an underrun, though there isn't a
\r
9553 // specific means for determining that.
\r
9554 handle->xrun[0] = true;
\r
9555 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9556 error( RtAudioError::WARNING );
\r
9557 // Continue on to input section.
\r
9561 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9563 // Setup parameters.
\r
9564 if ( stream_.doConvertBuffer[1] ) {
\r
9565 buffer = stream_.deviceBuffer;
\r
9566 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9567 format = stream_.deviceFormat[1];
\r
9570 buffer = stream_.userBuffer[1];
\r
9571 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9572 format = stream_.userFormat;
\r
9575 // Read samples from device.
\r
9576 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9578 if ( result == -1 ) {
\r
9579 // We'll assume this is an overrun, though there isn't a
\r
9580 // specific means for determining that.
\r
9581 handle->xrun[1] = true;
\r
9582 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9583 error( RtAudioError::WARNING );
\r
9587 // Do byte swapping if necessary.
\r
9588 if ( stream_.doByteSwap[1] )
\r
9589 byteSwapBuffer( buffer, samples, format );
\r
9591 // Do buffer conversion if necessary.
\r
9592 if ( stream_.doConvertBuffer[1] )
\r
9593 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9597 MUTEX_UNLOCK( &stream_.mutex );
\r
9599 RtApi::tickStreamTime();
\r
9600 if ( doStopStream == 1 ) this->stopStream();
\r
9603 static void *ossCallbackHandler( void *ptr )
\r
9605 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9606 RtApiOss *object = (RtApiOss *) info->object;
\r
9607 bool *isRunning = &info->isRunning;
\r
9609 while ( *isRunning == true ) {
\r
9610 pthread_testcancel();
\r
9611 object->callbackEvent();
\r
9614 pthread_exit( NULL );
\r
9617 //******************** End of __LINUX_OSS__ *********************//
\r
9621 // *************************************************** //
\r
9623 // Protected common (OS-independent) RtAudio methods.
\r
9625 // *************************************************** //
\r
9627 // This method can be modified to control the behavior of error
\r
9628 // message printing.
\r
9629 void RtApi :: error( RtAudioError::Type type )
\r
9631 errorStream_.str(""); // clear the ostringstream
\r
9633 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9634 if ( errorCallback ) {
\r
9635 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9637 if ( firstErrorOccurred_ )
\r
9640 firstErrorOccurred_ = true;
\r
9641 const std::string errorMessage = errorText_;
\r
9643 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9644 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9648 errorCallback( type, errorMessage );
\r
9649 firstErrorOccurred_ = false;
\r
9653 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9654 std::cerr << '\n' << errorText_ << "\n\n";
\r
9655 else if ( type != RtAudioError::WARNING )
\r
9656 throw( RtAudioError( errorText_, type ) );
\r
9659 void RtApi :: verifyStream()
\r
9661 if ( stream_.state == STREAM_CLOSED ) {
\r
9662 errorText_ = "RtApi:: a stream is not open!";
\r
9663 error( RtAudioError::INVALID_USE );
\r
9667 void RtApi :: clearStreamInfo()
\r
9669 stream_.mode = UNINITIALIZED;
\r
9670 stream_.state = STREAM_CLOSED;
\r
9671 stream_.sampleRate = 0;
\r
9672 stream_.bufferSize = 0;
\r
9673 stream_.nBuffers = 0;
\r
9674 stream_.userFormat = 0;
\r
9675 stream_.userInterleaved = true;
\r
9676 stream_.streamTime = 0.0;
\r
9677 stream_.apiHandle = 0;
\r
9678 stream_.deviceBuffer = 0;
\r
9679 stream_.callbackInfo.callback = 0;
\r
9680 stream_.callbackInfo.userData = 0;
\r
9681 stream_.callbackInfo.isRunning = false;
\r
9682 stream_.callbackInfo.errorCallback = 0;
\r
9683 for ( int i=0; i<2; i++ ) {
\r
9684 stream_.device[i] = 11111;
\r
9685 stream_.doConvertBuffer[i] = false;
\r
9686 stream_.deviceInterleaved[i] = true;
\r
9687 stream_.doByteSwap[i] = false;
\r
9688 stream_.nUserChannels[i] = 0;
\r
9689 stream_.nDeviceChannels[i] = 0;
\r
9690 stream_.channelOffset[i] = 0;
\r
9691 stream_.deviceFormat[i] = 0;
\r
9692 stream_.latency[i] = 0;
\r
9693 stream_.userBuffer[i] = 0;
\r
9694 stream_.convertInfo[i].channels = 0;
\r
9695 stream_.convertInfo[i].inJump = 0;
\r
9696 stream_.convertInfo[i].outJump = 0;
\r
9697 stream_.convertInfo[i].inFormat = 0;
\r
9698 stream_.convertInfo[i].outFormat = 0;
\r
9699 stream_.convertInfo[i].inOffset.clear();
\r
9700 stream_.convertInfo[i].outOffset.clear();
\r
9704 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9706 if ( format == RTAUDIO_SINT16 )
\r
9708 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9710 else if ( format == RTAUDIO_FLOAT64 )
\r
9712 else if ( format == RTAUDIO_SINT24 )
\r
9714 else if ( format == RTAUDIO_SINT8 )
\r
9717 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9718 error( RtAudioError::WARNING );
\r
9723 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9725 if ( mode == INPUT ) { // convert device to user buffer
\r
9726 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9727 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9728 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9729 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9731 else { // convert user to device buffer
\r
9732 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9733 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9734 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9735 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9738 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9739 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9741 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9743 // Set up the interleave/deinterleave offsets.
\r
9744 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9745 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9746 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9747 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9748 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9749 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9750 stream_.convertInfo[mode].inJump = 1;
\r
9754 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9755 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9756 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9757 stream_.convertInfo[mode].outJump = 1;
\r
9761 else { // no (de)interleaving
\r
9762 if ( stream_.userInterleaved ) {
\r
9763 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9764 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9765 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9769 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9770 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9771 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9772 stream_.convertInfo[mode].inJump = 1;
\r
9773 stream_.convertInfo[mode].outJump = 1;
\r
9778 // Add channel offset.
\r
9779 if ( firstChannel > 0 ) {
\r
9780 if ( stream_.deviceInterleaved[mode] ) {
\r
9781 if ( mode == OUTPUT ) {
\r
9782 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9783 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9786 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9787 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9791 if ( mode == OUTPUT ) {
\r
9792 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9793 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9796 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9797 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9803 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9805 // This function does format conversion, input/output channel compensation, and
\r
9806 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9807 // the lower three bytes of a 32-bit integer.
\r
9809 // Clear our device buffer when in/out duplex device channels are different
\r
9810 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9811 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9812 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9815 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9817 Float64 *out = (Float64 *)outBuffer;
\r
9819 if (info.inFormat == RTAUDIO_SINT8) {
\r
9820 signed char *in = (signed char *)inBuffer;
\r
9821 scale = 1.0 / 127.5;
\r
9822 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9823 for (j=0; j<info.channels; j++) {
\r
9824 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9825 out[info.outOffset[j]] += 0.5;
\r
9826 out[info.outOffset[j]] *= scale;
\r
9828 in += info.inJump;
\r
9829 out += info.outJump;
\r
9832 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9833 Int16 *in = (Int16 *)inBuffer;
\r
9834 scale = 1.0 / 32767.5;
\r
9835 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9836 for (j=0; j<info.channels; j++) {
\r
9837 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9838 out[info.outOffset[j]] += 0.5;
\r
9839 out[info.outOffset[j]] *= scale;
\r
9841 in += info.inJump;
\r
9842 out += info.outJump;
\r
9845 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9846 Int24 *in = (Int24 *)inBuffer;
\r
9847 scale = 1.0 / 8388607.5;
\r
9848 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9849 for (j=0; j<info.channels; j++) {
\r
9850 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9851 out[info.outOffset[j]] += 0.5;
\r
9852 out[info.outOffset[j]] *= scale;
\r
9854 in += info.inJump;
\r
9855 out += info.outJump;
\r
9858 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9859 Int32 *in = (Int32 *)inBuffer;
\r
9860 scale = 1.0 / 2147483647.5;
\r
9861 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9862 for (j=0; j<info.channels; j++) {
\r
9863 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9864 out[info.outOffset[j]] += 0.5;
\r
9865 out[info.outOffset[j]] *= scale;
\r
9867 in += info.inJump;
\r
9868 out += info.outJump;
\r
9871 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9872 Float32 *in = (Float32 *)inBuffer;
\r
9873 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9874 for (j=0; j<info.channels; j++) {
\r
9875 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9877 in += info.inJump;
\r
9878 out += info.outJump;
\r
9881 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9882 // Channel compensation and/or (de)interleaving only.
\r
9883 Float64 *in = (Float64 *)inBuffer;
\r
9884 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9885 for (j=0; j<info.channels; j++) {
\r
9886 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9888 in += info.inJump;
\r
9889 out += info.outJump;
\r
9893 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9895 Float32 *out = (Float32 *)outBuffer;
\r
9897 if (info.inFormat == RTAUDIO_SINT8) {
\r
9898 signed char *in = (signed char *)inBuffer;
\r
9899 scale = (Float32) ( 1.0 / 127.5 );
\r
9900 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9901 for (j=0; j<info.channels; j++) {
\r
9902 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9903 out[info.outOffset[j]] += 0.5;
\r
9904 out[info.outOffset[j]] *= scale;
\r
9906 in += info.inJump;
\r
9907 out += info.outJump;
\r
9910 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9911 Int16 *in = (Int16 *)inBuffer;
\r
9912 scale = (Float32) ( 1.0 / 32767.5 );
\r
9913 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9914 for (j=0; j<info.channels; j++) {
\r
9915 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9916 out[info.outOffset[j]] += 0.5;
\r
9917 out[info.outOffset[j]] *= scale;
\r
9919 in += info.inJump;
\r
9920 out += info.outJump;
\r
9923 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9924 Int24 *in = (Int24 *)inBuffer;
\r
9925 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9926 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9927 for (j=0; j<info.channels; j++) {
\r
9928 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9929 out[info.outOffset[j]] += 0.5;
\r
9930 out[info.outOffset[j]] *= scale;
\r
9932 in += info.inJump;
\r
9933 out += info.outJump;
\r
9936 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9937 Int32 *in = (Int32 *)inBuffer;
\r
9938 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9939 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9940 for (j=0; j<info.channels; j++) {
\r
9941 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9942 out[info.outOffset[j]] += 0.5;
\r
9943 out[info.outOffset[j]] *= scale;
\r
9945 in += info.inJump;
\r
9946 out += info.outJump;
\r
9949 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9950 // Channel compensation and/or (de)interleaving only.
\r
9951 Float32 *in = (Float32 *)inBuffer;
\r
9952 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9953 for (j=0; j<info.channels; j++) {
\r
9954 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9956 in += info.inJump;
\r
9957 out += info.outJump;
\r
9960 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9961 Float64 *in = (Float64 *)inBuffer;
\r
9962 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9963 for (j=0; j<info.channels; j++) {
\r
9964 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9966 in += info.inJump;
\r
9967 out += info.outJump;
\r
9971 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9972 Int32 *out = (Int32 *)outBuffer;
\r
9973 if (info.inFormat == RTAUDIO_SINT8) {
\r
9974 signed char *in = (signed char *)inBuffer;
\r
9975 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9976 for (j=0; j<info.channels; j++) {
\r
9977 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9978 out[info.outOffset[j]] <<= 24;
\r
9980 in += info.inJump;
\r
9981 out += info.outJump;
\r
9984 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9985 Int16 *in = (Int16 *)inBuffer;
\r
9986 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9987 for (j=0; j<info.channels; j++) {
\r
9988 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9989 out[info.outOffset[j]] <<= 16;
\r
9991 in += info.inJump;
\r
9992 out += info.outJump;
\r
9995 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9996 Int24 *in = (Int24 *)inBuffer;
\r
9997 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9998 for (j=0; j<info.channels; j++) {
\r
9999 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
10000 out[info.outOffset[j]] <<= 8;
\r
10002 in += info.inJump;
\r
10003 out += info.outJump;
\r
10006 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10007 // Channel compensation and/or (de)interleaving only.
\r
10008 Int32 *in = (Int32 *)inBuffer;
\r
10009 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10010 for (j=0; j<info.channels; j++) {
\r
10011 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10013 in += info.inJump;
\r
10014 out += info.outJump;
\r
10017 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10018 Float32 *in = (Float32 *)inBuffer;
\r
10019 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10020 for (j=0; j<info.channels; j++) {
\r
10021 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10023 in += info.inJump;
\r
10024 out += info.outJump;
\r
10027 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10028 Float64 *in = (Float64 *)inBuffer;
\r
10029 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10030 for (j=0; j<info.channels; j++) {
\r
10031 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10033 in += info.inJump;
\r
10034 out += info.outJump;
\r
10038 else if (info.outFormat == RTAUDIO_SINT24) {
\r
10039 Int24 *out = (Int24 *)outBuffer;
\r
10040 if (info.inFormat == RTAUDIO_SINT8) {
\r
10041 signed char *in = (signed char *)inBuffer;
\r
10042 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10043 for (j=0; j<info.channels; j++) {
\r
10044 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
10045 //out[info.outOffset[j]] <<= 16;
\r
10047 in += info.inJump;
\r
10048 out += info.outJump;
\r
10051 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10052 Int16 *in = (Int16 *)inBuffer;
\r
10053 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10054 for (j=0; j<info.channels; j++) {
\r
10055 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
10056 //out[info.outOffset[j]] <<= 8;
\r
10058 in += info.inJump;
\r
10059 out += info.outJump;
\r
10062 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10063 // Channel compensation and/or (de)interleaving only.
\r
10064 Int24 *in = (Int24 *)inBuffer;
\r
10065 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10066 for (j=0; j<info.channels; j++) {
\r
10067 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10069 in += info.inJump;
\r
10070 out += info.outJump;
\r
10073 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10074 Int32 *in = (Int32 *)inBuffer;
\r
10075 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10076 for (j=0; j<info.channels; j++) {
\r
10077 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
10078 //out[info.outOffset[j]] >>= 8;
\r
10080 in += info.inJump;
\r
10081 out += info.outJump;
\r
10084 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10085 Float32 *in = (Float32 *)inBuffer;
\r
10086 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10087 for (j=0; j<info.channels; j++) {
\r
10088 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10090 in += info.inJump;
\r
10091 out += info.outJump;
\r
10094 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10095 Float64 *in = (Float64 *)inBuffer;
\r
10096 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10097 for (j=0; j<info.channels; j++) {
\r
10098 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10100 in += info.inJump;
\r
10101 out += info.outJump;
\r
10105 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10106 Int16 *out = (Int16 *)outBuffer;
\r
10107 if (info.inFormat == RTAUDIO_SINT8) {
\r
10108 signed char *in = (signed char *)inBuffer;
\r
10109 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10110 for (j=0; j<info.channels; j++) {
\r
10111 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10112 out[info.outOffset[j]] <<= 8;
\r
10114 in += info.inJump;
\r
10115 out += info.outJump;
\r
10118 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10119 // Channel compensation and/or (de)interleaving only.
\r
10120 Int16 *in = (Int16 *)inBuffer;
\r
10121 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10122 for (j=0; j<info.channels; j++) {
\r
10123 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10125 in += info.inJump;
\r
10126 out += info.outJump;
\r
10129 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10130 Int24 *in = (Int24 *)inBuffer;
\r
10131 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10132 for (j=0; j<info.channels; j++) {
\r
10133 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10135 in += info.inJump;
\r
10136 out += info.outJump;
\r
10139 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10140 Int32 *in = (Int32 *)inBuffer;
\r
10141 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10142 for (j=0; j<info.channels; j++) {
\r
10143 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10145 in += info.inJump;
\r
10146 out += info.outJump;
\r
10149 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10150 Float32 *in = (Float32 *)inBuffer;
\r
10151 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10152 for (j=0; j<info.channels; j++) {
\r
10153 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10155 in += info.inJump;
\r
10156 out += info.outJump;
\r
10159 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10160 Float64 *in = (Float64 *)inBuffer;
\r
10161 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10162 for (j=0; j<info.channels; j++) {
\r
10163 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10165 in += info.inJump;
\r
10166 out += info.outJump;
\r
10170 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10171 signed char *out = (signed char *)outBuffer;
\r
10172 if (info.inFormat == RTAUDIO_SINT8) {
\r
10173 // Channel compensation and/or (de)interleaving only.
\r
10174 signed char *in = (signed char *)inBuffer;
\r
10175 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10176 for (j=0; j<info.channels; j++) {
\r
10177 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10179 in += info.inJump;
\r
10180 out += info.outJump;
\r
10183 if (info.inFormat == RTAUDIO_SINT16) {
\r
10184 Int16 *in = (Int16 *)inBuffer;
\r
10185 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10186 for (j=0; j<info.channels; j++) {
\r
10187 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10189 in += info.inJump;
\r
10190 out += info.outJump;
\r
10193 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10194 Int24 *in = (Int24 *)inBuffer;
\r
10195 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10196 for (j=0; j<info.channels; j++) {
\r
10197 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10199 in += info.inJump;
\r
10200 out += info.outJump;
\r
10203 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10204 Int32 *in = (Int32 *)inBuffer;
\r
10205 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10206 for (j=0; j<info.channels; j++) {
\r
10207 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10209 in += info.inJump;
\r
10210 out += info.outJump;
\r
10213 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10214 Float32 *in = (Float32 *)inBuffer;
\r
10215 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10216 for (j=0; j<info.channels; j++) {
\r
10217 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10219 in += info.inJump;
\r
10220 out += info.outJump;
\r
10223 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10224 Float64 *in = (Float64 *)inBuffer;
\r
10225 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10226 for (j=0; j<info.channels; j++) {
\r
10227 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10229 in += info.inJump;
\r
10230 out += info.outJump;
\r
10236 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10237 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10238 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10240 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10246 if ( format == RTAUDIO_SINT16 ) {
\r
10247 for ( unsigned int i=0; i<samples; i++ ) {
\r
10248 // Swap 1st and 2nd bytes.
\r
10250 *(ptr) = *(ptr+1);
\r
10253 // Increment 2 bytes.
\r
10257 else if ( format == RTAUDIO_SINT32 ||
\r
10258 format == RTAUDIO_FLOAT32 ) {
\r
10259 for ( unsigned int i=0; i<samples; i++ ) {
\r
10260 // Swap 1st and 4th bytes.
\r
10262 *(ptr) = *(ptr+3);
\r
10265 // Swap 2nd and 3rd bytes.
\r
10268 *(ptr) = *(ptr+1);
\r
10271 // Increment 3 more bytes.
\r
10275 else if ( format == RTAUDIO_SINT24 ) {
\r
10276 for ( unsigned int i=0; i<samples; i++ ) {
\r
10277 // Swap 1st and 3rd bytes.
\r
10279 *(ptr) = *(ptr+2);
\r
10282 // Increment 2 more bytes.
\r
10286 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10287 for ( unsigned int i=0; i<samples; i++ ) {
\r
10288 // Swap 1st and 8th bytes
\r
10290 *(ptr) = *(ptr+7);
\r
10293 // Swap 2nd and 7th bytes
\r
10296 *(ptr) = *(ptr+5);
\r
10299 // Swap 3rd and 6th bytes
\r
10302 *(ptr) = *(ptr+3);
\r
10305 // Swap 4th and 5th bytes
\r
10308 *(ptr) = *(ptr+1);
\r
10311 // Increment 5 more bytes.
\r
10317 // Indentation settings for Vim and Emacs
\r
10319 // Local Variables:
\r
10320 // c-basic-offset: 2
\r
10321 // indent-tabs-mode: nil
// End:
\r
10324 // vim: et sts=2 sw=2
\r