1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2016 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.2
\r
43 #include "RtAudio.h"
\r
49 #include <algorithm>
\r
51 // Static variable definitions.
\r
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
53 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers and wide/narrow string helpers.
// Windows builds use critical sections; POSIX builds use pthread
// mutexes; the dummy API needs no real locking.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Convert a null-terminated char string to std::string (identity helper
  // so UNICODE and non-UNICODE device-name code can share a call site).
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a null-terminated wide string to a UTF-8 std::string.
  // First call computes the required byte count (includes the null
  // terminator, hence length-1 for the std::string size).
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
90 // *************************************************** //
\r
92 // RtAudio definitions.
\r
94 // *************************************************** //
\r
96 std::string RtAudio :: getVersion( void )
\r
98 return RTAUDIO_VERSION;
\r
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
\r
105 // The order here will control the order of RtAudio's API search in
\r
106 // the constructor.
\r
107 #if defined(__UNIX_JACK__)
\r
108 apis.push_back( UNIX_JACK );
\r
110 #if defined(__LINUX_ALSA__)
\r
111 apis.push_back( LINUX_ALSA );
\r
113 #if defined(__LINUX_PULSE__)
\r
114 apis.push_back( LINUX_PULSE );
\r
116 #if defined(__LINUX_OSS__)
\r
117 apis.push_back( LINUX_OSS );
\r
119 #if defined(__WINDOWS_ASIO__)
\r
120 apis.push_back( WINDOWS_ASIO );
\r
122 #if defined(__WINDOWS_WASAPI__)
\r
123 apis.push_back( WINDOWS_WASAPI );
\r
125 #if defined(__WINDOWS_DS__)
\r
126 apis.push_back( WINDOWS_DS );
\r
128 #if defined(__MACOSX_CORE__)
\r
129 apis.push_back( MACOSX_CORE );
\r
131 #if defined(__RTAUDIO_DUMMY__)
\r
132 apis.push_back( RTAUDIO_DUMMY );
\r
136 void RtAudio :: openRtApi( RtAudio::Api api )
\r
142 #if defined(__UNIX_JACK__)
\r
143 if ( api == UNIX_JACK )
\r
144 rtapi_ = new RtApiJack();
\r
146 #if defined(__LINUX_ALSA__)
\r
147 if ( api == LINUX_ALSA )
\r
148 rtapi_ = new RtApiAlsa();
\r
150 #if defined(__LINUX_PULSE__)
\r
151 if ( api == LINUX_PULSE )
\r
152 rtapi_ = new RtApiPulse();
\r
154 #if defined(__LINUX_OSS__)
\r
155 if ( api == LINUX_OSS )
\r
156 rtapi_ = new RtApiOss();
\r
158 #if defined(__WINDOWS_ASIO__)
\r
159 if ( api == WINDOWS_ASIO )
\r
160 rtapi_ = new RtApiAsio();
\r
162 #if defined(__WINDOWS_WASAPI__)
\r
163 if ( api == WINDOWS_WASAPI )
\r
164 rtapi_ = new RtApiWasapi();
\r
166 #if defined(__WINDOWS_DS__)
\r
167 if ( api == WINDOWS_DS )
\r
168 rtapi_ = new RtApiDs();
\r
170 #if defined(__MACOSX_CORE__)
\r
171 if ( api == MACOSX_CORE )
\r
172 rtapi_ = new RtApiCore();
\r
174 #if defined(__RTAUDIO_DUMMY__)
\r
175 if ( api == RTAUDIO_DUMMY )
\r
176 rtapi_ = new RtApiDummy();
\r
180 RtAudio :: RtAudio( RtAudio::Api api )
\r
184 if ( api != UNSPECIFIED ) {
\r
185 // Attempt to open the specified API.
\r
187 if ( rtapi_ ) return;
\r
189 // No compiled support for specified API value. Issue a debug
\r
190 // warning and continue as if no API was specified.
\r
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
194 // Iterate through the compiled APIs and return as soon as we find
\r
195 // one with at least one device or we reach the end of the list.
\r
196 std::vector< RtAudio::Api > apis;
\r
197 getCompiledApi( apis );
\r
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
199 openRtApi( apis[i] );
\r
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
203 if ( rtapi_ ) return;
\r
205 // It should not be possible to get here because the preprocessor
\r
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
207 // API-specific definitions are passed to the compiler. But just in
\r
208 // case something weird happens, we'll thow an error.
\r
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
213 RtAudio :: ~RtAudio()
\r
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
220 RtAudio::StreamParameters *inputParameters,
\r
221 RtAudioFormat format, unsigned int sampleRate,
\r
222 unsigned int *bufferFrames,
\r
223 RtAudioCallback callback, void *userData,
\r
224 RtAudio::StreamOptions *options,
\r
225 RtAudioErrorCallback errorCallback )
\r
227 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
228 sampleRate, bufferFrames, callback,
\r
229 userData, options, errorCallback );
\r
232 // *************************************************** //
\r
234 // Public RtApi definitions (see end of file for
\r
235 // private or protected utility functions).
\r
237 // *************************************************** //
\r
241 stream_.state = STREAM_CLOSED;
\r
242 stream_.mode = UNINITIALIZED;
\r
243 stream_.apiHandle = 0;
\r
244 stream_.userBuffer[0] = 0;
\r
245 stream_.userBuffer[1] = 0;
\r
246 MUTEX_INITIALIZE( &stream_.mutex );
\r
247 showWarnings_ = true;
\r
248 firstErrorOccurred_ = false;
\r
253 MUTEX_DESTROY( &stream_.mutex );
\r
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
257 RtAudio::StreamParameters *iParams,
\r
258 RtAudioFormat format, unsigned int sampleRate,
\r
259 unsigned int *bufferFrames,
\r
260 RtAudioCallback callback, void *userData,
\r
261 RtAudio::StreamOptions *options,
\r
262 RtAudioErrorCallback errorCallback )
\r
264 if ( stream_.state != STREAM_CLOSED ) {
\r
265 errorText_ = "RtApi::openStream: a stream is already open!";
\r
266 error( RtAudioError::INVALID_USE );
\r
270 // Clear stream information potentially left from a previously open stream.
\r
273 if ( oParams && oParams->nChannels < 1 ) {
\r
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 if ( iParams && iParams->nChannels < 1 ) {
\r
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
281 error( RtAudioError::INVALID_USE );
\r
285 if ( oParams == NULL && iParams == NULL ) {
\r
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
287 error( RtAudioError::INVALID_USE );
\r
291 if ( formatBytes(format) == 0 ) {
\r
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
293 error( RtAudioError::INVALID_USE );
\r
297 unsigned int nDevices = getDeviceCount();
\r
298 unsigned int oChannels = 0;
\r
300 oChannels = oParams->nChannels;
\r
301 if ( oParams->deviceId >= nDevices ) {
\r
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
303 error( RtAudioError::INVALID_USE );
\r
308 unsigned int iChannels = 0;
\r
310 iChannels = iParams->nChannels;
\r
311 if ( iParams->deviceId >= nDevices ) {
\r
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
313 error( RtAudioError::INVALID_USE );
\r
320 if ( oChannels > 0 ) {
\r
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
323 sampleRate, format, bufferFrames, options );
\r
324 if ( result == false ) {
\r
325 error( RtAudioError::SYSTEM_ERROR );
\r
330 if ( iChannels > 0 ) {
\r
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
333 sampleRate, format, bufferFrames, options );
\r
334 if ( result == false ) {
\r
335 if ( oChannels > 0 ) closeStream();
\r
336 error( RtAudioError::SYSTEM_ERROR );
\r
341 stream_.callbackInfo.callback = (void *) callback;
\r
342 stream_.callbackInfo.userData = userData;
\r
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
346 stream_.state = STREAM_STOPPED;
\r
349 unsigned int RtApi :: getDefaultInputDevice( void )
\r
351 // Should be implemented in subclasses if possible.
\r
355 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
357 // Should be implemented in subclasses if possible.
\r
361 void RtApi :: closeStream( void )
\r
363 // MUST be implemented in subclasses!
\r
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
370 RtAudio::StreamOptions * /*options*/ )
\r
372 // MUST be implemented in subclasses!
\r
376 void RtApi :: tickStreamTime( void )
\r
378 // Subclasses that do not provide their own implementation of
\r
379 // getStreamTime should call this function once per buffer I/O to
\r
380 // provide basic stream time support.
\r
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
384 #if defined( HAVE_GETTIMEOFDAY )
\r
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
389 long RtApi :: getStreamLatency( void )
\r
393 long totalLatency = 0;
\r
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
395 totalLatency = stream_.latency[0];
\r
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
397 totalLatency += stream_.latency[1];
\r
399 return totalLatency;
\r
402 double RtApi :: getStreamTime( void )
\r
406 #if defined( HAVE_GETTIMEOFDAY )
\r
407 // Return a very accurate estimate of the stream time by
\r
408 // adding in the elapsed time since the last tick.
\r
409 struct timeval then;
\r
410 struct timeval now;
\r
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
413 return stream_.streamTime;
\r
415 gettimeofday( &now, NULL );
\r
416 then = stream_.lastTickTimestamp;
\r
417 return stream_.streamTime +
\r
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
419 (then.tv_sec + 0.000001 * then.tv_usec));
\r
421 return stream_.streamTime;
\r
425 void RtApi :: setStreamTime( double time )
\r
430 stream_.streamTime = time;
\r
431 #if defined( HAVE_GETTIMEOFDAY )
\r
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
436 unsigned int RtApi :: getStreamSampleRate( void )
\r
440 return stream_.sampleRate;
\r
444 // *************************************************** //
\r
446 // OS/API-specific methods.
\r
448 // *************************************************** //
\r
450 #if defined(__MACOSX_CORE__)
\r
452 // The OS X CoreAudio API is designed to use a separate callback
\r
453 // procedure for each of its audio devices. A single RtAudio duplex
\r
454 // stream using two different devices is supported here, though it
\r
455 // cannot be guaranteed to always behave correctly because we cannot
\r
456 // synchronize these two callbacks.
\r
458 // A property listener is installed for over/underrun information.
\r
459 // However, no functionality is currently provided to allow property
\r
460 // listeners to trigger user handlers because it is unclear what could
\r
461 // be done if a critical stream parameter (buffer size, sample rate,
\r
462 // device disconnect) notification arrived. The listeners entail
\r
463 // quite a bit of extra code and most likely, a user program wouldn't
\r
464 // be prepared for the result anyway. However, we do provide a flag
\r
465 // to the client callback function to inform of an over/underrun.
\r
467 // A structure to hold various information related to the CoreAudio API
\r
469 struct CoreHandle {
\r
470 AudioDeviceID id[2]; // device ids
\r
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
472 AudioDeviceIOProcID procId[2];
\r
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
475 UInt32 nStreams[2]; // number of streams to use
\r
477 char *deviceBuffer;
\r
478 pthread_cond_t condition;
\r
479 int drainCounter; // Tracks callback counts when draining
\r
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
486 RtApiCore:: RtApiCore()
\r
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
489 // This is a largely undocumented but absolutely necessary
\r
490 // requirement starting with OS-X 10.6. If not called, queries and
\r
491 // updates to various audio device properties are not handled
\r
493 CFRunLoopRef theRunLoop = NULL;
\r
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
495 kAudioObjectPropertyScopeGlobal,
\r
496 kAudioObjectPropertyElementMaster };
\r
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
498 if ( result != noErr ) {
\r
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
500 error( RtAudioError::WARNING );
\r
505 RtApiCore :: ~RtApiCore()
\r
507 // The subclass destructor gets called before the base class
\r
508 // destructor, so close an existing stream before deallocating
\r
509 // apiDeviceId memory.
\r
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
513 unsigned int RtApiCore :: getDeviceCount( void )
\r
515 // Find out how many audio devices there are, if any.
\r
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
519 if ( result != noErr ) {
\r
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
521 error( RtAudioError::WARNING );
\r
525 return dataSize / sizeof( AudioDeviceID );
\r
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
530 unsigned int nDevices = getDeviceCount();
\r
531 if ( nDevices <= 1 ) return 0;
\r
534 UInt32 dataSize = sizeof( AudioDeviceID );
\r
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
537 if ( result != noErr ) {
\r
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
539 error( RtAudioError::WARNING );
\r
543 dataSize *= nDevices;
\r
544 AudioDeviceID deviceList[ nDevices ];
\r
545 property.mSelector = kAudioHardwarePropertyDevices;
\r
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
547 if ( result != noErr ) {
\r
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
549 error( RtAudioError::WARNING );
\r
553 for ( unsigned int i=0; i<nDevices; i++ )
\r
554 if ( id == deviceList[i] ) return i;
\r
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
557 error( RtAudioError::WARNING );
\r
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
563 unsigned int nDevices = getDeviceCount();
\r
564 if ( nDevices <= 1 ) return 0;
\r
567 UInt32 dataSize = sizeof( AudioDeviceID );
\r
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
570 if ( result != noErr ) {
\r
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
572 error( RtAudioError::WARNING );
\r
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
577 AudioDeviceID deviceList[ nDevices ];
\r
578 property.mSelector = kAudioHardwarePropertyDevices;
\r
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
580 if ( result != noErr ) {
\r
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
582 error( RtAudioError::WARNING );
\r
586 for ( unsigned int i=0; i<nDevices; i++ )
\r
587 if ( id == deviceList[i] ) return i;
\r
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
590 error( RtAudioError::WARNING );
\r
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
596 RtAudio::DeviceInfo info;
\r
597 info.probed = false;
\r
600 unsigned int nDevices = getDeviceCount();
\r
601 if ( nDevices == 0 ) {
\r
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
603 error( RtAudioError::INVALID_USE );
\r
607 if ( device >= nDevices ) {
\r
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
609 error( RtAudioError::INVALID_USE );
\r
613 AudioDeviceID deviceList[ nDevices ];
\r
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
616 kAudioObjectPropertyScopeGlobal,
\r
617 kAudioObjectPropertyElementMaster };
\r
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
619 0, NULL, &dataSize, (void *) &deviceList );
\r
620 if ( result != noErr ) {
\r
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
622 error( RtAudioError::WARNING );
\r
626 AudioDeviceID id = deviceList[ device ];
\r
628 // Get the device name.
\r
630 CFStringRef cfname;
\r
631 dataSize = sizeof( CFStringRef );
\r
632 property.mSelector = kAudioObjectPropertyManufacturer;
\r
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
634 if ( result != noErr ) {
\r
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
636 errorText_ = errorStream_.str();
\r
637 error( RtAudioError::WARNING );
\r
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
642 int length = CFStringGetLength(cfname);
\r
643 char *mname = (char *)malloc(length * 3 + 1);
\r
644 #if defined( UNICODE ) || defined( _UNICODE )
\r
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
649 info.name.append( (const char *)mname, strlen(mname) );
\r
650 info.name.append( ": " );
\r
651 CFRelease( cfname );
\r
654 property.mSelector = kAudioObjectPropertyName;
\r
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
656 if ( result != noErr ) {
\r
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
658 errorText_ = errorStream_.str();
\r
659 error( RtAudioError::WARNING );
\r
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
664 length = CFStringGetLength(cfname);
\r
665 char *name = (char *)malloc(length * 3 + 1);
\r
666 #if defined( UNICODE ) || defined( _UNICODE )
\r
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
671 info.name.append( (const char *)name, strlen(name) );
\r
672 CFRelease( cfname );
\r
675 // Get the output stream "configuration".
\r
676 AudioBufferList *bufferList = nil;
\r
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
678 property.mScope = kAudioDevicePropertyScopeOutput;
\r
679 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
682 if ( result != noErr || dataSize == 0 ) {
\r
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
684 errorText_ = errorStream_.str();
\r
685 error( RtAudioError::WARNING );
\r
689 // Allocate the AudioBufferList.
\r
690 bufferList = (AudioBufferList *) malloc( dataSize );
\r
691 if ( bufferList == NULL ) {
\r
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
693 error( RtAudioError::WARNING );
\r
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
698 if ( result != noErr || dataSize == 0 ) {
\r
699 free( bufferList );
\r
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
701 errorText_ = errorStream_.str();
\r
702 error( RtAudioError::WARNING );
\r
706 // Get output channel information.
\r
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
708 for ( i=0; i<nStreams; i++ )
\r
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
710 free( bufferList );
\r
712 // Get the input stream "configuration".
\r
713 property.mScope = kAudioDevicePropertyScopeInput;
\r
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
715 if ( result != noErr || dataSize == 0 ) {
\r
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
717 errorText_ = errorStream_.str();
\r
718 error( RtAudioError::WARNING );
\r
722 // Allocate the AudioBufferList.
\r
723 bufferList = (AudioBufferList *) malloc( dataSize );
\r
724 if ( bufferList == NULL ) {
\r
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
726 error( RtAudioError::WARNING );
\r
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
731 if (result != noErr || dataSize == 0) {
\r
732 free( bufferList );
\r
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
734 errorText_ = errorStream_.str();
\r
735 error( RtAudioError::WARNING );
\r
739 // Get input channel information.
\r
740 nStreams = bufferList->mNumberBuffers;
\r
741 for ( i=0; i<nStreams; i++ )
\r
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
743 free( bufferList );
\r
745 // If device opens for both playback and capture, we determine the channels.
\r
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
749 // Probe the device sample rates.
\r
750 bool isInput = false;
\r
751 if ( info.outputChannels == 0 ) isInput = true;
\r
753 // Determine the supported sample rates.
\r
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
759 errorText_ = errorStream_.str();
\r
760 error( RtAudioError::WARNING );
\r
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
765 AudioValueRange rangeList[ nRanges ];
\r
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
767 if ( result != kAudioHardwareNoError ) {
\r
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
769 errorText_ = errorStream_.str();
\r
770 error( RtAudioError::WARNING );
\r
774 // The sample rate reporting mechanism is a bit of a mystery. It
\r
775 // seems that it can either return individual rates or a range of
\r
776 // rates. I assume that if the min / max range values are the same,
\r
777 // then that represents a single supported rate and if the min / max
\r
778 // range values are different, the device supports an arbitrary
\r
779 // range of values (though there might be multiple ranges, so we'll
\r
780 // use the most conservative range).
\r
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
782 bool haveValueRange = false;
\r
783 info.sampleRates.clear();
\r
784 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
787 info.sampleRates.push_back( tmpSr );
\r
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
790 info.preferredSampleRate = tmpSr;
\r
793 haveValueRange = true;
\r
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
799 if ( haveValueRange ) {
\r
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
805 info.preferredSampleRate = SAMPLE_RATES[k];
\r
810 // Sort and remove any redundant values
\r
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
814 if ( info.sampleRates.size() == 0 ) {
\r
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
816 errorText_ = errorStream_.str();
\r
817 error( RtAudioError::WARNING );
\r
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
822 // Thus, any other "physical" formats supported by the device are of
\r
823 // no interest to the client.
\r
824 info.nativeFormats = RTAUDIO_FLOAT32;
\r
826 if ( info.outputChannels > 0 )
\r
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
828 if ( info.inputChannels > 0 )
\r
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
831 info.probed = true;
\r
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
836 const AudioTimeStamp* /*inNow*/,
\r
837 const AudioBufferList* inInputData,
\r
838 const AudioTimeStamp* /*inInputTime*/,
\r
839 AudioBufferList* outOutputData,
\r
840 const AudioTimeStamp* /*inOutputTime*/,
\r
841 void* infoPointer )
\r
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
845 RtApiCore *object = (RtApiCore *) info->object;
\r
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
847 return kAudioHardwareUnspecifiedError;
\r
849 return kAudioHardwareNoError;
\r
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
854 const AudioObjectPropertyAddress properties[],
\r
855 void* handlePointer )
\r
857 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
861 handle->xrun[1] = true;
\r
863 handle->xrun[0] = true;
\r
867 return kAudioHardwareNoError;
\r
870 static OSStatus rateListener( AudioObjectID inDevice,
\r
871 UInt32 /*nAddresses*/,
\r
872 const AudioObjectPropertyAddress /*properties*/[],
\r
873 void* ratePointer )
\r
875 Float64 *rate = (Float64 *) ratePointer;
\r
876 UInt32 dataSize = sizeof( Float64 );
\r
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
878 kAudioObjectPropertyScopeGlobal,
\r
879 kAudioObjectPropertyElementMaster };
\r
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
881 return kAudioHardwareNoError;
\r
// Open and configure a CoreAudio device for one stream direction.
//
// Steps visible below: validate the device index; map it to an AudioDeviceID;
// inspect the device's stream configuration to find the stream(s) covering the
// requested channels; clamp/set the hardware buffer size; optionally acquire
// exclusive ("hog") mode; set the nominal sample rate (waiting for the change
// via rateListener); set the virtual and, if needed, physical stream formats;
// record latency and conversion flags; allocate the CoreHandle and the
// user/device buffers; install the IOProc callback and the overload (xrun)
// property listener.  Returns true on success; error paths set errorText_.
// NOTE(review): this excerpt is missing interleaved lines from the original
// file (braces, FAILURE returns, goto targets); comments describe only what
// is visible here.
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
885 unsigned int firstChannel, unsigned int sampleRate,
\r
886 RtAudioFormat format, unsigned int *bufferSize,
\r
887 RtAudio::StreamOptions *options )
\r
// Sanity-check the requested device index against the current device count.
890 unsigned int nDevices = getDeviceCount();
\r
891 if ( nDevices == 0 ) {
\r
892 // This should not happen because a check is made before this function is called.
\r
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
897 if ( device >= nDevices ) {
\r
898 // This should not happen because a check is made before this function is called.
\r
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
// Fetch the system device list to translate our index into an AudioDeviceID.
903 AudioDeviceID deviceList[ nDevices ];
\r
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
906 kAudioObjectPropertyScopeGlobal,
\r
907 kAudioObjectPropertyElementMaster };
\r
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
909 0, NULL, &dataSize, (void *) &deviceList );
\r
910 if ( result != noErr ) {
\r
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
915 AudioDeviceID id = deviceList[ device ];
\r
917 // Setup for stream mode.
\r
918 bool isInput = false;
\r
919 if ( mode == INPUT ) {
\r
921 property.mScope = kAudioDevicePropertyScopeInput;
\r
924 property.mScope = kAudioDevicePropertyScopeOutput;
\r
926 // Get the stream "configuration".
\r
927 AudioBufferList *bufferList = nil;
\r
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
931 if ( result != noErr || dataSize == 0 ) {
\r
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
933 errorText_ = errorStream_.str();
\r
937 // Allocate the AudioBufferList.
\r
938 bufferList = (AudioBufferList *) malloc( dataSize );
\r
939 if ( bufferList == NULL ) {
\r
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
945 if (result != noErr || dataSize == 0) {
\r
946 free( bufferList );
\r
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
948 errorText_ = errorStream_.str();
\r
952 // Search for one or more streams that contain the desired number of
\r
953 // channels. CoreAudio devices can have an arbitrary number of
\r
954 // streams and each stream can have an arbitrary number of channels.
\r
955 // For each stream, a single buffer of interleaved samples is
\r
956 // provided. RtAudio prefers the use of one stream of interleaved
\r
957 // data or multiple consecutive single-channel streams. However, we
\r
958 // now support multiple consecutive multi-channel streams of
\r
959 // interleaved data as well.
\r
960 UInt32 iStream, offsetCounter = firstChannel;
\r
961 UInt32 nStreams = bufferList->mNumberBuffers;
\r
962 bool monoMode = false;
\r
963 bool foundStream = false;
\r
965 // First check that the device supports the requested number of
\r
// Sum channels over all streams: the device must cover channels + firstChannel.
967 UInt32 deviceChannels = 0;
\r
968 for ( iStream=0; iStream<nStreams; iStream++ )
\r
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
971 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
972 free( bufferList );
\r
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
974 errorText_ = errorStream_.str();
\r
978 // Look for a single stream meeting our needs.
\r
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
// A single stream suffices if it holds the requested channels past the offset.
982 if ( streamChannels >= channels + offsetCounter ) {
\r
983 firstStream = iStream;
\r
984 channelOffset = offsetCounter;
\r
985 foundStream = true;
\r
988 if ( streamChannels > offsetCounter ) break;
\r
989 offsetCounter -= streamChannels;
\r
992 // If we didn't find a single stream above, then we should be able
\r
993 // to meet the channel specification with multiple streams.
\r
994 if ( foundStream == false ) {
\r
// Walk streams again to find where firstChannel lands, then count how many
// consecutive streams are needed to cover the remaining channels.
996 offsetCounter = firstChannel;
\r
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
999 if ( streamChannels > offsetCounter ) break;
\r
1000 offsetCounter -= streamChannels;
\r
1003 firstStream = iStream;
\r
1004 channelOffset = offsetCounter;
\r
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1007 if ( streamChannels > 1 ) monoMode = false;
\r
1008 while ( channelCounter > 0 ) {
\r
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1010 if ( streamChannels > 1 ) monoMode = false;
\r
1011 channelCounter -= streamChannels;
\r
1016 free( bufferList );
\r
1018 // Determine the buffer size.
\r
1019 AudioValueRange bufferRange;
\r
1020 dataSize = sizeof( AudioValueRange );
\r
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1024 if ( result != noErr ) {
\r
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1026 errorText_ = errorStream_.str();
\r
// Clamp the requested buffer size to the device's supported range;
// RTAUDIO_MINIMIZE_LATENCY forces the minimum.
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1034 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1035 // need to make this setting for the master channel.
\r
1036 UInt32 theSize = (UInt32) *bufferSize;
\r
1037 dataSize = sizeof( UInt32 );
\r
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1041 if ( result != noErr ) {
\r
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1043 errorText_ = errorStream_.str();
\r
1047 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1048 // MUST be the same in both directions!
\r
1049 *bufferSize = theSize;
\r
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1052 errorText_ = errorStream_.str();
\r
1056 stream_.bufferSize = *bufferSize;
\r
1057 stream_.nBuffers = 1;
\r
1059 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
// Read the current hog owner pid; only take ownership if another (or no)
// process currently holds it.
1062 dataSize = sizeof( hog_pid );
\r
1063 property.mSelector = kAudioDevicePropertyHogMode;
\r
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1065 if ( result != noErr ) {
\r
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1067 errorText_ = errorStream_.str();
\r
1071 if ( hog_pid != getpid() ) {
\r
1072 hog_pid = getpid();
\r
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1074 if ( result != noErr ) {
\r
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1076 errorText_ = errorStream_.str();
\r
1082 // Check and if necessary, change the sample rate for the device.
\r
1083 Float64 nominalRate;
\r
1084 dataSize = sizeof( Float64 );
\r
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1087 if ( result != noErr ) {
\r
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1089 errorText_ = errorStream_.str();
\r
1093 // Only change the sample rate if off by more than 1 Hz.
\r
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1096 // Set a property listener for the sample rate change
\r
1097 Float64 reportedRate = 0.0;
\r
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1100 if ( result != noErr ) {
\r
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1102 errorText_ = errorStream_.str();
\r
1106 nominalRate = (Float64) sampleRate;
\r
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1108 if ( result != noErr ) {
\r
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1111 errorText_ = errorStream_.str();
\r
1115 // Now wait until the reported nominal rate is what we just set.
\r
// Poll (up to ~5 seconds by the counter below) for rateListener to observe
// the new rate.  The per-iteration sleep call is not visible in this
// excerpt — presumably a usleep(5000) between iterations; confirm against
// the original file.
1116 UInt32 microCounter = 0;
\r
1117 while ( reportedRate != nominalRate ) {
\r
1118 microCounter += 5000;
\r
1119 if ( microCounter > 5000000 ) break;
\r
1123 // Remove the property listener.
\r
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1126 if ( microCounter > 5000000 ) {
\r
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1128 errorText_ = errorStream_.str();
\r
1133 // Now set the stream format for all streams. Also, check the
\r
1134 // physical format of the device and change that if necessary.
\r
1135 AudioStreamBasicDescription description;
\r
1136 dataSize = sizeof( AudioStreamBasicDescription );
\r
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1139 if ( result != noErr ) {
\r
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1141 errorText_ = errorStream_.str();
\r
1145 // Set the sample rate and data format id. However, only make the
\r
1146 // change if the sample rate is not within 1.0 of the desired
\r
1147 // rate and the format is not linear pcm.
\r
1148 bool updateFormat = false;
\r
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1150 description.mSampleRate = (Float64) sampleRate;
\r
1151 updateFormat = true;
\r
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1155 description.mFormatID = kAudioFormatLinearPCM;
\r
1156 updateFormat = true;
\r
1159 if ( updateFormat ) {
\r
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1161 if ( result != noErr ) {
\r
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1163 errorText_ = errorStream_.str();
\r
1168 // Now check the physical format.
\r
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1171 if ( result != noErr ) {
\r
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1173 errorText_ = errorStream_.str();
\r
1177 //std::cout << "Current physical stream format:" << std::endl;
\r
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1184 description.mFormatID = kAudioFormatLinearPCM;
\r
1185 //description.mSampleRate = (Float64) sampleRate;
\r
1186 AudioStreamBasicDescription testDescription = description;
\r
1187 UInt32 formatFlags;
\r
1189 // We'll try higher bit rates first and then work our way down.
\r
// NOTE(review): the vector element type is pair<UInt32, UInt32>, but the
// entries below are constructed as pair<Float32, UInt32>.  The fractional
// markers 24.2 / 24.4 therefore truncate to 24 when stored, so the three
// 24-bit variants become distinguishable only by their format flags.
// Upstream RtAudio declares this vector with Float32 as the first type —
// verify which was intended.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
// Try each candidate physical format in order until the device accepts one.
1204 bool setPhysicalFormat = false;
\r
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1206 testDescription = description;
\r
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1208 testDescription.mFormatFlags = physicalFormats[i].second;
\r
// NOTE(review): '~' here is bitwise NOT, so this sub-expression is non-zero
// for (almost) any flag value — logical '!' was presumably intended (this
// was corrected in later RtAudio releases).  As written, every 24-bit entry
// takes the 4-bytes-per-sample branch.
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1215 if ( result == noErr ) {
\r
1216 setPhysicalFormat = true;
\r
1217 //std::cout << "Updated physical stream format:" << std::endl;
\r
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1226 if ( !setPhysicalFormat ) {
\r
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1228 errorText_ = errorStream_.str();
\r
1231 } // done setting virtual/physical formats.
\r
1233 // Get the stream / device latency.
\r
1235 dataSize = sizeof( UInt32 );
\r
1236 property.mSelector = kAudioDevicePropertyLatency;
\r
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
// Latency lookup failure is non-fatal: report a warning and continue.
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1242 errorText_ = errorStream_.str();
\r
1243 error( RtAudioError::WARNING );
\r
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1248 // always be presented in native-endian format, so we should never
\r
1249 // need to byte swap.
\r
1250 stream_.doByteSwap[mode] = false;
\r
1252 // From the CoreAudio documentation, PCM data must be supplied as
\r
// The device-side format is always 32-bit float for CoreAudio.
1254 stream_.userFormat = format;
\r
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1257 if ( streamCount == 1 )
\r
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1259 else // multiple streams
\r
1260 stream_.nDeviceChannels[mode] = channels;
\r
1261 stream_.nUserChannels[mode] = channels;
\r
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1264 else stream_.userInterleaved = true;
\r
1265 stream_.deviceInterleaved[mode] = true;
\r
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1268 // Set flags for buffer conversion.
\r
1269 stream_.doConvertBuffer[mode] = false;
\r
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1271 stream_.doConvertBuffer[mode] = true;
\r
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1273 stream_.doConvertBuffer[mode] = true;
\r
1274 if ( streamCount == 1 ) {
\r
1275 if ( stream_.nUserChannels[mode] > 1 &&
\r
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1277 stream_.doConvertBuffer[mode] = true;
\r
1279 else if ( monoMode && stream_.userInterleaved )
\r
1280 stream_.doConvertBuffer[mode] = true;
\r
1282 // Allocate our CoreHandle structure for the stream.
\r
1283 CoreHandle *handle = 0;
\r
1284 if ( stream_.apiHandle == 0 ) {
\r
1286 handle = new CoreHandle;
\r
1288 catch ( std::bad_alloc& ) {
\r
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1297 stream_.apiHandle = (void *) handle;
\r
// Handle already exists (second half of a duplex open): reuse it.
1300 handle = (CoreHandle *) stream_.apiHandle;
\r
1301 handle->iStream[mode] = firstStream;
\r
1302 handle->nStreams[mode] = streamCount;
\r
1303 handle->id[mode] = id;
\r
1305 // Allocate necessary internal buffers.
\r
1306 unsigned long bufferBytes;
\r
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
// NOTE(review): memset() runs before the NULL check below; if malloc()
// failed this dereferences NULL.  The check should precede the memset
// (or calloc should be used, as the commented-out line above suggests).
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1311 if ( stream_.userBuffer[mode] == NULL ) {
\r
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1316 // If possible, we will make use of the CoreAudio stream buffers as
\r
1317 // "device buffers". However, we can't do this if using multiple
\r
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
// Reuse an existing (output) device buffer for duplex input when it is
// already large enough.
1321 bool makeBuffer = true;
\r
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1323 if ( mode == INPUT ) {
\r
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1330 if ( makeBuffer ) {
\r
1331 bufferBytes *= *bufferSize;
\r
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1334 if ( stream_.deviceBuffer == NULL ) {
\r
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1341 stream_.sampleRate = sampleRate;
\r
1342 stream_.device[mode] = device;
\r
1343 stream_.state = STREAM_STOPPED;
\r
1344 stream_.callbackInfo.object = (void *) this;
\r
1346 // Setup the buffer conversion information structure.
\r
1347 if ( stream_.doConvertBuffer[mode] ) {
\r
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1349 else setConvertInfo( mode, channelOffset );
\r
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1353 // Only one callback procedure per device.
\r
1354 stream_.mode = DUPLEX;
\r
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1362 if ( result != noErr ) {
\r
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1364 errorText_ = errorStream_.str();
\r
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1368 stream_.mode = DUPLEX;
\r
1370 stream_.mode = mode;
\r
1373 // Setup the device property listener for over/underload.
\r
1374 property.mSelector = kAudioDeviceProcessorOverload;
\r
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
// Error cleanup path: destroy the condition variable, free all buffers,
// and mark the stream closed before returning failure.
1382 pthread_cond_destroy( &handle->condition );
\r
1384 stream_.apiHandle = 0;
\r
1387 for ( int i=0; i<2; i++ ) {
\r
1388 if ( stream_.userBuffer[i] ) {
\r
1389 free( stream_.userBuffer[i] );
\r
1390 stream_.userBuffer[i] = 0;
\r
1394 if ( stream_.deviceBuffer ) {
\r
1395 free( stream_.deviceBuffer );
\r
1396 stream_.deviceBuffer = 0;
\r
1399 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: remove the overload (xrun) property listener and
// the IOProc for each active direction (stopping the device first if it is
// still running), free the user and device buffers, destroy the pthread
// condition variable, and reset the stream state to CLOSED.
1403 void RtApiCore :: closeStream( void )
\r
// Guard: warn and bail if there is nothing to close.
1405 if ( stream_.state == STREAM_CLOSED ) {
\r
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1407 error( RtAudioError::WARNING );
\r
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
// Tear down the output side (handle->id[0]).
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1415 kAudioObjectPropertyScopeGlobal,
\r
1416 kAudioObjectPropertyElementMaster };
\r
1418 property.mSelector = kAudioDeviceProcessorOverload;
\r
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
\r
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1422 error( RtAudioError::WARNING );
\r
1425 if ( stream_.state == STREAM_RUNNING )
\r
1426 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
// Tear down the input side (handle->id[1]); skipped for a duplex stream on
// a single device, which shares the output IOProc.
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1438 kAudioObjectPropertyScopeGlobal,
\r
1439 kAudioObjectPropertyElementMaster };
\r
1441 property.mSelector = kAudioDeviceProcessorOverload;
\r
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
\r
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1445 error( RtAudioError::WARNING );
\r
1448 if ( stream_.state == STREAM_RUNNING )
\r
1449 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
// Release the per-direction user buffers and the shared device buffer.
1458 for ( int i=0; i<2; i++ ) {
\r
1459 if ( stream_.userBuffer[i] ) {
\r
1460 free( stream_.userBuffer[i] );
\r
1461 stream_.userBuffer[i] = 0;
\r
1465 if ( stream_.deviceBuffer ) {
\r
1466 free( stream_.deviceBuffer );
\r
1467 stream_.deviceBuffer = 0;
\r
1470 // Destroy pthread condition variable.
\r
1471 pthread_cond_destroy( &handle->condition );
\r
1473 stream_.apiHandle = 0;
\r
1475 stream_.mode = UNINITIALIZED;
\r
1476 stream_.state = STREAM_CLOSED;
\r
// Start the open stream: call AudioDeviceStart() for the output device and,
// when the input direction uses a separate device, for the input device too,
// then reset the drain state and mark the stream RUNNING.  Reports a
// SYSTEM_ERROR if either start call failed.
1479 void RtApiCore :: startStream( void )
\r
// Guard: warn and bail if the stream is already running.
1482 if ( stream_.state == STREAM_RUNNING ) {
\r
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1484 error( RtAudioError::WARNING );
\r
1488 OSStatus result = noErr;
\r
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1493 if ( result != noErr ) {
\r
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1495 errorText_ = errorStream_.str();
\r
// Start the input device separately only when it differs from the output
// device (a single-device duplex stream shares one IOProc).
1500 if ( stream_.mode == INPUT ||
\r
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1504 if ( result != noErr ) {
\r
// NOTE(review): unlike the output path above, this message omits the
// getErrorCode( result ) detail — minor inconsistency.
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1506 errorText_ = errorStream_.str();
\r
// Reset drain bookkeeping used by stopStream()/callbackEvent().
1511 handle->drainCounter = 0;
\r
1512 handle->internalDrain = false;
\r
1513 stream_.state = STREAM_RUNNING;
\r
1516 if ( result == noErr ) return;
\r
1517 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the running stream.  For the output direction, first lets the output
// drain: sets drainCounter and blocks on the CoreHandle condition variable
// until callbackEvent() signals completion, then calls AudioDeviceStop() for
// each active device.  Reports a SYSTEM_ERROR if a stop call failed.
1520 void RtApiCore :: stopStream( void )
\r
// Guard: warn and bail if the stream is already stopped.
1523 if ( stream_.state == STREAM_STOPPED ) {
\r
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1525 error( RtAudioError::WARNING );
\r
1529 OSStatus result = noErr;
\r
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// drainCounter == 0 means this is an external stop request: ask the
// callback to drain (zero-fill) and wait for its signal.
// NOTE(review): pthread_cond_wait() requires stream_.mutex to be locked by
// this thread — the locking is not visible in this excerpt; confirm at the
// call site / surrounding code.
1533 if ( handle->drainCounter == 0 ) {
\r
1534 handle->drainCounter = 2;
\r
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1539 if ( result != noErr ) {
\r
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1541 errorText_ = errorStream_.str();
\r
// Stop the separate input device when one is in use.
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1549 if ( result != noErr ) {
\r
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1551 errorText_ = errorStream_.str();
\r
1556 stream_.state = STREAM_STOPPED;
\r
1559 if ( result == noErr ) return;
\r
1560 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the running stream without draining: setting drainCounter to a
// non-zero value makes stopStream() skip the drain/condition-wait step.
// (The subsequent stopStream() call is not visible in this excerpt.)
1563 void RtApiCore :: abortStream( void )
\r
// Guard: warn and bail if the stream is already stopped.
1566 if ( stream_.state == STREAM_STOPPED ) {
\r
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1568 error( RtAudioError::WARNING );
\r
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1573 handle->drainCounter = 2;
\r
1578 // This function will be called by a spawned thread when the user
\r
1579 // callback function signals that the stream should be stopped or
\r
1580 // aborted. It is better to handle it this way because the
\r
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1582 // function is called.
\r
// Thread entry point used by callbackEvent() to stop a stream from outside
// the audio callback.  The CallbackInfo context carries the RtApiCore
// instance whose stopStream() must be called; the thread exits immediately
// afterwards.
1583 static void *coreStopStream( void *ptr )
\r
1585 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1586 RtApiCore *object = (RtApiCore *) info->object;
\r
1588 object->stopStream();
\r
1589 pthread_exit( NULL );
\r
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1593 const AudioBufferList *inBufferList,
\r
1594 const AudioBufferList *outBufferList )
\r
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1597 if ( stream_.state == STREAM_CLOSED ) {
\r
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1599 error( RtAudioError::WARNING );
\r
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1606 // Check if we were draining the stream and signal is finished.
\r
1607 if ( handle->drainCounter > 3 ) {
\r
1608 ThreadHandle threadId;
\r
1610 stream_.state = STREAM_STOPPING;
\r
1611 if ( handle->internalDrain == true )
\r
1612 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1613 else // external call to stopStream()
\r
1614 pthread_cond_signal( &handle->condition );
\r
1618 AudioDeviceID outputDevice = handle->id[0];
\r
1620 // Invoke user callback to get fresh output data UNLESS we are
\r
1621 // draining stream or duplex mode AND the input/output devices are
\r
1622 // different AND this function is called for the input device.
\r
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1625 double streamTime = getStreamTime();
\r
1626 RtAudioStreamStatus status = 0;
\r
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1629 handle->xrun[0] = false;
\r
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1632 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1633 handle->xrun[1] = false;
\r
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1637 stream_.bufferSize, streamTime, status, info->userData );
\r
1638 if ( cbReturnValue == 2 ) {
\r
1639 stream_.state = STREAM_STOPPING;
\r
1640 handle->drainCounter = 2;
\r
1644 else if ( cbReturnValue == 1 ) {
\r
1645 handle->drainCounter = 1;
\r
1646 handle->internalDrain = true;
\r
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1654 if ( handle->nStreams[0] == 1 ) {
\r
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1659 else { // fill multiple streams with zeros
\r
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1667 else if ( handle->nStreams[0] == 1 ) {
\r
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1672 else { // copy from user buffer
\r
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1674 stream_.userBuffer[0],
\r
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1678 else { // fill multiple streams
\r
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1680 if ( stream_.doConvertBuffer[0] ) {
\r
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1692 else { // fill multiple multi-channel streams with interleaved data
\r
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1694 Float32 *out, *in;
\r
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1697 UInt32 inChannels = stream_.nUserChannels[0];
\r
1698 if ( stream_.doConvertBuffer[0] ) {
\r
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1700 inChannels = stream_.nDeviceChannels[0];
\r
1703 if ( inInterleaved ) inOffset = 1;
\r
1704 else inOffset = stream_.bufferSize;
\r
1706 channelsLeft = inChannels;
\r
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1713 // Account for possible channel offset in first stream
\r
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1715 streamChannels -= stream_.channelOffset[0];
\r
1716 outJump = stream_.channelOffset[0];
\r
1720 // Account for possible unfilled channels at end of the last stream
\r
1721 if ( streamChannels > channelsLeft ) {
\r
1722 outJump = streamChannels - channelsLeft;
\r
1723 streamChannels = channelsLeft;
\r
1726 // Determine input buffer offsets and skips
\r
1727 if ( inInterleaved ) {
\r
1728 inJump = inChannels;
\r
1729 in += inChannels - channelsLeft;
\r
1733 in += (inChannels - channelsLeft) * inOffset;
\r
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1738 *out++ = in[j*inOffset];
\r
1743 channelsLeft -= streamChannels;
\r
1749 // Don't bother draining input
\r
1750 if ( handle->drainCounter ) {
\r
1751 handle->drainCounter++;
\r
1755 AudioDeviceID inputDevice;
\r
1756 inputDevice = handle->id[1];
\r
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1759 if ( handle->nStreams[1] == 1 ) {
\r
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1761 convertBuffer( stream_.userBuffer[1],
\r
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1763 stream_.convertInfo[1] );
\r
1765 else { // copy to user buffer
\r
1766 memcpy( stream_.userBuffer[1],
\r
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1771 else { // read from multiple streams
\r
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1782 else { // read from multiple multi-channel streams
\r
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1784 Float32 *out, *in;
\r
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1787 UInt32 outChannels = stream_.nUserChannels[1];
\r
1788 if ( stream_.doConvertBuffer[1] ) {
\r
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1790 outChannels = stream_.nDeviceChannels[1];
\r
1793 if ( outInterleaved ) outOffset = 1;
\r
1794 else outOffset = stream_.bufferSize;
\r
1796 channelsLeft = outChannels;
\r
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1803 // Account for possible channel offset in first stream
\r
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1805 streamChannels -= stream_.channelOffset[1];
\r
1806 inJump = stream_.channelOffset[1];
\r
1810 // Account for possible unread channels at end of the last stream
\r
1811 if ( streamChannels > channelsLeft ) {
\r
1812 inJump = streamChannels - channelsLeft;
\r
1813 streamChannels = channelsLeft;
\r
1816 // Determine output buffer offsets and skips
\r
1817 if ( outInterleaved ) {
\r
1818 outJump = outChannels;
\r
1819 out += outChannels - channelsLeft;
\r
1823 out += (outChannels - channelsLeft) * outOffset;
\r
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1828 out[j*outOffset] = *in++;
\r
1833 channelsLeft -= streamChannels;
\r
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1838 convertBuffer( stream_.userBuffer[1],
\r
1839 stream_.deviceBuffer,
\r
1840 stream_.convertInfo[1] );
\r
1846 //MUTEX_UNLOCK( &stream_.mutex );
\r
1848 RtApi::tickStreamTime();
\r
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1856 case kAudioHardwareNotRunningError:
\r
1857 return "kAudioHardwareNotRunningError";
\r
1859 case kAudioHardwareUnspecifiedError:
\r
1860 return "kAudioHardwareUnspecifiedError";
\r
1862 case kAudioHardwareUnknownPropertyError:
\r
1863 return "kAudioHardwareUnknownPropertyError";
\r
1865 case kAudioHardwareBadPropertySizeError:
\r
1866 return "kAudioHardwareBadPropertySizeError";
\r
1868 case kAudioHardwareIllegalOperationError:
\r
1869 return "kAudioHardwareIllegalOperationError";
\r
1871 case kAudioHardwareBadObjectError:
\r
1872 return "kAudioHardwareBadObjectError";
\r
1874 case kAudioHardwareBadDeviceError:
\r
1875 return "kAudioHardwareBadDeviceError";
\r
1877 case kAudioHardwareBadStreamError:
\r
1878 return "kAudioHardwareBadStreamError";
\r
1880 case kAudioHardwareUnsupportedOperationError:
\r
1881 return "kAudioHardwareUnsupportedOperationError";
\r
1883 case kAudioDeviceUnsupportedFormatError:
\r
1884 return "kAudioDeviceUnsupportedFormatError";
\r
1886 case kAudioDevicePermissionsError:
\r
1887 return "kAudioDevicePermissionsError";
\r
1890 return "CoreAudio unknown error";
\r
1894 //******************** End of __MACOSX_CORE__ *********************//
\r
1897 #if defined(__UNIX_JACK__)
\r
1899 // JACK is a low-latency audio server, originally written for the
\r
1900 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1901 // connect a number of different applications to an audio device, as
\r
1902 // well as allowing them to share audio between themselves.
\r
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1905 // have ports connected to the server. The JACK server is typically
\r
1906 // started in a terminal as follows:
\r
1908 // .jackd -d alsa -d hw:0
\r
1910 // or through an interface program such as qjackctl. Many of the
\r
1911 // parameters normally set for a stream are fixed by the JACK server
\r
1912 // and can be specified when the JACK server is started. In
\r
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1918 // frames, and number of buffers = 4. Once the server is running, it
\r
1919 // is not possible to override these values. If the values are not
\r
1920 // specified in the command-line, the JACK server uses default values.
\r
1922 // The JACK server does not have to be running when an instance of
\r
1923 // RtApiJack is created, though the function getDeviceCount() will
\r
1924 // report 0 devices found until JACK has been started. When no
\r
1925 // devices are available (i.e., the JACK server is not running), a
\r
1926 // stream cannot be opened.
\r
1928 #include <jack/jack.h>
\r
1929 #include <unistd.h>
\r
1932 // A structure to hold various information related to the Jack API
\r
1933 // implementation.
\r
1934 struct JackHandle {
\r
1935 jack_client_t *client;
\r
1936 jack_port_t **ports[2];
\r
1937 std::string deviceName[2];
\r
1939 pthread_cond_t condition;
\r
1940 int drainCounter; // Tracks callback counts when draining
\r
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error callback, installed to suppress Jack's internal
// error reporting in non-debug builds.
static void jackSilentError( const char * ) {}
\r
1949 RtApiJack :: RtApiJack()
\r
1950 :shouldAutoconnect_(true) {
\r
1951 // Nothing to do here.
\r
1952 #if !defined(__RTAUDIO_DEBUG__)
\r
1953 // Turn off Jack's internal error reporting.
\r
1954 jack_set_error_function( &jackSilentError );
\r
1958 RtApiJack :: ~RtApiJack()
\r
1960 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1963 unsigned int RtApiJack :: getDeviceCount( void )
\r
1965 // See if we can become a jack client.
\r
1966 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1967 jack_status_t *status = NULL;
\r
1968 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1969 if ( client == 0 ) return 0;
\r
1971 const char **ports;
\r
1972 std::string port, previousPort;
\r
1973 unsigned int nChannels = 0, nDevices = 0;
\r
1974 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1976 // Parse the port names up to the first colon (:).
\r
1977 size_t iColon = 0;
\r
1979 port = (char *) ports[ nChannels ];
\r
1980 iColon = port.find(":");
\r
1981 if ( iColon != std::string::npos ) {
\r
1982 port = port.substr( 0, iColon + 1 );
\r
1983 if ( port != previousPort ) {
\r
1985 previousPort = port;
\r
1988 } while ( ports[++nChannels] );
\r
1992 jack_client_close( client );
\r
1996 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1998 RtAudio::DeviceInfo info;
\r
1999 info.probed = false;
\r
2001 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
2002 jack_status_t *status = NULL;
\r
2003 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2004 if ( client == 0 ) {
\r
2005 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2006 error( RtAudioError::WARNING );
\r
2010 const char **ports;
\r
2011 std::string port, previousPort;
\r
2012 unsigned int nPorts = 0, nDevices = 0;
\r
2013 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2015 // Parse the port names up to the first colon (:).
\r
2016 size_t iColon = 0;
\r
2018 port = (char *) ports[ nPorts ];
\r
2019 iColon = port.find(":");
\r
2020 if ( iColon != std::string::npos ) {
\r
2021 port = port.substr( 0, iColon );
\r
2022 if ( port != previousPort ) {
\r
2023 if ( nDevices == device ) info.name = port;
\r
2025 previousPort = port;
\r
2028 } while ( ports[++nPorts] );
\r
2032 if ( device >= nDevices ) {
\r
2033 jack_client_close( client );
\r
2034 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2035 error( RtAudioError::INVALID_USE );
\r
2039 // Get the current jack server sample rate.
\r
2040 info.sampleRates.clear();
\r
2042 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2043 info.sampleRates.push_back( info.preferredSampleRate );
\r
2045 // Count the available ports containing the client name as device
\r
2046 // channels. Jack "input ports" equal RtAudio output channels.
\r
2047 unsigned int nChannels = 0;
\r
2048 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2050 while ( ports[ nChannels ] ) nChannels++;
\r
2052 info.outputChannels = nChannels;
\r
2055 // Jack "output ports" equal RtAudio input channels.
\r
2057 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2059 while ( ports[ nChannels ] ) nChannels++;
\r
2061 info.inputChannels = nChannels;
\r
2064 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2065 jack_client_close(client);
\r
2066 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2067 error( RtAudioError::WARNING );
\r
2071 // If device opens for both playback and capture, we determine the channels.
\r
2072 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2073 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2075 // Jack always uses 32-bit floats.
\r
2076 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2078 // Jack doesn't provide default devices so we'll use the first available one.
\r
2079 if ( device == 0 && info.outputChannels > 0 )
\r
2080 info.isDefaultOutput = true;
\r
2081 if ( device == 0 && info.inputChannels > 0 )
\r
2082 info.isDefaultInput = true;
\r
2084 jack_client_close(client);
\r
2085 info.probed = true;
\r
2089 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2091 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2093 RtApiJack *object = (RtApiJack *) info->object;
\r
2094 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2099 // This function will be called by a spawned thread when the Jack
\r
2100 // server signals that it is shutting down. It is necessary to handle
\r
2101 // it this way because the jackShutdown() function must return before
\r
2102 // the jack_deactivate() function (in closeStream()) will return.
\r
2103 static void *jackCloseStream( void *ptr )
\r
2105 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2106 RtApiJack *object = (RtApiJack *) info->object;
\r
2108 object->closeStream();
\r
2110 pthread_exit( NULL );
\r
2112 static void jackShutdown( void *infoPointer )
\r
2114 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2115 RtApiJack *object = (RtApiJack *) info->object;
\r
2117 // Check current stream state. If stopped, then we'll assume this
\r
2118 // was called as a result of a call to RtApiJack::stopStream (the
\r
2119 // deactivation of a client handle causes this function to be called).
\r
2120 // If not, we'll assume the Jack server is shutting down or some
\r
2121 // other problem occurred and we should close the stream.
\r
2122 if ( object->isStreamRunning() == false ) return;
\r
2124 ThreadHandle threadId;
\r
2125 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2126 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2129 static int jackXrun( void *infoPointer )
\r
2131 JackHandle *handle = (JackHandle *) infoPointer;
\r
2133 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2134 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2139 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2140 unsigned int firstChannel, unsigned int sampleRate,
\r
2141 RtAudioFormat format, unsigned int *bufferSize,
\r
2142 RtAudio::StreamOptions *options )
\r
2144 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2146 // Look for jack server and try to become a client (only do once per stream).
\r
2147 jack_client_t *client = 0;
\r
2148 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2149 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2150 jack_status_t *status = NULL;
\r
2151 if ( options && !options->streamName.empty() )
\r
2152 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2154 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2155 if ( client == 0 ) {
\r
2156 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2157 error( RtAudioError::WARNING );
\r
2162 // The handle must have been created on an earlier pass.
\r
2163 client = handle->client;
\r
2166 const char **ports;
\r
2167 std::string port, previousPort, deviceName;
\r
2168 unsigned int nPorts = 0, nDevices = 0;
\r
2169 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2171 // Parse the port names up to the first colon (:).
\r
2172 size_t iColon = 0;
\r
2174 port = (char *) ports[ nPorts ];
\r
2175 iColon = port.find(":");
\r
2176 if ( iColon != std::string::npos ) {
\r
2177 port = port.substr( 0, iColon );
\r
2178 if ( port != previousPort ) {
\r
2179 if ( nDevices == device ) deviceName = port;
\r
2181 previousPort = port;
\r
2184 } while ( ports[++nPorts] );
\r
2188 if ( device >= nDevices ) {
\r
2189 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2193 // Count the available ports containing the client name as device
\r
2194 // channels. Jack "input ports" equal RtAudio output channels.
\r
2195 unsigned int nChannels = 0;
\r
2196 unsigned long flag = JackPortIsInput;
\r
2197 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2198 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2200 while ( ports[ nChannels ] ) nChannels++;
\r
2204 // Compare the jack ports for specified client to the requested number of channels.
\r
2205 if ( nChannels < (channels + firstChannel) ) {
\r
2206 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2207 errorText_ = errorStream_.str();
\r
2211 // Check the jack server sample rate.
\r
2212 unsigned int jackRate = jack_get_sample_rate( client );
\r
2213 if ( sampleRate != jackRate ) {
\r
2214 jack_client_close( client );
\r
2215 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2216 errorText_ = errorStream_.str();
\r
2219 stream_.sampleRate = jackRate;
\r
2221 // Get the latency of the JACK port.
\r
2222 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2223 if ( ports[ firstChannel ] ) {
\r
2224 // Added by Ge Wang
\r
2225 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2226 // the range (usually the min and max are equal)
\r
2227 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2228 // get the latency range
\r
2229 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2230 // be optimistic, use the min!
\r
2231 stream_.latency[mode] = latrange.min;
\r
2232 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2236 // The jack server always uses 32-bit floating-point data.
\r
2237 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2238 stream_.userFormat = format;
\r
2240 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2241 else stream_.userInterleaved = true;
\r
2243 // Jack always uses non-interleaved buffers.
\r
2244 stream_.deviceInterleaved[mode] = false;
\r
2246 // Jack always provides host byte-ordered data.
\r
2247 stream_.doByteSwap[mode] = false;
\r
2249 // Get the buffer size. The buffer size and number of buffers
\r
2250 // (periods) is set when the jack server is started.
\r
2251 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2252 *bufferSize = stream_.bufferSize;
\r
2254 stream_.nDeviceChannels[mode] = channels;
\r
2255 stream_.nUserChannels[mode] = channels;
\r
2257 // Set flags for buffer conversion.
\r
2258 stream_.doConvertBuffer[mode] = false;
\r
2259 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2260 stream_.doConvertBuffer[mode] = true;
\r
2261 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2262 stream_.nUserChannels[mode] > 1 )
\r
2263 stream_.doConvertBuffer[mode] = true;
\r
2265 // Allocate our JackHandle structure for the stream.
\r
2266 if ( handle == 0 ) {
\r
2268 handle = new JackHandle;
\r
2270 catch ( std::bad_alloc& ) {
\r
2271 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2275 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2276 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2279 stream_.apiHandle = (void *) handle;
\r
2280 handle->client = client;
\r
2282 handle->deviceName[mode] = deviceName;
\r
2284 // Allocate necessary internal buffers.
\r
2285 unsigned long bufferBytes;
\r
2286 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2287 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2288 if ( stream_.userBuffer[mode] == NULL ) {
\r
2289 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2293 if ( stream_.doConvertBuffer[mode] ) {
\r
2295 bool makeBuffer = true;
\r
2296 if ( mode == OUTPUT )
\r
2297 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2298 else { // mode == INPUT
\r
2299 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2300 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2301 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2302 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2306 if ( makeBuffer ) {
\r
2307 bufferBytes *= *bufferSize;
\r
2308 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2309 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2310 if ( stream_.deviceBuffer == NULL ) {
\r
2311 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2317 // Allocate memory for the Jack ports (channels) identifiers.
\r
2318 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2319 if ( handle->ports[mode] == NULL ) {
\r
2320 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2324 stream_.device[mode] = device;
\r
2325 stream_.channelOffset[mode] = firstChannel;
\r
2326 stream_.state = STREAM_STOPPED;
\r
2327 stream_.callbackInfo.object = (void *) this;
\r
2329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2330 // We had already set up the stream for output.
\r
2331 stream_.mode = DUPLEX;
\r
2333 stream_.mode = mode;
\r
2334 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2335 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2336 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2339 // Register our ports.
\r
2341 if ( mode == OUTPUT ) {
\r
2342 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2343 snprintf( label, 64, "outport %d", i );
\r
2344 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2345 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2349 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2350 snprintf( label, 64, "inport %d", i );
\r
2351 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2352 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2356 // Setup the buffer conversion information structure. We don't use
\r
2357 // buffers to do channel offsets, so we override that parameter
\r
2359 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2361 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
\r
2367 pthread_cond_destroy( &handle->condition );
\r
2368 jack_client_close( handle->client );
\r
2370 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2371 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2374 stream_.apiHandle = 0;
\r
2377 for ( int i=0; i<2; i++ ) {
\r
2378 if ( stream_.userBuffer[i] ) {
\r
2379 free( stream_.userBuffer[i] );
\r
2380 stream_.userBuffer[i] = 0;
\r
2384 if ( stream_.deviceBuffer ) {
\r
2385 free( stream_.deviceBuffer );
\r
2386 stream_.deviceBuffer = 0;
\r
2392 void RtApiJack :: closeStream( void )
\r
2394 if ( stream_.state == STREAM_CLOSED ) {
\r
2395 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2396 error( RtAudioError::WARNING );
\r
2400 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2403 if ( stream_.state == STREAM_RUNNING )
\r
2404 jack_deactivate( handle->client );
\r
2406 jack_client_close( handle->client );
\r
2410 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2411 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2412 pthread_cond_destroy( &handle->condition );
\r
2414 stream_.apiHandle = 0;
\r
2417 for ( int i=0; i<2; i++ ) {
\r
2418 if ( stream_.userBuffer[i] ) {
\r
2419 free( stream_.userBuffer[i] );
\r
2420 stream_.userBuffer[i] = 0;
\r
2424 if ( stream_.deviceBuffer ) {
\r
2425 free( stream_.deviceBuffer );
\r
2426 stream_.deviceBuffer = 0;
\r
2429 stream_.mode = UNINITIALIZED;
\r
2430 stream_.state = STREAM_CLOSED;
\r
2433 void RtApiJack :: startStream( void )
\r
2436 if ( stream_.state == STREAM_RUNNING ) {
\r
2437 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2438 error( RtAudioError::WARNING );
\r
2442 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2443 int result = jack_activate( handle->client );
\r
2445 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2449 const char **ports;
\r
2451 // Get the list of available ports.
\r
2452 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
\r
2454 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2455 if ( ports == NULL) {
\r
2456 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2460 // Now make the port connections. Since RtAudio wasn't designed to
\r
2461 // allow the user to select particular channels of a device, we'll
\r
2462 // just open the first "nChannels" ports with offset.
\r
2463 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2465 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2466 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2469 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2476 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
\r
2478 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2479 if ( ports == NULL) {
\r
2480 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2484 // Now make the port connections. See note above.
\r
2485 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2487 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2488 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2491 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2498 handle->drainCounter = 0;
\r
2499 handle->internalDrain = false;
\r
2500 stream_.state = STREAM_RUNNING;
\r
2503 if ( result == 0 ) return;
\r
2504 error( RtAudioError::SYSTEM_ERROR );
\r
2507 void RtApiJack :: stopStream( void )
\r
2510 if ( stream_.state == STREAM_STOPPED ) {
\r
2511 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2512 error( RtAudioError::WARNING );
\r
2516 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2517 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2519 if ( handle->drainCounter == 0 ) {
\r
2520 handle->drainCounter = 2;
\r
2521 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2525 jack_deactivate( handle->client );
\r
2526 stream_.state = STREAM_STOPPED;
\r
2529 void RtApiJack :: abortStream( void )
\r
2532 if ( stream_.state == STREAM_STOPPED ) {
\r
2533 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2534 error( RtAudioError::WARNING );
\r
2538 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2539 handle->drainCounter = 2;
\r
2544 // This function will be called by a spawned thread when the user
\r
2545 // callback function signals that the stream should be stopped or
\r
2546 // aborted. It is necessary to handle it this way because the
\r
2547 // callbackEvent() function must return before the jack_deactivate()
\r
2548 // function will return.
\r
2549 static void *jackStopStream( void *ptr )
\r
2551 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2552 RtApiJack *object = (RtApiJack *) info->object;
\r
2554 object->stopStream();
\r
2555 pthread_exit( NULL );
\r
2558 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2560 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2561 if ( stream_.state == STREAM_CLOSED ) {
\r
2562 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2563 error( RtAudioError::WARNING );
\r
2566 if ( stream_.bufferSize != nframes ) {
\r
2567 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2568 error( RtAudioError::WARNING );
\r
2572 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2573 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2575 // Check if we were draining the stream and signal is finished.
\r
2576 if ( handle->drainCounter > 3 ) {
\r
2577 ThreadHandle threadId;
\r
2579 stream_.state = STREAM_STOPPING;
\r
2580 if ( handle->internalDrain == true )
\r
2581 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2583 pthread_cond_signal( &handle->condition );
\r
2587 // Invoke user callback first, to get fresh output data.
\r
2588 if ( handle->drainCounter == 0 ) {
\r
2589 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2590 double streamTime = getStreamTime();
\r
2591 RtAudioStreamStatus status = 0;
\r
2592 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2593 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2594 handle->xrun[0] = false;
\r
2596 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2597 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2598 handle->xrun[1] = false;
\r
2600 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2601 stream_.bufferSize, streamTime, status, info->userData );
\r
2602 if ( cbReturnValue == 2 ) {
\r
2603 stream_.state = STREAM_STOPPING;
\r
2604 handle->drainCounter = 2;
\r
2606 pthread_create( &id, NULL, jackStopStream, info );
\r
2609 else if ( cbReturnValue == 1 ) {
\r
2610 handle->drainCounter = 1;
\r
2611 handle->internalDrain = true;
\r
2615 jack_default_audio_sample_t *jackbuffer;
\r
2616 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2617 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2619 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2621 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2622 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2623 memset( jackbuffer, 0, bufferBytes );
\r
2627 else if ( stream_.doConvertBuffer[0] ) {
\r
2629 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2631 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2632 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2633 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2636 else { // no buffer conversion
\r
2637 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2638 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2639 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2644 // Don't bother draining input
\r
2645 if ( handle->drainCounter ) {
\r
2646 handle->drainCounter++;
\r
2650 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2652 if ( stream_.doConvertBuffer[1] ) {
\r
2653 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2654 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2655 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2657 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2659 else { // no buffer conversion
\r
2660 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2661 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2662 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2668 RtApi::tickStreamTime();
\r
2671 //******************** End of __UNIX_JACK__ *********************//
\r
2674 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2676 // The ASIO API is designed around a callback scheme, so this
\r
2677 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2678 // Jack. The primary constraint with ASIO is that it only allows
\r
2679 // access to a single driver at a time. Thus, it is not possible to
\r
2680 // have more than one simultaneous RtAudio stream.
\r
2682 // This implementation also requires a number of external ASIO files
\r
2683 // and a few global variables. The ASIO callback scheme does not
\r
2684 // allow for the passing of user data, so we must create a global
\r
2685 // pointer to our callbackInfo structure.
\r
2687 // On unix systems, we make use of a pthread condition variable.
\r
2688 // Since there is no equivalent in Windows, I hacked something based
\r
2689 // on information found in
\r
2690 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2692 #include "asiosys.h"
\r
2694 #include "iasiothiscallresolver.h"
\r
2695 #include "asiodrivers.h"
\r
2698 static AsioDrivers drivers;
\r
2699 static ASIOCallbacks asioCallbacks;
\r
2700 static ASIODriverInfo driverInfo;
\r
2701 static CallbackInfo *asioCallbackInfo;
\r
2702 static bool asioXRun;
\r
2704 struct AsioHandle {
\r
2705 int drainCounter; // Tracks callback counts when draining
\r
2706 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2707 ASIOBufferInfo *bufferInfos;
\r
2711 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2714 // Function declarations (definitions at end of section)
\r
2715 static const char* getAsioErrorString( ASIOError result );
\r
2716 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2717 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2719 RtApiAsio :: RtApiAsio()
\r
2721 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2722 // CoInitialize beforehand, but it must be for appartment threading
\r
2723 // (in which case, CoInitilialize will return S_FALSE here).
\r
2724 coInitialized_ = false;
\r
2725 HRESULT hr = CoInitialize( NULL );
\r
2726 if ( FAILED(hr) ) {
\r
2727 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2728 error( RtAudioError::WARNING );
\r
2730 coInitialized_ = true;
\r
2732 drivers.removeCurrentDriver();
\r
2733 driverInfo.asioVersion = 2;
\r
2735 // See note in DirectSound implementation about GetDesktopWindow().
\r
2736 driverInfo.sysRef = GetForegroundWindow();
\r
2739 RtApiAsio :: ~RtApiAsio()
\r
2741 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2742 if ( coInitialized_ ) CoUninitialize();
\r
2745 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2747 return (unsigned int) drivers.asioGetNumDev();
\r
2750 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2752 RtAudio::DeviceInfo info;
\r
2753 info.probed = false;
\r
2756 unsigned int nDevices = getDeviceCount();
\r
2757 if ( nDevices == 0 ) {
\r
2758 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2759 error( RtAudioError::INVALID_USE );
\r
2763 if ( device >= nDevices ) {
\r
2764 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2765 error( RtAudioError::INVALID_USE );
\r
2769 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2770 if ( stream_.state != STREAM_CLOSED ) {
\r
2771 if ( device >= devices_.size() ) {
\r
2772 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2773 error( RtAudioError::WARNING );
\r
2776 return devices_[ device ];
\r
2779 char driverName[32];
\r
2780 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2781 if ( result != ASE_OK ) {
\r
2782 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2783 errorText_ = errorStream_.str();
\r
2784 error( RtAudioError::WARNING );
\r
2788 info.name = driverName;
\r
2790 if ( !drivers.loadDriver( driverName ) ) {
\r
2791 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2792 errorText_ = errorStream_.str();
\r
2793 error( RtAudioError::WARNING );
\r
2797 result = ASIOInit( &driverInfo );
\r
2798 if ( result != ASE_OK ) {
\r
2799 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2800 errorText_ = errorStream_.str();
\r
2801 error( RtAudioError::WARNING );
\r
2805 // Determine the device channel information.
\r
2806 long inputChannels, outputChannels;
\r
2807 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2808 if ( result != ASE_OK ) {
\r
2809 drivers.removeCurrentDriver();
\r
2810 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2811 errorText_ = errorStream_.str();
\r
2812 error( RtAudioError::WARNING );
\r
2816 info.outputChannels = outputChannels;
\r
2817 info.inputChannels = inputChannels;
\r
2818 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2819 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2821 // Determine the supported sample rates.
\r
2822 info.sampleRates.clear();
\r
2823 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2824 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2825 if ( result == ASE_OK ) {
\r
2826 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2828 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2829 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2833 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2834 ASIOChannelInfo channelInfo;
\r
2835 channelInfo.channel = 0;
\r
2836 channelInfo.isInput = true;
\r
2837 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2838 result = ASIOGetChannelInfo( &channelInfo );
\r
2839 if ( result != ASE_OK ) {
\r
2840 drivers.removeCurrentDriver();
\r
2841 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2842 errorText_ = errorStream_.str();
\r
2843 error( RtAudioError::WARNING );
\r
2847 info.nativeFormats = 0;
\r
2848 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2849 info.nativeFormats |= RTAUDIO_SINT16;
\r
2850 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2851 info.nativeFormats |= RTAUDIO_SINT32;
\r
2852 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2853 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2854 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2855 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2856 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2857 info.nativeFormats |= RTAUDIO_SINT24;
\r
2859 if ( info.outputChannels > 0 )
\r
2860 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2861 if ( info.inputChannels > 0 )
\r
2862 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2864 info.probed = true;
\r
2865 drivers.removeCurrentDriver();
\r
2869 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2871 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2872 object->callbackEvent( index );
\r
2875 void RtApiAsio :: saveDeviceInfo( void )
\r
2879 unsigned int nDevices = getDeviceCount();
\r
2880 devices_.resize( nDevices );
\r
2881 for ( unsigned int i=0; i<nDevices; i++ )
\r
2882 devices_[i] = getDeviceInfo( i );
\r
2885 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2886 unsigned int firstChannel, unsigned int sampleRate,
\r
2887 RtAudioFormat format, unsigned int *bufferSize,
\r
2888 RtAudio::StreamOptions *options )
\r
2889 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2891 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2893 // For ASIO, a duplex stream MUST use the same driver.
\r
2894 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2895 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2899 char driverName[32];
\r
2900 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2901 if ( result != ASE_OK ) {
\r
2902 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2903 errorText_ = errorStream_.str();
\r
2907 // Only load the driver once for duplex stream.
\r
2908 if ( !isDuplexInput ) {
\r
2909 // The getDeviceInfo() function will not work when a stream is open
\r
2910 // because ASIO does not allow multiple devices to run at the same
\r
2911 // time. Thus, we'll probe the system before opening a stream and
\r
2912 // save the results for use by getDeviceInfo().
\r
2913 this->saveDeviceInfo();
\r
2915 if ( !drivers.loadDriver( driverName ) ) {
\r
2916 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2917 errorText_ = errorStream_.str();
\r
2921 result = ASIOInit( &driverInfo );
\r
2922 if ( result != ASE_OK ) {
\r
2923 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2924 errorText_ = errorStream_.str();
\r
2929 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2930 bool buffersAllocated = false;
\r
2931 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2932 unsigned int nChannels;
\r
2935 // Check the device channel count.
\r
2936 long inputChannels, outputChannels;
\r
2937 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2938 if ( result != ASE_OK ) {
\r
2939 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2940 errorText_ = errorStream_.str();
\r
2944 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2945 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2946 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2947 errorText_ = errorStream_.str();
\r
2950 stream_.nDeviceChannels[mode] = channels;
\r
2951 stream_.nUserChannels[mode] = channels;
\r
2952 stream_.channelOffset[mode] = firstChannel;
\r
2954 // Verify the sample rate is supported.
\r
2955 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2956 if ( result != ASE_OK ) {
\r
2957 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2958 errorText_ = errorStream_.str();
\r
2962 // Get the current sample rate
\r
2963 ASIOSampleRate currentRate;
\r
2964 result = ASIOGetSampleRate( ¤tRate );
\r
2965 if ( result != ASE_OK ) {
\r
2966 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2967 errorText_ = errorStream_.str();
\r
2971 // Set the sample rate only if necessary
\r
2972 if ( currentRate != sampleRate ) {
\r
2973 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2974 if ( result != ASE_OK ) {
\r
2975 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2976 errorText_ = errorStream_.str();
\r
2981 // Determine the driver data type.
\r
2982 ASIOChannelInfo channelInfo;
\r
2983 channelInfo.channel = 0;
\r
2984 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2985 else channelInfo.isInput = true;
\r
2986 result = ASIOGetChannelInfo( &channelInfo );
\r
2987 if ( result != ASE_OK ) {
\r
2988 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2989 errorText_ = errorStream_.str();
\r
2993 // Assuming WINDOWS host is always little-endian.
\r
2994 stream_.doByteSwap[mode] = false;
\r
2995 stream_.userFormat = format;
\r
2996 stream_.deviceFormat[mode] = 0;
\r
2997 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2998 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2999 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
3001 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
3002 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
3003 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
3005 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3006 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3007 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3009 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3010 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3011 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3013 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3014 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3015 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3018 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3019 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3020 errorText_ = errorStream_.str();
\r
3024 // Set the buffer size. For a duplex stream, this will end up
\r
3025 // setting the buffer size based on the input constraints, which
\r
3027 long minSize, maxSize, preferSize, granularity;
\r
3028 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3029 if ( result != ASE_OK ) {
\r
3030 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3031 errorText_ = errorStream_.str();
\r
3035 if ( isDuplexInput ) {
\r
3036 // When this is the duplex input (output was opened before), then we have to use the same
\r
3037 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3038 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3039 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3040 // to the "bufferSize" param as usual to set up processing buffers.
\r
3042 *bufferSize = stream_.bufferSize;
\r
3045 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3046 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3047 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3048 else if ( granularity == -1 ) {
\r
3049 // Make sure bufferSize is a power of two.
\r
3050 int log2_of_min_size = 0;
\r
3051 int log2_of_max_size = 0;
\r
3053 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3054 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3055 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3058 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3059 int min_delta_num = log2_of_min_size;
\r
3061 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3062 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3063 if (current_delta < min_delta) {
\r
3064 min_delta = current_delta;
\r
3065 min_delta_num = i;
\r
3069 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3070 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3071 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3073 else if ( granularity != 0 ) {
\r
3074 // Set to an even multiple of granularity, rounding up.
\r
3075 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3080 // we don't use it anymore, see above!
\r
3081 // Just left it here for the case...
\r
3082 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3083 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3088 stream_.bufferSize = *bufferSize;
\r
3089 stream_.nBuffers = 2;
\r
3091 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3092 else stream_.userInterleaved = true;
\r
3094 // ASIO always uses non-interleaved buffers.
\r
3095 stream_.deviceInterleaved[mode] = false;
\r
3097 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3098 if ( handle == 0 ) {
\r
3100 handle = new AsioHandle;
\r
3102 catch ( std::bad_alloc& ) {
\r
3103 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3106 handle->bufferInfos = 0;
\r
3108 // Create a manual-reset event.
\r
3109 handle->condition = CreateEvent( NULL, // no security
\r
3110 TRUE, // manual-reset
\r
3111 FALSE, // non-signaled initially
\r
3112 NULL ); // unnamed
\r
3113 stream_.apiHandle = (void *) handle;
\r
3116 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3117 // and output separately, we'll have to dispose of previously
\r
3118 // created output buffers for a duplex stream.
\r
3119 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3120 ASIODisposeBuffers();
\r
3121 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3124 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3126 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3127 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3128 if ( handle->bufferInfos == NULL ) {
\r
3129 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3130 errorText_ = errorStream_.str();
\r
3134 ASIOBufferInfo *infos;
\r
3135 infos = handle->bufferInfos;
\r
3136 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3137 infos->isInput = ASIOFalse;
\r
3138 infos->channelNum = i + stream_.channelOffset[0];
\r
3139 infos->buffers[0] = infos->buffers[1] = 0;
\r
3141 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3142 infos->isInput = ASIOTrue;
\r
3143 infos->channelNum = i + stream_.channelOffset[1];
\r
3144 infos->buffers[0] = infos->buffers[1] = 0;
\r
3147 // prepare for callbacks
\r
3148 stream_.sampleRate = sampleRate;
\r
3149 stream_.device[mode] = device;
\r
3150 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3152 // store this class instance before registering callbacks, that are going to use it
\r
3153 asioCallbackInfo = &stream_.callbackInfo;
\r
3154 stream_.callbackInfo.object = (void *) this;
\r
3156 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3157 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3158 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3159 asioCallbacks.asioMessage = &asioMessages;
\r
3160 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3161 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3162 if ( result != ASE_OK ) {
\r
3163 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3164 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3165 // in that case, let's be naïve and try that instead
\r
3166 *bufferSize = preferSize;
\r
3167 stream_.bufferSize = *bufferSize;
\r
3168 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3171 if ( result != ASE_OK ) {
\r
3172 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3173 errorText_ = errorStream_.str();
\r
3176 buffersAllocated = true;
\r
3177 stream_.state = STREAM_STOPPED;
\r
3179 // Set flags for buffer conversion.
\r
3180 stream_.doConvertBuffer[mode] = false;
\r
3181 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3182 stream_.doConvertBuffer[mode] = true;
\r
3183 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3184 stream_.nUserChannels[mode] > 1 )
\r
3185 stream_.doConvertBuffer[mode] = true;
\r
3187 // Allocate necessary internal buffers
\r
3188 unsigned long bufferBytes;
\r
3189 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3190 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3191 if ( stream_.userBuffer[mode] == NULL ) {
\r
3192 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3196 if ( stream_.doConvertBuffer[mode] ) {
\r
3198 bool makeBuffer = true;
\r
3199 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3200 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3201 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3202 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3205 if ( makeBuffer ) {
\r
3206 bufferBytes *= *bufferSize;
\r
3207 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3208 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3209 if ( stream_.deviceBuffer == NULL ) {
\r
3210 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3216 // Determine device latencies
\r
3217 long inputLatency, outputLatency;
\r
3218 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3219 if ( result != ASE_OK ) {
\r
3220 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3221 errorText_ = errorStream_.str();
\r
3222 error( RtAudioError::WARNING); // warn but don't fail
\r
3225 stream_.latency[0] = outputLatency;
\r
3226 stream_.latency[1] = inputLatency;
\r
3229 // Setup the buffer conversion information structure. We don't use
\r
3230 // buffers to do channel offsets, so we override that parameter
\r
3232 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3237 if ( !isDuplexInput ) {
\r
3238 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3239 // So we clean up for single channel only
\r
3241 if ( buffersAllocated )
\r
3242 ASIODisposeBuffers();
\r
3244 drivers.removeCurrentDriver();
\r
3247 CloseHandle( handle->condition );
\r
3248 if ( handle->bufferInfos )
\r
3249 free( handle->bufferInfos );
\r
3252 stream_.apiHandle = 0;
\r
3256 if ( stream_.userBuffer[mode] ) {
\r
3257 free( stream_.userBuffer[mode] );
\r
3258 stream_.userBuffer[mode] = 0;
\r
3261 if ( stream_.deviceBuffer ) {
\r
3262 free( stream_.deviceBuffer );
\r
3263 stream_.deviceBuffer = 0;
\r
3268 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3270 void RtApiAsio :: closeStream()
\r
3272 if ( stream_.state == STREAM_CLOSED ) {
\r
3273 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3274 error( RtAudioError::WARNING );
\r
3278 if ( stream_.state == STREAM_RUNNING ) {
\r
3279 stream_.state = STREAM_STOPPED;
\r
3282 ASIODisposeBuffers();
\r
3283 drivers.removeCurrentDriver();
\r
3285 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3287 CloseHandle( handle->condition );
\r
3288 if ( handle->bufferInfos )
\r
3289 free( handle->bufferInfos );
\r
3291 stream_.apiHandle = 0;
\r
3294 for ( int i=0; i<2; i++ ) {
\r
3295 if ( stream_.userBuffer[i] ) {
\r
3296 free( stream_.userBuffer[i] );
\r
3297 stream_.userBuffer[i] = 0;
\r
3301 if ( stream_.deviceBuffer ) {
\r
3302 free( stream_.deviceBuffer );
\r
3303 stream_.deviceBuffer = 0;
\r
3306 stream_.mode = UNINITIALIZED;
\r
3307 stream_.state = STREAM_CLOSED;
\r
bool stopThreadCalled = false; // guards against re-entering stopStream() from the drain thread
\r
3312 void RtApiAsio :: startStream()
\r
3315 if ( stream_.state == STREAM_RUNNING ) {
\r
3316 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3317 error( RtAudioError::WARNING );
\r
3321 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3322 ASIOError result = ASIOStart();
\r
3323 if ( result != ASE_OK ) {
\r
3324 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3325 errorText_ = errorStream_.str();
\r
3329 handle->drainCounter = 0;
\r
3330 handle->internalDrain = false;
\r
3331 ResetEvent( handle->condition );
\r
3332 stream_.state = STREAM_RUNNING;
\r
3336 stopThreadCalled = false;
\r
3338 if ( result == ASE_OK ) return;
\r
3339 error( RtAudioError::SYSTEM_ERROR );
\r
3342 void RtApiAsio :: stopStream()
\r
3345 if ( stream_.state == STREAM_STOPPED ) {
\r
3346 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3347 error( RtAudioError::WARNING );
\r
3351 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3352 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3353 if ( handle->drainCounter == 0 ) {
\r
3354 handle->drainCounter = 2;
\r
3355 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3359 stream_.state = STREAM_STOPPED;
\r
3361 ASIOError result = ASIOStop();
\r
3362 if ( result != ASE_OK ) {
\r
3363 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3364 errorText_ = errorStream_.str();
\r
3367 if ( result == ASE_OK ) return;
\r
3368 error( RtAudioError::SYSTEM_ERROR );
\r
3371 void RtApiAsio :: abortStream()
\r
3374 if ( stream_.state == STREAM_STOPPED ) {
\r
3375 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3376 error( RtAudioError::WARNING );
\r
3380 // The following lines were commented-out because some behavior was
\r
3381 // noted where the device buffers need to be zeroed to avoid
\r
3382 // continuing sound, even when the device buffers are completely
\r
3383 // disposed. So now, calling abort is the same as calling stop.
\r
3384 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3385 // handle->drainCounter = 2;
\r
3389 // This function will be called by a spawned thread when the user
\r
3390 // callback function signals that the stream should be stopped or
\r
3391 // aborted. It is necessary to handle it this way because the
\r
3392 // callbackEvent() function must return before the ASIOStop()
\r
3393 // function will return.
\r
3394 static unsigned __stdcall asioStopStream( void *ptr )
\r
3396 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3397 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3399 object->stopStream();
\r
3400 _endthreadex( 0 );
\r
3404 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3406 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3407 if ( stream_.state == STREAM_CLOSED ) {
\r
3408 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3409 error( RtAudioError::WARNING );
\r
3413 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3414 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3416 // Check if we were draining the stream and signal if finished.
\r
3417 if ( handle->drainCounter > 3 ) {
\r
3419 stream_.state = STREAM_STOPPING;
\r
3420 if ( handle->internalDrain == false )
\r
3421 SetEvent( handle->condition );
\r
3422 else { // spawn a thread to stop the stream
\r
3423 unsigned threadId;
\r
3424 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3425 &stream_.callbackInfo, 0, &threadId );
\r
3430 // Invoke user callback to get fresh output data UNLESS we are
\r
3431 // draining stream.
\r
3432 if ( handle->drainCounter == 0 ) {
\r
3433 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3434 double streamTime = getStreamTime();
\r
3435 RtAudioStreamStatus status = 0;
\r
3436 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3437 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3440 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3441 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3444 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3445 stream_.bufferSize, streamTime, status, info->userData );
\r
3446 if ( cbReturnValue == 2 ) {
\r
3447 stream_.state = STREAM_STOPPING;
\r
3448 handle->drainCounter = 2;
\r
3449 unsigned threadId;
\r
3450 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3451 &stream_.callbackInfo, 0, &threadId );
\r
3454 else if ( cbReturnValue == 1 ) {
\r
3455 handle->drainCounter = 1;
\r
3456 handle->internalDrain = true;
\r
3460 unsigned int nChannels, bufferBytes, i, j;
\r
3461 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3462 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3464 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3466 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3468 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3469 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3470 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3474 else if ( stream_.doConvertBuffer[0] ) {
\r
3476 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3477 if ( stream_.doByteSwap[0] )
\r
3478 byteSwapBuffer( stream_.deviceBuffer,
\r
3479 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3480 stream_.deviceFormat[0] );
\r
3482 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3483 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3484 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3485 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3491 if ( stream_.doByteSwap[0] )
\r
3492 byteSwapBuffer( stream_.userBuffer[0],
\r
3493 stream_.bufferSize * stream_.nUserChannels[0],
\r
3494 stream_.userFormat );
\r
3496 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3497 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3498 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3499 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3505 // Don't bother draining input
\r
3506 if ( handle->drainCounter ) {
\r
3507 handle->drainCounter++;
\r
3511 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3513 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3515 if (stream_.doConvertBuffer[1]) {
\r
3517 // Always interleave ASIO input data.
\r
3518 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3519 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3520 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3521 handle->bufferInfos[i].buffers[bufferIndex],
\r
3525 if ( stream_.doByteSwap[1] )
\r
3526 byteSwapBuffer( stream_.deviceBuffer,
\r
3527 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3528 stream_.deviceFormat[1] );
\r
3529 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3533 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3534 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3535 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3536 handle->bufferInfos[i].buffers[bufferIndex],
\r
3541 if ( stream_.doByteSwap[1] )
\r
3542 byteSwapBuffer( stream_.userBuffer[1],
\r
3543 stream_.bufferSize * stream_.nUserChannels[1],
\r
3544 stream_.userFormat );
\r
3549 // The following call was suggested by Malte Clasen. While the API
\r
3550 // documentation indicates it should not be required, some device
\r
3551 // drivers apparently do not function correctly without it.
\r
3552 ASIOOutputReady();
\r
3554 RtApi::tickStreamTime();
\r
3558 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3560 // The ASIO documentation says that this usually only happens during
\r
3561 // external sync. Audio processing is not stopped by the driver,
\r
3562 // actual sample rate might not have even changed, maybe only the
\r
3563 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3566 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3568 object->stopStream();
\r
3570 catch ( RtAudioError &exception ) {
\r
3571 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3575 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3578 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3582 switch( selector ) {
\r
3583 case kAsioSelectorSupported:
\r
3584 if ( value == kAsioResetRequest
\r
3585 || value == kAsioEngineVersion
\r
3586 || value == kAsioResyncRequest
\r
3587 || value == kAsioLatenciesChanged
\r
3588 // The following three were added for ASIO 2.0, you don't
\r
3589 // necessarily have to support them.
\r
3590 || value == kAsioSupportsTimeInfo
\r
3591 || value == kAsioSupportsTimeCode
\r
3592 || value == kAsioSupportsInputMonitor)
\r
3595 case kAsioResetRequest:
\r
3596 // Defer the task and perform the reset of the driver during the
\r
3597 // next "safe" situation. You cannot reset the driver right now,
\r
3598 // as this code is called from the driver. Reset the driver is
\r
3599 // done by completely destruct is. I.e. ASIOStop(),
\r
3600 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3602 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3605 case kAsioResyncRequest:
\r
3606 // This informs the application that the driver encountered some
\r
3607 // non-fatal data loss. It is used for synchronization purposes
\r
3608 // of different media. Added mainly to work around the Win16Mutex
\r
3609 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3610 // which could lose data because the Mutex was held too long by
\r
3611 // another thread. However a driver can issue it in other
\r
3612 // situations, too.
\r
3613 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3617 case kAsioLatenciesChanged:
\r
3618 // This will inform the host application that the drivers were
\r
3619 // latencies changed. Beware, it this does not mean that the
\r
3620 // buffer sizes have changed! You might need to update internal
\r
3622 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3625 case kAsioEngineVersion:
\r
3626 // Return the supported ASIO version of the host application. If
\r
3627 // a host application does not implement this selector, ASIO 1.0
\r
3628 // is assumed by the driver.
\r
3631 case kAsioSupportsTimeInfo:
\r
3632 // Informs the driver whether the
\r
3633 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3634 // For compatibility with ASIO 1.0 drivers the host application
\r
3635 // should always support the "old" bufferSwitch method, too.
\r
3638 case kAsioSupportsTimeCode:
\r
3639 // Informs the driver whether application is interested in time
\r
3640 // code info. If an application does not need to know about time
\r
3641 // code, the driver has less work to do.
\r
3648 static const char* getAsioErrorString( ASIOError result )
\r
3653 const char*message;
\r
3656 static const Messages m[] =
\r
3658 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3659 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3660 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3661 { ASE_InvalidMode, "Invalid mode." },
\r
3662 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3663 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3664 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3667 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3668 if ( m[i].value == result ) return m[i].message;
\r
3670 return "Unknown error.";
\r
3673 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3677 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3679 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3680 // - Introduces support for the Windows WASAPI API
\r
3681 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3682 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3683 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3688 #include <audioclient.h>
\r
3690 #include <mmdeviceapi.h>
\r
3691 #include <functiondiscoverykeys_devpkey.h>
\r
3693 //=============================================================================
\r
// Releases a COM interface pointer (if non-NULL) and clears it so the
// pointer cannot be double-released.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3702 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3704 //-----------------------------------------------------------------------------
\r
3706 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3707 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3708 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3709 // provide intermediate storage for read / write synchronization.
\r
3710 class WasapiBuffer
\r
3714 : buffer_( NULL ),
\r
3723 // sets the length of the internal ring buffer
\r
3724 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3727 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3729 bufferSize_ = bufferSize;
\r
3734 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3735 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3737 if ( !buffer || // incoming buffer is NULL
\r
3738 bufferSize == 0 || // incoming buffer has no data
\r
3739 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3744 unsigned int relOutIndex = outIndex_;
\r
3745 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3746 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3747 relOutIndex += bufferSize_;
\r
3750 // "in" index can end on the "out" index but cannot begin at it
\r
3751 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3752 return false; // not enough space between "in" index and "out" index
\r
3755 // copy buffer from external to internal
\r
3756 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3757 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3758 int fromInSize = bufferSize - fromZeroSize;
\r
3762 case RTAUDIO_SINT8:
\r
3763 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3764 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3766 case RTAUDIO_SINT16:
\r
3767 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3768 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3770 case RTAUDIO_SINT24:
\r
3771 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3772 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3774 case RTAUDIO_SINT32:
\r
3775 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3776 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3778 case RTAUDIO_FLOAT32:
\r
3779 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3780 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3782 case RTAUDIO_FLOAT64:
\r
3783 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3784 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3788 // update "in" index
\r
3789 inIndex_ += bufferSize;
\r
3790 inIndex_ %= bufferSize_;
\r
3795 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3796 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3798 if ( !buffer || // incoming buffer is NULL
\r
3799 bufferSize == 0 || // incoming buffer has no data
\r
3800 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3805 unsigned int relInIndex = inIndex_;
\r
3806 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3807 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3808 relInIndex += bufferSize_;
\r
3811 // "out" index can begin at and end on the "in" index
\r
3812 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3813 return false; // not enough space between "out" index and "in" index
\r
3816 // copy buffer from internal to external
\r
3817 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3818 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3819 int fromOutSize = bufferSize - fromZeroSize;
\r
3823 case RTAUDIO_SINT8:
\r
3824 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3825 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3827 case RTAUDIO_SINT16:
\r
3828 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3829 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3831 case RTAUDIO_SINT24:
\r
3832 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3833 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3835 case RTAUDIO_SINT32:
\r
3836 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3837 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3839 case RTAUDIO_FLOAT32:
\r
3840 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3841 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3843 case RTAUDIO_FLOAT64:
\r
3844 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3845 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3849 // update "out" index
\r
3850 outIndex_ += bufferSize;
\r
3851 outIndex_ %= bufferSize_;
\r
3858 unsigned int bufferSize_;
\r
3859 unsigned int inIndex_;
\r
3860 unsigned int outIndex_;
\r
3863 //-----------------------------------------------------------------------------
\r
3865 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3866 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3867 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3868 // This sample rate converter works best with conversions between one rate and its multiple.
\r
3869 void convertBufferWasapi( char* outBuffer,
\r
3870 const char* inBuffer,
\r
3871 const unsigned int& channelCount,
\r
3872 const unsigned int& inSampleRate,
\r
3873 const unsigned int& outSampleRate,
\r
3874 const unsigned int& inSampleCount,
\r
3875 unsigned int& outSampleCount,
\r
3876 const RtAudioFormat& format )
\r
3878 // calculate the new outSampleCount and relative sampleStep
\r
3879 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3880 float sampleRatioInv = ( float ) 1 / sampleRatio;
\r
3881 float sampleStep = 1.0f / sampleRatio;
\r
3882 float inSampleFraction = 0.0f;
\r
3884 outSampleCount = ( unsigned int ) std::roundf( inSampleCount * sampleRatio );
\r
3886 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
\r
3887 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
\r
3889 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3890 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3892 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3896 case RTAUDIO_SINT8:
\r
3897 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3899 case RTAUDIO_SINT16:
\r
3900 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3902 case RTAUDIO_SINT24:
\r
3903 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3905 case RTAUDIO_SINT32:
\r
3906 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3908 case RTAUDIO_FLOAT32:
\r
3909 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3911 case RTAUDIO_FLOAT64:
\r
3912 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3916 // jump to next in sample
\r
3917 inSampleFraction += sampleStep;
\r
3920 else // else interpolate
\r
3922 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3923 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3925 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3926 float inSampleDec = inSampleFraction - inSample;
\r
3927 unsigned int frameInSample = inSample * channelCount;
\r
3928 unsigned int frameOutSample = outSample * channelCount;
\r
3932 case RTAUDIO_SINT8:
\r
3934 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3936 char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
\r
3937 char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3938 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
\r
3939 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3943 case RTAUDIO_SINT16:
\r
3945 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3947 short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
\r
3948 short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3949 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
\r
3950 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3954 case RTAUDIO_SINT24:
\r
3956 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3958 int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
\r
3959 int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
\r
3960 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3961 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3965 case RTAUDIO_SINT32:
\r
3967 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3969 int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
\r
3970 int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3971 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3972 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3976 case RTAUDIO_FLOAT32:
\r
3978 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3980 float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
\r
3981 float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3982 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3983 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3987 case RTAUDIO_FLOAT64:
\r
3989 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3991 double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
\r
3992 double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3993 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3994 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
4000 // jump to next in sample
\r
4001 inSampleFraction += sampleStep;
\r
4006 //-----------------------------------------------------------------------------
\r
4008 // A structure to hold various information related to the WASAPI implementation.
\r
4009 struct WasapiHandle
\r
4011 IAudioClient* captureAudioClient;
\r
4012 IAudioClient* renderAudioClient;
\r
4013 IAudioCaptureClient* captureClient;
\r
4014 IAudioRenderClient* renderClient;
\r
4015 HANDLE captureEvent;
\r
4016 HANDLE renderEvent;
\r
4019 : captureAudioClient( NULL ),
\r
4020 renderAudioClient( NULL ),
\r
4021 captureClient( NULL ),
\r
4022 renderClient( NULL ),
\r
4023 captureEvent( NULL ),
\r
4024 renderEvent( NULL ) {}
\r
4027 //=============================================================================
\r
4029 RtApiWasapi::RtApiWasapi()
\r
4030 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
4032 // WASAPI can run either apartment or multi-threaded
\r
4033 HRESULT hr = CoInitialize( NULL );
\r
4034 if ( !FAILED( hr ) )
\r
4035 coInitialized_ = true;
\r
4037 // Instantiate device enumerator
\r
4038 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
4039 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
4040 ( void** ) &deviceEnumerator_ );
\r
4042 if ( FAILED( hr ) ) {
\r
4043 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
4044 error( RtAudioError::DRIVER_ERROR );
\r
4048 //-----------------------------------------------------------------------------
\r
4050 RtApiWasapi::~RtApiWasapi()
\r
4052 if ( stream_.state != STREAM_CLOSED )
\r
4055 SAFE_RELEASE( deviceEnumerator_ );
\r
4057 // If this object previously called CoInitialize()
\r
4058 if ( coInitialized_ )
\r
4062 //=============================================================================
\r
4064 unsigned int RtApiWasapi::getDeviceCount( void )
\r
4066 unsigned int captureDeviceCount = 0;
\r
4067 unsigned int renderDeviceCount = 0;
\r
4069 IMMDeviceCollection* captureDevices = NULL;
\r
4070 IMMDeviceCollection* renderDevices = NULL;
\r
4072 // Count capture devices
\r
4073 errorText_.clear();
\r
4074 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4075 if ( FAILED( hr ) ) {
\r
4076 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
4080 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4081 if ( FAILED( hr ) ) {
\r
4082 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
4086 // Count render devices
\r
4087 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4088 if ( FAILED( hr ) ) {
\r
4089 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4093 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4094 if ( FAILED( hr ) ) {
\r
4095 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4100 // release all references
\r
4101 SAFE_RELEASE( captureDevices );
\r
4102 SAFE_RELEASE( renderDevices );
\r
4104 if ( errorText_.empty() )
\r
4105 return captureDeviceCount + renderDeviceCount;
\r
4107 error( RtAudioError::DRIVER_ERROR );
\r
4111 //-----------------------------------------------------------------------------
\r
4113 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4115 RtAudio::DeviceInfo info;
\r
4116 unsigned int captureDeviceCount = 0;
\r
4117 unsigned int renderDeviceCount = 0;
\r
4118 std::string defaultDeviceName;
\r
4119 bool isCaptureDevice = false;
\r
4121 PROPVARIANT deviceNameProp;
\r
4122 PROPVARIANT defaultDeviceNameProp;
\r
4124 IMMDeviceCollection* captureDevices = NULL;
\r
4125 IMMDeviceCollection* renderDevices = NULL;
\r
4126 IMMDevice* devicePtr = NULL;
\r
4127 IMMDevice* defaultDevicePtr = NULL;
\r
4128 IAudioClient* audioClient = NULL;
\r
4129 IPropertyStore* devicePropStore = NULL;
\r
4130 IPropertyStore* defaultDevicePropStore = NULL;
\r
4132 WAVEFORMATEX* deviceFormat = NULL;
\r
4133 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4136 info.probed = false;
\r
4138 // Count capture devices
\r
4139 errorText_.clear();
\r
4140 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4141 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4142 if ( FAILED( hr ) ) {
\r
4143 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4147 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4148 if ( FAILED( hr ) ) {
\r
4149 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4153 // Count render devices
\r
4154 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4155 if ( FAILED( hr ) ) {
\r
4156 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4160 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4161 if ( FAILED( hr ) ) {
\r
4162 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4166 // validate device index
\r
4167 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4168 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4169 errorType = RtAudioError::INVALID_USE;
\r
4173 // determine whether index falls within capture or render devices
\r
4174 if ( device >= renderDeviceCount ) {
\r
4175 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4176 if ( FAILED( hr ) ) {
\r
4177 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4180 isCaptureDevice = true;
\r
4183 hr = renderDevices->Item( device, &devicePtr );
\r
4184 if ( FAILED( hr ) ) {
\r
4185 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4188 isCaptureDevice = false;
\r
4191 // get default device name
\r
4192 if ( isCaptureDevice ) {
\r
4193 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4194 if ( FAILED( hr ) ) {
\r
4195 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4200 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4201 if ( FAILED( hr ) ) {
\r
4202 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4207 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4208 if ( FAILED( hr ) ) {
\r
4209 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4212 PropVariantInit( &defaultDeviceNameProp );
\r
4214 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4215 if ( FAILED( hr ) ) {
\r
4216 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4220 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4223 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4224 if ( FAILED( hr ) ) {
\r
4225 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4229 PropVariantInit( &deviceNameProp );
\r
4231 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4232 if ( FAILED( hr ) ) {
\r
4233 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4237 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4240 if ( isCaptureDevice ) {
\r
4241 info.isDefaultInput = info.name == defaultDeviceName;
\r
4242 info.isDefaultOutput = false;
\r
4245 info.isDefaultInput = false;
\r
4246 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4250 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4251 if ( FAILED( hr ) ) {
\r
4252 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4256 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4257 if ( FAILED( hr ) ) {
\r
4258 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4262 if ( isCaptureDevice ) {
\r
4263 info.inputChannels = deviceFormat->nChannels;
\r
4264 info.outputChannels = 0;
\r
4265 info.duplexChannels = 0;
\r
4268 info.inputChannels = 0;
\r
4269 info.outputChannels = deviceFormat->nChannels;
\r
4270 info.duplexChannels = 0;
\r
4274 info.sampleRates.clear();
\r
4276 // allow support for all sample rates as we have a built-in sample rate converter
\r
4277 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4278 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4280 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4283 info.nativeFormats = 0;
\r
4285 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4286 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4287 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4289 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4290 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4292 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4293 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4296 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4297 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4298 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4300 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4301 info.nativeFormats |= RTAUDIO_SINT8;
\r
4303 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4304 info.nativeFormats |= RTAUDIO_SINT16;
\r
4306 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4307 info.nativeFormats |= RTAUDIO_SINT24;
\r
4309 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4310 info.nativeFormats |= RTAUDIO_SINT32;
\r
4315 info.probed = true;
\r
4318 // release all references
\r
4319 PropVariantClear( &deviceNameProp );
\r
4320 PropVariantClear( &defaultDeviceNameProp );
\r
4322 SAFE_RELEASE( captureDevices );
\r
4323 SAFE_RELEASE( renderDevices );
\r
4324 SAFE_RELEASE( devicePtr );
\r
4325 SAFE_RELEASE( defaultDevicePtr );
\r
4326 SAFE_RELEASE( audioClient );
\r
4327 SAFE_RELEASE( devicePropStore );
\r
4328 SAFE_RELEASE( defaultDevicePropStore );
\r
4330 CoTaskMemFree( deviceFormat );
\r
4331 CoTaskMemFree( closestMatchFormat );
\r
4333 if ( !errorText_.empty() )
\r
4334 error( errorType );
\r
4338 //-----------------------------------------------------------------------------
\r
4340 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4342 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4343 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4351 //-----------------------------------------------------------------------------
\r
4353 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4355 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4356 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4364 //-----------------------------------------------------------------------------
\r
4366 void RtApiWasapi::closeStream( void )
\r
4368 if ( stream_.state == STREAM_CLOSED ) {
\r
4369 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4370 error( RtAudioError::WARNING );
\r
4374 if ( stream_.state != STREAM_STOPPED )
\r
4377 // clean up stream memory
\r
4378 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4379 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4381 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4382 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4384 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4385 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4387 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4388 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4390 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4391 stream_.apiHandle = NULL;
\r
4393 for ( int i = 0; i < 2; i++ ) {
\r
4394 if ( stream_.userBuffer[i] ) {
\r
4395 free( stream_.userBuffer[i] );
\r
4396 stream_.userBuffer[i] = 0;
\r
4400 if ( stream_.deviceBuffer ) {
\r
4401 free( stream_.deviceBuffer );
\r
4402 stream_.deviceBuffer = 0;
\r
4405 // update stream state
\r
4406 stream_.state = STREAM_CLOSED;
\r
4409 //-----------------------------------------------------------------------------
\r
4411 void RtApiWasapi::startStream( void )
\r
4415 if ( stream_.state == STREAM_RUNNING ) {
\r
4416 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4417 error( RtAudioError::WARNING );
\r
4421 // update stream state
\r
4422 stream_.state = STREAM_RUNNING;
\r
4424 // create WASAPI stream thread
\r
4425 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4427 if ( !stream_.callbackInfo.thread ) {
\r
4428 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4429 error( RtAudioError::THREAD_ERROR );
\r
4432 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4433 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4437 //-----------------------------------------------------------------------------
\r
4439 void RtApiWasapi::stopStream( void )
\r
4443 if ( stream_.state == STREAM_STOPPED ) {
\r
4444 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4445 error( RtAudioError::WARNING );
\r
4449 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4450 stream_.state = STREAM_STOPPING;
\r
4452 // wait until stream thread is stopped
\r
4453 while( stream_.state != STREAM_STOPPED ) {
\r
4457 // Wait for the last buffer to play before stopping.
\r
4458 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4460 // stop capture client if applicable
\r
4461 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4462 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4463 if ( FAILED( hr ) ) {
\r
4464 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4465 error( RtAudioError::DRIVER_ERROR );
\r
4470 // stop render client if applicable
\r
4471 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4472 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4473 if ( FAILED( hr ) ) {
\r
4474 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4475 error( RtAudioError::DRIVER_ERROR );
\r
4480 // close thread handle
\r
4481 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4482 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4483 error( RtAudioError::THREAD_ERROR );
\r
4487 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4490 //-----------------------------------------------------------------------------
\r
4492 void RtApiWasapi::abortStream( void )
\r
4496 if ( stream_.state == STREAM_STOPPED ) {
\r
4497 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4498 error( RtAudioError::WARNING );
\r
4502 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4503 stream_.state = STREAM_STOPPING;
\r
4505 // wait until stream thread is stopped
\r
4506 while ( stream_.state != STREAM_STOPPED ) {
\r
4510 // stop capture client if applicable
\r
4511 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4512 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4513 if ( FAILED( hr ) ) {
\r
4514 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4515 error( RtAudioError::DRIVER_ERROR );
\r
4520 // stop render client if applicable
\r
4521 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4522 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4523 if ( FAILED( hr ) ) {
\r
4524 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4525 error( RtAudioError::DRIVER_ERROR );
\r
4530 // close thread handle
\r
4531 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4532 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4533 error( RtAudioError::THREAD_ERROR );
\r
4537 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4540 //-----------------------------------------------------------------------------
\r
4542 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4543 unsigned int firstChannel, unsigned int sampleRate,
\r
4544 RtAudioFormat format, unsigned int* bufferSize,
\r
4545 RtAudio::StreamOptions* options )
\r
4547 bool methodResult = FAILURE;
\r
4548 unsigned int captureDeviceCount = 0;
\r
4549 unsigned int renderDeviceCount = 0;
\r
4551 IMMDeviceCollection* captureDevices = NULL;
\r
4552 IMMDeviceCollection* renderDevices = NULL;
\r
4553 IMMDevice* devicePtr = NULL;
\r
4554 WAVEFORMATEX* deviceFormat = NULL;
\r
4555 unsigned int bufferBytes;
\r
4556 stream_.state = STREAM_STOPPED;
\r
4558 // create API Handle if not already created
\r
4559 if ( !stream_.apiHandle )
\r
4560 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4562 // Count capture devices
\r
4563 errorText_.clear();
\r
4564 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4565 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4566 if ( FAILED( hr ) ) {
\r
4567 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4571 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4572 if ( FAILED( hr ) ) {
\r
4573 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4577 // Count render devices
\r
4578 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4579 if ( FAILED( hr ) ) {
\r
4580 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4584 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4585 if ( FAILED( hr ) ) {
\r
4586 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4590 // validate device index
\r
4591 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4592 errorType = RtAudioError::INVALID_USE;
\r
4593 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4597 // determine whether index falls within capture or render devices
\r
4598 if ( device >= renderDeviceCount ) {
\r
4599 if ( mode != INPUT ) {
\r
4600 errorType = RtAudioError::INVALID_USE;
\r
4601 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4605 // retrieve captureAudioClient from devicePtr
\r
4606 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4608 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4609 if ( FAILED( hr ) ) {
\r
4610 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4614 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4615 NULL, ( void** ) &captureAudioClient );
\r
4616 if ( FAILED( hr ) ) {
\r
4617 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4621 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4622 if ( FAILED( hr ) ) {
\r
4623 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4627 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4628 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4631 if ( mode != OUTPUT ) {
\r
4632 errorType = RtAudioError::INVALID_USE;
\r
4633 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4637 // retrieve renderAudioClient from devicePtr
\r
4638 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4640 hr = renderDevices->Item( device, &devicePtr );
\r
4641 if ( FAILED( hr ) ) {
\r
4642 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4646 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4647 NULL, ( void** ) &renderAudioClient );
\r
4648 if ( FAILED( hr ) ) {
\r
4649 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4653 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4654 if ( FAILED( hr ) ) {
\r
4655 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4659 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4660 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4663 // fill stream data
\r
4664 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4665 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4666 stream_.mode = DUPLEX;
\r
4669 stream_.mode = mode;
\r
4672 stream_.device[mode] = device;
\r
4673 stream_.doByteSwap[mode] = false;
\r
4674 stream_.sampleRate = sampleRate;
\r
4675 stream_.bufferSize = *bufferSize;
\r
4676 stream_.nBuffers = 1;
\r
4677 stream_.nUserChannels[mode] = channels;
\r
4678 stream_.channelOffset[mode] = firstChannel;
\r
4679 stream_.userFormat = format;
\r
4680 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4682 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4683 stream_.userInterleaved = false;
\r
4685 stream_.userInterleaved = true;
\r
4686 stream_.deviceInterleaved[mode] = true;
\r
4688 // Set flags for buffer conversion.
\r
4689 stream_.doConvertBuffer[mode] = false;
\r
4690 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4691 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4692 stream_.doConvertBuffer[mode] = true;
\r
4693 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4694 stream_.nUserChannels[mode] > 1 )
\r
4695 stream_.doConvertBuffer[mode] = true;
\r
4697 if ( stream_.doConvertBuffer[mode] )
\r
4698 setConvertInfo( mode, 0 );
\r
4700 // Allocate necessary internal buffers
\r
4701 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4703 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4704 if ( !stream_.userBuffer[mode] ) {
\r
4705 errorType = RtAudioError::MEMORY_ERROR;
\r
4706 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4710 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4711 stream_.callbackInfo.priority = 15;
\r
4713 stream_.callbackInfo.priority = 0;
\r
4715 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4716 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4718 methodResult = SUCCESS;
\r
4722 SAFE_RELEASE( captureDevices );
\r
4723 SAFE_RELEASE( renderDevices );
\r
4724 SAFE_RELEASE( devicePtr );
\r
4725 CoTaskMemFree( deviceFormat );
\r
4727 // if method failed, close the stream
\r
4728 if ( methodResult == FAILURE )
\r
4731 if ( !errorText_.empty() )
\r
4732 error( errorType );
\r
4733 return methodResult;
\r
4736 //=============================================================================
\r
4738 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4741 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4746 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4749 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4754 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4757 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4762 //-----------------------------------------------------------------------------
\r
4764 void RtApiWasapi::wasapiThread()
\r
4766 // as this is a new thread, we must CoInitialize it
\r
4767 CoInitialize( NULL );
\r
4771 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4772 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4773 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4774 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4775 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4776 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4778 WAVEFORMATEX* captureFormat = NULL;
\r
4779 WAVEFORMATEX* renderFormat = NULL;
\r
4780 float captureSrRatio = 0.0f;
\r
4781 float renderSrRatio = 0.0f;
\r
4782 WasapiBuffer captureBuffer;
\r
4783 WasapiBuffer renderBuffer;
\r
4785 // declare local stream variables
\r
4786 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4787 BYTE* streamBuffer = NULL;
\r
4788 unsigned long captureFlags = 0;
\r
4789 unsigned int bufferFrameCount = 0;
\r
4790 unsigned int numFramesPadding = 0;
\r
4791 unsigned int convBufferSize = 0;
\r
4792 bool callbackPushed = false;
\r
4793 bool callbackPulled = false;
\r
4794 bool callbackStopped = false;
\r
4795 int callbackResult = 0;
\r
4797 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4798 char* convBuffer = NULL;
\r
4799 unsigned int convBuffSize = 0;
\r
4800 unsigned int deviceBuffSize = 0;
\r
4802 errorText_.clear();
\r
4803 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4805 // Attempt to assign "Pro Audio" characteristic to thread
\r
4806 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4808 DWORD taskIndex = 0;
\r
4809 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4810 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4811 FreeLibrary( AvrtDll );
\r
4814 // start capture stream if applicable
\r
4815 if ( captureAudioClient ) {
\r
4816 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4817 if ( FAILED( hr ) ) {
\r
4818 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4822 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4824 // initialize capture stream according to desire buffer size
\r
4825 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4826 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4828 if ( !captureClient ) {
\r
4829 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4830 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4831 desiredBufferPeriod,
\r
4832 desiredBufferPeriod,
\r
4835 if ( FAILED( hr ) ) {
\r
4836 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4840 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4841 ( void** ) &captureClient );
\r
4842 if ( FAILED( hr ) ) {
\r
4843 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4847 // configure captureEvent to trigger on every available capture buffer
\r
4848 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4849 if ( !captureEvent ) {
\r
4850 errorType = RtAudioError::SYSTEM_ERROR;
\r
4851 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4855 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4856 if ( FAILED( hr ) ) {
\r
4857 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4861 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4862 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4865 unsigned int inBufferSize = 0;
\r
4866 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4867 if ( FAILED( hr ) ) {
\r
4868 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4872 // scale outBufferSize according to stream->user sample rate ratio
\r
4873 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4874 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4876 // set captureBuffer size
\r
4877 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4879 // reset the capture stream
\r
4880 hr = captureAudioClient->Reset();
\r
4881 if ( FAILED( hr ) ) {
\r
4882 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4886 // start the capture stream
\r
4887 hr = captureAudioClient->Start();
\r
4888 if ( FAILED( hr ) ) {
\r
4889 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4894 // start render stream if applicable
\r
4895 if ( renderAudioClient ) {
\r
4896 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4897 if ( FAILED( hr ) ) {
\r
4898 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4902 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4904 // initialize render stream according to desire buffer size
\r
4905 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4906 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4908 if ( !renderClient ) {
\r
4909 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4910 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4911 desiredBufferPeriod,
\r
4912 desiredBufferPeriod,
\r
4915 if ( FAILED( hr ) ) {
\r
4916 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4920 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4921 ( void** ) &renderClient );
\r
4922 if ( FAILED( hr ) ) {
\r
4923 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4927 // configure renderEvent to trigger on every available render buffer
\r
4928 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4929 if ( !renderEvent ) {
\r
4930 errorType = RtAudioError::SYSTEM_ERROR;
\r
4931 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4935 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4936 if ( FAILED( hr ) ) {
\r
4937 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4941 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4942 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4945 unsigned int outBufferSize = 0;
\r
4946 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4947 if ( FAILED( hr ) ) {
\r
4948 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4952 // scale inBufferSize according to user->stream sample rate ratio
\r
4953 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4954 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4956 // set renderBuffer size
\r
4957 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4959 // reset the render stream
\r
4960 hr = renderAudioClient->Reset();
\r
4961 if ( FAILED( hr ) ) {
\r
4962 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4966 // start the render stream
\r
4967 hr = renderAudioClient->Start();
\r
4968 if ( FAILED( hr ) ) {
\r
4969 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4974 if ( stream_.mode == INPUT ) {
\r
4975 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4976 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4978 else if ( stream_.mode == OUTPUT ) {
\r
4979 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4980 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4982 else if ( stream_.mode == DUPLEX ) {
\r
4983 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4984 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4985 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4986 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4989 convBuffer = ( char* ) malloc( convBuffSize );
\r
4990 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4991 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4992 errorType = RtAudioError::MEMORY_ERROR;
\r
4993 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4997 // stream process loop
\r
4998 while ( stream_.state != STREAM_STOPPING ) {
\r
4999 if ( !callbackPulled ) {
\r
5002 // 1. Pull callback buffer from inputBuffer
\r
5003 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
5004 // Convert callback buffer to user format
\r
5006 if ( captureAudioClient ) {
\r
5007 // Pull callback buffer from inputBuffer
\r
5008 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
5009 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
5010 stream_.deviceFormat[INPUT] );
\r
5012 if ( callbackPulled ) {
\r
5013 // Convert callback buffer to user sample rate
\r
5014 convertBufferWasapi( stream_.deviceBuffer,
\r
5016 stream_.nDeviceChannels[INPUT],
\r
5017 captureFormat->nSamplesPerSec,
\r
5018 stream_.sampleRate,
\r
5019 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
5021 stream_.deviceFormat[INPUT] );
\r
5023 if ( stream_.doConvertBuffer[INPUT] ) {
\r
5024 // Convert callback buffer to user format
\r
5025 convertBuffer( stream_.userBuffer[INPUT],
\r
5026 stream_.deviceBuffer,
\r
5027 stream_.convertInfo[INPUT] );
\r
5030 // no further conversion, simple copy deviceBuffer to userBuffer
\r
5031 memcpy( stream_.userBuffer[INPUT],
\r
5032 stream_.deviceBuffer,
\r
5033 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
5038 // if there is no capture stream, set callbackPulled flag
\r
5039 callbackPulled = true;
\r
5042 // Execute Callback
\r
5043 // ================
\r
5044 // 1. Execute user callback method
\r
5045 // 2. Handle return value from callback
\r
5047 // if callback has not requested the stream to stop
\r
5048 if ( callbackPulled && !callbackStopped ) {
\r
5049 // Execute user callback method
\r
5050 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
5051 stream_.userBuffer[INPUT],
\r
5052 stream_.bufferSize,
\r
5054 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
5055 stream_.callbackInfo.userData );
\r
5057 // Handle return value from callback
\r
5058 if ( callbackResult == 1 ) {
\r
5059 // instantiate a thread to stop this thread
\r
5060 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
5061 if ( !threadHandle ) {
\r
5062 errorType = RtAudioError::THREAD_ERROR;
\r
5063 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
5066 else if ( !CloseHandle( threadHandle ) ) {
\r
5067 errorType = RtAudioError::THREAD_ERROR;
\r
5068 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
5072 callbackStopped = true;
\r
5074 else if ( callbackResult == 2 ) {
\r
5075 // instantiate a thread to stop this thread
\r
5076 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
5077 if ( !threadHandle ) {
\r
5078 errorType = RtAudioError::THREAD_ERROR;
\r
5079 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
5082 else if ( !CloseHandle( threadHandle ) ) {
\r
5083 errorType = RtAudioError::THREAD_ERROR;
\r
5084 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
5088 callbackStopped = true;
\r
5093 // Callback Output
\r
5094 // ===============
\r
5095 // 1. Convert callback buffer to stream format
\r
5096 // 2. Convert callback buffer to stream sample rate and channel count
\r
5097 // 3. Push callback buffer into outputBuffer
\r
5099 if ( renderAudioClient && callbackPulled ) {
\r
5100 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
5101 // Convert callback buffer to stream format
\r
5102 convertBuffer( stream_.deviceBuffer,
\r
5103 stream_.userBuffer[OUTPUT],
\r
5104 stream_.convertInfo[OUTPUT] );
\r
5108 // Convert callback buffer to stream sample rate
\r
5109 convertBufferWasapi( convBuffer,
\r
5110 stream_.deviceBuffer,
\r
5111 stream_.nDeviceChannels[OUTPUT],
\r
5112 stream_.sampleRate,
\r
5113 renderFormat->nSamplesPerSec,
\r
5114 stream_.bufferSize,
\r
5116 stream_.deviceFormat[OUTPUT] );
\r
5118 // Push callback buffer into outputBuffer
\r
5119 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
5120 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
5121 stream_.deviceFormat[OUTPUT] );
\r
5124 // if there is no render stream, set callbackPushed flag
\r
5125 callbackPushed = true;
\r
5130 // 1. Get capture buffer from stream
\r
5131 // 2. Push capture buffer into inputBuffer
\r
5132 // 3. If 2. was successful: Release capture buffer
\r
5134 if ( captureAudioClient ) {
\r
5135 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
5136 if ( !callbackPulled ) {
\r
5137 WaitForSingleObject( captureEvent, INFINITE );
\r
5140 // Get capture buffer from stream
\r
5141 hr = captureClient->GetBuffer( &streamBuffer,
\r
5142 &bufferFrameCount,
\r
5143 &captureFlags, NULL, NULL );
\r
5144 if ( FAILED( hr ) ) {
\r
5145 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
5149 if ( bufferFrameCount != 0 ) {
\r
5150 // Push capture buffer into inputBuffer
\r
5151 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
5152 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
5153 stream_.deviceFormat[INPUT] ) )
\r
5155 // Release capture buffer
\r
5156 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
5157 if ( FAILED( hr ) ) {
\r
5158 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5164 // Inform WASAPI that capture was unsuccessful
\r
5165 hr = captureClient->ReleaseBuffer( 0 );
\r
5166 if ( FAILED( hr ) ) {
\r
5167 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5174 // Inform WASAPI that capture was unsuccessful
\r
5175 hr = captureClient->ReleaseBuffer( 0 );
\r
5176 if ( FAILED( hr ) ) {
\r
5177 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5185 // 1. Get render buffer from stream
\r
5186 // 2. Pull next buffer from outputBuffer
\r
5187 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5188 // Release render buffer
\r
5190 if ( renderAudioClient ) {
\r
5191 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5192 if ( callbackPulled && !callbackPushed ) {
\r
5193 WaitForSingleObject( renderEvent, INFINITE );
\r
5196 // Get render buffer from stream
\r
5197 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5198 if ( FAILED( hr ) ) {
\r
5199 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5203 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5204 if ( FAILED( hr ) ) {
\r
5205 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5209 bufferFrameCount -= numFramesPadding;
\r
5211 if ( bufferFrameCount != 0 ) {
\r
5212 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5213 if ( FAILED( hr ) ) {
\r
5214 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5218 // Pull next buffer from outputBuffer
\r
5219 // Fill render buffer with next buffer
\r
5220 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5221 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5222 stream_.deviceFormat[OUTPUT] ) )
\r
5224 // Release render buffer
\r
5225 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5226 if ( FAILED( hr ) ) {
\r
5227 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5233 // Inform WASAPI that render was unsuccessful
\r
5234 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5235 if ( FAILED( hr ) ) {
\r
5236 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5243 // Inform WASAPI that render was unsuccessful
\r
5244 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5245 if ( FAILED( hr ) ) {
\r
5246 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5252 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5253 if ( callbackPushed ) {
\r
5254 callbackPulled = false;
\r
5255 // tick stream time
\r
5256 RtApi::tickStreamTime();
\r
5263 CoTaskMemFree( captureFormat );
\r
5264 CoTaskMemFree( renderFormat );
\r
5266 free ( convBuffer );
\r
5270 // update stream state
\r
5271 stream_.state = STREAM_STOPPED;
\r
5273 if ( errorText_.empty() )
\r
5276 error( errorType );
\r
5279 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5283 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5285 // Modified by Robin Davies, October 2005
\r
5286 // - Improvements to DirectX pointer chasing.
\r
5287 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5288 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5289 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5290 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5292 #include <mmsystem.h>
\r
5293 #include <mmreg.h>
\r
5294 #include <dsound.h>
\r
5295 #include <assert.h>
\r
5296 #include <algorithm>
\r
5298 #if defined(__MINGW32__)
\r
5299 // missing from latest mingw winapi
\r
5300 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5301 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5302 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5303 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5306 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5308 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5309 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5312 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5314 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5315 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5316 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5317 return pointer >= earlierPointer && pointer < laterPointer;
\r
5320 // A structure to hold various information related to the DirectSound
\r
5321 // API implementation.
\r
5323 unsigned int drainCounter; // Tracks callback counts when draining
\r
5324 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5328 UINT bufferPointer[2];
\r
5329 DWORD dsBufferSize[2];
\r
5330 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5334 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5337 // Declarations for utility functions, callbacks, and structures
\r
5338 // specific to the DirectSound implementation.
\r
5339 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5340 LPCTSTR description,
\r
5342 LPVOID lpContext );
\r
5344 static const char* getErrorString( int code );
\r
5346 static unsigned __stdcall callbackHandler( void *ptr );
\r
5355 : found(false) { validId[0] = false; validId[1] = false; }
\r
5358 struct DsProbeData {
\r
5360 std::vector<struct DsDevice>* dsDevices;
\r
5363 RtApiDs :: RtApiDs()
\r
5365 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5366 // accept whatever the mainline chose for a threading model.
\r
5367 coInitialized_ = false;
\r
5368 HRESULT hr = CoInitialize( NULL );
\r
5369 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5372 RtApiDs :: ~RtApiDs()
\r
5374 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5375 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5378 // The DirectSound default output is always the first device.
\r
5379 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5384 // The DirectSound default input is always the first input device,
\r
5385 // which is the first capture device enumerated.
\r
5386 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5391 unsigned int RtApiDs :: getDeviceCount( void )
\r
5393 // Set query flag for previously found devices to false, so that we
\r
5394 // can check for any devices that have disappeared.
\r
5395 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5396 dsDevices[i].found = false;
\r
5398 // Query DirectSound devices.
\r
5399 struct DsProbeData probeInfo;
\r
5400 probeInfo.isInput = false;
\r
5401 probeInfo.dsDevices = &dsDevices;
\r
5402 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5403 if ( FAILED( result ) ) {
\r
5404 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5405 errorText_ = errorStream_.str();
\r
5406 error( RtAudioError::WARNING );
\r
5409 // Query DirectSoundCapture devices.
\r
5410 probeInfo.isInput = true;
\r
5411 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5412 if ( FAILED( result ) ) {
\r
5413 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5414 errorText_ = errorStream_.str();
\r
5415 error( RtAudioError::WARNING );
\r
5418 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
\r
5419 for ( unsigned int i=0; i<dsDevices.size(); ) {
\r
5420 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
\r
5424 return static_cast<unsigned int>(dsDevices.size());
\r
5427 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5429 RtAudio::DeviceInfo info;
\r
5430 info.probed = false;
\r
5432 if ( dsDevices.size() == 0 ) {
\r
5433 // Force a query of all devices
\r
5435 if ( dsDevices.size() == 0 ) {
\r
5436 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5437 error( RtAudioError::INVALID_USE );
\r
5442 if ( device >= dsDevices.size() ) {
\r
5443 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5444 error( RtAudioError::INVALID_USE );
\r
5449 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5451 LPDIRECTSOUND output;
\r
5453 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5454 if ( FAILED( result ) ) {
\r
5455 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5456 errorText_ = errorStream_.str();
\r
5457 error( RtAudioError::WARNING );
\r
5461 outCaps.dwSize = sizeof( outCaps );
\r
5462 result = output->GetCaps( &outCaps );
\r
5463 if ( FAILED( result ) ) {
\r
5464 output->Release();
\r
5465 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5466 errorText_ = errorStream_.str();
\r
5467 error( RtAudioError::WARNING );
\r
5471 // Get output channel information.
\r
5472 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5474 // Get sample rate information.
\r
5475 info.sampleRates.clear();
\r
5476 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5477 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5478 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5479 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5481 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5482 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5486 // Get format information.
\r
5487 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5488 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5490 output->Release();
\r
5492 if ( getDefaultOutputDevice() == device )
\r
5493 info.isDefaultOutput = true;
\r
5495 if ( dsDevices[ device ].validId[1] == false ) {
\r
5496 info.name = dsDevices[ device ].name;
\r
5497 info.probed = true;
\r
5503 LPDIRECTSOUNDCAPTURE input;
\r
5504 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5505 if ( FAILED( result ) ) {
\r
5506 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5507 errorText_ = errorStream_.str();
\r
5508 error( RtAudioError::WARNING );
\r
5513 inCaps.dwSize = sizeof( inCaps );
\r
5514 result = input->GetCaps( &inCaps );
\r
5515 if ( FAILED( result ) ) {
\r
5517 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5518 errorText_ = errorStream_.str();
\r
5519 error( RtAudioError::WARNING );
\r
5523 // Get input channel information.
\r
5524 info.inputChannels = inCaps.dwChannels;
\r
5526 // Get sample rate and format information.
\r
5527 std::vector<unsigned int> rates;
\r
5528 if ( inCaps.dwChannels >= 2 ) {
\r
5529 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5530 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5531 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5532 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5533 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5534 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5535 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5536 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5538 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5539 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5540 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5541 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5542 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5544 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5545 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5546 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5547 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5548 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5551 else if ( inCaps.dwChannels == 1 ) {
\r
5552 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5553 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5554 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5555 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5556 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5557 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5558 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5559 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5561 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5562 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5563 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5564 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5565 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5567 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5568 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5569 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5570 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5571 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5574 else info.inputChannels = 0; // technically, this would be an error
\r
5578 if ( info.inputChannels == 0 ) return info;
\r
5580 // Copy the supported rates to the info structure but avoid duplication.
\r
5582 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5584 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5585 if ( rates[i] == info.sampleRates[j] ) {
\r
5590 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5592 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5594 // If device opens for both playback and capture, we determine the channels.
\r
5595 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5596 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5598 if ( device == 0 ) info.isDefaultInput = true;
\r
5600 // Copy name and return.
\r
5601 info.name = dsDevices[ device ].name;
\r
5602 info.probed = true;
\r
5606 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5607 unsigned int firstChannel, unsigned int sampleRate,
\r
5608 RtAudioFormat format, unsigned int *bufferSize,
\r
5609 RtAudio::StreamOptions *options )
\r
5611 if ( channels + firstChannel > 2 ) {
\r
5612 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5616 size_t nDevices = dsDevices.size();
\r
5617 if ( nDevices == 0 ) {
\r
5618 // This should not happen because a check is made before this function is called.
\r
5619 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5623 if ( device >= nDevices ) {
\r
5624 // This should not happen because a check is made before this function is called.
\r
5625 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5629 if ( mode == OUTPUT ) {
\r
5630 if ( dsDevices[ device ].validId[0] == false ) {
\r
5631 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5632 errorText_ = errorStream_.str();
\r
5636 else { // mode == INPUT
\r
5637 if ( dsDevices[ device ].validId[1] == false ) {
\r
5638 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5639 errorText_ = errorStream_.str();
\r
5644 // According to a note in PortAudio, using GetDesktopWindow()
\r
5645 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5646 // that occur when the application's window is not the foreground
\r
5647 // window. Also, if the application window closes before the
\r
5648 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5649 // problems when using GetDesktopWindow() but it seems fine now
\r
5650 // (January 2010). I'll leave it commented here.
\r
5651 // HWND hWnd = GetForegroundWindow();
\r
5652 HWND hWnd = GetDesktopWindow();
\r
5654 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5655 // two. This is a judgement call and a value of two is probably too
\r
5656 // low for capture, but it should work for playback.
\r
5658 if ( options ) nBuffers = options->numberOfBuffers;
\r
5659 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5660 if ( nBuffers < 2 ) nBuffers = 3;
\r
5662 // Check the lower range of the user-specified buffer size and set
\r
5663 // (arbitrarily) to a lower bound of 32.
\r
5664 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5666 // Create the wave format structure. The data format setting will
\r
5667 // be determined later.
\r
5668 WAVEFORMATEX waveFormat;
\r
5669 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5670 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5671 waveFormat.nChannels = channels + firstChannel;
\r
5672 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5674 // Determine the device buffer size. By default, we'll use the value
\r
5675 // defined above (32K), but we will grow it to make allowances for
\r
5676 // very large software buffer sizes.
\r
5677 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5678 DWORD dsPointerLeadTime = 0;
\r
5680 void *ohandle = 0, *bhandle = 0;
\r
5682 if ( mode == OUTPUT ) {
\r
5684 LPDIRECTSOUND output;
\r
5685 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5686 if ( FAILED( result ) ) {
\r
5687 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5688 errorText_ = errorStream_.str();
\r
5693 outCaps.dwSize = sizeof( outCaps );
\r
5694 result = output->GetCaps( &outCaps );
\r
5695 if ( FAILED( result ) ) {
\r
5696 output->Release();
\r
5697 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5698 errorText_ = errorStream_.str();
\r
5702 // Check channel information.
\r
5703 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5704 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5705 errorText_ = errorStream_.str();
\r
5709 // Check format information. Use 16-bit format unless not
\r
5710 // supported or user requests 8-bit.
\r
5711 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5712 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5713 waveFormat.wBitsPerSample = 16;
\r
5714 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5717 waveFormat.wBitsPerSample = 8;
\r
5718 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5720 stream_.userFormat = format;
\r
5722 // Update wave format structure and buffer information.
\r
5723 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5724 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5725 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5727 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5728 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5729 dsBufferSize *= 2;
\r
5731 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5732 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5733 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5734 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5735 if ( FAILED( result ) ) {
\r
5736 output->Release();
\r
5737 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5738 errorText_ = errorStream_.str();
\r
5742 // Even though we will write to the secondary buffer, we need to
\r
5743 // access the primary buffer to set the correct output format
\r
5744 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5745 // buffer description.
\r
5746 DSBUFFERDESC bufferDescription;
\r
5747 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5748 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5749 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5751 // Obtain the primary buffer
\r
5752 LPDIRECTSOUNDBUFFER buffer;
\r
5753 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5754 if ( FAILED( result ) ) {
\r
5755 output->Release();
\r
5756 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5757 errorText_ = errorStream_.str();
\r
5761 // Set the primary DS buffer sound format.
\r
5762 result = buffer->SetFormat( &waveFormat );
\r
5763 if ( FAILED( result ) ) {
\r
5764 output->Release();
\r
5765 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5766 errorText_ = errorStream_.str();
\r
5770 // Setup the secondary DS buffer description.
\r
5771 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5772 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5773 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5774 DSBCAPS_GLOBALFOCUS |
\r
5775 DSBCAPS_GETCURRENTPOSITION2 |
\r
5776 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5777 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5778 bufferDescription.lpwfxFormat = &waveFormat;
\r
5780 // Try to create the secondary DS buffer. If that doesn't work,
\r
5781 // try to use software mixing. Otherwise, there's a problem.
\r
5782 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5783 if ( FAILED( result ) ) {
\r
5784 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5785 DSBCAPS_GLOBALFOCUS |
\r
5786 DSBCAPS_GETCURRENTPOSITION2 |
\r
5787 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5788 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5789 if ( FAILED( result ) ) {
\r
5790 output->Release();
\r
5791 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5792 errorText_ = errorStream_.str();
\r
5797 // Get the buffer size ... might be different from what we specified.
\r
5799 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5800 result = buffer->GetCaps( &dsbcaps );
\r
5801 if ( FAILED( result ) ) {
\r
5802 output->Release();
\r
5803 buffer->Release();
\r
5804 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5805 errorText_ = errorStream_.str();
\r
5809 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5811 // Lock the DS buffer
\r
5814 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5815 if ( FAILED( result ) ) {
\r
5816 output->Release();
\r
5817 buffer->Release();
\r
5818 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5819 errorText_ = errorStream_.str();
\r
5823 // Zero the DS buffer
\r
5824 ZeroMemory( audioPtr, dataLen );
\r
5826 // Unlock the DS buffer
\r
5827 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5828 if ( FAILED( result ) ) {
\r
5829 output->Release();
\r
5830 buffer->Release();
\r
5831 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5832 errorText_ = errorStream_.str();
\r
5836 ohandle = (void *) output;
\r
5837 bhandle = (void *) buffer;
\r
5840 if ( mode == INPUT ) {
\r
5842 LPDIRECTSOUNDCAPTURE input;
\r
5843 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5844 if ( FAILED( result ) ) {
\r
5845 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5846 errorText_ = errorStream_.str();
\r
5851 inCaps.dwSize = sizeof( inCaps );
\r
5852 result = input->GetCaps( &inCaps );
\r
5853 if ( FAILED( result ) ) {
\r
5855 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5856 errorText_ = errorStream_.str();
\r
5860 // Check channel information.
\r
5861 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5862 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5866 // Check format information. Use 16-bit format unless user
\r
5867 // requests 8-bit.
\r
5868 DWORD deviceFormats;
\r
5869 if ( channels + firstChannel == 2 ) {
\r
5870 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5871 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5872 waveFormat.wBitsPerSample = 8;
\r
5873 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5875 else { // assume 16-bit is supported
\r
5876 waveFormat.wBitsPerSample = 16;
\r
5877 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5880 else { // channel == 1
\r
5881 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5882 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5883 waveFormat.wBitsPerSample = 8;
\r
5884 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5886 else { // assume 16-bit is supported
\r
5887 waveFormat.wBitsPerSample = 16;
\r
5888 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5891 stream_.userFormat = format;
\r
5893 // Update wave format structure and buffer information.
\r
5894 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5895 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5896 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5898 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5899 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5900 dsBufferSize *= 2;
\r
5902 // Setup the secondary DS buffer description.
\r
5903 DSCBUFFERDESC bufferDescription;
\r
5904 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5905 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5906 bufferDescription.dwFlags = 0;
\r
5907 bufferDescription.dwReserved = 0;
\r
5908 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5909 bufferDescription.lpwfxFormat = &waveFormat;
\r
5911 // Create the capture buffer.
\r
5912 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5913 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5914 if ( FAILED( result ) ) {
\r
5916 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5917 errorText_ = errorStream_.str();
\r
5921 // Get the buffer size ... might be different from what we specified.
\r
5922 DSCBCAPS dscbcaps;
\r
5923 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5924 result = buffer->GetCaps( &dscbcaps );
\r
5925 if ( FAILED( result ) ) {
\r
5927 buffer->Release();
\r
5928 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5929 errorText_ = errorStream_.str();
\r
5933 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5935 // NOTE: We could have a problem here if this is a duplex stream
\r
5936 // and the play and capture hardware buffer sizes are different
\r
5937 // (I'm actually not sure if that is a problem or not).
\r
5938 // Currently, we are not verifying that.
\r
5940 // Lock the capture buffer
\r
5943 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5944 if ( FAILED( result ) ) {
\r
5946 buffer->Release();
\r
5947 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5948 errorText_ = errorStream_.str();
\r
5952 // Zero the buffer
\r
5953 ZeroMemory( audioPtr, dataLen );
\r
5955 // Unlock the buffer
\r
5956 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5957 if ( FAILED( result ) ) {
\r
5959 buffer->Release();
\r
5960 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5961 errorText_ = errorStream_.str();
\r
5965 ohandle = (void *) input;
\r
5966 bhandle = (void *) buffer;
\r
5969 // Set various stream parameters
\r
5970 DsHandle *handle = 0;
\r
5971 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5972 stream_.nUserChannels[mode] = channels;
\r
5973 stream_.bufferSize = *bufferSize;
\r
5974 stream_.channelOffset[mode] = firstChannel;
\r
5975 stream_.deviceInterleaved[mode] = true;
\r
5976 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5977 else stream_.userInterleaved = true;
\r
5979 // Set flag for buffer conversion
\r
5980 stream_.doConvertBuffer[mode] = false;
\r
5981 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5982 stream_.doConvertBuffer[mode] = true;
\r
5983 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5984 stream_.doConvertBuffer[mode] = true;
\r
5985 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5986 stream_.nUserChannels[mode] > 1 )
\r
5987 stream_.doConvertBuffer[mode] = true;
\r
5989 // Allocate necessary internal buffers
\r
5990 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5991 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5992 if ( stream_.userBuffer[mode] == NULL ) {
\r
5993 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5997 if ( stream_.doConvertBuffer[mode] ) {
\r
5999 bool makeBuffer = true;
\r
6000 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6001 if ( mode == INPUT ) {
\r
6002 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6003 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6004 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
6008 if ( makeBuffer ) {
\r
6009 bufferBytes *= *bufferSize;
\r
6010 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6011 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6012 if ( stream_.deviceBuffer == NULL ) {
\r
6013 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
6019 // Allocate our DsHandle structures for the stream.
\r
6020 if ( stream_.apiHandle == 0 ) {
\r
6022 handle = new DsHandle;
\r
6024 catch ( std::bad_alloc& ) {
\r
6025 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
6029 // Create a manual-reset event.
\r
6030 handle->condition = CreateEvent( NULL, // no security
\r
6031 TRUE, // manual-reset
\r
6032 FALSE, // non-signaled initially
\r
6033 NULL ); // unnamed
\r
6034 stream_.apiHandle = (void *) handle;
\r
6037 handle = (DsHandle *) stream_.apiHandle;
\r
6038 handle->id[mode] = ohandle;
\r
6039 handle->buffer[mode] = bhandle;
\r
6040 handle->dsBufferSize[mode] = dsBufferSize;
\r
6041 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
6043 stream_.device[mode] = device;
\r
6044 stream_.state = STREAM_STOPPED;
\r
6045 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
6046 // We had already set up an output stream.
\r
6047 stream_.mode = DUPLEX;
\r
6049 stream_.mode = mode;
\r
6050 stream_.nBuffers = nBuffers;
\r
6051 stream_.sampleRate = sampleRate;
\r
6053 // Setup the buffer conversion information structure.
\r
6054 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6056 // Setup the callback thread.
\r
6057 if ( stream_.callbackInfo.isRunning == false ) {
\r
6058 unsigned threadId;
\r
6059 stream_.callbackInfo.isRunning = true;
\r
6060 stream_.callbackInfo.object = (void *) this;
\r
6061 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
6062 &stream_.callbackInfo, 0, &threadId );
\r
6063 if ( stream_.callbackInfo.thread == 0 ) {
\r
6064 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
6068 // Boost DS thread priority
\r
6069 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
6075 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6076 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6077 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6078 if ( buffer ) buffer->Release();
\r
6079 object->Release();
\r
6081 if ( handle->buffer[1] ) {
\r
6082 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6083 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6084 if ( buffer ) buffer->Release();
\r
6085 object->Release();
\r
6087 CloseHandle( handle->condition );
\r
6089 stream_.apiHandle = 0;
\r
6092 for ( int i=0; i<2; i++ ) {
\r
6093 if ( stream_.userBuffer[i] ) {
\r
6094 free( stream_.userBuffer[i] );
\r
6095 stream_.userBuffer[i] = 0;
\r
6099 if ( stream_.deviceBuffer ) {
\r
6100 free( stream_.deviceBuffer );
\r
6101 stream_.deviceBuffer = 0;
\r
6104 stream_.state = STREAM_CLOSED;
\r
6108 void RtApiDs :: closeStream()
\r
6110 if ( stream_.state == STREAM_CLOSED ) {
\r
6111 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
6112 error( RtAudioError::WARNING );
\r
6116 // Stop the callback thread.
\r
6117 stream_.callbackInfo.isRunning = false;
\r
6118 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
6119 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
6121 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6123 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6124 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6125 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6128 buffer->Release();
\r
6130 object->Release();
\r
6132 if ( handle->buffer[1] ) {
\r
6133 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6134 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6137 buffer->Release();
\r
6139 object->Release();
\r
6141 CloseHandle( handle->condition );
\r
6143 stream_.apiHandle = 0;
\r
6146 for ( int i=0; i<2; i++ ) {
\r
6147 if ( stream_.userBuffer[i] ) {
\r
6148 free( stream_.userBuffer[i] );
\r
6149 stream_.userBuffer[i] = 0;
\r
6153 if ( stream_.deviceBuffer ) {
\r
6154 free( stream_.deviceBuffer );
\r
6155 stream_.deviceBuffer = 0;
\r
6158 stream_.mode = UNINITIALIZED;
\r
6159 stream_.state = STREAM_CLOSED;
\r
6162 void RtApiDs :: startStream()
\r
6165 if ( stream_.state == STREAM_RUNNING ) {
\r
6166 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6167 error( RtAudioError::WARNING );
\r
6171 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6173 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6174 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6175 // this is already in effect.
\r
6176 timeBeginPeriod( 1 );
\r
6178 buffersRolling = false;
\r
6179 duplexPrerollBytes = 0;
\r
6181 if ( stream_.mode == DUPLEX ) {
\r
6182 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6183 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6186 HRESULT result = 0;
\r
6187 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6189 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6190 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6191 if ( FAILED( result ) ) {
\r
6192 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6193 errorText_ = errorStream_.str();
\r
6198 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6200 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6201 result = buffer->Start( DSCBSTART_LOOPING );
\r
6202 if ( FAILED( result ) ) {
\r
6203 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6204 errorText_ = errorStream_.str();
\r
6209 handle->drainCounter = 0;
\r
6210 handle->internalDrain = false;
\r
6211 ResetEvent( handle->condition );
\r
6212 stream_.state = STREAM_RUNNING;
\r
6215 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6218 void RtApiDs :: stopStream()
\r
6221 if ( stream_.state == STREAM_STOPPED ) {
\r
6222 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6223 error( RtAudioError::WARNING );
\r
6227 HRESULT result = 0;
\r
6230 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6231 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6232 if ( handle->drainCounter == 0 ) {
\r
6233 handle->drainCounter = 2;
\r
6234 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6237 stream_.state = STREAM_STOPPED;
\r
6239 MUTEX_LOCK( &stream_.mutex );
\r
6241 // Stop the buffer and clear memory
\r
6242 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6243 result = buffer->Stop();
\r
6244 if ( FAILED( result ) ) {
\r
6245 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6246 errorText_ = errorStream_.str();
\r
6250 // Lock the buffer and clear it so that if we start to play again,
\r
6251 // we won't have old data playing.
\r
6252 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6253 if ( FAILED( result ) ) {
\r
6254 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6255 errorText_ = errorStream_.str();
\r
6259 // Zero the DS buffer
\r
6260 ZeroMemory( audioPtr, dataLen );
\r
6262 // Unlock the DS buffer
\r
6263 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6264 if ( FAILED( result ) ) {
\r
6265 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6266 errorText_ = errorStream_.str();
\r
6270 // If we start playing again, we must begin at beginning of buffer.
\r
6271 handle->bufferPointer[0] = 0;
\r
6274 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6275 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6279 stream_.state = STREAM_STOPPED;
\r
6281 if ( stream_.mode != DUPLEX )
\r
6282 MUTEX_LOCK( &stream_.mutex );
\r
6284 result = buffer->Stop();
\r
6285 if ( FAILED( result ) ) {
\r
6286 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6287 errorText_ = errorStream_.str();
\r
6291 // Lock the buffer and clear it so that if we start to play again,
\r
6292 // we won't have old data playing.
\r
6293 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6294 if ( FAILED( result ) ) {
\r
6295 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6296 errorText_ = errorStream_.str();
\r
6300 // Zero the DS buffer
\r
6301 ZeroMemory( audioPtr, dataLen );
\r
6303 // Unlock the DS buffer
\r
6304 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6305 if ( FAILED( result ) ) {
\r
6306 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6307 errorText_ = errorStream_.str();
\r
6311 // If we start recording again, we must begin at beginning of buffer.
\r
6312 handle->bufferPointer[1] = 0;
\r
6316 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6317 MUTEX_UNLOCK( &stream_.mutex );
\r
6319 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6322 void RtApiDs :: abortStream()
\r
6325 if ( stream_.state == STREAM_STOPPED ) {
\r
6326 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6327 error( RtAudioError::WARNING );
\r
6331 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6332 handle->drainCounter = 2;
\r
6337 void RtApiDs :: callbackEvent()
\r
6339 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6340 Sleep( 50 ); // sleep 50 milliseconds
\r
6344 if ( stream_.state == STREAM_CLOSED ) {
\r
6345 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6346 error( RtAudioError::WARNING );
\r
6350 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6351 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6353 // Check if we were draining the stream and signal is finished.
\r
6354 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6356 stream_.state = STREAM_STOPPING;
\r
6357 if ( handle->internalDrain == false )
\r
6358 SetEvent( handle->condition );
\r
6364 // Invoke user callback to get fresh output data UNLESS we are
\r
6365 // draining stream.
\r
6366 if ( handle->drainCounter == 0 ) {
\r
6367 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6368 double streamTime = getStreamTime();
\r
6369 RtAudioStreamStatus status = 0;
\r
6370 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6371 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6372 handle->xrun[0] = false;
\r
6374 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6375 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6376 handle->xrun[1] = false;
\r
6378 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6379 stream_.bufferSize, streamTime, status, info->userData );
\r
6380 if ( cbReturnValue == 2 ) {
\r
6381 stream_.state = STREAM_STOPPING;
\r
6382 handle->drainCounter = 2;
\r
6386 else if ( cbReturnValue == 1 ) {
\r
6387 handle->drainCounter = 1;
\r
6388 handle->internalDrain = true;
\r
6393 DWORD currentWritePointer, safeWritePointer;
\r
6394 DWORD currentReadPointer, safeReadPointer;
\r
6395 UINT nextWritePointer;
\r
6397 LPVOID buffer1 = NULL;
\r
6398 LPVOID buffer2 = NULL;
\r
6399 DWORD bufferSize1 = 0;
\r
6400 DWORD bufferSize2 = 0;
\r
6405 MUTEX_LOCK( &stream_.mutex );
\r
6406 if ( stream_.state == STREAM_STOPPED ) {
\r
6407 MUTEX_UNLOCK( &stream_.mutex );
\r
6411 if ( buffersRolling == false ) {
\r
6412 if ( stream_.mode == DUPLEX ) {
\r
6413 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6415 // It takes a while for the devices to get rolling. As a result,
\r
6416 // there's no guarantee that the capture and write device pointers
\r
6417 // will move in lockstep. Wait here for both devices to start
\r
6418 // rolling, and then set our buffer pointers accordingly.
\r
6419 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6420 // bytes later than the write buffer.
\r
6422 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6423 // take place between the two GetCurrentPosition calls... but I'm
\r
6424 // really not sure how to solve the problem. Temporarily boost to
\r
6425 // Realtime priority, maybe; but I'm not sure what priority the
\r
6426 // DirectSound service threads run at. We *should* be roughly
\r
6427 // within a ms or so of correct.
\r
6429 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6430 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6432 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6434 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6435 if ( FAILED( result ) ) {
\r
6436 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6437 errorText_ = errorStream_.str();
\r
6438 MUTEX_UNLOCK( &stream_.mutex );
\r
6439 error( RtAudioError::SYSTEM_ERROR );
\r
6442 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6443 if ( FAILED( result ) ) {
\r
6444 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6445 errorText_ = errorStream_.str();
\r
6446 MUTEX_UNLOCK( &stream_.mutex );
\r
6447 error( RtAudioError::SYSTEM_ERROR );
\r
6451 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6452 if ( FAILED( result ) ) {
\r
6453 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6454 errorText_ = errorStream_.str();
\r
6455 MUTEX_UNLOCK( &stream_.mutex );
\r
6456 error( RtAudioError::SYSTEM_ERROR );
\r
6459 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6460 if ( FAILED( result ) ) {
\r
6461 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6462 errorText_ = errorStream_.str();
\r
6463 MUTEX_UNLOCK( &stream_.mutex );
\r
6464 error( RtAudioError::SYSTEM_ERROR );
\r
6467 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6471 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6473 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6474 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6475 handle->bufferPointer[1] = safeReadPointer;
\r
6477 else if ( stream_.mode == OUTPUT ) {
\r
6479 // Set the proper nextWritePosition after initial startup.
\r
6480 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6481 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6482 if ( FAILED( result ) ) {
\r
6483 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6484 errorText_ = errorStream_.str();
\r
6485 MUTEX_UNLOCK( &stream_.mutex );
\r
6486 error( RtAudioError::SYSTEM_ERROR );
\r
6489 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6490 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6493 buffersRolling = true;
\r
6496 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6498 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6500 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6501 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6502 bufferBytes *= formatBytes( stream_.userFormat );
\r
6503 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6506 // Setup parameters and do buffer conversion if necessary.
\r
6507 if ( stream_.doConvertBuffer[0] ) {
\r
6508 buffer = stream_.deviceBuffer;
\r
6509 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6510 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6511 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6514 buffer = stream_.userBuffer[0];
\r
6515 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6516 bufferBytes *= formatBytes( stream_.userFormat );
\r
6519 // No byte swapping necessary in DirectSound implementation.
\r
6521 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6522 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6524 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6525 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6527 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6528 nextWritePointer = handle->bufferPointer[0];
\r
6530 DWORD endWrite, leadPointer;
\r
6532 // Find out where the read and "safe write" pointers are.
\r
6533 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6534 if ( FAILED( result ) ) {
\r
6535 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6536 errorText_ = errorStream_.str();
\r
6537 MUTEX_UNLOCK( &stream_.mutex );
\r
6538 error( RtAudioError::SYSTEM_ERROR );
\r
6542 // We will copy our output buffer into the region between
\r
6543 // safeWritePointer and leadPointer. If leadPointer is not
\r
6544 // beyond the next endWrite position, wait until it is.
\r
6545 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6546 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6547 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6548 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6549 endWrite = nextWritePointer + bufferBytes;
\r
6551 // Check whether the entire write region is behind the play pointer.
\r
6552 if ( leadPointer >= endWrite ) break;
\r
6554 // If we are here, then we must wait until the leadPointer advances
\r
6555 // beyond the end of our next write region. We use the
\r
6556 // Sleep() function to suspend operation until that happens.
\r
6557 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6558 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6559 if ( millis < 1.0 ) millis = 1.0;
\r
6560 Sleep( (DWORD) millis );
\r
6563 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6564 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6565 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6566 handle->xrun[0] = true;
\r
6567 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6568 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6569 handle->bufferPointer[0] = nextWritePointer;
\r
6570 endWrite = nextWritePointer + bufferBytes;
\r
6573 // Lock free space in the buffer
\r
6574 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6575 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6576 if ( FAILED( result ) ) {
\r
6577 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6578 errorText_ = errorStream_.str();
\r
6579 MUTEX_UNLOCK( &stream_.mutex );
\r
6580 error( RtAudioError::SYSTEM_ERROR );
\r
6584 // Copy our buffer into the DS buffer
\r
6585 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6586 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6588 // Update our buffer offset and unlock sound buffer
\r
6589 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6590 if ( FAILED( result ) ) {
\r
6591 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6592 errorText_ = errorStream_.str();
\r
6593 MUTEX_UNLOCK( &stream_.mutex );
\r
6594 error( RtAudioError::SYSTEM_ERROR );
\r
6597 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6598 handle->bufferPointer[0] = nextWritePointer;
\r
6601 // Don't bother draining input
\r
6602 if ( handle->drainCounter ) {
\r
6603 handle->drainCounter++;
\r
6607 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6609 // Setup parameters.
\r
6610 if ( stream_.doConvertBuffer[1] ) {
\r
6611 buffer = stream_.deviceBuffer;
\r
6612 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6613 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6616 buffer = stream_.userBuffer[1];
\r
6617 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6618 bufferBytes *= formatBytes( stream_.userFormat );
\r
6621 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6622 long nextReadPointer = handle->bufferPointer[1];
\r
6623 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6625 // Find out where the write and "safe read" pointers are.
\r
6626 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6627 if ( FAILED( result ) ) {
\r
6628 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6629 errorText_ = errorStream_.str();
\r
6630 MUTEX_UNLOCK( &stream_.mutex );
\r
6631 error( RtAudioError::SYSTEM_ERROR );
\r
6635 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6636 DWORD endRead = nextReadPointer + bufferBytes;
\r
6638 // Handling depends on whether we are INPUT or DUPLEX.
\r
6639 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6640 // then a wait here will drag the write pointers into the forbidden zone.
\r
6642 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6643 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6644 // practical way to sync up the read and write pointers reliably, given the
\r
6645 // the very complex relationship between phase and increment of the read and write
\r
6648 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6649 // provide a pre-roll period of 0.5 seconds in which we return
\r
6650 // zeros from the read buffer while the pointers sync up.
\r
6652 if ( stream_.mode == DUPLEX ) {
\r
6653 if ( safeReadPointer < endRead ) {
\r
6654 if ( duplexPrerollBytes <= 0 ) {
\r
6655 // Pre-roll time over. Be more agressive.
\r
6656 int adjustment = endRead-safeReadPointer;
\r
6658 handle->xrun[1] = true;
\r
6660 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6661 // and perform fine adjustments later.
\r
6662 // - small adjustments: back off by twice as much.
\r
6663 if ( adjustment >= 2*bufferBytes )
\r
6664 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6666 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6668 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6672 // In pre=roll time. Just do it.
\r
6673 nextReadPointer = safeReadPointer - bufferBytes;
\r
6674 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6676 endRead = nextReadPointer + bufferBytes;
\r
6679 else { // mode == INPUT
\r
6680 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6681 // See comments for playback.
\r
6682 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6683 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6684 if ( millis < 1.0 ) millis = 1.0;
\r
6685 Sleep( (DWORD) millis );
\r
6687 // Wake up and find out where we are now.
\r
6688 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6689 if ( FAILED( result ) ) {
\r
6690 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6691 errorText_ = errorStream_.str();
\r
6692 MUTEX_UNLOCK( &stream_.mutex );
\r
6693 error( RtAudioError::SYSTEM_ERROR );
\r
6697 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6701 // Lock free space in the buffer
\r
6702 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6703 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6704 if ( FAILED( result ) ) {
\r
6705 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6706 errorText_ = errorStream_.str();
\r
6707 MUTEX_UNLOCK( &stream_.mutex );
\r
6708 error( RtAudioError::SYSTEM_ERROR );
\r
6712 if ( duplexPrerollBytes <= 0 ) {
\r
6713 // Copy our buffer into the DS buffer
\r
6714 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6715 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6718 memset( buffer, 0, bufferSize1 );
\r
6719 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6720 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6723 // Update our buffer offset and unlock sound buffer
\r
6724 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6725 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6726 if ( FAILED( result ) ) {
\r
6727 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6728 errorText_ = errorStream_.str();
\r
6729 MUTEX_UNLOCK( &stream_.mutex );
\r
6730 error( RtAudioError::SYSTEM_ERROR );
\r
6733 handle->bufferPointer[1] = nextReadPointer;
\r
6735 // No byte swapping necessary in DirectSound implementation.
\r
6737 // If necessary, convert 8-bit data from unsigned to signed.
\r
6738 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6739 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6741 // Do buffer conversion if necessary.
\r
6742 if ( stream_.doConvertBuffer[1] )
\r
6743 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6747 MUTEX_UNLOCK( &stream_.mutex );
\r
6748 RtApi::tickStreamTime();
\r
6751 // Definitions for utility functions and callbacks
\r
6752 // specific to the DirectSound implementation.
\r
6754 static unsigned __stdcall callbackHandler( void *ptr )
\r
6756 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6757 RtApiDs *object = (RtApiDs *) info->object;
\r
6758 bool* isRunning = &info->isRunning;
\r
6760 while ( *isRunning == true ) {
\r
6761 object->callbackEvent();
\r
6764 _endthreadex( 0 );
\r
6768 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6769 LPCTSTR description,
\r
6770 LPCTSTR /*module*/,
\r
6771 LPVOID lpContext )
\r
6773 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6774 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6777 bool validDevice = false;
\r
6778 if ( probeInfo.isInput == true ) {
\r
6780 LPDIRECTSOUNDCAPTURE object;
\r
6782 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6783 if ( hr != DS_OK ) return TRUE;
\r
6785 caps.dwSize = sizeof(caps);
\r
6786 hr = object->GetCaps( &caps );
\r
6787 if ( hr == DS_OK ) {
\r
6788 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6789 validDevice = true;
\r
6791 object->Release();
\r
6795 LPDIRECTSOUND object;
\r
6796 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6797 if ( hr != DS_OK ) return TRUE;
\r
6799 caps.dwSize = sizeof(caps);
\r
6800 hr = object->GetCaps( &caps );
\r
6801 if ( hr == DS_OK ) {
\r
6802 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6803 validDevice = true;
\r
6805 object->Release();
\r
6808 // If good device, then save its name and guid.
\r
6809 std::string name = convertCharPointerToStdString( description );
\r
6810 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6811 if ( lpguid == NULL )
\r
6812 name = "Default Device";
\r
6813 if ( validDevice ) {
\r
6814 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6815 if ( dsDevices[i].name == name ) {
\r
6816 dsDevices[i].found = true;
\r
6817 if ( probeInfo.isInput ) {
\r
6818 dsDevices[i].id[1] = lpguid;
\r
6819 dsDevices[i].validId[1] = true;
\r
6822 dsDevices[i].id[0] = lpguid;
\r
6823 dsDevices[i].validId[0] = true;
\r
6830 device.name = name;
\r
6831 device.found = true;
\r
6832 if ( probeInfo.isInput ) {
\r
6833 device.id[1] = lpguid;
\r
6834 device.validId[1] = true;
\r
6837 device.id[0] = lpguid;
\r
6838 device.validId[0] = true;
\r
6840 dsDevices.push_back( device );
\r
6846 static const char* getErrorString( int code )
\r
6850 case DSERR_ALLOCATED:
\r
6851 return "Already allocated";
\r
6853 case DSERR_CONTROLUNAVAIL:
\r
6854 return "Control unavailable";
\r
6856 case DSERR_INVALIDPARAM:
\r
6857 return "Invalid parameter";
\r
6859 case DSERR_INVALIDCALL:
\r
6860 return "Invalid call";
\r
6862 case DSERR_GENERIC:
\r
6863 return "Generic error";
\r
6865 case DSERR_PRIOLEVELNEEDED:
\r
6866 return "Priority level needed";
\r
6868 case DSERR_OUTOFMEMORY:
\r
6869 return "Out of memory";
\r
6871 case DSERR_BADFORMAT:
\r
6872 return "The sample rate or the channel format is not supported";
\r
6874 case DSERR_UNSUPPORTED:
\r
6875 return "Not supported";
\r
6877 case DSERR_NODRIVER:
\r
6878 return "No driver";
\r
6880 case DSERR_ALREADYINITIALIZED:
\r
6881 return "Already initialized";
\r
6883 case DSERR_NOAGGREGATION:
\r
6884 return "No aggregation";
\r
6886 case DSERR_BUFFERLOST:
\r
6887 return "Buffer lost";
\r
6889 case DSERR_OTHERAPPHASPRIO:
\r
6890 return "Another application already has priority";
\r
6892 case DSERR_UNINITIALIZED:
\r
6893 return "Uninitialized";
\r
6896 return "DirectSound unknown error";
\r
6899 //******************** End of __WINDOWS_DS__ *********************//
\r
6903 #if defined(__LINUX_ALSA__)
\r
6905 #include <alsa/asoundlib.h>
\r
6906 #include <unistd.h>
\r
6908 // A structure to hold various information related to the ALSA API
\r
6909 // implementation.
\r
6910 struct AlsaHandle {
\r
6911 snd_pcm_t *handles[2];
\r
6912 bool synchronized;
\r
6914 pthread_cond_t runnable_cv;
\r
6918 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6921 static void *alsaCallbackHandler( void * ptr );
\r
6923 RtApiAlsa :: RtApiAlsa()
\r
6925 // Nothing to do here.
\r
6928 RtApiAlsa :: ~RtApiAlsa()
\r
6930 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6933 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6935 unsigned nDevices = 0;
\r
6936 int result, subdevice, card;
\r
6938 snd_ctl_t *handle;
\r
6940 // Count cards and devices
\r
6942 snd_card_next( &card );
\r
6943 while ( card >= 0 ) {
\r
6944 sprintf( name, "hw:%d", card );
\r
6945 result = snd_ctl_open( &handle, name, 0 );
\r
6946 if ( result < 0 ) {
\r
6947 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6948 errorText_ = errorStream_.str();
\r
6949 error( RtAudioError::WARNING );
\r
6954 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6955 if ( result < 0 ) {
\r
6956 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6957 errorText_ = errorStream_.str();
\r
6958 error( RtAudioError::WARNING );
\r
6961 if ( subdevice < 0 )
\r
6966 snd_ctl_close( handle );
\r
6967 snd_card_next( &card );
\r
6970 result = snd_ctl_open( &handle, "default", 0 );
\r
6971 if (result == 0) {
\r
6973 snd_ctl_close( handle );
\r
6979 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6981 RtAudio::DeviceInfo info;
\r
6982 info.probed = false;
\r
6984 unsigned nDevices = 0;
\r
6985 int result, subdevice, card;
\r
6987 snd_ctl_t *chandle;
\r
6989 // Count cards and devices
\r
6992 snd_card_next( &card );
\r
6993 while ( card >= 0 ) {
\r
6994 sprintf( name, "hw:%d", card );
\r
6995 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6996 if ( result < 0 ) {
\r
6997 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6998 errorText_ = errorStream_.str();
\r
6999 error( RtAudioError::WARNING );
\r
7004 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7005 if ( result < 0 ) {
\r
7006 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7007 errorText_ = errorStream_.str();
\r
7008 error( RtAudioError::WARNING );
\r
7011 if ( subdevice < 0 ) break;
\r
7012 if ( nDevices == device ) {
\r
7013 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7019 snd_ctl_close( chandle );
\r
7020 snd_card_next( &card );
\r
7023 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7024 if ( result == 0 ) {
\r
7025 if ( nDevices == device ) {
\r
7026 strcpy( name, "default" );
\r
7032 if ( nDevices == 0 ) {
\r
7033 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
7034 error( RtAudioError::INVALID_USE );
\r
7038 if ( device >= nDevices ) {
\r
7039 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
7040 error( RtAudioError::INVALID_USE );
\r
7046 // If a stream is already open, we cannot probe the stream devices.
\r
7047 // Thus, use the saved results.
\r
7048 if ( stream_.state != STREAM_CLOSED &&
\r
7049 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
7050 snd_ctl_close( chandle );
\r
7051 if ( device >= devices_.size() ) {
\r
7052 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
7053 error( RtAudioError::WARNING );
\r
7056 return devices_[ device ];
\r
7059 int openMode = SND_PCM_ASYNC;
\r
7060 snd_pcm_stream_t stream;
\r
7061 snd_pcm_info_t *pcminfo;
\r
7062 snd_pcm_info_alloca( &pcminfo );
\r
7063 snd_pcm_t *phandle;
\r
7064 snd_pcm_hw_params_t *params;
\r
7065 snd_pcm_hw_params_alloca( ¶ms );
\r
7067 // First try for playback unless default device (which has subdev -1)
\r
7068 stream = SND_PCM_STREAM_PLAYBACK;
\r
7069 snd_pcm_info_set_stream( pcminfo, stream );
\r
7070 if ( subdevice != -1 ) {
\r
7071 snd_pcm_info_set_device( pcminfo, subdevice );
\r
7072 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
7074 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7075 if ( result < 0 ) {
\r
7076 // Device probably doesn't support playback.
\r
7077 goto captureProbe;
\r
7081 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
7082 if ( result < 0 ) {
\r
7083 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7084 errorText_ = errorStream_.str();
\r
7085 error( RtAudioError::WARNING );
\r
7086 goto captureProbe;
\r
7089 // The device is open ... fill the parameter structure.
\r
7090 result = snd_pcm_hw_params_any( phandle, params );
\r
7091 if ( result < 0 ) {
\r
7092 snd_pcm_close( phandle );
\r
7093 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7094 errorText_ = errorStream_.str();
\r
7095 error( RtAudioError::WARNING );
\r
7096 goto captureProbe;
\r
7099 // Get output channel information.
\r
7100 unsigned int value;
\r
7101 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7102 if ( result < 0 ) {
\r
7103 snd_pcm_close( phandle );
\r
7104 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7105 errorText_ = errorStream_.str();
\r
7106 error( RtAudioError::WARNING );
\r
7107 goto captureProbe;
\r
7109 info.outputChannels = value;
\r
7110 snd_pcm_close( phandle );
\r
7113 stream = SND_PCM_STREAM_CAPTURE;
\r
7114 snd_pcm_info_set_stream( pcminfo, stream );
\r
7116 // Now try for capture unless default device (with subdev = -1)
\r
7117 if ( subdevice != -1 ) {
\r
7118 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7119 snd_ctl_close( chandle );
\r
7120 if ( result < 0 ) {
\r
7121 // Device probably doesn't support capture.
\r
7122 if ( info.outputChannels == 0 ) return info;
\r
7123 goto probeParameters;
\r
7127 snd_ctl_close( chandle );
\r
7129 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7130 if ( result < 0 ) {
\r
7131 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7132 errorText_ = errorStream_.str();
\r
7133 error( RtAudioError::WARNING );
\r
7134 if ( info.outputChannels == 0 ) return info;
\r
7135 goto probeParameters;
\r
7138 // The device is open ... fill the parameter structure.
\r
7139 result = snd_pcm_hw_params_any( phandle, params );
\r
7140 if ( result < 0 ) {
\r
7141 snd_pcm_close( phandle );
\r
7142 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7143 errorText_ = errorStream_.str();
\r
7144 error( RtAudioError::WARNING );
\r
7145 if ( info.outputChannels == 0 ) return info;
\r
7146 goto probeParameters;
\r
7149 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7150 if ( result < 0 ) {
\r
7151 snd_pcm_close( phandle );
\r
7152 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7153 errorText_ = errorStream_.str();
\r
7154 error( RtAudioError::WARNING );
\r
7155 if ( info.outputChannels == 0 ) return info;
\r
7156 goto probeParameters;
\r
7158 info.inputChannels = value;
\r
7159 snd_pcm_close( phandle );
\r
7161 // If device opens for both playback and capture, we determine the channels.
\r
7162 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7163 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7165 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7166 if ( device == 0 && info.outputChannels > 0 )
\r
7167 info.isDefaultOutput = true;
\r
7168 if ( device == 0 && info.inputChannels > 0 )
\r
7169 info.isDefaultInput = true;
\r
7172 // At this point, we just need to figure out the supported data
\r
7173 // formats and sample rates. We'll proceed by opening the device in
\r
7174 // the direction with the maximum number of channels, or playback if
\r
7175 // they are equal. This might limit our sample rate options, but so
\r
7178 if ( info.outputChannels >= info.inputChannels )
\r
7179 stream = SND_PCM_STREAM_PLAYBACK;
\r
7181 stream = SND_PCM_STREAM_CAPTURE;
\r
7182 snd_pcm_info_set_stream( pcminfo, stream );
\r
7184 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7185 if ( result < 0 ) {
\r
7186 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7187 errorText_ = errorStream_.str();
\r
7188 error( RtAudioError::WARNING );
\r
7192 // The device is open ... fill the parameter structure.
\r
7193 result = snd_pcm_hw_params_any( phandle, params );
\r
7194 if ( result < 0 ) {
\r
7195 snd_pcm_close( phandle );
\r
7196 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7197 errorText_ = errorStream_.str();
\r
7198 error( RtAudioError::WARNING );
\r
7202 // Test our discrete set of sample rate values.
\r
7203 info.sampleRates.clear();
\r
7204 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7205 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7206 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7208 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7209 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7212 if ( info.sampleRates.size() == 0 ) {
\r
7213 snd_pcm_close( phandle );
\r
7214 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7215 errorText_ = errorStream_.str();
\r
7216 error( RtAudioError::WARNING );
\r
7220 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7221 snd_pcm_format_t format;
\r
7222 info.nativeFormats = 0;
\r
7223 format = SND_PCM_FORMAT_S8;
\r
7224 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7225 info.nativeFormats |= RTAUDIO_SINT8;
\r
7226 format = SND_PCM_FORMAT_S16;
\r
7227 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7228 info.nativeFormats |= RTAUDIO_SINT16;
\r
7229 format = SND_PCM_FORMAT_S24;
\r
7230 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7231 info.nativeFormats |= RTAUDIO_SINT24;
\r
7232 format = SND_PCM_FORMAT_S32;
\r
7233 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7234 info.nativeFormats |= RTAUDIO_SINT32;
\r
7235 format = SND_PCM_FORMAT_FLOAT;
\r
7236 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7237 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7238 format = SND_PCM_FORMAT_FLOAT64;
\r
7239 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7240 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7242 // Check that we have at least one supported format
\r
7243 if ( info.nativeFormats == 0 ) {
\r
7244 snd_pcm_close( phandle );
\r
7245 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7246 errorText_ = errorStream_.str();
\r
7247 error( RtAudioError::WARNING );
\r
7251 // Get the device name
\r
7253 result = snd_card_get_name( card, &cardname );
\r
7254 if ( result >= 0 ) {
\r
7255 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7260 // That's all ... close the device and return
\r
7261 snd_pcm_close( phandle );
\r
7262 info.probed = true;
\r
7266 void RtApiAlsa :: saveDeviceInfo( void )
\r
7270 unsigned int nDevices = getDeviceCount();
\r
7271 devices_.resize( nDevices );
\r
7272 for ( unsigned int i=0; i<nDevices; i++ )
\r
7273 devices_[i] = getDeviceInfo( i );
\r
7276 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7277 unsigned int firstChannel, unsigned int sampleRate,
\r
7278 RtAudioFormat format, unsigned int *bufferSize,
\r
7279 RtAudio::StreamOptions *options )
\r
7282 #if defined(__RTAUDIO_DEBUG__)
\r
7283 snd_output_t *out;
\r
7284 snd_output_stdio_attach(&out, stderr, 0);
\r
7287 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7289 unsigned nDevices = 0;
\r
7290 int result, subdevice, card;
\r
7292 snd_ctl_t *chandle;
\r
7294 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7295 snprintf(name, sizeof(name), "%s", "default");
\r
7297 // Count cards and devices
\r
7299 snd_card_next( &card );
\r
7300 while ( card >= 0 ) {
\r
7301 sprintf( name, "hw:%d", card );
\r
7302 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7303 if ( result < 0 ) {
\r
7304 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7305 errorText_ = errorStream_.str();
\r
7310 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7311 if ( result < 0 ) break;
\r
7312 if ( subdevice < 0 ) break;
\r
7313 if ( nDevices == device ) {
\r
7314 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7315 snd_ctl_close( chandle );
\r
7320 snd_ctl_close( chandle );
\r
7321 snd_card_next( &card );
\r
7324 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7325 if ( result == 0 ) {
\r
7326 if ( nDevices == device ) {
\r
7327 strcpy( name, "default" );
\r
7333 if ( nDevices == 0 ) {
\r
7334 // This should not happen because a check is made before this function is called.
\r
7335 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7339 if ( device >= nDevices ) {
\r
7340 // This should not happen because a check is made before this function is called.
\r
7341 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7348 // The getDeviceInfo() function will not work for a device that is
\r
7349 // already open. Thus, we'll probe the system before opening a
\r
7350 // stream and save the results for use by getDeviceInfo().
\r
7351 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7352 this->saveDeviceInfo();
\r
7354 snd_pcm_stream_t stream;
\r
7355 if ( mode == OUTPUT )
\r
7356 stream = SND_PCM_STREAM_PLAYBACK;
\r
7358 stream = SND_PCM_STREAM_CAPTURE;
\r
7360 snd_pcm_t *phandle;
\r
7361 int openMode = SND_PCM_ASYNC;
\r
7362 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7363 if ( result < 0 ) {
\r
7364 if ( mode == OUTPUT )
\r
7365 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7367 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7368 errorText_ = errorStream_.str();
\r
7372 // Fill the parameter structure.
\r
7373 snd_pcm_hw_params_t *hw_params;
\r
7374 snd_pcm_hw_params_alloca( &hw_params );
\r
7375 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7376 if ( result < 0 ) {
\r
7377 snd_pcm_close( phandle );
\r
7378 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7379 errorText_ = errorStream_.str();
\r
7383 #if defined(__RTAUDIO_DEBUG__)
\r
7384 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7385 snd_pcm_hw_params_dump( hw_params, out );
\r
7388 // Set access ... check user preference.
\r
7389 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7390 stream_.userInterleaved = false;
\r
7391 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7392 if ( result < 0 ) {
\r
7393 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7394 stream_.deviceInterleaved[mode] = true;
\r
7397 stream_.deviceInterleaved[mode] = false;
\r
7400 stream_.userInterleaved = true;
\r
7401 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7402 if ( result < 0 ) {
\r
7403 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7404 stream_.deviceInterleaved[mode] = false;
\r
7407 stream_.deviceInterleaved[mode] = true;
\r
7410 if ( result < 0 ) {
\r
7411 snd_pcm_close( phandle );
\r
7412 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7413 errorText_ = errorStream_.str();
\r
7417 // Determine how to set the device format.
\r
7418 stream_.userFormat = format;
\r
7419 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7421 if ( format == RTAUDIO_SINT8 )
\r
7422 deviceFormat = SND_PCM_FORMAT_S8;
\r
7423 else if ( format == RTAUDIO_SINT16 )
\r
7424 deviceFormat = SND_PCM_FORMAT_S16;
\r
7425 else if ( format == RTAUDIO_SINT24 )
\r
7426 deviceFormat = SND_PCM_FORMAT_S24;
\r
7427 else if ( format == RTAUDIO_SINT32 )
\r
7428 deviceFormat = SND_PCM_FORMAT_S32;
\r
7429 else if ( format == RTAUDIO_FLOAT32 )
\r
7430 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7431 else if ( format == RTAUDIO_FLOAT64 )
\r
7432 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7434 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7435 stream_.deviceFormat[mode] = format;
\r
7439 // The user requested format is not natively supported by the device.
\r
7440 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7441 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7442 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7446 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7447 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7448 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7452 deviceFormat = SND_PCM_FORMAT_S32;
\r
7453 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7454 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7458 deviceFormat = SND_PCM_FORMAT_S24;
\r
7459 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7460 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7464 deviceFormat = SND_PCM_FORMAT_S16;
\r
7465 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7466 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7470 deviceFormat = SND_PCM_FORMAT_S8;
\r
7471 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7472 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7476 // If we get here, no supported format was found.
\r
7477 snd_pcm_close( phandle );
\r
7478 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7479 errorText_ = errorStream_.str();
\r
7483 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7484 if ( result < 0 ) {
\r
7485 snd_pcm_close( phandle );
\r
7486 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7487 errorText_ = errorStream_.str();
\r
7491 // Determine whether byte-swaping is necessary.
\r
7492 stream_.doByteSwap[mode] = false;
\r
7493 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7494 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7495 if ( result == 0 )
\r
7496 stream_.doByteSwap[mode] = true;
\r
7497 else if (result < 0) {
\r
7498 snd_pcm_close( phandle );
\r
7499 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7500 errorText_ = errorStream_.str();
\r
7505 // Set the sample rate.
\r
7506 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7507 if ( result < 0 ) {
\r
7508 snd_pcm_close( phandle );
\r
7509 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7510 errorText_ = errorStream_.str();
\r
7514 // Determine the number of channels for this device. We support a possible
\r
7515 // minimum device channel number > than the value requested by the user.
\r
7516 stream_.nUserChannels[mode] = channels;
\r
7517 unsigned int value;
\r
7518 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7519 unsigned int deviceChannels = value;
\r
7520 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7521 snd_pcm_close( phandle );
\r
7522 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7523 errorText_ = errorStream_.str();
\r
7527 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7528 if ( result < 0 ) {
\r
7529 snd_pcm_close( phandle );
\r
7530 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7531 errorText_ = errorStream_.str();
\r
7534 deviceChannels = value;
\r
7535 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7536 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7538 // Set the device channels.
\r
7539 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7540 if ( result < 0 ) {
\r
7541 snd_pcm_close( phandle );
\r
7542 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7543 errorText_ = errorStream_.str();
\r
7547 // Set the buffer (or period) size.
\r
7549 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7550 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7551 if ( result < 0 ) {
\r
7552 snd_pcm_close( phandle );
\r
7553 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7554 errorText_ = errorStream_.str();
\r
7557 *bufferSize = periodSize;
\r
7559 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7560 unsigned int periods = 0;
\r
7561 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7562 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7563 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7564 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7565 if ( result < 0 ) {
\r
7566 snd_pcm_close( phandle );
\r
7567 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7568 errorText_ = errorStream_.str();
\r
7572 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7573 // MUST be the same in both directions!
\r
7574 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7575 snd_pcm_close( phandle );
\r
7576 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7577 errorText_ = errorStream_.str();
\r
7581 stream_.bufferSize = *bufferSize;
\r
7583 // Install the hardware configuration
\r
7584 result = snd_pcm_hw_params( phandle, hw_params );
\r
7585 if ( result < 0 ) {
\r
7586 snd_pcm_close( phandle );
\r
7587 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7588 errorText_ = errorStream_.str();
\r
7592 #if defined(__RTAUDIO_DEBUG__)
\r
7593 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7594 snd_pcm_hw_params_dump( hw_params, out );
\r
7597 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7598 snd_pcm_sw_params_t *sw_params = NULL;
\r
7599 snd_pcm_sw_params_alloca( &sw_params );
\r
7600 snd_pcm_sw_params_current( phandle, sw_params );
\r
7601 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7602 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7603 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7605 // The following two settings were suggested by Theo Veenker
\r
7606 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7607 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7609 // here are two options for a fix
\r
7610 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7611 snd_pcm_uframes_t val;
\r
7612 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7613 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7615 result = snd_pcm_sw_params( phandle, sw_params );
\r
7616 if ( result < 0 ) {
\r
7617 snd_pcm_close( phandle );
\r
7618 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7619 errorText_ = errorStream_.str();
\r
7623 #if defined(__RTAUDIO_DEBUG__)
\r
7624 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7625 snd_pcm_sw_params_dump( sw_params, out );
\r
7628 // Set flags for buffer conversion
\r
7629 stream_.doConvertBuffer[mode] = false;
\r
7630 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7631 stream_.doConvertBuffer[mode] = true;
\r
7632 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7633 stream_.doConvertBuffer[mode] = true;
\r
7634 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7635 stream_.nUserChannels[mode] > 1 )
\r
7636 stream_.doConvertBuffer[mode] = true;
\r
7638 // Allocate the ApiHandle if necessary and then save.
\r
7639 AlsaHandle *apiInfo = 0;
\r
7640 if ( stream_.apiHandle == 0 ) {
\r
7642 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7644 catch ( std::bad_alloc& ) {
\r
7645 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7649 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7650 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7654 stream_.apiHandle = (void *) apiInfo;
\r
7655 apiInfo->handles[0] = 0;
\r
7656 apiInfo->handles[1] = 0;
\r
7659 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7661 apiInfo->handles[mode] = phandle;
\r
7664 // Allocate necessary internal buffers.
\r
7665 unsigned long bufferBytes;
\r
7666 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7667 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7668 if ( stream_.userBuffer[mode] == NULL ) {
\r
7669 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7673 if ( stream_.doConvertBuffer[mode] ) {
\r
7675 bool makeBuffer = true;
\r
7676 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7677 if ( mode == INPUT ) {
\r
7678 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7679 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7680 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7684 if ( makeBuffer ) {
\r
7685 bufferBytes *= *bufferSize;
\r
7686 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7687 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7688 if ( stream_.deviceBuffer == NULL ) {
\r
7689 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7695 stream_.sampleRate = sampleRate;
\r
7696 stream_.nBuffers = periods;
\r
7697 stream_.device[mode] = device;
\r
7698 stream_.state = STREAM_STOPPED;
\r
7700 // Setup the buffer conversion information structure.
\r
7701 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7703 // Setup thread if necessary.
\r
7704 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7705 // We had already set up an output stream.
\r
7706 stream_.mode = DUPLEX;
\r
7707 // Link the streams if possible.
\r
7708 apiInfo->synchronized = false;
\r
7709 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7710 apiInfo->synchronized = true;
\r
7712 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7713 error( RtAudioError::WARNING );
\r
7717 stream_.mode = mode;
\r
7719 // Setup callback thread.
\r
7720 stream_.callbackInfo.object = (void *) this;
\r
7722 // Set the thread attributes for joinable and realtime scheduling
\r
7723 // priority (optional). The higher priority will only take affect
\r
7724 // if the program is run as root or suid. Note, under Linux
\r
7725 // processes with CAP_SYS_NICE privilege, a user can change
\r
7726 // scheduling policy and priority (thus need not be root). See
\r
7727 // POSIX "capabilities".
\r
7728 pthread_attr_t attr;
\r
7729 pthread_attr_init( &attr );
\r
7730 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7732 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7733 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7734 // We previously attempted to increase the audio callback priority
\r
7735 // to SCHED_RR here via the attributes. However, while no errors
\r
7736 // were reported in doing so, it did not work. So, now this is
\r
7737 // done in the alsaCallbackHandler function.
\r
7738 stream_.callbackInfo.doRealtime = true;
\r
7739 int priority = options->priority;
\r
7740 int min = sched_get_priority_min( SCHED_RR );
\r
7741 int max = sched_get_priority_max( SCHED_RR );
\r
7742 if ( priority < min ) priority = min;
\r
7743 else if ( priority > max ) priority = max;
\r
7744 stream_.callbackInfo.priority = priority;
\r
7748 stream_.callbackInfo.isRunning = true;
\r
7749 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7750 pthread_attr_destroy( &attr );
\r
7752 stream_.callbackInfo.isRunning = false;
\r
7753 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7762 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7763 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7764 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7766 stream_.apiHandle = 0;
\r
7769 if ( phandle) snd_pcm_close( phandle );
\r
7771 for ( int i=0; i<2; i++ ) {
\r
7772 if ( stream_.userBuffer[i] ) {
\r
7773 free( stream_.userBuffer[i] );
\r
7774 stream_.userBuffer[i] = 0;
\r
7778 if ( stream_.deviceBuffer ) {
\r
7779 free( stream_.deviceBuffer );
\r
7780 stream_.deviceBuffer = 0;
\r
7783 stream_.state = STREAM_CLOSED;
\r
7787 void RtApiAlsa :: closeStream()
\r
7789 if ( stream_.state == STREAM_CLOSED ) {
\r
7790 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7791 error( RtAudioError::WARNING );
\r
7795 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7796 stream_.callbackInfo.isRunning = false;
\r
7797 MUTEX_LOCK( &stream_.mutex );
\r
7798 if ( stream_.state == STREAM_STOPPED ) {
\r
7799 apiInfo->runnable = true;
\r
7800 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7802 MUTEX_UNLOCK( &stream_.mutex );
\r
7803 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7805 if ( stream_.state == STREAM_RUNNING ) {
\r
7806 stream_.state = STREAM_STOPPED;
\r
7807 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7808 snd_pcm_drop( apiInfo->handles[0] );
\r
7809 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7810 snd_pcm_drop( apiInfo->handles[1] );
\r
7814 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7815 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7816 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7818 stream_.apiHandle = 0;
\r
7821 for ( int i=0; i<2; i++ ) {
\r
7822 if ( stream_.userBuffer[i] ) {
\r
7823 free( stream_.userBuffer[i] );
\r
7824 stream_.userBuffer[i] = 0;
\r
7828 if ( stream_.deviceBuffer ) {
\r
7829 free( stream_.deviceBuffer );
\r
7830 stream_.deviceBuffer = 0;
\r
7833 stream_.mode = UNINITIALIZED;
\r
7834 stream_.state = STREAM_CLOSED;
\r
7837 void RtApiAlsa :: startStream()
\r
7839 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7842 if ( stream_.state == STREAM_RUNNING ) {
\r
7843 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7844 error( RtAudioError::WARNING );
\r
7848 MUTEX_LOCK( &stream_.mutex );
\r
7851 snd_pcm_state_t state;
\r
7852 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7853 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7854 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7855 state = snd_pcm_state( handle[0] );
\r
7856 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7857 result = snd_pcm_prepare( handle[0] );
\r
7858 if ( result < 0 ) {
\r
7859 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7860 errorText_ = errorStream_.str();
\r
7866 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7867 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7868 state = snd_pcm_state( handle[1] );
\r
7869 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7870 result = snd_pcm_prepare( handle[1] );
\r
7871 if ( result < 0 ) {
\r
7872 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7873 errorText_ = errorStream_.str();
\r
7879 stream_.state = STREAM_RUNNING;
\r
7882 apiInfo->runnable = true;
\r
7883 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7884 MUTEX_UNLOCK( &stream_.mutex );
\r
7886 if ( result >= 0 ) return;
\r
7887 error( RtAudioError::SYSTEM_ERROR );
\r
7890 void RtApiAlsa :: stopStream()
\r
7893 if ( stream_.state == STREAM_STOPPED ) {
\r
7894 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7895 error( RtAudioError::WARNING );
\r
7899 stream_.state = STREAM_STOPPED;
\r
7900 MUTEX_LOCK( &stream_.mutex );
\r
7903 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7904 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7905 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7906 if ( apiInfo->synchronized )
\r
7907 result = snd_pcm_drop( handle[0] );
\r
7909 result = snd_pcm_drain( handle[0] );
\r
7910 if ( result < 0 ) {
\r
7911 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7912 errorText_ = errorStream_.str();
\r
7917 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7918 result = snd_pcm_drop( handle[1] );
\r
7919 if ( result < 0 ) {
\r
7920 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7921 errorText_ = errorStream_.str();
\r
7927 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7928 MUTEX_UNLOCK( &stream_.mutex );
\r
7930 if ( result >= 0 ) return;
\r
7931 error( RtAudioError::SYSTEM_ERROR );
\r
7934 void RtApiAlsa :: abortStream()
\r
7937 if ( stream_.state == STREAM_STOPPED ) {
\r
7938 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7939 error( RtAudioError::WARNING );
\r
7943 stream_.state = STREAM_STOPPED;
\r
7944 MUTEX_LOCK( &stream_.mutex );
\r
7947 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7948 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7949 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7950 result = snd_pcm_drop( handle[0] );
\r
7951 if ( result < 0 ) {
\r
7952 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7953 errorText_ = errorStream_.str();
\r
7958 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7959 result = snd_pcm_drop( handle[1] );
\r
7960 if ( result < 0 ) {
\r
7961 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7962 errorText_ = errorStream_.str();
\r
7968 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7969 MUTEX_UNLOCK( &stream_.mutex );
\r
7971 if ( result >= 0 ) return;
\r
7972 error( RtAudioError::SYSTEM_ERROR );
\r
7975 void RtApiAlsa :: callbackEvent()
\r
7977 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7978 if ( stream_.state == STREAM_STOPPED ) {
\r
7979 MUTEX_LOCK( &stream_.mutex );
\r
7980 while ( !apiInfo->runnable )
\r
7981 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7983 if ( stream_.state != STREAM_RUNNING ) {
\r
7984 MUTEX_UNLOCK( &stream_.mutex );
\r
7987 MUTEX_UNLOCK( &stream_.mutex );
\r
7990 if ( stream_.state == STREAM_CLOSED ) {
\r
7991 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7992 error( RtAudioError::WARNING );
\r
7996 int doStopStream = 0;
\r
7997 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7998 double streamTime = getStreamTime();
\r
7999 RtAudioStreamStatus status = 0;
\r
8000 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
8001 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
8002 apiInfo->xrun[0] = false;
\r
8004 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
8005 status |= RTAUDIO_INPUT_OVERFLOW;
\r
8006 apiInfo->xrun[1] = false;
\r
8008 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
8009 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
8011 if ( doStopStream == 2 ) {
\r
8016 MUTEX_LOCK( &stream_.mutex );
\r
8018 // The state might change while waiting on a mutex.
\r
8019 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
8024 snd_pcm_t **handle;
\r
8025 snd_pcm_sframes_t frames;
\r
8026 RtAudioFormat format;
\r
8027 handle = (snd_pcm_t **) apiInfo->handles;
\r
8029 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
8031 // Setup parameters.
\r
8032 if ( stream_.doConvertBuffer[1] ) {
\r
8033 buffer = stream_.deviceBuffer;
\r
8034 channels = stream_.nDeviceChannels[1];
\r
8035 format = stream_.deviceFormat[1];
\r
8038 buffer = stream_.userBuffer[1];
\r
8039 channels = stream_.nUserChannels[1];
\r
8040 format = stream_.userFormat;
\r
8043 // Read samples from device in interleaved/non-interleaved format.
\r
8044 if ( stream_.deviceInterleaved[1] )
\r
8045 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
8047 void *bufs[channels];
\r
8048 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8049 for ( int i=0; i<channels; i++ )
\r
8050 bufs[i] = (void *) (buffer + (i * offset));
\r
8051 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
8054 if ( result < (int) stream_.bufferSize ) {
\r
8055 // Either an error or overrun occured.
\r
8056 if ( result == -EPIPE ) {
\r
8057 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
8058 if ( state == SND_PCM_STATE_XRUN ) {
\r
8059 apiInfo->xrun[1] = true;
\r
8060 result = snd_pcm_prepare( handle[1] );
\r
8061 if ( result < 0 ) {
\r
8062 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
8063 errorText_ = errorStream_.str();
\r
8067 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8068 errorText_ = errorStream_.str();
\r
8072 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
8073 errorText_ = errorStream_.str();
\r
8075 error( RtAudioError::WARNING );
\r
8079 // Do byte swapping if necessary.
\r
8080 if ( stream_.doByteSwap[1] )
\r
8081 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
8083 // Do buffer conversion if necessary.
\r
8084 if ( stream_.doConvertBuffer[1] )
\r
8085 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
8087 // Check stream latency
\r
8088 result = snd_pcm_delay( handle[1], &frames );
\r
8089 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
8094 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8096 // Setup parameters and do buffer conversion if necessary.
\r
8097 if ( stream_.doConvertBuffer[0] ) {
\r
8098 buffer = stream_.deviceBuffer;
\r
8099 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8100 channels = stream_.nDeviceChannels[0];
\r
8101 format = stream_.deviceFormat[0];
\r
8104 buffer = stream_.userBuffer[0];
\r
8105 channels = stream_.nUserChannels[0];
\r
8106 format = stream_.userFormat;
\r
8109 // Do byte swapping if necessary.
\r
8110 if ( stream_.doByteSwap[0] )
\r
8111 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8113 // Write samples to device in interleaved/non-interleaved format.
\r
8114 if ( stream_.deviceInterleaved[0] )
\r
8115 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8117 void *bufs[channels];
\r
8118 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8119 for ( int i=0; i<channels; i++ )
\r
8120 bufs[i] = (void *) (buffer + (i * offset));
\r
8121 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8124 if ( result < (int) stream_.bufferSize ) {
\r
8125 // Either an error or underrun occured.
\r
8126 if ( result == -EPIPE ) {
\r
8127 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8128 if ( state == SND_PCM_STATE_XRUN ) {
\r
8129 apiInfo->xrun[0] = true;
\r
8130 result = snd_pcm_prepare( handle[0] );
\r
8131 if ( result < 0 ) {
\r
8132 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8133 errorText_ = errorStream_.str();
\r
8136 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8139 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8140 errorText_ = errorStream_.str();
\r
8144 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8145 errorText_ = errorStream_.str();
\r
8147 error( RtAudioError::WARNING );
\r
8151 // Check stream latency
\r
8152 result = snd_pcm_delay( handle[0], &frames );
\r
8153 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8157 MUTEX_UNLOCK( &stream_.mutex );
\r
8159 RtApi::tickStreamTime();
\r
8160 if ( doStopStream == 1 ) this->stopStream();
\r
8163 static void *alsaCallbackHandler( void *ptr )
\r
8165 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8166 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8167 bool *isRunning = &info->isRunning;
\r
8169 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8170 if ( info->doRealtime ) {
\r
8171 pthread_t tID = pthread_self(); // ID of this thread
\r
8172 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8173 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8177 while ( *isRunning == true ) {
\r
8178 pthread_testcancel();
\r
8179 object->callbackEvent();
\r
8182 pthread_exit( NULL );
\r
//******************** End of __LINUX_ALSA__ *********************//
\r
8188 #if defined(__LINUX_PULSE__)
\r
// Code written by Peter Meerwald, pmeerw@pmeerw.net
// and Tristan Matthews.
\r
8193 #include <pulse/error.h>
\r
8194 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio backend will accept; zero-terminated so
// callers can iterate with `for ( const unsigned int *sr = ...; *sr; ++sr )`.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
8200 struct rtaudio_pa_format_mapping_t {
\r
8201 RtAudioFormat rtaudio_format;
\r
8202 pa_sample_format_t pa_format;
\r
8205 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8206 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8207 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8208 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8209 {0, PA_SAMPLE_INVALID}};
\r
8211 struct PulseAudioHandle {
\r
8212 pa_simple *s_play;
\r
8215 pthread_cond_t runnable_cv;
\r
8217 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8220 RtApiPulse::~RtApiPulse()
\r
8222 if ( stream_.state != STREAM_CLOSED )
\r
8226 unsigned int RtApiPulse::getDeviceCount( void )
\r
8231 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8233 RtAudio::DeviceInfo info;
\r
8234 info.probed = true;
\r
8235 info.name = "PulseAudio";
\r
8236 info.outputChannels = 2;
\r
8237 info.inputChannels = 2;
\r
8238 info.duplexChannels = 2;
\r
8239 info.isDefaultOutput = true;
\r
8240 info.isDefaultInput = true;
\r
8242 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8243 info.sampleRates.push_back( *sr );
\r
8245 info.preferredSampleRate = 48000;
\r
8246 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8251 static void *pulseaudio_callback( void * user )
\r
8253 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8254 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8255 volatile bool *isRunning = &cbi->isRunning;
\r
8257 while ( *isRunning ) {
\r
8258 pthread_testcancel();
\r
8259 context->callbackEvent();
\r
8262 pthread_exit( NULL );
\r
8265 void RtApiPulse::closeStream( void )
\r
8267 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8269 stream_.callbackInfo.isRunning = false;
\r
8271 MUTEX_LOCK( &stream_.mutex );
\r
8272 if ( stream_.state == STREAM_STOPPED ) {
\r
8273 pah->runnable = true;
\r
8274 pthread_cond_signal( &pah->runnable_cv );
\r
8276 MUTEX_UNLOCK( &stream_.mutex );
\r
8278 pthread_join( pah->thread, 0 );
\r
8279 if ( pah->s_play ) {
\r
8280 pa_simple_flush( pah->s_play, NULL );
\r
8281 pa_simple_free( pah->s_play );
\r
8284 pa_simple_free( pah->s_rec );
\r
8286 pthread_cond_destroy( &pah->runnable_cv );
\r
8288 stream_.apiHandle = 0;
\r
8291 if ( stream_.userBuffer[0] ) {
\r
8292 free( stream_.userBuffer[0] );
\r
8293 stream_.userBuffer[0] = 0;
\r
8295 if ( stream_.userBuffer[1] ) {
\r
8296 free( stream_.userBuffer[1] );
\r
8297 stream_.userBuffer[1] = 0;
\r
8300 stream_.state = STREAM_CLOSED;
\r
8301 stream_.mode = UNINITIALIZED;
\r
8304 void RtApiPulse::callbackEvent( void )
\r
8306 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8308 if ( stream_.state == STREAM_STOPPED ) {
\r
8309 MUTEX_LOCK( &stream_.mutex );
\r
8310 while ( !pah->runnable )
\r
8311 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8313 if ( stream_.state != STREAM_RUNNING ) {
\r
8314 MUTEX_UNLOCK( &stream_.mutex );
\r
8317 MUTEX_UNLOCK( &stream_.mutex );
\r
8320 if ( stream_.state == STREAM_CLOSED ) {
\r
8321 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8322 "this shouldn't happen!";
\r
8323 error( RtAudioError::WARNING );
\r
8327 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8328 double streamTime = getStreamTime();
\r
8329 RtAudioStreamStatus status = 0;
\r
8330 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8331 stream_.bufferSize, streamTime, status,
\r
8332 stream_.callbackInfo.userData );
\r
8334 if ( doStopStream == 2 ) {
\r
8339 MUTEX_LOCK( &stream_.mutex );
\r
8340 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8341 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8343 if ( stream_.state != STREAM_RUNNING )
\r
8348 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8349 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8350 convertBuffer( stream_.deviceBuffer,
\r
8351 stream_.userBuffer[OUTPUT],
\r
8352 stream_.convertInfo[OUTPUT] );
\r
8353 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8354 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8356 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8357 formatBytes( stream_.userFormat );
\r
8359 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8360 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8361 pa_strerror( pa_error ) << ".";
\r
8362 errorText_ = errorStream_.str();
\r
8363 error( RtAudioError::WARNING );
\r
8367 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8368 if ( stream_.doConvertBuffer[INPUT] )
\r
8369 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8370 formatBytes( stream_.deviceFormat[INPUT] );
\r
8372 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8373 formatBytes( stream_.userFormat );
\r
8375 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8376 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8377 pa_strerror( pa_error ) << ".";
\r
8378 errorText_ = errorStream_.str();
\r
8379 error( RtAudioError::WARNING );
\r
8381 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8382 convertBuffer( stream_.userBuffer[INPUT],
\r
8383 stream_.deviceBuffer,
\r
8384 stream_.convertInfo[INPUT] );
\r
8389 MUTEX_UNLOCK( &stream_.mutex );
\r
8390 RtApi::tickStreamTime();
\r
8392 if ( doStopStream == 1 )
\r
8396 void RtApiPulse::startStream( void )
\r
8398 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8400 if ( stream_.state == STREAM_CLOSED ) {
\r
8401 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8402 error( RtAudioError::INVALID_USE );
\r
8405 if ( stream_.state == STREAM_RUNNING ) {
\r
8406 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8407 error( RtAudioError::WARNING );
\r
8411 MUTEX_LOCK( &stream_.mutex );
\r
8413 stream_.state = STREAM_RUNNING;
\r
8415 pah->runnable = true;
\r
8416 pthread_cond_signal( &pah->runnable_cv );
\r
8417 MUTEX_UNLOCK( &stream_.mutex );
\r
8420 void RtApiPulse::stopStream( void )
\r
8422 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8424 if ( stream_.state == STREAM_CLOSED ) {
\r
8425 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8426 error( RtAudioError::INVALID_USE );
\r
8429 if ( stream_.state == STREAM_STOPPED ) {
\r
8430 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8431 error( RtAudioError::WARNING );
\r
8435 stream_.state = STREAM_STOPPED;
\r
8436 MUTEX_LOCK( &stream_.mutex );
\r
8438 if ( pah && pah->s_play ) {
\r
8440 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8441 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8442 pa_strerror( pa_error ) << ".";
\r
8443 errorText_ = errorStream_.str();
\r
8444 MUTEX_UNLOCK( &stream_.mutex );
\r
8445 error( RtAudioError::SYSTEM_ERROR );
\r
8450 stream_.state = STREAM_STOPPED;
\r
8451 MUTEX_UNLOCK( &stream_.mutex );
\r
8454 void RtApiPulse::abortStream( void )
\r
8456 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8458 if ( stream_.state == STREAM_CLOSED ) {
\r
8459 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8460 error( RtAudioError::INVALID_USE );
\r
8463 if ( stream_.state == STREAM_STOPPED ) {
\r
8464 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8465 error( RtAudioError::WARNING );
\r
8469 stream_.state = STREAM_STOPPED;
\r
8470 MUTEX_LOCK( &stream_.mutex );
\r
8472 if ( pah && pah->s_play ) {
\r
8474 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8475 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8476 pa_strerror( pa_error ) << ".";
\r
8477 errorText_ = errorStream_.str();
\r
8478 MUTEX_UNLOCK( &stream_.mutex );
\r
8479 error( RtAudioError::SYSTEM_ERROR );
\r
8484 stream_.state = STREAM_STOPPED;
\r
8485 MUTEX_UNLOCK( &stream_.mutex );
\r
8488 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8489 unsigned int channels, unsigned int firstChannel,
\r
8490 unsigned int sampleRate, RtAudioFormat format,
\r
8491 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8493 PulseAudioHandle *pah = 0;
\r
8494 unsigned long bufferBytes = 0;
\r
8495 pa_sample_spec ss;
\r
8497 if ( device != 0 ) return false;
\r
8498 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8499 if ( channels != 1 && channels != 2 ) {
\r
8500 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8503 ss.channels = channels;
\r
8505 if ( firstChannel != 0 ) return false;
\r
8507 bool sr_found = false;
\r
8508 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8509 if ( sampleRate == *sr ) {
\r
8511 stream_.sampleRate = sampleRate;
\r
8512 ss.rate = sampleRate;
\r
8516 if ( !sr_found ) {
\r
8517 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8521 bool sf_found = 0;
\r
8522 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8523 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8524 if ( format == sf->rtaudio_format ) {
\r
8526 stream_.userFormat = sf->rtaudio_format;
\r
8527 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8528 ss.format = sf->pa_format;
\r
8532 if ( !sf_found ) { // Use internal data format conversion.
\r
8533 stream_.userFormat = format;
\r
8534 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8535 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8538 // Set other stream parameters.
\r
8539 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8540 else stream_.userInterleaved = true;
\r
8541 stream_.deviceInterleaved[mode] = true;
\r
8542 stream_.nBuffers = 1;
\r
8543 stream_.doByteSwap[mode] = false;
\r
8544 stream_.nUserChannels[mode] = channels;
\r
8545 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8546 stream_.channelOffset[mode] = 0;
\r
8547 std::string streamName = "RtAudio";
\r
8549 // Set flags for buffer conversion.
\r
8550 stream_.doConvertBuffer[mode] = false;
\r
8551 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8552 stream_.doConvertBuffer[mode] = true;
\r
8553 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8554 stream_.doConvertBuffer[mode] = true;
\r
8556 // Allocate necessary internal buffers.
\r
8557 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8558 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8559 if ( stream_.userBuffer[mode] == NULL ) {
\r
8560 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8563 stream_.bufferSize = *bufferSize;
\r
8565 if ( stream_.doConvertBuffer[mode] ) {
\r
8567 bool makeBuffer = true;
\r
8568 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8569 if ( mode == INPUT ) {
\r
8570 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8571 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8572 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8576 if ( makeBuffer ) {
\r
8577 bufferBytes *= *bufferSize;
\r
8578 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8579 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8580 if ( stream_.deviceBuffer == NULL ) {
\r
8581 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8587 stream_.device[mode] = device;
\r
8589 // Setup the buffer conversion information structure.
\r
8590 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8592 if ( !stream_.apiHandle ) {
\r
8593 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8595 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8599 stream_.apiHandle = pah;
\r
8600 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8601 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8605 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8608 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8611 pa_buffer_attr buffer_attr;
\r
8612 buffer_attr.fragsize = bufferBytes;
\r
8613 buffer_attr.maxlength = -1;
\r
8615 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8616 if ( !pah->s_rec ) {
\r
8617 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8622 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8623 if ( !pah->s_play ) {
\r
8624 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8632 if ( stream_.mode == UNINITIALIZED )
\r
8633 stream_.mode = mode;
\r
8634 else if ( stream_.mode == mode )
\r
8637 stream_.mode = DUPLEX;
\r
8639 if ( !stream_.callbackInfo.isRunning ) {
\r
8640 stream_.callbackInfo.object = this;
\r
8641 stream_.callbackInfo.isRunning = true;
\r
8642 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8643 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8648 stream_.state = STREAM_STOPPED;
\r
8652 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8653 pthread_cond_destroy( &pah->runnable_cv );
\r
8655 stream_.apiHandle = 0;
\r
8658 for ( int i=0; i<2; i++ ) {
\r
8659 if ( stream_.userBuffer[i] ) {
\r
8660 free( stream_.userBuffer[i] );
\r
8661 stream_.userBuffer[i] = 0;
\r
8665 if ( stream_.deviceBuffer ) {
\r
8666 free( stream_.deviceBuffer );
\r
8667 stream_.deviceBuffer = 0;
\r
//******************** End of __LINUX_PULSE__ *********************//
\r
8676 #if defined(__LINUX_OSS__)
\r
8678 #include <unistd.h>
\r
8679 #include <sys/ioctl.h>
\r
8680 #include <unistd.h>
\r
8681 #include <fcntl.h>
\r
8682 #include <sys/soundcard.h>
\r
8683 #include <errno.h>
\r
8686 static void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
\r
// A structure to hold various information related to the OSS API
// implementation: the playback/capture file descriptors, per-direction
// xrun flags, the trigger state, and the condition variable used to
// park the callback thread while the stream is stopped.
// NOTE(review): the xrun/triggered members were dropped by paste
// corruption; restored because the constructor's init list below
// references both.
struct OssHandle {
  int id[2];               // device ids (output, input)
  bool xrun[2];            // over/underrun flags per direction
  bool triggered;          // whether the device has been triggered
  pthread_cond_t runnable;

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
8700 RtApiOss :: RtApiOss()
\r
8702 // Nothing to do here.
\r
8705 RtApiOss :: ~RtApiOss()
\r
8707 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8710 unsigned int RtApiOss :: getDeviceCount( void )
\r
8712 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8713 if ( mixerfd == -1 ) {
\r
8714 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8715 error( RtAudioError::WARNING );
\r
8719 oss_sysinfo sysinfo;
\r
8720 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8722 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8723 error( RtAudioError::WARNING );
\r
8728 return sysinfo.numaudios;
\r
8731 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8733 RtAudio::DeviceInfo info;
\r
8734 info.probed = false;
\r
8736 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8737 if ( mixerfd == -1 ) {
\r
8738 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8739 error( RtAudioError::WARNING );
\r
8743 oss_sysinfo sysinfo;
\r
8744 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8745 if ( result == -1 ) {
\r
8747 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8748 error( RtAudioError::WARNING );
\r
8752 unsigned nDevices = sysinfo.numaudios;
\r
8753 if ( nDevices == 0 ) {
\r
8755 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8756 error( RtAudioError::INVALID_USE );
\r
8760 if ( device >= nDevices ) {
\r
8762 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8763 error( RtAudioError::INVALID_USE );
\r
8767 oss_audioinfo ainfo;
\r
8768 ainfo.dev = device;
\r
8769 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8771 if ( result == -1 ) {
\r
8772 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8773 errorText_ = errorStream_.str();
\r
8774 error( RtAudioError::WARNING );
\r
8779 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8780 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8781 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8782 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8783 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8786 // Probe data formats ... do for input
\r
8787 unsigned long mask = ainfo.iformats;
\r
8788 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8789 info.nativeFormats |= RTAUDIO_SINT16;
\r
8790 if ( mask & AFMT_S8 )
\r
8791 info.nativeFormats |= RTAUDIO_SINT8;
\r
8792 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8793 info.nativeFormats |= RTAUDIO_SINT32;
\r
8795 if ( mask & AFMT_FLOAT )
\r
8796 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8798 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8799 info.nativeFormats |= RTAUDIO_SINT24;
\r
8801 // Check that we have at least one supported format
\r
8802 if ( info.nativeFormats == 0 ) {
\r
8803 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8804 errorText_ = errorStream_.str();
\r
8805 error( RtAudioError::WARNING );
\r
8809 // Probe the supported sample rates.
\r
8810 info.sampleRates.clear();
\r
8811 if ( ainfo.nrates ) {
\r
8812 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8813 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8814 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8815 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8817 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8818 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8826 // Check min and max rate values;
\r
8827 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8828 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8829 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8831 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8832 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8837 if ( info.sampleRates.size() == 0 ) {
\r
8838 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8839 errorText_ = errorStream_.str();
\r
8840 error( RtAudioError::WARNING );
\r
8843 info.probed = true;
\r
8844 info.name = ainfo.name;
\r
8851 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8852 unsigned int firstChannel, unsigned int sampleRate,
\r
8853 RtAudioFormat format, unsigned int *bufferSize,
\r
8854 RtAudio::StreamOptions *options )
\r
8856 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8857 if ( mixerfd == -1 ) {
\r
8858 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8862 oss_sysinfo sysinfo;
\r
8863 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8864 if ( result == -1 ) {
\r
8866 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8870 unsigned nDevices = sysinfo.numaudios;
\r
8871 if ( nDevices == 0 ) {
\r
8872 // This should not happen because a check is made before this function is called.
\r
8874 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8878 if ( device >= nDevices ) {
\r
8879 // This should not happen because a check is made before this function is called.
\r
8881 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8885 oss_audioinfo ainfo;
\r
8886 ainfo.dev = device;
\r
8887 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8889 if ( result == -1 ) {
\r
8890 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8891 errorText_ = errorStream_.str();
\r
8895 // Check if device supports input or output
\r
8896 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8897 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8898 if ( mode == OUTPUT )
\r
8899 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8901 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8902 errorText_ = errorStream_.str();
\r
8907 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8908 if ( mode == OUTPUT )
\r
8909 flags |= O_WRONLY;
\r
8910 else { // mode == INPUT
\r
8911 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8912 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8913 close( handle->id[0] );
\r
8914 handle->id[0] = 0;
\r
8915 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8916 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8917 errorText_ = errorStream_.str();
\r
8920 // Check that the number previously set channels is the same.
\r
8921 if ( stream_.nUserChannels[0] != channels ) {
\r
8922 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8923 errorText_ = errorStream_.str();
\r
8929 flags |= O_RDONLY;
\r
8932 // Set exclusive access if specified.
\r
8933 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8935 // Try to open the device.
\r
8937 fd = open( ainfo.devnode, flags, 0 );
\r
8939 if ( errno == EBUSY )
\r
8940 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8942 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8943 errorText_ = errorStream_.str();
\r
8947 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8949 if ( flags | O_RDWR ) {
\r
8950 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8951 if ( result == -1) {
\r
8952 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8953 errorText_ = errorStream_.str();
\r
8959 // Check the device channel support.
\r
8960 stream_.nUserChannels[mode] = channels;
\r
8961 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8963 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8964 errorText_ = errorStream_.str();
\r
8968 // Set the number of channels.
\r
8969 int deviceChannels = channels + firstChannel;
\r
8970 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8971 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8973 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8974 errorText_ = errorStream_.str();
\r
8977 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8979 // Get the data format mask
\r
8981 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8982 if ( result == -1 ) {
\r
8984 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8985 errorText_ = errorStream_.str();
\r
8989 // Determine how to set the device format.
\r
8990 stream_.userFormat = format;
\r
8991 int deviceFormat = -1;
\r
8992 stream_.doByteSwap[mode] = false;
\r
8993 if ( format == RTAUDIO_SINT8 ) {
\r
8994 if ( mask & AFMT_S8 ) {
\r
8995 deviceFormat = AFMT_S8;
\r
8996 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8999 else if ( format == RTAUDIO_SINT16 ) {
\r
9000 if ( mask & AFMT_S16_NE ) {
\r
9001 deviceFormat = AFMT_S16_NE;
\r
9002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9004 else if ( mask & AFMT_S16_OE ) {
\r
9005 deviceFormat = AFMT_S16_OE;
\r
9006 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9007 stream_.doByteSwap[mode] = true;
\r
9010 else if ( format == RTAUDIO_SINT24 ) {
\r
9011 if ( mask & AFMT_S24_NE ) {
\r
9012 deviceFormat = AFMT_S24_NE;
\r
9013 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9015 else if ( mask & AFMT_S24_OE ) {
\r
9016 deviceFormat = AFMT_S24_OE;
\r
9017 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9018 stream_.doByteSwap[mode] = true;
\r
9021 else if ( format == RTAUDIO_SINT32 ) {
\r
9022 if ( mask & AFMT_S32_NE ) {
\r
9023 deviceFormat = AFMT_S32_NE;
\r
9024 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9026 else if ( mask & AFMT_S32_OE ) {
\r
9027 deviceFormat = AFMT_S32_OE;
\r
9028 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9029 stream_.doByteSwap[mode] = true;
\r
9033 if ( deviceFormat == -1 ) {
\r
9034 // The user requested format is not natively supported by the device.
\r
9035 if ( mask & AFMT_S16_NE ) {
\r
9036 deviceFormat = AFMT_S16_NE;
\r
9037 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9039 else if ( mask & AFMT_S32_NE ) {
\r
9040 deviceFormat = AFMT_S32_NE;
\r
9041 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9043 else if ( mask & AFMT_S24_NE ) {
\r
9044 deviceFormat = AFMT_S24_NE;
\r
9045 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9047 else if ( mask & AFMT_S16_OE ) {
\r
9048 deviceFormat = AFMT_S16_OE;
\r
9049 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9050 stream_.doByteSwap[mode] = true;
\r
9052 else if ( mask & AFMT_S32_OE ) {
\r
9053 deviceFormat = AFMT_S32_OE;
\r
9054 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9055 stream_.doByteSwap[mode] = true;
\r
9057 else if ( mask & AFMT_S24_OE ) {
\r
9058 deviceFormat = AFMT_S24_OE;
\r
9059 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9060 stream_.doByteSwap[mode] = true;
\r
9062 else if ( mask & AFMT_S8) {
\r
9063 deviceFormat = AFMT_S8;
\r
9064 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
9068 if ( stream_.deviceFormat[mode] == 0 ) {
\r
9069 // This really shouldn't happen ...
\r
9071 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
9072 errorText_ = errorStream_.str();
\r
9076 // Set the data format.
\r
9077 int temp = deviceFormat;
\r
9078 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
9079 if ( result == -1 || deviceFormat != temp ) {
\r
9081 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
9082 errorText_ = errorStream_.str();
\r
9086 // Attempt to set the buffer size. According to OSS, the minimum
\r
9087 // number of buffers is two. The supposed minimum buffer size is 16
\r
9088 // bytes, so that will be our lower bound. The argument to this
\r
9089 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
9090 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
9091 // We'll check the actual value used near the end of the setup
\r
9093 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
9094 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
9096 if ( options ) buffers = options->numberOfBuffers;
\r
9097 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
9098 if ( buffers < 2 ) buffers = 3;
\r
9099 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
9100 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
9101 if ( result == -1 ) {
\r
9103 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
9104 errorText_ = errorStream_.str();
\r
9107 stream_.nBuffers = buffers;
\r
9109 // Save buffer size (in sample frames).
\r
9110 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
9111 stream_.bufferSize = *bufferSize;
\r
9113 // Set the sample rate.
\r
9114 int srate = sampleRate;
\r
9115 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
9116 if ( result == -1 ) {
\r
9118 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
9119 errorText_ = errorStream_.str();
\r
9123 // Verify the sample rate setup worked.
\r
9124 if ( abs( srate - (int)sampleRate ) > 100 ) {
\r
9126 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9127 errorText_ = errorStream_.str();
\r
9130 stream_.sampleRate = sampleRate;
\r
9132 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9133 // We're doing duplex setup here.
\r
9134 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9135 stream_.nDeviceChannels[0] = deviceChannels;
\r
9138 // Set interleaving parameters.
\r
9139 stream_.userInterleaved = true;
\r
9140 stream_.deviceInterleaved[mode] = true;
\r
9141 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9142 stream_.userInterleaved = false;
\r
9144 // Set flags for buffer conversion
\r
9145 stream_.doConvertBuffer[mode] = false;
\r
9146 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9147 stream_.doConvertBuffer[mode] = true;
\r
9148 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9149 stream_.doConvertBuffer[mode] = true;
\r
9150 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9151 stream_.nUserChannels[mode] > 1 )
\r
9152 stream_.doConvertBuffer[mode] = true;
\r
9154 // Allocate the stream handles if necessary and then save.
\r
9155 if ( stream_.apiHandle == 0 ) {
\r
9157 handle = new OssHandle;
\r
9159 catch ( std::bad_alloc& ) {
\r
9160 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9164 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9165 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9169 stream_.apiHandle = (void *) handle;
\r
9172 handle = (OssHandle *) stream_.apiHandle;
\r
9174 handle->id[mode] = fd;
\r
9176 // Allocate necessary internal buffers.
\r
9177 unsigned long bufferBytes;
\r
9178 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9179 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9180 if ( stream_.userBuffer[mode] == NULL ) {
\r
9181 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9185 if ( stream_.doConvertBuffer[mode] ) {
\r
9187 bool makeBuffer = true;
\r
9188 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9189 if ( mode == INPUT ) {
\r
9190 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9191 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9192 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9196 if ( makeBuffer ) {
\r
9197 bufferBytes *= *bufferSize;
\r
9198 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9199 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9200 if ( stream_.deviceBuffer == NULL ) {
\r
9201 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9207 stream_.device[mode] = device;
\r
9208 stream_.state = STREAM_STOPPED;
\r
9210 // Setup the buffer conversion information structure.
\r
9211 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9213 // Setup thread if necessary.
\r
9214 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9215 // We had already set up an output stream.
\r
9216 stream_.mode = DUPLEX;
\r
9217 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9220 stream_.mode = mode;
\r
9222 // Setup callback thread.
\r
9223 stream_.callbackInfo.object = (void *) this;
\r
9225 // Set the thread attributes for joinable and realtime scheduling
\r
9226 // priority. The higher priority will only take affect if the
\r
9227 // program is run as root or suid.
\r
9228 pthread_attr_t attr;
\r
9229 pthread_attr_init( &attr );
\r
9230 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9231 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9232 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9233 struct sched_param param;
\r
9234 int priority = options->priority;
\r
9235 int min = sched_get_priority_min( SCHED_RR );
\r
9236 int max = sched_get_priority_max( SCHED_RR );
\r
9237 if ( priority < min ) priority = min;
\r
9238 else if ( priority > max ) priority = max;
\r
9239 param.sched_priority = priority;
\r
9240 pthread_attr_setschedparam( &attr, ¶m );
\r
9241 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9244 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9246 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9249 stream_.callbackInfo.isRunning = true;
\r
9250 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9251 pthread_attr_destroy( &attr );
\r
9253 stream_.callbackInfo.isRunning = false;
\r
9254 errorText_ = "RtApiOss::error creating callback thread!";
\r
9263 pthread_cond_destroy( &handle->runnable );
\r
9264 if ( handle->id[0] ) close( handle->id[0] );
\r
9265 if ( handle->id[1] ) close( handle->id[1] );
\r
9267 stream_.apiHandle = 0;
\r
9270 for ( int i=0; i<2; i++ ) {
\r
9271 if ( stream_.userBuffer[i] ) {
\r
9272 free( stream_.userBuffer[i] );
\r
9273 stream_.userBuffer[i] = 0;
\r
9277 if ( stream_.deviceBuffer ) {
\r
9278 free( stream_.deviceBuffer );
\r
9279 stream_.deviceBuffer = 0;
\r
9285 void RtApiOss :: closeStream()
\r
9287 if ( stream_.state == STREAM_CLOSED ) {
\r
9288 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9289 error( RtAudioError::WARNING );
\r
9293 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9294 stream_.callbackInfo.isRunning = false;
\r
9295 MUTEX_LOCK( &stream_.mutex );
\r
9296 if ( stream_.state == STREAM_STOPPED )
\r
9297 pthread_cond_signal( &handle->runnable );
\r
9298 MUTEX_UNLOCK( &stream_.mutex );
\r
9299 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9301 if ( stream_.state == STREAM_RUNNING ) {
\r
9302 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9303 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9305 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9306 stream_.state = STREAM_STOPPED;
\r
9310 pthread_cond_destroy( &handle->runnable );
\r
9311 if ( handle->id[0] ) close( handle->id[0] );
\r
9312 if ( handle->id[1] ) close( handle->id[1] );
\r
9314 stream_.apiHandle = 0;
\r
9317 for ( int i=0; i<2; i++ ) {
\r
9318 if ( stream_.userBuffer[i] ) {
\r
9319 free( stream_.userBuffer[i] );
\r
9320 stream_.userBuffer[i] = 0;
\r
9324 if ( stream_.deviceBuffer ) {
\r
9325 free( stream_.deviceBuffer );
\r
9326 stream_.deviceBuffer = 0;
\r
9329 stream_.mode = UNINITIALIZED;
\r
9330 stream_.state = STREAM_CLOSED;
\r
9333 void RtApiOss :: startStream()
\r
9336 if ( stream_.state == STREAM_RUNNING ) {
\r
9337 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9338 error( RtAudioError::WARNING );
\r
9342 MUTEX_LOCK( &stream_.mutex );
\r
9344 stream_.state = STREAM_RUNNING;
\r
9346 // No need to do anything else here ... OSS automatically starts
\r
9347 // when fed samples.
\r
9349 MUTEX_UNLOCK( &stream_.mutex );
\r
9351 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9352 pthread_cond_signal( &handle->runnable );
\r
9355 void RtApiOss :: stopStream()
\r
9358 if ( stream_.state == STREAM_STOPPED ) {
\r
9359 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9360 error( RtAudioError::WARNING );
\r
9364 MUTEX_LOCK( &stream_.mutex );
\r
9366 // The state might change while waiting on a mutex.
\r
9367 if ( stream_.state == STREAM_STOPPED ) {
\r
9368 MUTEX_UNLOCK( &stream_.mutex );
\r
9373 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9374 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9376 // Flush the output with zeros a few times.
\r
9379 RtAudioFormat format;
\r
9381 if ( stream_.doConvertBuffer[0] ) {
\r
9382 buffer = stream_.deviceBuffer;
\r
9383 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9384 format = stream_.deviceFormat[0];
\r
9387 buffer = stream_.userBuffer[0];
\r
9388 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9389 format = stream_.userFormat;
\r
9392 memset( buffer, 0, samples * formatBytes(format) );
\r
9393 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9394 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9395 if ( result == -1 ) {
\r
9396 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9397 error( RtAudioError::WARNING );
\r
9401 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9402 if ( result == -1 ) {
\r
9403 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9404 errorText_ = errorStream_.str();
\r
9407 handle->triggered = false;
\r
9410 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9411 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9412 if ( result == -1 ) {
\r
9413 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9414 errorText_ = errorStream_.str();
\r
9420 stream_.state = STREAM_STOPPED;
\r
9421 MUTEX_UNLOCK( &stream_.mutex );
\r
9423 if ( result != -1 ) return;
\r
9424 error( RtAudioError::SYSTEM_ERROR );
\r
9427 void RtApiOss :: abortStream()
\r
9430 if ( stream_.state == STREAM_STOPPED ) {
\r
9431 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9432 error( RtAudioError::WARNING );
\r
9436 MUTEX_LOCK( &stream_.mutex );
\r
9438 // The state might change while waiting on a mutex.
\r
9439 if ( stream_.state == STREAM_STOPPED ) {
\r
9440 MUTEX_UNLOCK( &stream_.mutex );
\r
9445 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9446 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9447 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9448 if ( result == -1 ) {
\r
9449 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9450 errorText_ = errorStream_.str();
\r
9453 handle->triggered = false;
\r
9456 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9457 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9458 if ( result == -1 ) {
\r
9459 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9460 errorText_ = errorStream_.str();
\r
9466 stream_.state = STREAM_STOPPED;
\r
9467 MUTEX_UNLOCK( &stream_.mutex );
\r
9469 if ( result != -1 ) return;
\r
9470 error( RtAudioError::SYSTEM_ERROR );
\r
9473 void RtApiOss :: callbackEvent()
\r
9475 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9476 if ( stream_.state == STREAM_STOPPED ) {
\r
9477 MUTEX_LOCK( &stream_.mutex );
\r
9478 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9479 if ( stream_.state != STREAM_RUNNING ) {
\r
9480 MUTEX_UNLOCK( &stream_.mutex );
\r
9483 MUTEX_UNLOCK( &stream_.mutex );
\r
9486 if ( stream_.state == STREAM_CLOSED ) {
\r
9487 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9488 error( RtAudioError::WARNING );
\r
9492 // Invoke user callback to get fresh output data.
\r
9493 int doStopStream = 0;
\r
9494 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9495 double streamTime = getStreamTime();
\r
9496 RtAudioStreamStatus status = 0;
\r
9497 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9498 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9499 handle->xrun[0] = false;
\r
9501 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9502 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9503 handle->xrun[1] = false;
\r
9505 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9506 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9507 if ( doStopStream == 2 ) {
\r
9508 this->abortStream();
\r
9512 MUTEX_LOCK( &stream_.mutex );
\r
9514 // The state might change while waiting on a mutex.
\r
9515 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9520 RtAudioFormat format;
\r
9522 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9524 // Setup parameters and do buffer conversion if necessary.
\r
9525 if ( stream_.doConvertBuffer[0] ) {
\r
9526 buffer = stream_.deviceBuffer;
\r
9527 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9528 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9529 format = stream_.deviceFormat[0];
\r
9532 buffer = stream_.userBuffer[0];
\r
9533 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9534 format = stream_.userFormat;
\r
9537 // Do byte swapping if necessary.
\r
9538 if ( stream_.doByteSwap[0] )
\r
9539 byteSwapBuffer( buffer, samples, format );
\r
9541 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9543 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9544 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9545 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9546 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9547 handle->triggered = true;
\r
9550 // Write samples to device.
\r
9551 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9553 if ( result == -1 ) {
\r
9554 // We'll assume this is an underrun, though there isn't a
\r
9555 // specific means for determining that.
\r
9556 handle->xrun[0] = true;
\r
9557 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9558 error( RtAudioError::WARNING );
\r
9559 // Continue on to input section.
\r
9563 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9565 // Setup parameters.
\r
9566 if ( stream_.doConvertBuffer[1] ) {
\r
9567 buffer = stream_.deviceBuffer;
\r
9568 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9569 format = stream_.deviceFormat[1];
\r
9572 buffer = stream_.userBuffer[1];
\r
9573 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9574 format = stream_.userFormat;
\r
9577 // Read samples from device.
\r
9578 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9580 if ( result == -1 ) {
\r
9581 // We'll assume this is an overrun, though there isn't a
\r
9582 // specific means for determining that.
\r
9583 handle->xrun[1] = true;
\r
9584 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9585 error( RtAudioError::WARNING );
\r
9589 // Do byte swapping if necessary.
\r
9590 if ( stream_.doByteSwap[1] )
\r
9591 byteSwapBuffer( buffer, samples, format );
\r
9593 // Do buffer conversion if necessary.
\r
9594 if ( stream_.doConvertBuffer[1] )
\r
9595 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9599 MUTEX_UNLOCK( &stream_.mutex );
\r
9601 RtApi::tickStreamTime();
\r
9602 if ( doStopStream == 1 ) this->stopStream();
\r
9605 static void *ossCallbackHandler( void *ptr )
\r
9607 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9608 RtApiOss *object = (RtApiOss *) info->object;
\r
9609 bool *isRunning = &info->isRunning;
\r
9611 while ( *isRunning == true ) {
\r
9612 pthread_testcancel();
\r
9613 object->callbackEvent();
\r
9616 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//
#endif

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //
\r
9629 // This method can be modified to control the behavior of error
\r
9630 // message printing.
\r
9631 void RtApi :: error( RtAudioError::Type type )
\r
9633 errorStream_.str(""); // clear the ostringstream
\r
9635 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9636 if ( errorCallback ) {
\r
9637 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9639 if ( firstErrorOccurred_ )
\r
9642 firstErrorOccurred_ = true;
\r
9643 const std::string errorMessage = errorText_;
\r
9645 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9646 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9650 errorCallback( type, errorMessage );
\r
9651 firstErrorOccurred_ = false;
\r
9655 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9656 std::cerr << '\n' << errorText_ << "\n\n";
\r
9657 else if ( type != RtAudioError::WARNING )
\r
9658 throw( RtAudioError( errorText_, type ) );
\r
9661 void RtApi :: verifyStream()
\r
9663 if ( stream_.state == STREAM_CLOSED ) {
\r
9664 errorText_ = "RtApi:: a stream is not open!";
\r
9665 error( RtAudioError::INVALID_USE );
\r
9669 void RtApi :: clearStreamInfo()
\r
9671 stream_.mode = UNINITIALIZED;
\r
9672 stream_.state = STREAM_CLOSED;
\r
9673 stream_.sampleRate = 0;
\r
9674 stream_.bufferSize = 0;
\r
9675 stream_.nBuffers = 0;
\r
9676 stream_.userFormat = 0;
\r
9677 stream_.userInterleaved = true;
\r
9678 stream_.streamTime = 0.0;
\r
9679 stream_.apiHandle = 0;
\r
9680 stream_.deviceBuffer = 0;
\r
9681 stream_.callbackInfo.callback = 0;
\r
9682 stream_.callbackInfo.userData = 0;
\r
9683 stream_.callbackInfo.isRunning = false;
\r
9684 stream_.callbackInfo.errorCallback = 0;
\r
9685 for ( int i=0; i<2; i++ ) {
\r
9686 stream_.device[i] = 11111;
\r
9687 stream_.doConvertBuffer[i] = false;
\r
9688 stream_.deviceInterleaved[i] = true;
\r
9689 stream_.doByteSwap[i] = false;
\r
9690 stream_.nUserChannels[i] = 0;
\r
9691 stream_.nDeviceChannels[i] = 0;
\r
9692 stream_.channelOffset[i] = 0;
\r
9693 stream_.deviceFormat[i] = 0;
\r
9694 stream_.latency[i] = 0;
\r
9695 stream_.userBuffer[i] = 0;
\r
9696 stream_.convertInfo[i].channels = 0;
\r
9697 stream_.convertInfo[i].inJump = 0;
\r
9698 stream_.convertInfo[i].outJump = 0;
\r
9699 stream_.convertInfo[i].inFormat = 0;
\r
9700 stream_.convertInfo[i].outFormat = 0;
\r
9701 stream_.convertInfo[i].inOffset.clear();
\r
9702 stream_.convertInfo[i].outOffset.clear();
\r
9706 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9708 if ( format == RTAUDIO_SINT16 )
\r
9710 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9712 else if ( format == RTAUDIO_FLOAT64 )
\r
9714 else if ( format == RTAUDIO_SINT24 )
\r
9716 else if ( format == RTAUDIO_SINT8 )
\r
9719 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9720 error( RtAudioError::WARNING );
\r
9725 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9727 if ( mode == INPUT ) { // convert device to user buffer
\r
9728 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9729 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9730 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9731 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9733 else { // convert user to device buffer
\r
9734 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9735 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9736 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9737 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9740 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9741 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9743 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9745 // Set up the interleave/deinterleave offsets.
\r
9746 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9747 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9748 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9749 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9750 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9751 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9752 stream_.convertInfo[mode].inJump = 1;
\r
9756 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9757 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9758 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9759 stream_.convertInfo[mode].outJump = 1;
\r
9763 else { // no (de)interleaving
\r
9764 if ( stream_.userInterleaved ) {
\r
9765 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9766 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9767 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9771 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9772 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9773 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9774 stream_.convertInfo[mode].inJump = 1;
\r
9775 stream_.convertInfo[mode].outJump = 1;
\r
9780 // Add channel offset.
\r
9781 if ( firstChannel > 0 ) {
\r
9782 if ( stream_.deviceInterleaved[mode] ) {
\r
9783 if ( mode == OUTPUT ) {
\r
9784 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9785 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9788 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9789 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9793 if ( mode == OUTPUT ) {
\r
9794 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9795 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9798 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9799 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9805 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9807 // This function does format conversion, input/output channel compensation, and
\r
9808 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9809 // the lower three bytes of a 32-bit integer.
\r
9811 // Clear our device buffer when in/out duplex device channels are different
\r
9812 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9813 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9814 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9817 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9819 Float64 *out = (Float64 *)outBuffer;
\r
9821 if (info.inFormat == RTAUDIO_SINT8) {
\r
9822 signed char *in = (signed char *)inBuffer;
\r
9823 scale = 1.0 / 127.5;
\r
9824 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9825 for (j=0; j<info.channels; j++) {
\r
9826 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9827 out[info.outOffset[j]] += 0.5;
\r
9828 out[info.outOffset[j]] *= scale;
\r
9830 in += info.inJump;
\r
9831 out += info.outJump;
\r
9834 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9835 Int16 *in = (Int16 *)inBuffer;
\r
9836 scale = 1.0 / 32767.5;
\r
9837 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9838 for (j=0; j<info.channels; j++) {
\r
9839 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9840 out[info.outOffset[j]] += 0.5;
\r
9841 out[info.outOffset[j]] *= scale;
\r
9843 in += info.inJump;
\r
9844 out += info.outJump;
\r
9847 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9848 Int24 *in = (Int24 *)inBuffer;
\r
9849 scale = 1.0 / 8388607.5;
\r
9850 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9851 for (j=0; j<info.channels; j++) {
\r
9852 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9853 out[info.outOffset[j]] += 0.5;
\r
9854 out[info.outOffset[j]] *= scale;
\r
9856 in += info.inJump;
\r
9857 out += info.outJump;
\r
9860 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9861 Int32 *in = (Int32 *)inBuffer;
\r
9862 scale = 1.0 / 2147483647.5;
\r
9863 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9864 for (j=0; j<info.channels; j++) {
\r
9865 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9866 out[info.outOffset[j]] += 0.5;
\r
9867 out[info.outOffset[j]] *= scale;
\r
9869 in += info.inJump;
\r
9870 out += info.outJump;
\r
9873 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9874 Float32 *in = (Float32 *)inBuffer;
\r
9875 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9876 for (j=0; j<info.channels; j++) {
\r
9877 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9879 in += info.inJump;
\r
9880 out += info.outJump;
\r
9883 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9884 // Channel compensation and/or (de)interleaving only.
\r
9885 Float64 *in = (Float64 *)inBuffer;
\r
9886 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9887 for (j=0; j<info.channels; j++) {
\r
9888 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9890 in += info.inJump;
\r
9891 out += info.outJump;
\r
9895 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9897 Float32 *out = (Float32 *)outBuffer;
\r
9899 if (info.inFormat == RTAUDIO_SINT8) {
\r
9900 signed char *in = (signed char *)inBuffer;
\r
9901 scale = (Float32) ( 1.0 / 127.5 );
\r
9902 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9903 for (j=0; j<info.channels; j++) {
\r
9904 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9905 out[info.outOffset[j]] += 0.5;
\r
9906 out[info.outOffset[j]] *= scale;
\r
9908 in += info.inJump;
\r
9909 out += info.outJump;
\r
9912 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9913 Int16 *in = (Int16 *)inBuffer;
\r
9914 scale = (Float32) ( 1.0 / 32767.5 );
\r
9915 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9916 for (j=0; j<info.channels; j++) {
\r
9917 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9918 out[info.outOffset[j]] += 0.5;
\r
9919 out[info.outOffset[j]] *= scale;
\r
9921 in += info.inJump;
\r
9922 out += info.outJump;
\r
9925 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9926 Int24 *in = (Int24 *)inBuffer;
\r
9927 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9928 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9929 for (j=0; j<info.channels; j++) {
\r
9930 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9931 out[info.outOffset[j]] += 0.5;
\r
9932 out[info.outOffset[j]] *= scale;
\r
9934 in += info.inJump;
\r
9935 out += info.outJump;
\r
9938 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9939 Int32 *in = (Int32 *)inBuffer;
\r
9940 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9941 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9942 for (j=0; j<info.channels; j++) {
\r
9943 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9944 out[info.outOffset[j]] += 0.5;
\r
9945 out[info.outOffset[j]] *= scale;
\r
9947 in += info.inJump;
\r
9948 out += info.outJump;
\r
9951 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9952 // Channel compensation and/or (de)interleaving only.
\r
9953 Float32 *in = (Float32 *)inBuffer;
\r
9954 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9955 for (j=0; j<info.channels; j++) {
\r
9956 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9958 in += info.inJump;
\r
9959 out += info.outJump;
\r
9962 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9963 Float64 *in = (Float64 *)inBuffer;
\r
9964 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9965 for (j=0; j<info.channels; j++) {
\r
9966 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9968 in += info.inJump;
\r
9969 out += info.outJump;
\r
9973 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9974 Int32 *out = (Int32 *)outBuffer;
\r
9975 if (info.inFormat == RTAUDIO_SINT8) {
\r
9976 signed char *in = (signed char *)inBuffer;
\r
9977 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9978 for (j=0; j<info.channels; j++) {
\r
9979 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9980 out[info.outOffset[j]] <<= 24;
\r
9982 in += info.inJump;
\r
9983 out += info.outJump;
\r
9986 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9987 Int16 *in = (Int16 *)inBuffer;
\r
9988 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9989 for (j=0; j<info.channels; j++) {
\r
9990 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9991 out[info.outOffset[j]] <<= 16;
\r
9993 in += info.inJump;
\r
9994 out += info.outJump;
\r
9997 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9998 Int24 *in = (Int24 *)inBuffer;
\r
9999 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10000 for (j=0; j<info.channels; j++) {
\r
10001 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
10002 out[info.outOffset[j]] <<= 8;
\r
10004 in += info.inJump;
\r
10005 out += info.outJump;
\r
10008 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10009 // Channel compensation and/or (de)interleaving only.
\r
10010 Int32 *in = (Int32 *)inBuffer;
\r
10011 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10012 for (j=0; j<info.channels; j++) {
\r
10013 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10015 in += info.inJump;
\r
10016 out += info.outJump;
\r
10019 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10020 Float32 *in = (Float32 *)inBuffer;
\r
10021 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10022 for (j=0; j<info.channels; j++) {
\r
10023 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10025 in += info.inJump;
\r
10026 out += info.outJump;
\r
10029 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10030 Float64 *in = (Float64 *)inBuffer;
\r
10031 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10032 for (j=0; j<info.channels; j++) {
\r
10033 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10035 in += info.inJump;
\r
10036 out += info.outJump;
\r
10040 else if (info.outFormat == RTAUDIO_SINT24) {
\r
10041 Int24 *out = (Int24 *)outBuffer;
\r
10042 if (info.inFormat == RTAUDIO_SINT8) {
\r
10043 signed char *in = (signed char *)inBuffer;
\r
10044 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10045 for (j=0; j<info.channels; j++) {
\r
10046 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
10047 //out[info.outOffset[j]] <<= 16;
\r
10049 in += info.inJump;
\r
10050 out += info.outJump;
\r
10053 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10054 Int16 *in = (Int16 *)inBuffer;
\r
10055 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10056 for (j=0; j<info.channels; j++) {
\r
10057 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
10058 //out[info.outOffset[j]] <<= 8;
\r
10060 in += info.inJump;
\r
10061 out += info.outJump;
\r
10064 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10065 // Channel compensation and/or (de)interleaving only.
\r
10066 Int24 *in = (Int24 *)inBuffer;
\r
10067 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10068 for (j=0; j<info.channels; j++) {
\r
10069 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10071 in += info.inJump;
\r
10072 out += info.outJump;
\r
10075 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10076 Int32 *in = (Int32 *)inBuffer;
\r
10077 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10078 for (j=0; j<info.channels; j++) {
\r
10079 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
10080 //out[info.outOffset[j]] >>= 8;
\r
10082 in += info.inJump;
\r
10083 out += info.outJump;
\r
10086 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10087 Float32 *in = (Float32 *)inBuffer;
\r
10088 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10089 for (j=0; j<info.channels; j++) {
\r
10090 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10092 in += info.inJump;
\r
10093 out += info.outJump;
\r
10096 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10097 Float64 *in = (Float64 *)inBuffer;
\r
10098 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10099 for (j=0; j<info.channels; j++) {
\r
10100 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10102 in += info.inJump;
\r
10103 out += info.outJump;
\r
10107 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10108 Int16 *out = (Int16 *)outBuffer;
\r
10109 if (info.inFormat == RTAUDIO_SINT8) {
\r
10110 signed char *in = (signed char *)inBuffer;
\r
10111 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10112 for (j=0; j<info.channels; j++) {
\r
10113 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10114 out[info.outOffset[j]] <<= 8;
\r
10116 in += info.inJump;
\r
10117 out += info.outJump;
\r
10120 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10121 // Channel compensation and/or (de)interleaving only.
\r
10122 Int16 *in = (Int16 *)inBuffer;
\r
10123 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10124 for (j=0; j<info.channels; j++) {
\r
10125 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10127 in += info.inJump;
\r
10128 out += info.outJump;
\r
10131 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10132 Int24 *in = (Int24 *)inBuffer;
\r
10133 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10134 for (j=0; j<info.channels; j++) {
\r
10135 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10137 in += info.inJump;
\r
10138 out += info.outJump;
\r
10141 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10142 Int32 *in = (Int32 *)inBuffer;
\r
10143 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10144 for (j=0; j<info.channels; j++) {
\r
10145 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10147 in += info.inJump;
\r
10148 out += info.outJump;
\r
10151 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10152 Float32 *in = (Float32 *)inBuffer;
\r
10153 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10154 for (j=0; j<info.channels; j++) {
\r
10155 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10157 in += info.inJump;
\r
10158 out += info.outJump;
\r
10161 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10162 Float64 *in = (Float64 *)inBuffer;
\r
10163 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10164 for (j=0; j<info.channels; j++) {
\r
10165 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10167 in += info.inJump;
\r
10168 out += info.outJump;
\r
10172 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10173 signed char *out = (signed char *)outBuffer;
\r
10174 if (info.inFormat == RTAUDIO_SINT8) {
\r
10175 // Channel compensation and/or (de)interleaving only.
\r
10176 signed char *in = (signed char *)inBuffer;
\r
10177 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10178 for (j=0; j<info.channels; j++) {
\r
10179 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10181 in += info.inJump;
\r
10182 out += info.outJump;
\r
10185 if (info.inFormat == RTAUDIO_SINT16) {
\r
10186 Int16 *in = (Int16 *)inBuffer;
\r
10187 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10188 for (j=0; j<info.channels; j++) {
\r
10189 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10191 in += info.inJump;
\r
10192 out += info.outJump;
\r
10195 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10196 Int24 *in = (Int24 *)inBuffer;
\r
10197 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10198 for (j=0; j<info.channels; j++) {
\r
10199 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10201 in += info.inJump;
\r
10202 out += info.outJump;
\r
10205 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10206 Int32 *in = (Int32 *)inBuffer;
\r
10207 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10208 for (j=0; j<info.channels; j++) {
\r
10209 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10211 in += info.inJump;
\r
10212 out += info.outJump;
\r
10215 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10216 Float32 *in = (Float32 *)inBuffer;
\r
10217 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10218 for (j=0; j<info.channels; j++) {
\r
10219 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10221 in += info.inJump;
\r
10222 out += info.outJump;
\r
10225 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10226 Float64 *in = (Float64 *)inBuffer;
\r
10227 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10228 for (j=0; j<info.channels; j++) {
\r
10229 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10231 in += info.inJump;
\r
10232 out += info.outJump;
\r
10238 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10239 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10240 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10242 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10248 if ( format == RTAUDIO_SINT16 ) {
\r
10249 for ( unsigned int i=0; i<samples; i++ ) {
\r
10250 // Swap 1st and 2nd bytes.
\r
10252 *(ptr) = *(ptr+1);
\r
10255 // Increment 2 bytes.
\r
10259 else if ( format == RTAUDIO_SINT32 ||
\r
10260 format == RTAUDIO_FLOAT32 ) {
\r
10261 for ( unsigned int i=0; i<samples; i++ ) {
\r
10262 // Swap 1st and 4th bytes.
\r
10264 *(ptr) = *(ptr+3);
\r
10267 // Swap 2nd and 3rd bytes.
\r
10270 *(ptr) = *(ptr+1);
\r
10273 // Increment 3 more bytes.
\r
10277 else if ( format == RTAUDIO_SINT24 ) {
\r
10278 for ( unsigned int i=0; i<samples; i++ ) {
\r
10279 // Swap 1st and 3rd bytes.
\r
10281 *(ptr) = *(ptr+2);
\r
10284 // Increment 2 more bytes.
\r
10288 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10289 for ( unsigned int i=0; i<samples; i++ ) {
\r
10290 // Swap 1st and 8th bytes
\r
10292 *(ptr) = *(ptr+7);
\r
10295 // Swap 2nd and 7th bytes
\r
10298 *(ptr) = *(ptr+5);
\r
10301 // Swap 3rd and 6th bytes
\r
10304 *(ptr) = *(ptr+3);
\r
10307 // Swap 4th and 5th bytes
\r
10310 *(ptr) = *(ptr+1);
\r
10313 // Increment 5 more bytes.
\r
10319 // Indentation settings for Vim and Emacs
\r
10321 // Local Variables:
\r
10322 // c-basic-offset: 2
\r
10323 // indent-tabs-mode: nil
\r
10326 // vim: et sts=2 sw=2
\r