1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2016 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.2
\r
43 #include "RtAudio.h"
\r
49 #include <algorithm>
\r
51 // Static variable definitions.
\r
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
53 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers and string-conversion helpers.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  // Windows: wrap a CRITICAL_SECTION.
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  #include "tchar.h"

  // Narrow strings pass straight through.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Convert a wide (UTF-16) string to a UTF-8 std::string.  The first
  // WideCharToMultiByte call computes the required byte count
  // (including the NUL terminator, hence length-1 below).
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
90 // *************************************************** //
\r
92 // RtAudio definitions.
\r
94 // *************************************************** //
\r
96 std::string RtAudio :: getVersion( void )
\r
98 return RTAUDIO_VERSION;
\r
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
\r
105 // The order here will control the order of RtAudio's API search in
\r
106 // the constructor.
\r
107 #if defined(__UNIX_JACK__)
\r
108 apis.push_back( UNIX_JACK );
\r
110 #if defined(__LINUX_ALSA__)
\r
111 apis.push_back( LINUX_ALSA );
\r
113 #if defined(__LINUX_PULSE__)
\r
114 apis.push_back( LINUX_PULSE );
\r
116 #if defined(__LINUX_OSS__)
\r
117 apis.push_back( LINUX_OSS );
\r
119 #if defined(__WINDOWS_ASIO__)
\r
120 apis.push_back( WINDOWS_ASIO );
\r
122 #if defined(__WINDOWS_WASAPI__)
\r
123 apis.push_back( WINDOWS_WASAPI );
\r
125 #if defined(__WINDOWS_DS__)
\r
126 apis.push_back( WINDOWS_DS );
\r
128 #if defined(__MACOSX_CORE__)
\r
129 apis.push_back( MACOSX_CORE );
\r
131 #if defined(__RTAUDIO_DUMMY__)
\r
132 apis.push_back( RTAUDIO_DUMMY );
\r
136 void RtAudio :: openRtApi( RtAudio::Api api )
\r
142 #if defined(__UNIX_JACK__)
\r
143 if ( api == UNIX_JACK )
\r
144 rtapi_ = new RtApiJack();
\r
146 #if defined(__LINUX_ALSA__)
\r
147 if ( api == LINUX_ALSA )
\r
148 rtapi_ = new RtApiAlsa();
\r
150 #if defined(__LINUX_PULSE__)
\r
151 if ( api == LINUX_PULSE )
\r
152 rtapi_ = new RtApiPulse();
\r
154 #if defined(__LINUX_OSS__)
\r
155 if ( api == LINUX_OSS )
\r
156 rtapi_ = new RtApiOss();
\r
158 #if defined(__WINDOWS_ASIO__)
\r
159 if ( api == WINDOWS_ASIO )
\r
160 rtapi_ = new RtApiAsio();
\r
162 #if defined(__WINDOWS_WASAPI__)
\r
163 if ( api == WINDOWS_WASAPI )
\r
164 rtapi_ = new RtApiWasapi();
\r
166 #if defined(__WINDOWS_DS__)
\r
167 if ( api == WINDOWS_DS )
\r
168 rtapi_ = new RtApiDs();
\r
170 #if defined(__MACOSX_CORE__)
\r
171 if ( api == MACOSX_CORE )
\r
172 rtapi_ = new RtApiCore();
\r
174 #if defined(__RTAUDIO_DUMMY__)
\r
175 if ( api == RTAUDIO_DUMMY )
\r
176 rtapi_ = new RtApiDummy();
\r
180 RtAudio :: RtAudio( RtAudio::Api api )
\r
184 if ( api != UNSPECIFIED ) {
\r
185 // Attempt to open the specified API.
\r
187 if ( rtapi_ ) return;
\r
189 // No compiled support for specified API value. Issue a debug
\r
190 // warning and continue as if no API was specified.
\r
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
194 // Iterate through the compiled APIs and return as soon as we find
\r
195 // one with at least one device or we reach the end of the list.
\r
196 std::vector< RtAudio::Api > apis;
\r
197 getCompiledApi( apis );
\r
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
199 openRtApi( apis[i] );
\r
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
203 if ( rtapi_ ) return;
\r
205 // It should not be possible to get here because the preprocessor
\r
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
207 // API-specific definitions are passed to the compiler. But just in
\r
208 // case something weird happens, we'll thow an error.
\r
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
213 RtAudio :: ~RtAudio()
\r
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
220 RtAudio::StreamParameters *inputParameters,
\r
221 RtAudioFormat format, unsigned int sampleRate,
\r
222 unsigned int *bufferFrames,
\r
223 RtAudioCallback callback, void *userData,
\r
224 RtAudio::StreamOptions *options,
\r
225 RtAudioErrorCallback errorCallback )
\r
227 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
228 sampleRate, bufferFrames, callback,
\r
229 userData, options, errorCallback );
\r
232 // *************************************************** //
\r
234 // Public RtApi definitions (see end of file for
\r
235 // private or protected utility functions).
\r
237 // *************************************************** //
\r
241 stream_.state = STREAM_CLOSED;
\r
242 stream_.mode = UNINITIALIZED;
\r
243 stream_.apiHandle = 0;
\r
244 stream_.userBuffer[0] = 0;
\r
245 stream_.userBuffer[1] = 0;
\r
246 MUTEX_INITIALIZE( &stream_.mutex );
\r
247 showWarnings_ = true;
\r
248 firstErrorOccurred_ = false;
\r
253 MUTEX_DESTROY( &stream_.mutex );
\r
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
257 RtAudio::StreamParameters *iParams,
\r
258 RtAudioFormat format, unsigned int sampleRate,
\r
259 unsigned int *bufferFrames,
\r
260 RtAudioCallback callback, void *userData,
\r
261 RtAudio::StreamOptions *options,
\r
262 RtAudioErrorCallback errorCallback )
\r
264 if ( stream_.state != STREAM_CLOSED ) {
\r
265 errorText_ = "RtApi::openStream: a stream is already open!";
\r
266 error( RtAudioError::INVALID_USE );
\r
270 // Clear stream information potentially left from a previously open stream.
\r
273 if ( oParams && oParams->nChannels < 1 ) {
\r
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
275 error( RtAudioError::INVALID_USE );
\r
279 if ( iParams && iParams->nChannels < 1 ) {
\r
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
281 error( RtAudioError::INVALID_USE );
\r
285 if ( oParams == NULL && iParams == NULL ) {
\r
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
287 error( RtAudioError::INVALID_USE );
\r
291 if ( formatBytes(format) == 0 ) {
\r
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
293 error( RtAudioError::INVALID_USE );
\r
297 unsigned int nDevices = getDeviceCount();
\r
298 unsigned int oChannels = 0;
\r
300 oChannels = oParams->nChannels;
\r
301 if ( oParams->deviceId >= nDevices ) {
\r
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
303 error( RtAudioError::INVALID_USE );
\r
308 unsigned int iChannels = 0;
\r
310 iChannels = iParams->nChannels;
\r
311 if ( iParams->deviceId >= nDevices ) {
\r
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
313 error( RtAudioError::INVALID_USE );
\r
320 if ( oChannels > 0 ) {
\r
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
323 sampleRate, format, bufferFrames, options );
\r
324 if ( result == false ) {
\r
325 error( RtAudioError::SYSTEM_ERROR );
\r
330 if ( iChannels > 0 ) {
\r
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
333 sampleRate, format, bufferFrames, options );
\r
334 if ( result == false ) {
\r
335 if ( oChannels > 0 ) closeStream();
\r
336 error( RtAudioError::SYSTEM_ERROR );
\r
341 stream_.callbackInfo.callback = (void *) callback;
\r
342 stream_.callbackInfo.userData = userData;
\r
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
346 stream_.state = STREAM_STOPPED;
\r
349 unsigned int RtApi :: getDefaultInputDevice( void )
\r
351 // Should be implemented in subclasses if possible.
\r
355 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
357 // Should be implemented in subclasses if possible.
\r
361 void RtApi :: closeStream( void )
\r
363 // MUST be implemented in subclasses!
\r
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
370 RtAudio::StreamOptions * /*options*/ )
\r
372 // MUST be implemented in subclasses!
\r
376 void RtApi :: tickStreamTime( void )
\r
378 // Subclasses that do not provide their own implementation of
\r
379 // getStreamTime should call this function once per buffer I/O to
\r
380 // provide basic stream time support.
\r
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
384 #if defined( HAVE_GETTIMEOFDAY )
\r
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
389 long RtApi :: getStreamLatency( void )
\r
393 long totalLatency = 0;
\r
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
395 totalLatency = stream_.latency[0];
\r
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
397 totalLatency += stream_.latency[1];
\r
399 return totalLatency;
\r
402 double RtApi :: getStreamTime( void )
\r
406 #if defined( HAVE_GETTIMEOFDAY )
\r
407 // Return a very accurate estimate of the stream time by
\r
408 // adding in the elapsed time since the last tick.
\r
409 struct timeval then;
\r
410 struct timeval now;
\r
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
413 return stream_.streamTime;
\r
415 gettimeofday( &now, NULL );
\r
416 then = stream_.lastTickTimestamp;
\r
417 return stream_.streamTime +
\r
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
419 (then.tv_sec + 0.000001 * then.tv_usec));
\r
421 return stream_.streamTime;
\r
425 void RtApi :: setStreamTime( double time )
\r
430 stream_.streamTime = time;
\r
431 #if defined( HAVE_GETTIMEOFDAY )
\r
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
436 unsigned int RtApi :: getStreamSampleRate( void )
\r
440 return stream_.sampleRate;
\r
444 // *************************************************** //
\r
446 // OS/API-specific methods.
\r
448 // *************************************************** //
\r
450 #if defined(__MACOSX_CORE__)
\r
452 // The OS X CoreAudio API is designed to use a separate callback
\r
453 // procedure for each of its audio devices. A single RtAudio duplex
\r
454 // stream using two different devices is supported here, though it
\r
455 // cannot be guaranteed to always behave correctly because we cannot
\r
456 // synchronize these two callbacks.
\r
458 // A property listener is installed for over/underrun information.
\r
459 // However, no functionality is currently provided to allow property
\r
460 // listeners to trigger user handlers because it is unclear what could
\r
461 // be done if a critical stream parameter (buffer size, sample rate,
\r
462 // device disconnect) notification arrived. The listeners entail
\r
463 // quite a bit of extra code and most likely, a user program wouldn't
\r
464 // be prepared for the result anyway. However, we do provide a flag
\r
465 // to the client callback function to inform of an over/underrun.
\r
467 // A structure to hold various information related to the CoreAudio API
\r
469 struct CoreHandle {
\r
470 AudioDeviceID id[2]; // device ids
\r
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
472 AudioDeviceIOProcID procId[2];
\r
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
475 UInt32 nStreams[2]; // number of streams to use
\r
477 char *deviceBuffer;
\r
478 pthread_cond_t condition;
\r
479 int drainCounter; // Tracks callback counts when draining
\r
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
486 RtApiCore:: RtApiCore()
\r
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
489 // This is a largely undocumented but absolutely necessary
\r
490 // requirement starting with OS-X 10.6. If not called, queries and
\r
491 // updates to various audio device properties are not handled
\r
493 CFRunLoopRef theRunLoop = NULL;
\r
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
495 kAudioObjectPropertyScopeGlobal,
\r
496 kAudioObjectPropertyElementMaster };
\r
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
498 if ( result != noErr ) {
\r
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
500 error( RtAudioError::WARNING );
\r
505 RtApiCore :: ~RtApiCore()
\r
507 // The subclass destructor gets called before the base class
\r
508 // destructor, so close an existing stream before deallocating
\r
509 // apiDeviceId memory.
\r
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
513 unsigned int RtApiCore :: getDeviceCount( void )
\r
515 // Find out how many audio devices there are, if any.
\r
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
519 if ( result != noErr ) {
\r
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
521 error( RtAudioError::WARNING );
\r
525 return dataSize / sizeof( AudioDeviceID );
\r
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
530 unsigned int nDevices = getDeviceCount();
\r
531 if ( nDevices <= 1 ) return 0;
\r
534 UInt32 dataSize = sizeof( AudioDeviceID );
\r
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
537 if ( result != noErr ) {
\r
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
539 error( RtAudioError::WARNING );
\r
543 dataSize *= nDevices;
\r
544 AudioDeviceID deviceList[ nDevices ];
\r
545 property.mSelector = kAudioHardwarePropertyDevices;
\r
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
547 if ( result != noErr ) {
\r
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
549 error( RtAudioError::WARNING );
\r
553 for ( unsigned int i=0; i<nDevices; i++ )
\r
554 if ( id == deviceList[i] ) return i;
\r
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
557 error( RtAudioError::WARNING );
\r
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
563 unsigned int nDevices = getDeviceCount();
\r
564 if ( nDevices <= 1 ) return 0;
\r
567 UInt32 dataSize = sizeof( AudioDeviceID );
\r
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
570 if ( result != noErr ) {
\r
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
572 error( RtAudioError::WARNING );
\r
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
577 AudioDeviceID deviceList[ nDevices ];
\r
578 property.mSelector = kAudioHardwarePropertyDevices;
\r
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
580 if ( result != noErr ) {
\r
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
582 error( RtAudioError::WARNING );
\r
586 for ( unsigned int i=0; i<nDevices; i++ )
\r
587 if ( id == deviceList[i] ) return i;
\r
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
590 error( RtAudioError::WARNING );
\r
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
596 RtAudio::DeviceInfo info;
\r
597 info.probed = false;
\r
600 unsigned int nDevices = getDeviceCount();
\r
601 if ( nDevices == 0 ) {
\r
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
603 error( RtAudioError::INVALID_USE );
\r
607 if ( device >= nDevices ) {
\r
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
609 error( RtAudioError::INVALID_USE );
\r
613 AudioDeviceID deviceList[ nDevices ];
\r
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
616 kAudioObjectPropertyScopeGlobal,
\r
617 kAudioObjectPropertyElementMaster };
\r
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
619 0, NULL, &dataSize, (void *) &deviceList );
\r
620 if ( result != noErr ) {
\r
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
622 error( RtAudioError::WARNING );
\r
626 AudioDeviceID id = deviceList[ device ];
\r
628 // Get the device name.
\r
630 CFStringRef cfname;
\r
631 dataSize = sizeof( CFStringRef );
\r
632 property.mSelector = kAudioObjectPropertyManufacturer;
\r
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
634 if ( result != noErr ) {
\r
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
636 errorText_ = errorStream_.str();
\r
637 error( RtAudioError::WARNING );
\r
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
642 int length = CFStringGetLength(cfname);
\r
643 char *mname = (char *)malloc(length * 3 + 1);
\r
644 #if defined( UNICODE ) || defined( _UNICODE )
\r
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
649 info.name.append( (const char *)mname, strlen(mname) );
\r
650 info.name.append( ": " );
\r
651 CFRelease( cfname );
\r
654 property.mSelector = kAudioObjectPropertyName;
\r
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
656 if ( result != noErr ) {
\r
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
658 errorText_ = errorStream_.str();
\r
659 error( RtAudioError::WARNING );
\r
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
664 length = CFStringGetLength(cfname);
\r
665 char *name = (char *)malloc(length * 3 + 1);
\r
666 #if defined( UNICODE ) || defined( _UNICODE )
\r
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
671 info.name.append( (const char *)name, strlen(name) );
\r
672 CFRelease( cfname );
\r
675 // Get the output stream "configuration".
\r
676 AudioBufferList *bufferList = nil;
\r
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
678 property.mScope = kAudioDevicePropertyScopeOutput;
\r
679 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
682 if ( result != noErr || dataSize == 0 ) {
\r
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
684 errorText_ = errorStream_.str();
\r
685 error( RtAudioError::WARNING );
\r
689 // Allocate the AudioBufferList.
\r
690 bufferList = (AudioBufferList *) malloc( dataSize );
\r
691 if ( bufferList == NULL ) {
\r
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
693 error( RtAudioError::WARNING );
\r
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
698 if ( result != noErr || dataSize == 0 ) {
\r
699 free( bufferList );
\r
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
701 errorText_ = errorStream_.str();
\r
702 error( RtAudioError::WARNING );
\r
706 // Get output channel information.
\r
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
708 for ( i=0; i<nStreams; i++ )
\r
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
710 free( bufferList );
\r
712 // Get the input stream "configuration".
\r
713 property.mScope = kAudioDevicePropertyScopeInput;
\r
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
715 if ( result != noErr || dataSize == 0 ) {
\r
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
717 errorText_ = errorStream_.str();
\r
718 error( RtAudioError::WARNING );
\r
722 // Allocate the AudioBufferList.
\r
723 bufferList = (AudioBufferList *) malloc( dataSize );
\r
724 if ( bufferList == NULL ) {
\r
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
726 error( RtAudioError::WARNING );
\r
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
731 if (result != noErr || dataSize == 0) {
\r
732 free( bufferList );
\r
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
734 errorText_ = errorStream_.str();
\r
735 error( RtAudioError::WARNING );
\r
739 // Get input channel information.
\r
740 nStreams = bufferList->mNumberBuffers;
\r
741 for ( i=0; i<nStreams; i++ )
\r
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
743 free( bufferList );
\r
745 // If device opens for both playback and capture, we determine the channels.
\r
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
749 // Probe the device sample rates.
\r
750 bool isInput = false;
\r
751 if ( info.outputChannels == 0 ) isInput = true;
\r
753 // Determine the supported sample rates.
\r
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
759 errorText_ = errorStream_.str();
\r
760 error( RtAudioError::WARNING );
\r
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
765 AudioValueRange rangeList[ nRanges ];
\r
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
767 if ( result != kAudioHardwareNoError ) {
\r
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
769 errorText_ = errorStream_.str();
\r
770 error( RtAudioError::WARNING );
\r
774 // The sample rate reporting mechanism is a bit of a mystery. It
\r
775 // seems that it can either return individual rates or a range of
\r
776 // rates. I assume that if the min / max range values are the same,
\r
777 // then that represents a single supported rate and if the min / max
\r
778 // range values are different, the device supports an arbitrary
\r
779 // range of values (though there might be multiple ranges, so we'll
\r
780 // use the most conservative range).
\r
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
782 bool haveValueRange = false;
\r
783 info.sampleRates.clear();
\r
784 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
787 info.sampleRates.push_back( tmpSr );
\r
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
790 info.preferredSampleRate = tmpSr;
\r
793 haveValueRange = true;
\r
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
799 if ( haveValueRange ) {
\r
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
805 info.preferredSampleRate = SAMPLE_RATES[k];
\r
810 // Sort and remove any redundant values
\r
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
814 if ( info.sampleRates.size() == 0 ) {
\r
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
816 errorText_ = errorStream_.str();
\r
817 error( RtAudioError::WARNING );
\r
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
822 // Thus, any other "physical" formats supported by the device are of
\r
823 // no interest to the client.
\r
824 info.nativeFormats = RTAUDIO_FLOAT32;
\r
826 if ( info.outputChannels > 0 )
\r
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
828 if ( info.inputChannels > 0 )
\r
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
831 info.probed = true;
\r
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
836 const AudioTimeStamp* /*inNow*/,
\r
837 const AudioBufferList* inInputData,
\r
838 const AudioTimeStamp* /*inInputTime*/,
\r
839 AudioBufferList* outOutputData,
\r
840 const AudioTimeStamp* /*inOutputTime*/,
\r
841 void* infoPointer )
\r
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
845 RtApiCore *object = (RtApiCore *) info->object;
\r
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
847 return kAudioHardwareUnspecifiedError;
\r
849 return kAudioHardwareNoError;
\r
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
854 const AudioObjectPropertyAddress properties[],
\r
855 void* handlePointer )
\r
857 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
861 handle->xrun[1] = true;
\r
863 handle->xrun[0] = true;
\r
867 return kAudioHardwareNoError;
\r
870 static OSStatus rateListener( AudioObjectID inDevice,
\r
871 UInt32 /*nAddresses*/,
\r
872 const AudioObjectPropertyAddress /*properties*/[],
\r
873 void* ratePointer )
\r
875 Float64 *rate = (Float64 *) ratePointer;
\r
876 UInt32 dataSize = sizeof( Float64 );
\r
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
878 kAudioObjectPropertyScopeGlobal,
\r
879 kAudioObjectPropertyElementMaster };
\r
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
881 return kAudioHardwareNoError;
\r
// Open (or extend to duplex) a stream on CoreAudio device index `device`.
// Probes the device's stream configuration, negotiates buffer size, sample
// rate, virtual and physical formats, allocates user/device buffers, and
// registers the I/O proc and xrun listener.  Returns true (SUCCESS) on
// success, false (FAILURE) otherwise, with errorText_ describing the fault.
//
// NOTE(review): this listing embeds upstream line numbers and has gaps in
// that numbering (e.g. 894-896, 900-902, 912-914) — the elided lines are the
// error-path `return FAILURE;` statements, closing braces, `else` lines and
// similar.  Verify any edit against the upstream RtAudio 4.x sources.
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

885 unsigned int firstChannel, unsigned int sampleRate,

886 RtAudioFormat format, unsigned int *bufferSize,

887 RtAudio::StreamOptions *options )

// Sanity checks — both conditions are pre-validated by the caller, so these
// error branches are belt-and-braces only.
890 unsigned int nDevices = getDeviceCount();

891 if ( nDevices == 0 ) {

892 // This should not happen because a check is made before this function is called.

893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";

897 if ( device >= nDevices ) {

898 // This should not happen because a check is made before this function is called.

899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";

// Translate the RtAudio device index into a CoreAudio AudioDeviceID by
// fetching the system-wide device list.  (VLA use here mirrors upstream.)
903 AudioDeviceID deviceList[ nDevices ];

904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;

905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,

906 kAudioObjectPropertyScopeGlobal,

907 kAudioObjectPropertyElementMaster };

908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,

909 0, NULL, &dataSize, (void *) &deviceList );

910 if ( result != noErr ) {

911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";

915 AudioDeviceID id = deviceList[ device ];

917 // Setup for stream mode.

918 bool isInput = false;

919 if ( mode == INPUT ) {

921 property.mScope = kAudioDevicePropertyScopeInput;

924 property.mScope = kAudioDevicePropertyScopeOutput;

926 // Get the stream "configuration".

927 AudioBufferList *bufferList = nil;

929 property.mSelector = kAudioDevicePropertyStreamConfiguration;

930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );

931 if ( result != noErr || dataSize == 0 ) {

932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";

933 errorText_ = errorStream_.str();

937 // Allocate the AudioBufferList.

938 bufferList = (AudioBufferList *) malloc( dataSize );

939 if ( bufferList == NULL ) {

940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";

944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );

945 if (result != noErr || dataSize == 0) {

946 free( bufferList );

947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";

948 errorText_ = errorStream_.str();

952 // Search for one or more streams that contain the desired number of

953 // channels. CoreAudio devices can have an arbitrary number of

954 // streams and each stream can have an arbitrary number of channels.

955 // For each stream, a single buffer of interleaved samples is

956 // provided. RtAudio prefers the use of one stream of interleaved

957 // data or multiple consecutive single-channel streams. However, we

958 // now support multiple consecutive multi-channel streams of

959 // interleaved data as well.

960 UInt32 iStream, offsetCounter = firstChannel;

961 UInt32 nStreams = bufferList->mNumberBuffers;

962 bool monoMode = false;

963 bool foundStream = false;

965 // First check that the device supports the requested number of

967 UInt32 deviceChannels = 0;

968 for ( iStream=0; iStream<nStreams; iStream++ )

969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

971 if ( deviceChannels < ( channels + firstChannel ) ) {

972 free( bufferList );

973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";

974 errorText_ = errorStream_.str();

978 // Look for a single stream meeting our needs.

979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;

980 for ( iStream=0; iStream<nStreams; iStream++ ) {

981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;

982 if ( streamChannels >= channels + offsetCounter ) {

983 firstStream = iStream;

984 channelOffset = offsetCounter;

985 foundStream = true;

// Walk past streams that lie entirely before the requested first channel.
988 if ( streamChannels > offsetCounter ) break;

989 offsetCounter -= streamChannels;

992 // If we didn't find a single stream above, then we should be able

993 // to meet the channel specification with multiple streams.

994 if ( foundStream == false ) {

996 offsetCounter = firstChannel;

997 for ( iStream=0; iStream<nStreams; iStream++ ) {

998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;

999 if ( streamChannels > offsetCounter ) break;

1000 offsetCounter -= streamChannels;

1003 firstStream = iStream;

1004 channelOffset = offsetCounter;

1005 Int32 channelCounter = channels + offsetCounter - streamChannels;

1007 if ( streamChannels > 1 ) monoMode = false;

// Count how many additional consecutive streams are needed to cover the
// requested channels.  NOTE(review): streamCount increment line appears
// elided from this listing (numbering gap after 1011) — confirm upstream.
1008 while ( channelCounter > 0 ) {

1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;

1010 if ( streamChannels > 1 ) monoMode = false;

1011 channelCounter -= streamChannels;

1016 free( bufferList );

1018 // Determine the buffer size.

1019 AudioValueRange bufferRange;

1020 dataSize = sizeof( AudioValueRange );

1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;

1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

1024 if ( result != noErr ) {

1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";

1026 errorText_ = errorStream_.str();

// Clamp the caller's requested *bufferSize into the device's legal range;
// RTAUDIO_MINIMIZE_LATENCY forces the device minimum.
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;

1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;

1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;

1034 // Set the buffer size. For multiple streams, I'm assuming we only

1035 // need to make this setting for the master channel.

1036 UInt32 theSize = (UInt32) *bufferSize;

1037 dataSize = sizeof( UInt32 );

1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;

1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );

1041 if ( result != noErr ) {

1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";

1043 errorText_ = errorStream_.str();

1047 // If attempting to setup a duplex stream, the bufferSize parameter

1048 // MUST be the same in both directions!

1049 *bufferSize = theSize;

1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {

1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";

1052 errorText_ = errorStream_.str();

1056 stream_.bufferSize = *bufferSize;

1057 stream_.nBuffers = 1;

1059 // Try to set "hog" mode ... it's not clear to me this is working.

1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {

// NOTE(review): the hog_pid declaration line is elided here (gap at 1061);
// upstream declares a pid_t hog_pid before this read — confirm.
1062 dataSize = sizeof( hog_pid );

1063 property.mSelector = kAudioDevicePropertyHogMode;

1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );

1065 if ( result != noErr ) {

1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";

1067 errorText_ = errorStream_.str();

// Only claim the device if some other process (or nobody) currently hogs it.
1071 if ( hog_pid != getpid() ) {

1072 hog_pid = getpid();

1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );

1074 if ( result != noErr ) {

1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";

1076 errorText_ = errorStream_.str();

1082 // Check and if necessary, change the sample rate for the device.

1083 Float64 nominalRate;

1084 dataSize = sizeof( Float64 );

1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;

1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );

1087 if ( result != noErr ) {

1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";

1089 errorText_ = errorStream_.str();

1093 // Only change the sample rate if off by more than 1 Hz.

1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {

1096 // Set a property listener for the sample rate change

1097 Float64 reportedRate = 0.0;

1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };

1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1100 if ( result != noErr ) {

1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";

1102 errorText_ = errorStream_.str();

1106 nominalRate = (Float64) sampleRate;

1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );

1108 if ( result != noErr ) {

1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";

1111 errorText_ = errorStream_.str();

1115 // Now wait until the reported nominal rate is what we just set.

// Poll reportedRate (updated by rateListener) with a 5 s ceiling.
// NOTE(review): the sleep call inside this loop (upstream ~line 1120,
// usleep( 5000 )) is elided from this listing — as shown it would busy-spin.
1116 UInt32 microCounter = 0;

1117 while ( reportedRate != nominalRate ) {

1118 microCounter += 5000;

1119 if ( microCounter > 5000000 ) break;

1123 // Remove the property listener.

1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

1126 if ( microCounter > 5000000 ) {

1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";

1128 errorText_ = errorStream_.str();

1133 // Now set the stream format for all streams. Also, check the

1134 // physical format of the device and change that if necessary.

1135 AudioStreamBasicDescription description;

1136 dataSize = sizeof( AudioStreamBasicDescription );

1137 property.mSelector = kAudioStreamPropertyVirtualFormat;

1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );

1139 if ( result != noErr ) {

1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";

1141 errorText_ = errorStream_.str();

1145 // Set the sample rate and data format id. However, only make the

1146 // change if the sample rate is not within 1.0 of the desired

1147 // rate and the format is not linear pcm.

1148 bool updateFormat = false;

1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {

1150 description.mSampleRate = (Float64) sampleRate;

1151 updateFormat = true;

1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {

1155 description.mFormatID = kAudioFormatLinearPCM;

1156 updateFormat = true;

1159 if ( updateFormat ) {

1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );

1161 if ( result != noErr ) {

1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";

1163 errorText_ = errorStream_.str();

1168 // Now check the physical format.

1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;

1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );

1171 if ( result != noErr ) {

1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";

1173 errorText_ = errorStream_.str();

1177 //std::cout << "Current physical stream format:" << std::endl;

1178 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;

1179 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;

1180 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;

1181 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;

// If the hardware format is not linear PCM of at least 16 bits, try a
// ranked list of candidate physical formats, best (32-bit float) first.
// The pair is <bits, formatFlags>; fractional "bits" values (24.2, 24.4)
// distinguish 24-in-4-bytes variants from packed 24-bit.
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {

1184 description.mFormatID = kAudioFormatLinearPCM;

1185 //description.mSampleRate = (Float64) sampleRate;

1186 AudioStreamBasicDescription testDescription = description;

1187 UInt32 formatFlags;

1189 // We'll try higher bit rates first and then work our way down.

1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;

1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;

1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );

1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;

1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );

1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed

1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );

1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low

1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;

1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high

1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;

1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );

1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );

1204 bool setPhysicalFormat = false;

1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {

1206 testDescription = description;

1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;

1208 testDescription.mFormatFlags = physicalFormats[i].second;

// NOTE(review): the bitwise NOT below makes the right-hand operand nonzero
// (hence "true") for every realistic flag value — `!(...)` was presumably
// intended.  This matches upstream RtAudio; flag for an upstream fix rather
// than silently changing behavior here.
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )

1210 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;

1212 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;

1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;

1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );

1215 if ( result == noErr ) {

1216 setPhysicalFormat = true;

1217 //std::cout << "Updated physical stream format:" << std::endl;

1218 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;

1219 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;

1220 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;

1221 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;

1226 if ( !setPhysicalFormat ) {

1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";

1228 errorText_ = errorStream_.str();

1231 } // done setting virtual/physical formats.

1233 // Get the stream / device latency.

// NOTE(review): the UInt32 latency declaration (upstream ~line 1234) is
// elided from this listing.
1235 dataSize = sizeof( UInt32 );

1236 property.mSelector = kAudioDevicePropertyLatency;

1237 if ( AudioObjectHasProperty( id, &property ) == true ) {

1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );

1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;

// Latency read failure is non-fatal: emit a warning and continue.
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";

1242 errorText_ = errorStream_.str();

1243 error( RtAudioError::WARNING );

1247 // Byte-swapping: According to AudioHardware.h, the stream data will

1248 // always be presented in native-endian format, so we should never

1249 // need to byte swap.

1250 stream_.doByteSwap[mode] = false;

1252 // From the CoreAudio documentation, PCM data must be supplied as

1254 stream_.userFormat = format;

1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

1257 if ( streamCount == 1 )

1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;

1259 else // multiple streams

1260 stream_.nDeviceChannels[mode] = channels;

1261 stream_.nUserChannels[mode] = channels;

1262 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream

1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

1264 else stream_.userInterleaved = true;

1265 stream_.deviceInterleaved[mode] = true;

1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;

1268 // Set flags for buffer conversion.

1269 stream_.doConvertBuffer[mode] = false;

1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )

1271 stream_.doConvertBuffer[mode] = true;

1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

1273 stream_.doConvertBuffer[mode] = true;

1274 if ( streamCount == 1 ) {

1275 if ( stream_.nUserChannels[mode] > 1 &&

1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )

1277 stream_.doConvertBuffer[mode] = true;

1279 else if ( monoMode && stream_.userInterleaved )

1280 stream_.doConvertBuffer[mode] = true;

1282 // Allocate our CoreHandle structure for the stream.

// A CoreHandle is shared between the two directions of a duplex stream:
// allocate on first open, reuse on the second.
1283 CoreHandle *handle = 0;

1284 if ( stream_.apiHandle == 0 ) {

1286 handle = new CoreHandle;

1288 catch ( std::bad_alloc& ) {

1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";

1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {

1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";

1297 stream_.apiHandle = (void *) handle;

1300 handle = (CoreHandle *) stream_.apiHandle;

1301 handle->iStream[mode] = firstStream;

1302 handle->nStreams[mode] = streamCount;

1303 handle->id[mode] = id;

1305 // Allocate necessary internal buffers.

1306 unsigned long bufferBytes;

1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

1308 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );

// NOTE(review): memset precedes the NULL check — on allocation failure this
// memset dereferences NULL before the check below fires; confirm upstream.
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );

1311 if ( stream_.userBuffer[mode] == NULL ) {

1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";

1316 // If possible, we will make use of the CoreAudio stream buffers as

1317 // "device buffers".  However, we can't do this if using multiple

1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {

1321 bool makeBuffer = true;

1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

1323 if ( mode == INPUT ) {

// For duplex on one device, reuse the output-side device buffer if large enough.
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;

1330 if ( makeBuffer ) {

1331 bufferBytes *= *bufferSize;

1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

1334 if ( stream_.deviceBuffer == NULL ) {

1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";

1341 stream_.sampleRate = sampleRate;

1342 stream_.device[mode] = device;

1343 stream_.state = STREAM_STOPPED;

1344 stream_.callbackInfo.object = (void *) this;

1346 // Setup the buffer conversion information structure.

1347 if ( stream_.doConvertBuffer[mode] ) {

1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );

1349 else setConvertInfo( mode, channelOffset );

1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )

1353 // Only one callback procedure per device.

1354 stream_.mode = DUPLEX;

1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );

1359 // deprecated in favor of AudioDeviceCreateIOProcID()

1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );

1362 if ( result != noErr ) {

1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";

1364 errorText_ = errorStream_.str();

1367 if ( stream_.mode == OUTPUT && mode == INPUT )

1368 stream_.mode = DUPLEX;

1370 stream_.mode = mode;

1373 // Setup the device property listener for over/underload.

1374 property.mSelector = kAudioDeviceProcessorOverload;

1375 property.mScope = kAudioObjectPropertyScopeGlobal;

1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );

// Common error-cleanup path: tear down the condition variable, handle and
// any buffers allocated above, then mark the stream closed.
// NOTE(review): the `error:` label and the `delete handle` / `return
// FAILURE;` lines are elided from this listing (numbering gaps 1377-1381,
// 1383, 1385-1386) — confirm against upstream.
1382 pthread_cond_destroy( &handle->condition );

1384 stream_.apiHandle = 0;

1387 for ( int i=0; i<2; i++ ) {

1388 if ( stream_.userBuffer[i] ) {

1389 free( stream_.userBuffer[i] );

1390 stream_.userBuffer[i] = 0;

1394 if ( stream_.deviceBuffer ) {

1395 free( stream_.deviceBuffer );

1396 stream_.deviceBuffer = 0;

1399 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: remove the xrun property listeners, stop the
// device(s) if running, destroy the I/O proc(s), free user/device buffers
// and the pthread condition variable, and reset the stream to CLOSED.
// Calling with no open stream only emits a WARNING.
//
// NOTE(review): this listing embeds upstream line numbers and elides some
// lines (closing braces, `return;`, `delete handle`) — the numbering gaps
// mark them; confirm against upstream RtAudio 4.x.
1403 void RtApiCore :: closeStream( void )

1405 if ( stream_.state == STREAM_CLOSED ) {

1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";

1407 error( RtAudioError::WARNING );

1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// Output side: listener removal + device stop + I/O proc destruction on id[0].
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,

1415 kAudioObjectPropertyScopeGlobal,

1416 kAudioObjectPropertyElementMaster };

1418 property.mSelector = kAudioDeviceProcessorOverload;

1419 property.mScope = kAudioObjectPropertyScopeGlobal;

1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {

1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";

1422 error( RtAudioError::WARNING );

1425 if ( stream_.state == STREAM_RUNNING )

1426 AudioDeviceStop( handle->id[0], callbackHandler );

1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );

1430 // deprecated in favor of AudioDeviceDestroyIOProcID()

1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );

// Input side: same teardown on id[1], but only when input runs on a
// different device than output (a same-device duplex shares one I/O proc).
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,

1438 kAudioObjectPropertyScopeGlobal,

1439 kAudioObjectPropertyElementMaster };

1441 property.mSelector = kAudioDeviceProcessorOverload;

1442 property.mScope = kAudioObjectPropertyScopeGlobal;

1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {

1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";

1445 error( RtAudioError::WARNING );

1448 if ( stream_.state == STREAM_RUNNING )

1449 AudioDeviceStop( handle->id[1], callbackHandler );

1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );

1453 // deprecated in favor of AudioDeviceDestroyIOProcID()

1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );

// Release the per-direction user buffers and the shared device buffer.
1458 for ( int i=0; i<2; i++ ) {

1459 if ( stream_.userBuffer[i] ) {

1460 free( stream_.userBuffer[i] );

1461 stream_.userBuffer[i] = 0;

1465 if ( stream_.deviceBuffer ) {

1466 free( stream_.deviceBuffer );

1467 stream_.deviceBuffer = 0;

1470 // Destroy pthread condition variable.

1471 pthread_cond_destroy( &handle->condition );

1473 stream_.apiHandle = 0;

1475 stream_.mode = UNINITIALIZED;

1476 stream_.state = STREAM_CLOSED;
\r
// Start the stream's CoreAudio I/O proc(s) via AudioDeviceStart and reset
// the drain bookkeeping.  Starting an already-running stream only emits a
// WARNING; a CoreAudio failure raises SYSTEM_ERROR.
//
// NOTE(review): upstream error paths jump to an `unlock:` label before the
// final result check; the label and `goto` lines are elided from this
// listing (numbering gaps) — confirm against upstream RtAudio 4.x.
1479 void RtApiCore :: startStream( void )

1482 if ( stream_.state == STREAM_RUNNING ) {

1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";

1484 error( RtAudioError::WARNING );

1488 OSStatus result = noErr;

1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

// Output (or duplex) side: start the callback on the output device id[0].
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1492 result = AudioDeviceStart( handle->id[0], callbackHandler );

1493 if ( result != noErr ) {

1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";

1495 errorText_ = errorStream_.str();

// Input side: started separately only when it runs on a different device.
1500 if ( stream_.mode == INPUT ||

1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1503 result = AudioDeviceStart( handle->id[1], callbackHandler );

1504 if ( result != noErr ) {

1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";

1506 errorText_ = errorStream_.str();

// Fresh start: no drain in progress.
1511 handle->drainCounter = 0;

1512 handle->internalDrain = false;

1513 stream_.state = STREAM_RUNNING;

1516 if ( result == noErr ) return;

1517 error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream: for output/duplex, first let the callback drain the
// output (drainCounter handshake + condition wait), then AudioDeviceStop
// each active device.  Stopping an already-stopped stream only emits a
// WARNING; a CoreAudio failure raises SYSTEM_ERROR.
//
// NOTE(review): listing elides closing braces and the upstream `unlock:`
// label lines (numbering gaps) — confirm against upstream RtAudio 4.x.
1520 void RtApiCore :: stopStream( void )

1523 if ( stream_.state == STREAM_STOPPED ) {

1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";

1525 error( RtAudioError::WARNING );

1529 OSStatus result = noErr;

1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain was requested yet: request one
// (drainCounter = 2 makes the callback write silence) and wait for the
// callback to signal completion via the condition variable.
1533 if ( handle->drainCounter == 0 ) {

1534 handle->drainCounter = 2;

1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

1538 result = AudioDeviceStop( handle->id[0], callbackHandler );

1539 if ( result != noErr ) {

1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";

1541 errorText_ = errorStream_.str();

// Input side stopped separately only when on a distinct device.
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1548 result = AudioDeviceStop( handle->id[1], callbackHandler );

1549 if ( result != noErr ) {

1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";

1551 errorText_ = errorStream_.str();

1556 stream_.state = STREAM_STOPPED;

1559 if ( result == noErr ) return;

1560 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream without draining pending output: set drainCounter to 2
// so the callback writes silence immediately, then (upstream) delegate to
// stopStream().  Aborting an already-stopped stream only emits a WARNING.
//
// NOTE(review): the trailing lines of this method (the stopStream() call
// and closing brace, upstream ~1575-1576) are elided from this listing —
// confirm against upstream RtAudio 4.x.
1563 void RtApiCore :: abortStream( void )

1566 if ( stream_.state == STREAM_STOPPED ) {

1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";

1568 error( RtAudioError::WARNING );

1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1573 handle->drainCounter = 2;
\r
1578 // This function will be called by a spawned thread when the user
\r
1579 // callback function signals that the stream should be stopped or
\r
1580 // aborted. It is better to handle it this way because the
\r
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1582 // function is called.
\r
1583 static void *coreStopStream( void *ptr )
\r
1585 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1586 RtApiCore *object = (RtApiCore *) info->object;
\r
1588 object->stopStream();
\r
1589 pthread_exit( NULL );
\r
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1593 const AudioBufferList *inBufferList,
\r
1594 const AudioBufferList *outBufferList )
\r
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1597 if ( stream_.state == STREAM_CLOSED ) {
\r
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1599 error( RtAudioError::WARNING );
\r
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1606 // Check if we were draining the stream and signal is finished.
\r
1607 if ( handle->drainCounter > 3 ) {
\r
1608 ThreadHandle threadId;
\r
1610 stream_.state = STREAM_STOPPING;
\r
1611 if ( handle->internalDrain == true )
\r
1612 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1613 else // external call to stopStream()
\r
1614 pthread_cond_signal( &handle->condition );
\r
1618 AudioDeviceID outputDevice = handle->id[0];
\r
1620 // Invoke user callback to get fresh output data UNLESS we are
\r
1621 // draining stream or duplex mode AND the input/output devices are
\r
1622 // different AND this function is called for the input device.
\r
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1625 double streamTime = getStreamTime();
\r
1626 RtAudioStreamStatus status = 0;
\r
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1629 handle->xrun[0] = false;
\r
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1632 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1633 handle->xrun[1] = false;
\r
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1637 stream_.bufferSize, streamTime, status, info->userData );
\r
1638 if ( cbReturnValue == 2 ) {
\r
1639 stream_.state = STREAM_STOPPING;
\r
1640 handle->drainCounter = 2;
\r
1644 else if ( cbReturnValue == 1 ) {
\r
1645 handle->drainCounter = 1;
\r
1646 handle->internalDrain = true;
\r
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1654 if ( handle->nStreams[0] == 1 ) {
\r
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1659 else { // fill multiple streams with zeros
\r
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1667 else if ( handle->nStreams[0] == 1 ) {
\r
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1672 else { // copy from user buffer
\r
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1674 stream_.userBuffer[0],
\r
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1678 else { // fill multiple streams
\r
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1680 if ( stream_.doConvertBuffer[0] ) {
\r
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1692 else { // fill multiple multi-channel streams with interleaved data
\r
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1694 Float32 *out, *in;
\r
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1697 UInt32 inChannels = stream_.nUserChannels[0];
\r
1698 if ( stream_.doConvertBuffer[0] ) {
\r
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1700 inChannels = stream_.nDeviceChannels[0];
\r
1703 if ( inInterleaved ) inOffset = 1;
\r
1704 else inOffset = stream_.bufferSize;
\r
1706 channelsLeft = inChannels;
\r
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1713 // Account for possible channel offset in first stream
\r
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1715 streamChannels -= stream_.channelOffset[0];
\r
1716 outJump = stream_.channelOffset[0];
\r
1720 // Account for possible unfilled channels at end of the last stream
\r
1721 if ( streamChannels > channelsLeft ) {
\r
1722 outJump = streamChannels - channelsLeft;
\r
1723 streamChannels = channelsLeft;
\r
1726 // Determine input buffer offsets and skips
\r
1727 if ( inInterleaved ) {
\r
1728 inJump = inChannels;
\r
1729 in += inChannels - channelsLeft;
\r
1733 in += (inChannels - channelsLeft) * inOffset;
\r
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1738 *out++ = in[j*inOffset];
\r
1743 channelsLeft -= streamChannels;
\r
1749 // Don't bother draining input
\r
1750 if ( handle->drainCounter ) {
\r
1751 handle->drainCounter++;
\r
1755 AudioDeviceID inputDevice;
\r
1756 inputDevice = handle->id[1];
\r
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1759 if ( handle->nStreams[1] == 1 ) {
\r
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1761 convertBuffer( stream_.userBuffer[1],
\r
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1763 stream_.convertInfo[1] );
\r
1765 else { // copy to user buffer
\r
1766 memcpy( stream_.userBuffer[1],
\r
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1771 else { // read from multiple streams
\r
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1782 else { // read from multiple multi-channel streams
\r
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1784 Float32 *out, *in;
\r
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1787 UInt32 outChannels = stream_.nUserChannels[1];
\r
1788 if ( stream_.doConvertBuffer[1] ) {
\r
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1790 outChannels = stream_.nDeviceChannels[1];
\r
1793 if ( outInterleaved ) outOffset = 1;
\r
1794 else outOffset = stream_.bufferSize;
\r
1796 channelsLeft = outChannels;
\r
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1803 // Account for possible channel offset in first stream
\r
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1805 streamChannels -= stream_.channelOffset[1];
\r
1806 inJump = stream_.channelOffset[1];
\r
1810 // Account for possible unread channels at end of the last stream
\r
1811 if ( streamChannels > channelsLeft ) {
\r
1812 inJump = streamChannels - channelsLeft;
\r
1813 streamChannels = channelsLeft;
\r
1816 // Determine output buffer offsets and skips
\r
1817 if ( outInterleaved ) {
\r
1818 outJump = outChannels;
\r
1819 out += outChannels - channelsLeft;
\r
1823 out += (outChannels - channelsLeft) * outOffset;
\r
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1828 out[j*outOffset] = *in++;
\r
1833 channelsLeft -= streamChannels;
\r
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1838 convertBuffer( stream_.userBuffer[1],
\r
1839 stream_.deviceBuffer,
\r
1840 stream_.convertInfo[1] );
\r
1846 //MUTEX_UNLOCK( &stream_.mutex );
\r
1848 RtApi::tickStreamTime();
\r
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1856 case kAudioHardwareNotRunningError:
\r
1857 return "kAudioHardwareNotRunningError";
\r
1859 case kAudioHardwareUnspecifiedError:
\r
1860 return "kAudioHardwareUnspecifiedError";
\r
1862 case kAudioHardwareUnknownPropertyError:
\r
1863 return "kAudioHardwareUnknownPropertyError";
\r
1865 case kAudioHardwareBadPropertySizeError:
\r
1866 return "kAudioHardwareBadPropertySizeError";
\r
1868 case kAudioHardwareIllegalOperationError:
\r
1869 return "kAudioHardwareIllegalOperationError";
\r
1871 case kAudioHardwareBadObjectError:
\r
1872 return "kAudioHardwareBadObjectError";
\r
1874 case kAudioHardwareBadDeviceError:
\r
1875 return "kAudioHardwareBadDeviceError";
\r
1877 case kAudioHardwareBadStreamError:
\r
1878 return "kAudioHardwareBadStreamError";
\r
1880 case kAudioHardwareUnsupportedOperationError:
\r
1881 return "kAudioHardwareUnsupportedOperationError";
\r
1883 case kAudioDeviceUnsupportedFormatError:
\r
1884 return "kAudioDeviceUnsupportedFormatError";
\r
1886 case kAudioDevicePermissionsError:
\r
1887 return "kAudioDevicePermissionsError";
\r
1890 return "CoreAudio unknown error";
\r
1894 //******************** End of __MACOSX_CORE__ *********************//
\r
1897 #if defined(__UNIX_JACK__)
\r
1899 // JACK is a low-latency audio server, originally written for the
\r
1900 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1901 // connect a number of different applications to an audio device, as
\r
1902 // well as allowing them to share audio between themselves.
\r
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1905 // have ports connected to the server. The JACK server is typically
\r
1906 // started in a terminal as follows:
\r
1908 // .jackd -d alsa -d hw:0
\r
1910 // or through an interface program such as qjackctl. Many of the
\r
1911 // parameters normally set for a stream are fixed by the JACK server
\r
1912 // and can be specified when the JACK server is started. In
\r
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1918 // frames, and number of buffers = 4. Once the server is running, it
\r
1919 // is not possible to override these values. If the values are not
\r
1920 // specified in the command-line, the JACK server uses default values.
\r
1922 // The JACK server does not have to be running when an instance of
\r
1923 // RtApiJack is created, though the function getDeviceCount() will
\r
1924 // report 0 devices found until JACK has been started. When no
\r
1925 // devices are available (i.e., the JACK server is not running), a
\r
1926 // stream cannot be opened.
\r
1928 #include <jack/jack.h>
\r
1929 #include <unistd.h>
\r
1932 // A structure to hold various information related to the Jack API
\r
1933 // implementation.
\r
1934 struct JackHandle {
\r
1935 jack_client_t *client;
\r
1936 jack_port_t **ports[2];
\r
1937 std::string deviceName[2];
\r
1939 pthread_cond_t condition;
\r
1940 int drainCounter; // Tracks callback counts when draining
\r
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Error-callback sink installed into JACK when debugging is disabled:
// deliberately discards the message so the JACK library stays quiet.
static void jackSilentError( const char * ) {}
\r
1949 RtApiJack :: RtApiJack()
\r
1951 // Nothing to do here.
\r
1952 #if !defined(__RTAUDIO_DEBUG__)
\r
1953 // Turn off Jack's internal error reporting.
\r
1954 jack_set_error_function( &jackSilentError );
\r
1958 RtApiJack :: ~RtApiJack()
\r
1960 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1963 unsigned int RtApiJack :: getDeviceCount( void )
\r
1965 // See if we can become a jack client.
\r
1966 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1967 jack_status_t *status = NULL;
\r
1968 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1969 if ( client == 0 ) return 0;
\r
1971 const char **ports;
\r
1972 std::string port, previousPort;
\r
1973 unsigned int nChannels = 0, nDevices = 0;
\r
1974 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1976 // Parse the port names up to the first colon (:).
\r
1977 size_t iColon = 0;
\r
1979 port = (char *) ports[ nChannels ];
\r
1980 iColon = port.find(":");
\r
1981 if ( iColon != std::string::npos ) {
\r
1982 port = port.substr( 0, iColon + 1 );
\r
1983 if ( port != previousPort ) {
\r
1985 previousPort = port;
\r
1988 } while ( ports[++nChannels] );
\r
1992 jack_client_close( client );
\r
1996 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1998 RtAudio::DeviceInfo info;
\r
1999 info.probed = false;
\r
2001 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
2002 jack_status_t *status = NULL;
\r
2003 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2004 if ( client == 0 ) {
\r
2005 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2006 error( RtAudioError::WARNING );
\r
2010 const char **ports;
\r
2011 std::string port, previousPort;
\r
2012 unsigned int nPorts = 0, nDevices = 0;
\r
2013 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2015 // Parse the port names up to the first colon (:).
\r
2016 size_t iColon = 0;
\r
2018 port = (char *) ports[ nPorts ];
\r
2019 iColon = port.find(":");
\r
2020 if ( iColon != std::string::npos ) {
\r
2021 port = port.substr( 0, iColon );
\r
2022 if ( port != previousPort ) {
\r
2023 if ( nDevices == device ) info.name = port;
\r
2025 previousPort = port;
\r
2028 } while ( ports[++nPorts] );
\r
2032 if ( device >= nDevices ) {
\r
2033 jack_client_close( client );
\r
2034 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2035 error( RtAudioError::INVALID_USE );
\r
2039 // Get the current jack server sample rate.
\r
2040 info.sampleRates.clear();
\r
2042 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2043 info.sampleRates.push_back( info.preferredSampleRate );
\r
2045 // Count the available ports containing the client name as device
\r
2046 // channels. Jack "input ports" equal RtAudio output channels.
\r
2047 unsigned int nChannels = 0;
\r
2048 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2050 while ( ports[ nChannels ] ) nChannels++;
\r
2052 info.outputChannels = nChannels;
\r
2055 // Jack "output ports" equal RtAudio input channels.
\r
2057 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2059 while ( ports[ nChannels ] ) nChannels++;
\r
2061 info.inputChannels = nChannels;
\r
2064 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2065 jack_client_close(client);
\r
2066 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2067 error( RtAudioError::WARNING );
\r
2071 // If device opens for both playback and capture, we determine the channels.
\r
2072 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2073 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2075 // Jack always uses 32-bit floats.
\r
2076 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2078 // Jack doesn't provide default devices so we'll use the first available one.
\r
2079 if ( device == 0 && info.outputChannels > 0 )
\r
2080 info.isDefaultOutput = true;
\r
2081 if ( device == 0 && info.inputChannels > 0 )
\r
2082 info.isDefaultInput = true;
\r
2084 jack_client_close(client);
\r
2085 info.probed = true;
\r
2089 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2091 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2093 RtApiJack *object = (RtApiJack *) info->object;
\r
2094 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2099 // This function will be called by a spawned thread when the Jack
\r
2100 // server signals that it is shutting down. It is necessary to handle
\r
2101 // it this way because the jackShutdown() function must return before
\r
2102 // the jack_deactivate() function (in closeStream()) will return.
\r
2103 static void *jackCloseStream( void *ptr )
\r
2105 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2106 RtApiJack *object = (RtApiJack *) info->object;
\r
2108 object->closeStream();
\r
2110 pthread_exit( NULL );
\r
2112 static void jackShutdown( void *infoPointer )
\r
2114 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2115 RtApiJack *object = (RtApiJack *) info->object;
\r
2117 // Check current stream state. If stopped, then we'll assume this
\r
2118 // was called as a result of a call to RtApiJack::stopStream (the
\r
2119 // deactivation of a client handle causes this function to be called).
\r
2120 // If not, we'll assume the Jack server is shutting down or some
\r
2121 // other problem occurred and we should close the stream.
\r
2122 if ( object->isStreamRunning() == false ) return;
\r
2124 ThreadHandle threadId;
\r
2125 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2126 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2129 static int jackXrun( void *infoPointer )
\r
2131 JackHandle *handle = (JackHandle *) infoPointer;
\r
2133 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2134 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2139 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2140 unsigned int firstChannel, unsigned int sampleRate,
\r
2141 RtAudioFormat format, unsigned int *bufferSize,
\r
2142 RtAudio::StreamOptions *options )
\r
2144 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2146 // Look for jack server and try to become a client (only do once per stream).
\r
2147 jack_client_t *client = 0;
\r
2148 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2149 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2150 jack_status_t *status = NULL;
\r
2151 if ( options && !options->streamName.empty() )
\r
2152 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2154 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2155 if ( client == 0 ) {
\r
2156 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2157 error( RtAudioError::WARNING );
\r
2162 // The handle must have been created on an earlier pass.
\r
2163 client = handle->client;
\r
2166 const char **ports;
\r
2167 std::string port, previousPort, deviceName;
\r
2168 unsigned int nPorts = 0, nDevices = 0;
\r
2169 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2171 // Parse the port names up to the first colon (:).
\r
2172 size_t iColon = 0;
\r
2174 port = (char *) ports[ nPorts ];
\r
2175 iColon = port.find(":");
\r
2176 if ( iColon != std::string::npos ) {
\r
2177 port = port.substr( 0, iColon );
\r
2178 if ( port != previousPort ) {
\r
2179 if ( nDevices == device ) deviceName = port;
\r
2181 previousPort = port;
\r
2184 } while ( ports[++nPorts] );
\r
2188 if ( device >= nDevices ) {
\r
2189 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2193 // Count the available ports containing the client name as device
\r
2194 // channels. Jack "input ports" equal RtAudio output channels.
\r
2195 unsigned int nChannels = 0;
\r
2196 unsigned long flag = JackPortIsInput;
\r
2197 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2198 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2200 while ( ports[ nChannels ] ) nChannels++;
\r
2204 // Compare the jack ports for specified client to the requested number of channels.
\r
2205 if ( nChannels < (channels + firstChannel) ) {
\r
2206 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2207 errorText_ = errorStream_.str();
\r
2211 // Check the jack server sample rate.
\r
2212 unsigned int jackRate = jack_get_sample_rate( client );
\r
2213 if ( sampleRate != jackRate ) {
\r
2214 jack_client_close( client );
\r
2215 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2216 errorText_ = errorStream_.str();
\r
2219 stream_.sampleRate = jackRate;
\r
2221 // Get the latency of the JACK port.
\r
2222 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2223 if ( ports[ firstChannel ] ) {
\r
2224 // Added by Ge Wang
\r
2225 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2226 // the range (usually the min and max are equal)
\r
2227 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2228 // get the latency range
\r
2229 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2230 // be optimistic, use the min!
\r
2231 stream_.latency[mode] = latrange.min;
\r
2232 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2236 // The jack server always uses 32-bit floating-point data.
\r
2237 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2238 stream_.userFormat = format;
\r
2240 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2241 else stream_.userInterleaved = true;
\r
2243 // Jack always uses non-interleaved buffers.
\r
2244 stream_.deviceInterleaved[mode] = false;
\r
2246 // Jack always provides host byte-ordered data.
\r
2247 stream_.doByteSwap[mode] = false;
\r
2249 // Get the buffer size. The buffer size and number of buffers
\r
2250 // (periods) is set when the jack server is started.
\r
2251 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2252 *bufferSize = stream_.bufferSize;
\r
2254 stream_.nDeviceChannels[mode] = channels;
\r
2255 stream_.nUserChannels[mode] = channels;
\r
2257 // Set flags for buffer conversion.
\r
2258 stream_.doConvertBuffer[mode] = false;
\r
2259 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2260 stream_.doConvertBuffer[mode] = true;
\r
2261 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2262 stream_.nUserChannels[mode] > 1 )
\r
2263 stream_.doConvertBuffer[mode] = true;
\r
2265 // Allocate our JackHandle structure for the stream.
\r
2266 if ( handle == 0 ) {
\r
2268 handle = new JackHandle;
\r
2270 catch ( std::bad_alloc& ) {
\r
2271 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2275 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2276 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2279 stream_.apiHandle = (void *) handle;
\r
2280 handle->client = client;
\r
2282 handle->deviceName[mode] = deviceName;
\r
2284 // Allocate necessary internal buffers.
\r
2285 unsigned long bufferBytes;
\r
2286 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2287 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2288 if ( stream_.userBuffer[mode] == NULL ) {
\r
2289 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2293 if ( stream_.doConvertBuffer[mode] ) {
\r
2295 bool makeBuffer = true;
\r
2296 if ( mode == OUTPUT )
\r
2297 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2298 else { // mode == INPUT
\r
2299 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2300 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2301 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2302 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2306 if ( makeBuffer ) {
\r
2307 bufferBytes *= *bufferSize;
\r
2308 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2309 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2310 if ( stream_.deviceBuffer == NULL ) {
\r
2311 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2317 // Allocate memory for the Jack ports (channels) identifiers.
\r
2318 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2319 if ( handle->ports[mode] == NULL ) {
\r
2320 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2324 stream_.device[mode] = device;
\r
2325 stream_.channelOffset[mode] = firstChannel;
\r
2326 stream_.state = STREAM_STOPPED;
\r
2327 stream_.callbackInfo.object = (void *) this;
\r
2329 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2330 // We had already set up the stream for output.
\r
2331 stream_.mode = DUPLEX;
\r
2333 stream_.mode = mode;
\r
2334 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2335 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2336 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2339 // Register our ports.
\r
2341 if ( mode == OUTPUT ) {
\r
2342 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2343 snprintf( label, 64, "outport %d", i );
\r
2344 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2345 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2349 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2350 snprintf( label, 64, "inport %d", i );
\r
2351 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2352 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2356 // Setup the buffer conversion information structure. We don't use
\r
2357 // buffers to do channel offsets, so we override that parameter
\r
2359 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2365 pthread_cond_destroy( &handle->condition );
\r
2366 jack_client_close( handle->client );
\r
2368 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2369 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2372 stream_.apiHandle = 0;
\r
2375 for ( int i=0; i<2; i++ ) {
\r
2376 if ( stream_.userBuffer[i] ) {
\r
2377 free( stream_.userBuffer[i] );
\r
2378 stream_.userBuffer[i] = 0;
\r
2382 if ( stream_.deviceBuffer ) {
\r
2383 free( stream_.deviceBuffer );
\r
2384 stream_.deviceBuffer = 0;
\r
2390 void RtApiJack :: closeStream( void )
\r
2392 if ( stream_.state == STREAM_CLOSED ) {
\r
2393 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2394 error( RtAudioError::WARNING );
\r
2398 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2401 if ( stream_.state == STREAM_RUNNING )
\r
2402 jack_deactivate( handle->client );
\r
2404 jack_client_close( handle->client );
\r
2408 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2409 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2410 pthread_cond_destroy( &handle->condition );
\r
2412 stream_.apiHandle = 0;
\r
2415 for ( int i=0; i<2; i++ ) {
\r
2416 if ( stream_.userBuffer[i] ) {
\r
2417 free( stream_.userBuffer[i] );
\r
2418 stream_.userBuffer[i] = 0;
\r
2422 if ( stream_.deviceBuffer ) {
\r
2423 free( stream_.deviceBuffer );
\r
2424 stream_.deviceBuffer = 0;
\r
2427 stream_.mode = UNINITIALIZED;
\r
2428 stream_.state = STREAM_CLOSED;
\r
2431 void RtApiJack :: startStream( void )
\r
2434 if ( stream_.state == STREAM_RUNNING ) {
\r
2435 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2436 error( RtAudioError::WARNING );
\r
2440 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2441 int result = jack_activate( handle->client );
\r
2443 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2447 const char **ports;
\r
2449 // Get the list of available ports.
\r
2450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2452 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2453 if ( ports == NULL) {
\r
2454 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2458 // Now make the port connections. Since RtAudio wasn't designed to
\r
2459 // allow the user to select particular channels of a device, we'll
\r
2460 // just open the first "nChannels" ports with offset.
\r
2461 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2463 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2464 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2467 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2474 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2476 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2477 if ( ports == NULL) {
\r
2478 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2482 // Now make the port connections. See note above.
\r
2483 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2485 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2486 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2489 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2496 handle->drainCounter = 0;
\r
2497 handle->internalDrain = false;
\r
2498 stream_.state = STREAM_RUNNING;
\r
2501 if ( result == 0 ) return;
\r
2502 error( RtAudioError::SYSTEM_ERROR );
\r
2505 void RtApiJack :: stopStream( void )
\r
2508 if ( stream_.state == STREAM_STOPPED ) {
\r
2509 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2510 error( RtAudioError::WARNING );
\r
2514 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2515 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2517 if ( handle->drainCounter == 0 ) {
\r
2518 handle->drainCounter = 2;
\r
2519 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2523 jack_deactivate( handle->client );
\r
2524 stream_.state = STREAM_STOPPED;
\r
2527 void RtApiJack :: abortStream( void )
\r
2530 if ( stream_.state == STREAM_STOPPED ) {
\r
2531 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2532 error( RtAudioError::WARNING );
\r
2536 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2537 handle->drainCounter = 2;
\r
2542 // This function will be called by a spawned thread when the user
\r
2543 // callback function signals that the stream should be stopped or
\r
2544 // aborted. It is necessary to handle it this way because the
\r
2545 // callbackEvent() function must return before the jack_deactivate()
\r
2546 // function will return.
\r
2547 static void *jackStopStream( void *ptr )
\r
2549 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2550 RtApiJack *object = (RtApiJack *) info->object;
\r
2552 object->stopStream();
\r
2553 pthread_exit( NULL );
\r
2556 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2558 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2559 if ( stream_.state == STREAM_CLOSED ) {
\r
2560 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2561 error( RtAudioError::WARNING );
\r
2564 if ( stream_.bufferSize != nframes ) {
\r
2565 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2566 error( RtAudioError::WARNING );
\r
2570 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2571 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2573 // Check if we were draining the stream and signal is finished.
\r
2574 if ( handle->drainCounter > 3 ) {
\r
2575 ThreadHandle threadId;
\r
2577 stream_.state = STREAM_STOPPING;
\r
2578 if ( handle->internalDrain == true )
\r
2579 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2581 pthread_cond_signal( &handle->condition );
\r
2585 // Invoke user callback first, to get fresh output data.
\r
2586 if ( handle->drainCounter == 0 ) {
\r
2587 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2588 double streamTime = getStreamTime();
\r
2589 RtAudioStreamStatus status = 0;
\r
2590 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2591 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2592 handle->xrun[0] = false;
\r
2594 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2595 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2596 handle->xrun[1] = false;
\r
2598 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2599 stream_.bufferSize, streamTime, status, info->userData );
\r
2600 if ( cbReturnValue == 2 ) {
\r
2601 stream_.state = STREAM_STOPPING;
\r
2602 handle->drainCounter = 2;
\r
2604 pthread_create( &id, NULL, jackStopStream, info );
\r
2607 else if ( cbReturnValue == 1 ) {
\r
2608 handle->drainCounter = 1;
\r
2609 handle->internalDrain = true;
\r
2613 jack_default_audio_sample_t *jackbuffer;
\r
2614 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2615 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2617 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2619 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2620 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2621 memset( jackbuffer, 0, bufferBytes );
\r
2625 else if ( stream_.doConvertBuffer[0] ) {
\r
2627 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2629 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2630 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2631 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2634 else { // no buffer conversion
\r
2635 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2636 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2637 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2642 // Don't bother draining input
\r
2643 if ( handle->drainCounter ) {
\r
2644 handle->drainCounter++;
\r
2648 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2650 if ( stream_.doConvertBuffer[1] ) {
\r
2651 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2652 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2653 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2655 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2657 else { // no buffer conversion
\r
2658 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2659 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2660 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2666 RtApi::tickStreamTime();
\r
//******************** End of __UNIX_JACK__ *********************//
#endif

#if defined(__WINDOWS_ASIO__) // ASIO API on Windows

// The ASIO API is designed around a callback scheme, so this
// implementation is similar to that used for OS-X CoreAudio and Linux
// Jack.  The primary constraint with ASIO is that it only allows
// access to a single driver at a time.  Thus, it is not possible to
// have more than one simultaneous RtAudio stream.
//
// This implementation also requires a number of external ASIO files
// and a few global variables.  The ASIO callback scheme does not
// allow for the passing of user data, so we must create a global
// pointer to our callbackInfo structure.
//
// On unix systems, we make use of a pthread condition variable.
// Since there is no equivalent in Windows, I hacked something based
// on information found in
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2690 #include "asiosys.h"
\r
2692 #include "iasiothiscallresolver.h"
\r
2693 #include "asiodrivers.h"
\r
2696 static AsioDrivers drivers;
\r
2697 static ASIOCallbacks asioCallbacks;
\r
2698 static ASIODriverInfo driverInfo;
\r
2699 static CallbackInfo *asioCallbackInfo;
\r
2700 static bool asioXRun;
\r
2702 struct AsioHandle {
\r
2703 int drainCounter; // Tracks callback counts when draining
\r
2704 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2705 ASIOBufferInfo *bufferInfos;
\r
2709 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2712 // Function declarations (definitions at end of section)
\r
2713 static const char* getAsioErrorString( ASIOError result );
\r
2714 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2715 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2717 RtApiAsio :: RtApiAsio()
\r
2719 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2720 // CoInitialize beforehand, but it must be for appartment threading
\r
2721 // (in which case, CoInitilialize will return S_FALSE here).
\r
2722 coInitialized_ = false;
\r
2723 HRESULT hr = CoInitialize( NULL );
\r
2724 if ( FAILED(hr) ) {
\r
2725 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2726 error( RtAudioError::WARNING );
\r
2728 coInitialized_ = true;
\r
2730 drivers.removeCurrentDriver();
\r
2731 driverInfo.asioVersion = 2;
\r
2733 // See note in DirectSound implementation about GetDesktopWindow().
\r
2734 driverInfo.sysRef = GetForegroundWindow();
\r
2737 RtApiAsio :: ~RtApiAsio()
\r
2739 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2740 if ( coInitialized_ ) CoUninitialize();
\r
2743 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2745 return (unsigned int) drivers.asioGetNumDev();
\r
2748 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2750 RtAudio::DeviceInfo info;
\r
2751 info.probed = false;
\r
2754 unsigned int nDevices = getDeviceCount();
\r
2755 if ( nDevices == 0 ) {
\r
2756 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2757 error( RtAudioError::INVALID_USE );
\r
2761 if ( device >= nDevices ) {
\r
2762 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2763 error( RtAudioError::INVALID_USE );
\r
2767 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2768 if ( stream_.state != STREAM_CLOSED ) {
\r
2769 if ( device >= devices_.size() ) {
\r
2770 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2771 error( RtAudioError::WARNING );
\r
2774 return devices_[ device ];
\r
2777 char driverName[32];
\r
2778 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2779 if ( result != ASE_OK ) {
\r
2780 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2781 errorText_ = errorStream_.str();
\r
2782 error( RtAudioError::WARNING );
\r
2786 info.name = driverName;
\r
2788 if ( !drivers.loadDriver( driverName ) ) {
\r
2789 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2790 errorText_ = errorStream_.str();
\r
2791 error( RtAudioError::WARNING );
\r
2795 result = ASIOInit( &driverInfo );
\r
2796 if ( result != ASE_OK ) {
\r
2797 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2798 errorText_ = errorStream_.str();
\r
2799 error( RtAudioError::WARNING );
\r
2803 // Determine the device channel information.
\r
2804 long inputChannels, outputChannels;
\r
2805 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2806 if ( result != ASE_OK ) {
\r
2807 drivers.removeCurrentDriver();
\r
2808 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2809 errorText_ = errorStream_.str();
\r
2810 error( RtAudioError::WARNING );
\r
2814 info.outputChannels = outputChannels;
\r
2815 info.inputChannels = inputChannels;
\r
2816 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2817 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2819 // Determine the supported sample rates.
\r
2820 info.sampleRates.clear();
\r
2821 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2822 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2823 if ( result == ASE_OK ) {
\r
2824 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2826 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2827 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2831 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2832 ASIOChannelInfo channelInfo;
\r
2833 channelInfo.channel = 0;
\r
2834 channelInfo.isInput = true;
\r
2835 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2836 result = ASIOGetChannelInfo( &channelInfo );
\r
2837 if ( result != ASE_OK ) {
\r
2838 drivers.removeCurrentDriver();
\r
2839 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2840 errorText_ = errorStream_.str();
\r
2841 error( RtAudioError::WARNING );
\r
2845 info.nativeFormats = 0;
\r
2846 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2847 info.nativeFormats |= RTAUDIO_SINT16;
\r
2848 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2849 info.nativeFormats |= RTAUDIO_SINT32;
\r
2850 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2851 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2852 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2853 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2854 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2855 info.nativeFormats |= RTAUDIO_SINT24;
\r
2857 if ( info.outputChannels > 0 )
\r
2858 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2859 if ( info.inputChannels > 0 )
\r
2860 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2862 info.probed = true;
\r
2863 drivers.removeCurrentDriver();
\r
2867 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2869 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2870 object->callbackEvent( index );
\r
2873 void RtApiAsio :: saveDeviceInfo( void )
\r
2877 unsigned int nDevices = getDeviceCount();
\r
2878 devices_.resize( nDevices );
\r
2879 for ( unsigned int i=0; i<nDevices; i++ )
\r
2880 devices_[i] = getDeviceInfo( i );
\r
2883 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2884 unsigned int firstChannel, unsigned int sampleRate,
\r
2885 RtAudioFormat format, unsigned int *bufferSize,
\r
2886 RtAudio::StreamOptions *options )
\r
2887 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2889 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2891 // For ASIO, a duplex stream MUST use the same driver.
\r
2892 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2893 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2897 char driverName[32];
\r
2898 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2899 if ( result != ASE_OK ) {
\r
2900 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2901 errorText_ = errorStream_.str();
\r
2905 // Only load the driver once for duplex stream.
\r
2906 if ( !isDuplexInput ) {
\r
2907 // The getDeviceInfo() function will not work when a stream is open
\r
2908 // because ASIO does not allow multiple devices to run at the same
\r
2909 // time. Thus, we'll probe the system before opening a stream and
\r
2910 // save the results for use by getDeviceInfo().
\r
2911 this->saveDeviceInfo();
\r
2913 if ( !drivers.loadDriver( driverName ) ) {
\r
2914 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2915 errorText_ = errorStream_.str();
\r
2919 result = ASIOInit( &driverInfo );
\r
2920 if ( result != ASE_OK ) {
\r
2921 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2922 errorText_ = errorStream_.str();
\r
2927 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2928 bool buffersAllocated = false;
\r
2929 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2930 unsigned int nChannels;
\r
2933 // Check the device channel count.
\r
2934 long inputChannels, outputChannels;
\r
2935 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2936 if ( result != ASE_OK ) {
\r
2937 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2938 errorText_ = errorStream_.str();
\r
2942 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2943 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2944 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2945 errorText_ = errorStream_.str();
\r
2948 stream_.nDeviceChannels[mode] = channels;
\r
2949 stream_.nUserChannels[mode] = channels;
\r
2950 stream_.channelOffset[mode] = firstChannel;
\r
2952 // Verify the sample rate is supported.
\r
2953 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2954 if ( result != ASE_OK ) {
\r
2955 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2956 errorText_ = errorStream_.str();
\r
2960 // Get the current sample rate
\r
2961 ASIOSampleRate currentRate;
\r
2962 result = ASIOGetSampleRate( ¤tRate );
\r
2963 if ( result != ASE_OK ) {
\r
2964 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2965 errorText_ = errorStream_.str();
\r
2969 // Set the sample rate only if necessary
\r
2970 if ( currentRate != sampleRate ) {
\r
2971 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2972 if ( result != ASE_OK ) {
\r
2973 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2974 errorText_ = errorStream_.str();
\r
2979 // Determine the driver data type.
\r
2980 ASIOChannelInfo channelInfo;
\r
2981 channelInfo.channel = 0;
\r
2982 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2983 else channelInfo.isInput = true;
\r
2984 result = ASIOGetChannelInfo( &channelInfo );
\r
2985 if ( result != ASE_OK ) {
\r
2986 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2987 errorText_ = errorStream_.str();
\r
2991 // Assuming WINDOWS host is always little-endian.
\r
2992 stream_.doByteSwap[mode] = false;
\r
2993 stream_.userFormat = format;
\r
2994 stream_.deviceFormat[mode] = 0;
\r
2995 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2996 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2997 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2999 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
3000 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
3001 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
3003 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3004 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3005 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3007 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3008 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3009 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3011 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3012 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3013 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3016 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3017 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3018 errorText_ = errorStream_.str();
\r
3022 // Set the buffer size. For a duplex stream, this will end up
\r
3023 // setting the buffer size based on the input constraints, which
\r
3025 long minSize, maxSize, preferSize, granularity;
\r
3026 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3027 if ( result != ASE_OK ) {
\r
3028 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3029 errorText_ = errorStream_.str();
\r
3033 if ( isDuplexInput ) {
\r
3034 // When this is the duplex input (output was opened before), then we have to use the same
\r
3035 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3036 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3037 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3038 // to the "bufferSize" param as usual to set up processing buffers.
\r
3040 *bufferSize = stream_.bufferSize;
\r
3043 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3044 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3045 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3046 else if ( granularity == -1 ) {
\r
3047 // Make sure bufferSize is a power of two.
\r
3048 int log2_of_min_size = 0;
\r
3049 int log2_of_max_size = 0;
\r
3051 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3052 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3053 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3056 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3057 int min_delta_num = log2_of_min_size;
\r
3059 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3060 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3061 if (current_delta < min_delta) {
\r
3062 min_delta = current_delta;
\r
3063 min_delta_num = i;
\r
3067 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3068 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3069 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3071 else if ( granularity != 0 ) {
\r
3072 // Set to an even multiple of granularity, rounding up.
\r
3073 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3078 // we don't use it anymore, see above!
\r
3079 // Just left it here for the case...
\r
3080 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3081 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3086 stream_.bufferSize = *bufferSize;
\r
3087 stream_.nBuffers = 2;
\r
3089 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3090 else stream_.userInterleaved = true;
\r
3092 // ASIO always uses non-interleaved buffers.
\r
3093 stream_.deviceInterleaved[mode] = false;
\r
3095 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3096 if ( handle == 0 ) {
\r
3098 handle = new AsioHandle;
\r
3100 catch ( std::bad_alloc& ) {
\r
3101 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3104 handle->bufferInfos = 0;
\r
3106 // Create a manual-reset event.
\r
3107 handle->condition = CreateEvent( NULL, // no security
\r
3108 TRUE, // manual-reset
\r
3109 FALSE, // non-signaled initially
\r
3110 NULL ); // unnamed
\r
3111 stream_.apiHandle = (void *) handle;
\r
3114 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3115 // and output separately, we'll have to dispose of previously
\r
3116 // created output buffers for a duplex stream.
\r
3117 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3118 ASIODisposeBuffers();
\r
3119 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3122 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3124 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3125 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3126 if ( handle->bufferInfos == NULL ) {
\r
3127 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3128 errorText_ = errorStream_.str();
\r
3132 ASIOBufferInfo *infos;
\r
3133 infos = handle->bufferInfos;
\r
3134 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3135 infos->isInput = ASIOFalse;
\r
3136 infos->channelNum = i + stream_.channelOffset[0];
\r
3137 infos->buffers[0] = infos->buffers[1] = 0;
\r
3139 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3140 infos->isInput = ASIOTrue;
\r
3141 infos->channelNum = i + stream_.channelOffset[1];
\r
3142 infos->buffers[0] = infos->buffers[1] = 0;
\r
3145 // prepare for callbacks
\r
3146 stream_.sampleRate = sampleRate;
\r
3147 stream_.device[mode] = device;
\r
3148 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3150 // store this class instance before registering callbacks, that are going to use it
\r
3151 asioCallbackInfo = &stream_.callbackInfo;
\r
3152 stream_.callbackInfo.object = (void *) this;
\r
3154 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3155 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3156 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3157 asioCallbacks.asioMessage = &asioMessages;
\r
3158 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3159 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3160 if ( result != ASE_OK ) {
\r
3161 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3162 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3163 // in that case, let's be naïve and try that instead
\r
3164 *bufferSize = preferSize;
\r
3165 stream_.bufferSize = *bufferSize;
\r
3166 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3169 if ( result != ASE_OK ) {
\r
3170 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3171 errorText_ = errorStream_.str();
\r
3174 buffersAllocated = true;
\r
3175 stream_.state = STREAM_STOPPED;
\r
3177 // Set flags for buffer conversion.
\r
3178 stream_.doConvertBuffer[mode] = false;
\r
3179 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3180 stream_.doConvertBuffer[mode] = true;
\r
3181 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3182 stream_.nUserChannels[mode] > 1 )
\r
3183 stream_.doConvertBuffer[mode] = true;
\r
3185 // Allocate necessary internal buffers
\r
3186 unsigned long bufferBytes;
\r
3187 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3188 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3189 if ( stream_.userBuffer[mode] == NULL ) {
\r
3190 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3194 if ( stream_.doConvertBuffer[mode] ) {
\r
3196 bool makeBuffer = true;
\r
3197 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3198 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3199 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3200 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3203 if ( makeBuffer ) {
\r
3204 bufferBytes *= *bufferSize;
\r
3205 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3206 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3207 if ( stream_.deviceBuffer == NULL ) {
\r
3208 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3214 // Determine device latencies
\r
3215 long inputLatency, outputLatency;
\r
3216 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3217 if ( result != ASE_OK ) {
\r
3218 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3219 errorText_ = errorStream_.str();
\r
3220 error( RtAudioError::WARNING); // warn but don't fail
\r
3223 stream_.latency[0] = outputLatency;
\r
3224 stream_.latency[1] = inputLatency;
\r
3227 // Setup the buffer conversion information structure. We don't use
\r
3228 // buffers to do channel offsets, so we override that parameter
\r
3230 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3235 if ( !isDuplexInput ) {
\r
3236 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3237 // So we clean up for single channel only
\r
3239 if ( buffersAllocated )
\r
3240 ASIODisposeBuffers();
\r
3242 drivers.removeCurrentDriver();
\r
3245 CloseHandle( handle->condition );
\r
3246 if ( handle->bufferInfos )
\r
3247 free( handle->bufferInfos );
\r
3250 stream_.apiHandle = 0;
\r
3254 if ( stream_.userBuffer[mode] ) {
\r
3255 free( stream_.userBuffer[mode] );
\r
3256 stream_.userBuffer[mode] = 0;
\r
3259 if ( stream_.deviceBuffer ) {
\r
3260 free( stream_.deviceBuffer );
\r
3261 stream_.deviceBuffer = 0;
\r
3266 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3268 void RtApiAsio :: closeStream()
\r
3270 if ( stream_.state == STREAM_CLOSED ) {
\r
3271 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3272 error( RtAudioError::WARNING );
\r
3276 if ( stream_.state == STREAM_RUNNING ) {
\r
3277 stream_.state = STREAM_STOPPED;
\r
3280 ASIODisposeBuffers();
\r
3281 drivers.removeCurrentDriver();
\r
3283 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3285 CloseHandle( handle->condition );
\r
3286 if ( handle->bufferInfos )
\r
3287 free( handle->bufferInfos );
\r
3289 stream_.apiHandle = 0;
\r
3292 for ( int i=0; i<2; i++ ) {
\r
3293 if ( stream_.userBuffer[i] ) {
\r
3294 free( stream_.userBuffer[i] );
\r
3295 stream_.userBuffer[i] = 0;
\r
3299 if ( stream_.deviceBuffer ) {
\r
3300 free( stream_.deviceBuffer );
\r
3301 stream_.deviceBuffer = 0;
\r
3304 stream_.mode = UNINITIALIZED;
\r
3305 stream_.state = STREAM_CLOSED;
\r
// File-scope flag cleared on each startStream() (see unlock label there);
// presumably consumed by the ASIO message handler — TODO confirm against asioMessages().
bool stopThreadCalled = false;
\r
3310 void RtApiAsio :: startStream()
\r
3313 if ( stream_.state == STREAM_RUNNING ) {
\r
3314 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3315 error( RtAudioError::WARNING );
\r
3319 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3320 ASIOError result = ASIOStart();
\r
3321 if ( result != ASE_OK ) {
\r
3322 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3323 errorText_ = errorStream_.str();
\r
3327 handle->drainCounter = 0;
\r
3328 handle->internalDrain = false;
\r
3329 ResetEvent( handle->condition );
\r
3330 stream_.state = STREAM_RUNNING;
\r
3334 stopThreadCalled = false;
\r
3336 if ( result == ASE_OK ) return;
\r
3337 error( RtAudioError::SYSTEM_ERROR );
\r
3340 void RtApiAsio :: stopStream()
\r
3343 if ( stream_.state == STREAM_STOPPED ) {
\r
3344 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3345 error( RtAudioError::WARNING );
\r
3349 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3350 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3351 if ( handle->drainCounter == 0 ) {
\r
3352 handle->drainCounter = 2;
\r
3353 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3357 stream_.state = STREAM_STOPPED;
\r
3359 ASIOError result = ASIOStop();
\r
3360 if ( result != ASE_OK ) {
\r
3361 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3362 errorText_ = errorStream_.str();
\r
3365 if ( result == ASE_OK ) return;
\r
3366 error( RtAudioError::SYSTEM_ERROR );
\r
3369 void RtApiAsio :: abortStream()
\r
3372 if ( stream_.state == STREAM_STOPPED ) {
\r
3373 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3374 error( RtAudioError::WARNING );
\r
3378 // The following lines were commented-out because some behavior was
\r
3379 // noted where the device buffers need to be zeroed to avoid
\r
3380 // continuing sound, even when the device buffers are completely
\r
3381 // disposed. So now, calling abort is the same as calling stop.
\r
3382 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3383 // handle->drainCounter = 2;
\r
3387 // This function will be called by a spawned thread when the user
\r
3388 // callback function signals that the stream should be stopped or
\r
3389 // aborted. It is necessary to handle it this way because the
\r
3390 // callbackEvent() function must return before the ASIOStop()
\r
3391 // function will return.
\r
3392 static unsigned __stdcall asioStopStream( void *ptr )
\r
3394 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3395 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3397 object->stopStream();
\r
3398 _endthreadex( 0 );
\r
3402 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3404 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3405 if ( stream_.state == STREAM_CLOSED ) {
\r
3406 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3407 error( RtAudioError::WARNING );
\r
3411 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3412 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3414 // Check if we were draining the stream and signal if finished.
\r
3415 if ( handle->drainCounter > 3 ) {
\r
3417 stream_.state = STREAM_STOPPING;
\r
3418 if ( handle->internalDrain == false )
\r
3419 SetEvent( handle->condition );
\r
3420 else { // spawn a thread to stop the stream
\r
3421 unsigned threadId;
\r
3422 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3423 &stream_.callbackInfo, 0, &threadId );
\r
3428 // Invoke user callback to get fresh output data UNLESS we are
\r
3429 // draining stream.
\r
3430 if ( handle->drainCounter == 0 ) {
\r
3431 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3432 double streamTime = getStreamTime();
\r
3433 RtAudioStreamStatus status = 0;
\r
3434 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3435 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3438 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3439 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3442 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3443 stream_.bufferSize, streamTime, status, info->userData );
\r
3444 if ( cbReturnValue == 2 ) {
\r
3445 stream_.state = STREAM_STOPPING;
\r
3446 handle->drainCounter = 2;
\r
3447 unsigned threadId;
\r
3448 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3449 &stream_.callbackInfo, 0, &threadId );
\r
3452 else if ( cbReturnValue == 1 ) {
\r
3453 handle->drainCounter = 1;
\r
3454 handle->internalDrain = true;
\r
3458 unsigned int nChannels, bufferBytes, i, j;
\r
3459 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3460 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3462 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3464 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3466 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3467 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3468 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3472 else if ( stream_.doConvertBuffer[0] ) {
\r
3474 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3475 if ( stream_.doByteSwap[0] )
\r
3476 byteSwapBuffer( stream_.deviceBuffer,
\r
3477 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3478 stream_.deviceFormat[0] );
\r
3480 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3481 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3482 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3483 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3489 if ( stream_.doByteSwap[0] )
\r
3490 byteSwapBuffer( stream_.userBuffer[0],
\r
3491 stream_.bufferSize * stream_.nUserChannels[0],
\r
3492 stream_.userFormat );
\r
3494 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3495 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3496 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3497 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3503 // Don't bother draining input
\r
3504 if ( handle->drainCounter ) {
\r
3505 handle->drainCounter++;
\r
3509 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3511 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3513 if (stream_.doConvertBuffer[1]) {
\r
3515 // Always interleave ASIO input data.
\r
3516 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3517 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3518 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3519 handle->bufferInfos[i].buffers[bufferIndex],
\r
3523 if ( stream_.doByteSwap[1] )
\r
3524 byteSwapBuffer( stream_.deviceBuffer,
\r
3525 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3526 stream_.deviceFormat[1] );
\r
3527 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3531 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3532 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3533 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3534 handle->bufferInfos[i].buffers[bufferIndex],
\r
3539 if ( stream_.doByteSwap[1] )
\r
3540 byteSwapBuffer( stream_.userBuffer[1],
\r
3541 stream_.bufferSize * stream_.nUserChannels[1],
\r
3542 stream_.userFormat );
\r
3547 // The following call was suggested by Malte Clasen. While the API
\r
3548 // documentation indicates it should not be required, some device
\r
3549 // drivers apparently do not function correctly without it.
\r
3550 ASIOOutputReady();
\r
3552 RtApi::tickStreamTime();
\r
3556 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3558 // The ASIO documentation says that this usually only happens during
\r
3559 // external sync. Audio processing is not stopped by the driver,
\r
3560 // actual sample rate might not have even changed, maybe only the
\r
3561 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3564 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3566 object->stopStream();
\r
3568 catch ( RtAudioError &exception ) {
\r
3569 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3573 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3576 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3580 switch( selector ) {
\r
3581 case kAsioSelectorSupported:
\r
3582 if ( value == kAsioResetRequest
\r
3583 || value == kAsioEngineVersion
\r
3584 || value == kAsioResyncRequest
\r
3585 || value == kAsioLatenciesChanged
\r
3586 // The following three were added for ASIO 2.0, you don't
\r
3587 // necessarily have to support them.
\r
3588 || value == kAsioSupportsTimeInfo
\r
3589 || value == kAsioSupportsTimeCode
\r
3590 || value == kAsioSupportsInputMonitor)
\r
3593 case kAsioResetRequest:
\r
3594 // Defer the task and perform the reset of the driver during the
\r
3595 // next "safe" situation. You cannot reset the driver right now,
\r
3596 // as this code is called from the driver. Reset the driver is
\r
3597 // done by completely destruct is. I.e. ASIOStop(),
\r
3598 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3600 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3603 case kAsioResyncRequest:
\r
3604 // This informs the application that the driver encountered some
\r
3605 // non-fatal data loss. It is used for synchronization purposes
\r
3606 // of different media. Added mainly to work around the Win16Mutex
\r
3607 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3608 // which could lose data because the Mutex was held too long by
\r
3609 // another thread. However a driver can issue it in other
\r
3610 // situations, too.
\r
3611 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3615 case kAsioLatenciesChanged:
\r
3616 // This will inform the host application that the drivers were
\r
3617 // latencies changed. Beware, it this does not mean that the
\r
3618 // buffer sizes have changed! You might need to update internal
\r
3620 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3623 case kAsioEngineVersion:
\r
3624 // Return the supported ASIO version of the host application. If
\r
3625 // a host application does not implement this selector, ASIO 1.0
\r
3626 // is assumed by the driver.
\r
3629 case kAsioSupportsTimeInfo:
\r
3630 // Informs the driver whether the
\r
3631 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3632 // For compatibility with ASIO 1.0 drivers the host application
\r
3633 // should always support the "old" bufferSwitch method, too.
\r
3636 case kAsioSupportsTimeCode:
\r
3637 // Informs the driver whether application is interested in time
\r
3638 // code info. If an application does not need to know about time
\r
3639 // code, the driver has less work to do.
\r
3646 static const char* getAsioErrorString( ASIOError result )
\r
3651 const char*message;
\r
3654 static const Messages m[] =
\r
3656 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3657 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3658 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3659 { ASE_InvalidMode, "Invalid mode." },
\r
3660 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3661 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3662 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3665 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3666 if ( m[i].value == result ) return m[i].message;
\r
3668 return "Unknown error.";
\r
3671 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3675 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3677 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3678 // - Introduces support for the Windows WASAPI API
\r
3679 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3680 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3681 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3686 #include <audioclient.h>
\r
3688 #include <mmdeviceapi.h>
\r
3689 #include <functiondiscoverykeys_devpkey.h>
\r
3691 //=============================================================================
\r
3693 #define SAFE_RELEASE( objectPtr )\
\r
3696 objectPtr->Release();\
\r
3697 objectPtr = NULL;\
\r
3700 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3702 //-----------------------------------------------------------------------------
\r
3704 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3705 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3706 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3707 // provide intermediate storage for read / write synchronization.
\r
3708 class WasapiBuffer
\r
3712 : buffer_( NULL ),
\r
3721 // sets the length of the internal ring buffer
\r
3722 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3725 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3727 bufferSize_ = bufferSize;
\r
3732 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3733 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3735 if ( !buffer || // incoming buffer is NULL
\r
3736 bufferSize == 0 || // incoming buffer has no data
\r
3737 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3742 unsigned int relOutIndex = outIndex_;
\r
3743 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3744 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3745 relOutIndex += bufferSize_;
\r
3748 // "in" index can end on the "out" index but cannot begin at it
\r
3749 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3750 return false; // not enough space between "in" index and "out" index
\r
3753 // copy buffer from external to internal
\r
3754 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3755 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3756 int fromInSize = bufferSize - fromZeroSize;
\r
3760 case RTAUDIO_SINT8:
\r
3761 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3762 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3764 case RTAUDIO_SINT16:
\r
3765 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3766 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3768 case RTAUDIO_SINT24:
\r
3769 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3770 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3772 case RTAUDIO_SINT32:
\r
3773 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3774 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3776 case RTAUDIO_FLOAT32:
\r
3777 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3778 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3780 case RTAUDIO_FLOAT64:
\r
3781 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3782 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3786 // update "in" index
\r
3787 inIndex_ += bufferSize;
\r
3788 inIndex_ %= bufferSize_;
\r
3793 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3794 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3796 if ( !buffer || // incoming buffer is NULL
\r
3797 bufferSize == 0 || // incoming buffer has no data
\r
3798 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3803 unsigned int relInIndex = inIndex_;
\r
3804 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3805 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3806 relInIndex += bufferSize_;
\r
3809 // "out" index can begin at and end on the "in" index
\r
3810 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3811 return false; // not enough space between "out" index and "in" index
\r
3814 // copy buffer from internal to external
\r
3815 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3816 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3817 int fromOutSize = bufferSize - fromZeroSize;
\r
3821 case RTAUDIO_SINT8:
\r
3822 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3823 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3825 case RTAUDIO_SINT16:
\r
3826 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3827 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3829 case RTAUDIO_SINT24:
\r
3830 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3831 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3833 case RTAUDIO_SINT32:
\r
3834 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3835 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3837 case RTAUDIO_FLOAT32:
\r
3838 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3839 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3841 case RTAUDIO_FLOAT64:
\r
3842 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3843 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3847 // update "out" index
\r
3848 outIndex_ += bufferSize;
\r
3849 outIndex_ %= bufferSize_;
\r
3856 unsigned int bufferSize_;
\r
3857 unsigned int inIndex_;
\r
3858 unsigned int outIndex_;
\r
3861 //-----------------------------------------------------------------------------
\r
3863 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3864 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3865 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3866 // This sample rate converter works best with conversions between one rate and its multiple.
\r
3867 void convertBufferWasapi( char* outBuffer,
\r
3868 const char* inBuffer,
\r
3869 const unsigned int& channelCount,
\r
3870 const unsigned int& inSampleRate,
\r
3871 const unsigned int& outSampleRate,
\r
3872 const unsigned int& inSampleCount,
\r
3873 unsigned int& outSampleCount,
\r
3874 const RtAudioFormat& format )
\r
3876 // calculate the new outSampleCount and relative sampleStep
\r
3877 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3878 float sampleRatioInv = ( float ) 1 / sampleRatio;
\r
3879 float sampleStep = 1.0f / sampleRatio;
\r
3880 float inSampleFraction = 0.0f;
\r
3882 outSampleCount = ( unsigned int ) std::roundf( inSampleCount * sampleRatio );
\r
3884 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
\r
3885 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
\r
3887 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3888 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3890 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3894 case RTAUDIO_SINT8:
\r
3895 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3897 case RTAUDIO_SINT16:
\r
3898 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3900 case RTAUDIO_SINT24:
\r
3901 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3903 case RTAUDIO_SINT32:
\r
3904 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3906 case RTAUDIO_FLOAT32:
\r
3907 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3909 case RTAUDIO_FLOAT64:
\r
3910 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3914 // jump to next in sample
\r
3915 inSampleFraction += sampleStep;
\r
3918 else // else interpolate
\r
3920 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3921 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3923 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3924 float inSampleDec = inSampleFraction - inSample;
\r
3925 unsigned int frameInSample = inSample * channelCount;
\r
3926 unsigned int frameOutSample = outSample * channelCount;
\r
3930 case RTAUDIO_SINT8:
\r
3932 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3934 char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
\r
3935 char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3936 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
\r
3937 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3941 case RTAUDIO_SINT16:
\r
3943 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3945 short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
\r
3946 short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3947 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
\r
3948 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3952 case RTAUDIO_SINT24:
\r
3954 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3956 int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
\r
3957 int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
\r
3958 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3959 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3963 case RTAUDIO_SINT32:
\r
3965 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3967 int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
\r
3968 int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3969 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
\r
3970 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3974 case RTAUDIO_FLOAT32:
\r
3976 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3978 float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
\r
3979 float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3980 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3981 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3985 case RTAUDIO_FLOAT64:
\r
3987 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3989 double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
\r
3990 double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
\r
3991 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
\r
3992 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
\r
3998 // jump to next in sample
\r
3999 inSampleFraction += sampleStep;
\r
4004 //-----------------------------------------------------------------------------
\r
4006 // A structure to hold various information related to the WASAPI implementation.
\r
4007 struct WasapiHandle
\r
4009 IAudioClient* captureAudioClient;
\r
4010 IAudioClient* renderAudioClient;
\r
4011 IAudioCaptureClient* captureClient;
\r
4012 IAudioRenderClient* renderClient;
\r
4013 HANDLE captureEvent;
\r
4014 HANDLE renderEvent;
\r
4017 : captureAudioClient( NULL ),
\r
4018 renderAudioClient( NULL ),
\r
4019 captureClient( NULL ),
\r
4020 renderClient( NULL ),
\r
4021 captureEvent( NULL ),
\r
4022 renderEvent( NULL ) {}
\r
4025 //=============================================================================
\r
4027 RtApiWasapi::RtApiWasapi()
\r
4028 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
4030 // WASAPI can run either apartment or multi-threaded
\r
4031 HRESULT hr = CoInitialize( NULL );
\r
4032 if ( !FAILED( hr ) )
\r
4033 coInitialized_ = true;
\r
4035 // Instantiate device enumerator
\r
4036 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
4037 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
4038 ( void** ) &deviceEnumerator_ );
\r
4040 if ( FAILED( hr ) ) {
\r
4041 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
4042 error( RtAudioError::DRIVER_ERROR );
\r
4046 //-----------------------------------------------------------------------------
\r
4048 RtApiWasapi::~RtApiWasapi()
\r
4050 if ( stream_.state != STREAM_CLOSED )
\r
4053 SAFE_RELEASE( deviceEnumerator_ );
\r
4055 // If this object previously called CoInitialize()
\r
4056 if ( coInitialized_ )
\r
4060 //=============================================================================
\r
4062 unsigned int RtApiWasapi::getDeviceCount( void )
\r
4064 unsigned int captureDeviceCount = 0;
\r
4065 unsigned int renderDeviceCount = 0;
\r
4067 IMMDeviceCollection* captureDevices = NULL;
\r
4068 IMMDeviceCollection* renderDevices = NULL;
\r
4070 // Count capture devices
\r
4071 errorText_.clear();
\r
4072 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4073 if ( FAILED( hr ) ) {
\r
4074 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
4078 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4079 if ( FAILED( hr ) ) {
\r
4080 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
4084 // Count render devices
\r
4085 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4086 if ( FAILED( hr ) ) {
\r
4087 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4091 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4092 if ( FAILED( hr ) ) {
\r
4093 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4098 // release all references
\r
4099 SAFE_RELEASE( captureDevices );
\r
4100 SAFE_RELEASE( renderDevices );
\r
4102 if ( errorText_.empty() )
\r
4103 return captureDeviceCount + renderDeviceCount;
\r
4105 error( RtAudioError::DRIVER_ERROR );
\r
4109 //-----------------------------------------------------------------------------
\r
4111 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4113 RtAudio::DeviceInfo info;
\r
4114 unsigned int captureDeviceCount = 0;
\r
4115 unsigned int renderDeviceCount = 0;
\r
4116 std::string defaultDeviceName;
\r
4117 bool isCaptureDevice = false;
\r
4119 PROPVARIANT deviceNameProp;
\r
4120 PROPVARIANT defaultDeviceNameProp;
\r
4122 IMMDeviceCollection* captureDevices = NULL;
\r
4123 IMMDeviceCollection* renderDevices = NULL;
\r
4124 IMMDevice* devicePtr = NULL;
\r
4125 IMMDevice* defaultDevicePtr = NULL;
\r
4126 IAudioClient* audioClient = NULL;
\r
4127 IPropertyStore* devicePropStore = NULL;
\r
4128 IPropertyStore* defaultDevicePropStore = NULL;
\r
4130 WAVEFORMATEX* deviceFormat = NULL;
\r
4131 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4134 info.probed = false;
\r
4136 // Count capture devices
\r
4137 errorText_.clear();
\r
4138 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4139 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4140 if ( FAILED( hr ) ) {
\r
4141 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4145 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4146 if ( FAILED( hr ) ) {
\r
4147 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4151 // Count render devices
\r
4152 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4153 if ( FAILED( hr ) ) {
\r
4154 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4158 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4159 if ( FAILED( hr ) ) {
\r
4160 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4164 // validate device index
\r
4165 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4166 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4167 errorType = RtAudioError::INVALID_USE;
\r
4171 // determine whether index falls within capture or render devices
\r
4172 if ( device >= renderDeviceCount ) {
\r
4173 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4174 if ( FAILED( hr ) ) {
\r
4175 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4178 isCaptureDevice = true;
\r
4181 hr = renderDevices->Item( device, &devicePtr );
\r
4182 if ( FAILED( hr ) ) {
\r
4183 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4186 isCaptureDevice = false;
\r
4189 // get default device name
\r
4190 if ( isCaptureDevice ) {
\r
4191 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4192 if ( FAILED( hr ) ) {
\r
4193 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4198 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4199 if ( FAILED( hr ) ) {
\r
4200 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4205 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4206 if ( FAILED( hr ) ) {
\r
4207 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4210 PropVariantInit( &defaultDeviceNameProp );
\r
4212 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4213 if ( FAILED( hr ) ) {
\r
4214 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4218 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4221 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4222 if ( FAILED( hr ) ) {
\r
4223 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4227 PropVariantInit( &deviceNameProp );
\r
4229 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4230 if ( FAILED( hr ) ) {
\r
4231 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4235 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4238 if ( isCaptureDevice ) {
\r
4239 info.isDefaultInput = info.name == defaultDeviceName;
\r
4240 info.isDefaultOutput = false;
\r
4243 info.isDefaultInput = false;
\r
4244 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4248 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4249 if ( FAILED( hr ) ) {
\r
4250 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4254 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4255 if ( FAILED( hr ) ) {
\r
4256 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4260 if ( isCaptureDevice ) {
\r
4261 info.inputChannels = deviceFormat->nChannels;
\r
4262 info.outputChannels = 0;
\r
4263 info.duplexChannels = 0;
\r
4266 info.inputChannels = 0;
\r
4267 info.outputChannels = deviceFormat->nChannels;
\r
4268 info.duplexChannels = 0;
\r
4272 info.sampleRates.clear();
\r
4274 // allow support for all sample rates as we have a built-in sample rate converter
\r
4275 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4276 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4278 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4281 info.nativeFormats = 0;
\r
4283 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4284 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4285 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4287 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4288 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4290 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4291 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4294 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4295 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4296 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4298 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4299 info.nativeFormats |= RTAUDIO_SINT8;
\r
4301 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4302 info.nativeFormats |= RTAUDIO_SINT16;
\r
4304 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4305 info.nativeFormats |= RTAUDIO_SINT24;
\r
4307 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4308 info.nativeFormats |= RTAUDIO_SINT32;
\r
4313 info.probed = true;
\r
4316 // release all references
\r
4317 PropVariantClear( &deviceNameProp );
\r
4318 PropVariantClear( &defaultDeviceNameProp );
\r
4320 SAFE_RELEASE( captureDevices );
\r
4321 SAFE_RELEASE( renderDevices );
\r
4322 SAFE_RELEASE( devicePtr );
\r
4323 SAFE_RELEASE( defaultDevicePtr );
\r
4324 SAFE_RELEASE( audioClient );
\r
4325 SAFE_RELEASE( devicePropStore );
\r
4326 SAFE_RELEASE( defaultDevicePropStore );
\r
4328 CoTaskMemFree( deviceFormat );
\r
4329 CoTaskMemFree( closestMatchFormat );
\r
4331 if ( !errorText_.empty() )
\r
4332 error( errorType );
\r
4336 //-----------------------------------------------------------------------------
\r
4338 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4340 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4341 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4349 //-----------------------------------------------------------------------------
\r
4351 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4353 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4354 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4362 //-----------------------------------------------------------------------------
\r
4364 void RtApiWasapi::closeStream( void )
\r
4366 if ( stream_.state == STREAM_CLOSED ) {
\r
4367 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4368 error( RtAudioError::WARNING );
\r
4372 if ( stream_.state != STREAM_STOPPED )
\r
4375 // clean up stream memory
\r
4376 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4377 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4379 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4380 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4382 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4383 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4385 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4386 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4388 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4389 stream_.apiHandle = NULL;
\r
4391 for ( int i = 0; i < 2; i++ ) {
\r
4392 if ( stream_.userBuffer[i] ) {
\r
4393 free( stream_.userBuffer[i] );
\r
4394 stream_.userBuffer[i] = 0;
\r
4398 if ( stream_.deviceBuffer ) {
\r
4399 free( stream_.deviceBuffer );
\r
4400 stream_.deviceBuffer = 0;
\r
4403 // update stream state
\r
4404 stream_.state = STREAM_CLOSED;
\r
4407 //-----------------------------------------------------------------------------
\r
4409 void RtApiWasapi::startStream( void )
\r
4413 if ( stream_.state == STREAM_RUNNING ) {
\r
4414 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4415 error( RtAudioError::WARNING );
\r
4419 // update stream state
\r
4420 stream_.state = STREAM_RUNNING;
\r
4422 // create WASAPI stream thread
\r
4423 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4425 if ( !stream_.callbackInfo.thread ) {
\r
4426 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4427 error( RtAudioError::THREAD_ERROR );
\r
4430 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4431 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4435 //-----------------------------------------------------------------------------
\r
4437 void RtApiWasapi::stopStream( void )
\r
4441 if ( stream_.state == STREAM_STOPPED ) {
\r
4442 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4443 error( RtAudioError::WARNING );
\r
4447 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4448 stream_.state = STREAM_STOPPING;
\r
4450 // wait until stream thread is stopped
\r
4451 while( stream_.state != STREAM_STOPPED ) {
\r
4455 // Wait for the last buffer to play before stopping.
\r
4456 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4458 // stop capture client if applicable
\r
4459 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4460 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4461 if ( FAILED( hr ) ) {
\r
4462 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4463 error( RtAudioError::DRIVER_ERROR );
\r
4468 // stop render client if applicable
\r
4469 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4470 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4471 if ( FAILED( hr ) ) {
\r
4472 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4473 error( RtAudioError::DRIVER_ERROR );
\r
4478 // close thread handle
\r
4479 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4480 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4481 error( RtAudioError::THREAD_ERROR );
\r
4485 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4488 //-----------------------------------------------------------------------------
\r
//! Halts the stream immediately, discarding any audio still queued —
//! unlike stopStream() above, no Sleep() is taken to let the last buffer
//! finish playing before the clients are stopped. Signals the stream
//! thread via STREAM_STOPPING, waits until it reports STREAM_STOPPED,
//! stops the capture/render audio clients, then closes the callback
//! thread handle.
//! NOTE(review): gaps in the embedded line numbers mean some lines are
//! elided in this excerpt (e.g. the early return after the WARNING, the
//! wait-loop body, and closing braces) — confirm against full source.
4490 void RtApiWasapi::abortStream( void )

4494 if ( stream_.state == STREAM_STOPPED ) {

4495 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";

4496 error( RtAudioError::WARNING );

4500 // inform stream thread by setting stream state to STREAM_STOPPING

4501 stream_.state = STREAM_STOPPING;

4503 // wait until stream thread is stopped

4504 while ( stream_.state != STREAM_STOPPED ) {

4508 // stop capture client if applicable

4509 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {

4510 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();

4511 if ( FAILED( hr ) ) {

4512 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";

4513 error( RtAudioError::DRIVER_ERROR );

4518 // stop render client if applicable

4519 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {

4520 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();

4521 if ( FAILED( hr ) ) {

4522 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";

4523 error( RtAudioError::DRIVER_ERROR );

4528 // close thread handle

4529 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {

4530 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";

4531 error( RtAudioError::THREAD_ERROR );

// Mark the callback thread as gone so a later start can spawn a new one.
4535 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4538 //-----------------------------------------------------------------------------
\r
//! Opens/configures one direction (mode = INPUT or OUTPUT) of a WASAPI
//! stream on the given combined device index.
//!   device       - index into the combined render+capture endpoint list;
//!                  indices >= renderDeviceCount address capture devices.
//!   channels     - user channel count for this direction.
//!   firstChannel - channel offset into the device buffer.
//!   sampleRate   - user sample rate (device runs at its mix rate; the
//!                  stream thread resamples).
//!   format       - user sample format.
//!   bufferSize   - in: requested buffer size in frames (stored as-is).
//!   options      - optional stream flags (non-interleaved, realtime).
//! Returns SUCCESS or FAILURE; always releases the local COM objects, and
//! on failure closes the stream and raises error() if errorText_ was set.
//! NOTE(review): numbering gaps show elided lines (goto/early-exit paths,
//! closing braces) — confirm control flow against full source.
4540 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

4541 unsigned int firstChannel, unsigned int sampleRate,

4542 RtAudioFormat format, unsigned int* bufferSize,

4543 RtAudio::StreamOptions* options )

4545 bool methodResult = FAILURE;

4546 unsigned int captureDeviceCount = 0;

4547 unsigned int renderDeviceCount = 0;

4549 IMMDeviceCollection* captureDevices = NULL;

4550 IMMDeviceCollection* renderDevices = NULL;

4551 IMMDevice* devicePtr = NULL;

4552 WAVEFORMATEX* deviceFormat = NULL;

4553 unsigned int bufferBytes;

4554 stream_.state = STREAM_STOPPED;

4556 // create API Handle if not already created

4557 if ( !stream_.apiHandle )

4558 stream_.apiHandle = ( void* ) new WasapiHandle();

4560 // Count capture devices

4561 errorText_.clear();

4562 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

4563 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );

4564 if ( FAILED( hr ) ) {

4565 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";

4569 hr = captureDevices->GetCount( &captureDeviceCount );

4570 if ( FAILED( hr ) ) {

4571 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";

4575 // Count render devices

4576 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );

4577 if ( FAILED( hr ) ) {

4578 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";

4582 hr = renderDevices->GetCount( &renderDeviceCount );

4583 if ( FAILED( hr ) ) {

4584 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";

4588 // validate device index

4589 if ( device >= captureDeviceCount + renderDeviceCount ) {

4590 errorType = RtAudioError::INVALID_USE;

4591 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";

4595 // determine whether index falls within capture or render devices
// Combined index space: render endpoints occupy [0, renderDeviceCount),
// capture endpoints follow (hence the "device - renderDeviceCount" below).

4596 if ( device >= renderDeviceCount ) {

4597 if ( mode != INPUT ) {

4598 errorType = RtAudioError::INVALID_USE;

4599 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";

4603 // retrieve captureAudioClient from devicePtr

4604 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

4606 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );

4607 if ( FAILED( hr ) ) {

4608 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";

4612 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,

4613 NULL, ( void** ) &captureAudioClient );

4614 if ( FAILED( hr ) ) {

4615 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";

4619 hr = captureAudioClient->GetMixFormat( &deviceFormat );

4620 if ( FAILED( hr ) ) {

4621 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";

4625 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;

4626 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );

4629 if ( mode != OUTPUT ) {

4630 errorType = RtAudioError::INVALID_USE;

4631 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";

4635 // retrieve renderAudioClient from devicePtr

4636 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

4638 hr = renderDevices->Item( device, &devicePtr );

4639 if ( FAILED( hr ) ) {

4640 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";

4644 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,

4645 NULL, ( void** ) &renderAudioClient );

4646 if ( FAILED( hr ) ) {

4647 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";

4651 hr = renderAudioClient->GetMixFormat( &deviceFormat );

4652 if ( FAILED( hr ) ) {

4653 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";

4657 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;

4658 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );

4661 // fill stream data
// Opening the second direction of an already-open stream promotes it to DUPLEX.

4662 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||

4663 ( stream_.mode == INPUT && mode == OUTPUT ) ) {

4664 stream_.mode = DUPLEX;

4667 stream_.mode = mode;

4670 stream_.device[mode] = device;

4671 stream_.doByteSwap[mode] = false;

4672 stream_.sampleRate = sampleRate;

4673 stream_.bufferSize = *bufferSize;

4674 stream_.nBuffers = 1;

4675 stream_.nUserChannels[mode] = channels;

4676 stream_.channelOffset[mode] = firstChannel;

4677 stream_.userFormat = format;

// NOTE(review): nativeFormats is a format bitmask; assigning it directly
// as deviceFormat relies on the WASAPI probe reporting exactly one native
// format — confirm.
4678 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;

4680 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )

4681 stream_.userInterleaved = false;

4683 stream_.userInterleaved = true;

4684 stream_.deviceInterleaved[mode] = true;

4686 // Set flags for buffer conversion.

4687 stream_.doConvertBuffer[mode] = false;

4688 if ( stream_.userFormat != stream_.deviceFormat[mode] ||

// NOTE(review): if nUserChannels/nDeviceChannels are the C arrays indexed
// elsewhere as [mode], this compares decayed pointers to two distinct
// arrays and is therefore always true, forcing doConvertBuffer on every
// stream. Element comparison ([mode]) was likely intended — confirm.
4689 stream_.nUserChannels != stream_.nDeviceChannels )

4690 stream_.doConvertBuffer[mode] = true;

4691 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

4692 stream_.nUserChannels[mode] > 1 )

4693 stream_.doConvertBuffer[mode] = true;

4695 if ( stream_.doConvertBuffer[mode] )

4696 setConvertInfo( mode, 0 );

4698 // Allocate necessary internal buffers

4699 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );

4701 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );

4702 if ( !stream_.userBuffer[mode] ) {

4703 errorType = RtAudioError::MEMORY_ERROR;

4704 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";

// Map RTAUDIO_SCHEDULE_REALTIME onto an elevated callback-thread priority.
4708 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )

4709 stream_.callbackInfo.priority = 15;

4711 stream_.callbackInfo.priority = 0;

4713 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback

4714 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode

4716 methodResult = SUCCESS;

// Common exit path: release local COM objects regardless of outcome.
4720 SAFE_RELEASE( captureDevices );

4721 SAFE_RELEASE( renderDevices );

4722 SAFE_RELEASE( devicePtr );

4723 CoTaskMemFree( deviceFormat );

4725 // if method failed, close the stream

4726 if ( methodResult == FAILURE )

4729 if ( !errorText_.empty() )

4730 error( errorType );

4731 return methodResult;
\r
4734 //=============================================================================
\r
//! Static Win32 thread entry point: dispatches to the instance's
//! wasapiThread() processing loop. wasapiPtr is the RtApiWasapi 'this'
//! pointer handed to CreateThread.
4736 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )

4739 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
//! Static thread entry used when the user callback returns 1 (see
//! wasapiThread below): calls stopStream() from a helper thread so the
//! audio thread is not stopping itself.
4744 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )

4747 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
//! Static thread entry used when the user callback returns 2 (see
//! wasapiThread below): calls abortStream() from a helper thread so the
//! audio thread is not aborting itself.
4752 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )

4755 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4760 //-----------------------------------------------------------------------------
\r
//! Core WASAPI stream-processing loop; runs on its own thread (entered
//! via runWasapiThread()). Each iteration: pull captured device audio
//! from captureBuffer, resample/convert it into the user input buffer,
//! invoke the user callback, convert/resample the user output and push it
//! into renderBuffer, then exchange raw buffers with the WASAPI capture
//! and render clients. Exits when stream_.state becomes STREAM_STOPPING
//! (set by stopStream()/abortStream()) and finally publishes
//! STREAM_STOPPED.
//! NOTE(review): gaps in the embedded line numbers mark elided lines —
//! likely including the 'hr' declaration, goto/exit paths, closing braces
//! and final cleanup — confirm against the full source.
4762 void RtApiWasapi::wasapiThread()

4764 // as this is a new thread, we must CoInitialize it

4765 CoInitialize( NULL );

4769 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

4770 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

4771 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;

4772 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;

4773 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;

4774 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;

4776 WAVEFORMATEX* captureFormat = NULL;

4777 WAVEFORMATEX* renderFormat = NULL;

// Device mix rate / user rate ratios used for resampling in both directions.
4778 float captureSrRatio = 0.0f;

4779 float renderSrRatio = 0.0f;

4780 WasapiBuffer captureBuffer;

4781 WasapiBuffer renderBuffer;

4783 // declare local stream variables

4784 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;

4785 BYTE* streamBuffer = NULL;

4786 unsigned long captureFlags = 0;

4787 unsigned int bufferFrameCount = 0;

4788 unsigned int numFramesPadding = 0;

4789 unsigned int convBufferSize = 0;

4790 bool callbackPushed = false;

4791 bool callbackPulled = false;

4792 bool callbackStopped = false;

4793 int callbackResult = 0;

4795 // convBuffer is used to store converted buffers between WASAPI and the user

4796 char* convBuffer = NULL;

4797 unsigned int convBuffSize = 0;

4798 unsigned int deviceBuffSize = 0;

4800 errorText_.clear();

4801 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;

4803 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): neither AvrtDll nor the resolved function pointer is
// NULL-checked before use here.

4804 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );

4806 DWORD taskIndex = 0;

4807 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );

4808 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );

4809 FreeLibrary( AvrtDll );

4812 // start capture stream if applicable

4813 if ( captureAudioClient ) {

4814 hr = captureAudioClient->GetMixFormat( &captureFormat );

4815 if ( FAILED( hr ) ) {

4816 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4820 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );

4822 // initialize capture stream according to desire buffer size

4823 float desiredBufferSize = stream_.bufferSize * captureSrRatio;

// Buffer period in REFERENCE_TIME units (100-ns ticks): frames / rate * 1e7.
4824 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

4826 if ( !captureClient ) {

4827 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4828 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4829 desiredBufferPeriod,

4830 desiredBufferPeriod,

4833 if ( FAILED( hr ) ) {

4834 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";

4838 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),

4839 ( void** ) &captureClient );

4840 if ( FAILED( hr ) ) {

4841 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";

4845 // configure captureEvent to trigger on every available capture buffer
// (auto-reset event, initially non-signaled)

4846 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4847 if ( !captureEvent ) {

4848 errorType = RtAudioError::SYSTEM_ERROR;

4849 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";

4853 hr = captureAudioClient->SetEventHandle( captureEvent );

4854 if ( FAILED( hr ) ) {

4855 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";

// Cache the client/event in the shared handle so stop/close can reach them.
4859 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;

4860 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;

4863 unsigned int inBufferSize = 0;

4864 hr = captureAudioClient->GetBufferSize( &inBufferSize );

4865 if ( FAILED( hr ) ) {

4866 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";

4870 // scale outBufferSize according to stream->user sample rate ratio

4871 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];

4872 inBufferSize *= stream_.nDeviceChannels[INPUT];

4874 // set captureBuffer size

4875 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );

4877 // reset the capture stream

4878 hr = captureAudioClient->Reset();

4879 if ( FAILED( hr ) ) {

4880 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";

4884 // start the capture stream

4885 hr = captureAudioClient->Start();

4886 if ( FAILED( hr ) ) {

4887 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";

4892 // start render stream if applicable

4893 if ( renderAudioClient ) {

4894 hr = renderAudioClient->GetMixFormat( &renderFormat );

4895 if ( FAILED( hr ) ) {

4896 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";

4900 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );

4902 // initialize render stream according to desire buffer size

4903 float desiredBufferSize = stream_.bufferSize * renderSrRatio;

4904 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

4906 if ( !renderClient ) {

4907 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,

4908 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,

4909 desiredBufferPeriod,

4910 desiredBufferPeriod,

4913 if ( FAILED( hr ) ) {

4914 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";

4918 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),

4919 ( void** ) &renderClient );

4920 if ( FAILED( hr ) ) {

4921 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";

4925 // configure renderEvent to trigger on every available render buffer
// (auto-reset event, initially non-signaled)

4926 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );

4927 if ( !renderEvent ) {

4928 errorType = RtAudioError::SYSTEM_ERROR;

4929 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";

4933 hr = renderAudioClient->SetEventHandle( renderEvent );

4934 if ( FAILED( hr ) ) {

4935 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";

4939 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;

4940 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;

4943 unsigned int outBufferSize = 0;

4944 hr = renderAudioClient->GetBufferSize( &outBufferSize );

4945 if ( FAILED( hr ) ) {

4946 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";

4950 // scale inBufferSize according to user->stream sample rate ratio

4951 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];

4952 outBufferSize *= stream_.nDeviceChannels[OUTPUT];

4954 // set renderBuffer size

4955 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );

4957 // reset the render stream

4958 hr = renderAudioClient->Reset();

4959 if ( FAILED( hr ) ) {

4960 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";

4964 // start the render stream

4965 hr = renderAudioClient->Start();

4966 if ( FAILED( hr ) ) {

4967 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";

// Size the conversion and device staging buffers; for DUPLEX use the
// worst case across the two directions. convBuffSize is in BYTES.
4972 if ( stream_.mode == INPUT ) {

4973 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4974 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );

4976 else if ( stream_.mode == OUTPUT ) {

4977 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4978 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );

4980 else if ( stream_.mode == DUPLEX ) {

4981 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4982 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4983 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),

4984 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );

4987 convBuffer = ( char* ) malloc( convBuffSize );

4988 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );

4989 if ( !convBuffer || !stream_.deviceBuffer ) {

4990 errorType = RtAudioError::MEMORY_ERROR;

4991 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";

4995 // stream process loop

4996 while ( stream_.state != STREAM_STOPPING ) {

4997 if ( !callbackPulled ) {

5000 // 1. Pull callback buffer from inputBuffer

5001 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count

5002 // Convert callback buffer to user format

5004 if ( captureAudioClient ) {

5005 // Pull callback buffer from inputBuffer

5006 callbackPulled = captureBuffer.pullBuffer( convBuffer,

5007 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],

5008 stream_.deviceFormat[INPUT] );

5010 if ( callbackPulled ) {

5011 // Convert callback buffer to user sample rate

5012 convertBufferWasapi( stream_.deviceBuffer,

5014 stream_.nDeviceChannels[INPUT],

5015 captureFormat->nSamplesPerSec,

5016 stream_.sampleRate,

5017 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),

5019 stream_.deviceFormat[INPUT] );

5021 if ( stream_.doConvertBuffer[INPUT] ) {

5022 // Convert callback buffer to user format

5023 convertBuffer( stream_.userBuffer[INPUT],

5024 stream_.deviceBuffer,

5025 stream_.convertInfo[INPUT] );

5028 // no further conversion, simple copy deviceBuffer to userBuffer

5029 memcpy( stream_.userBuffer[INPUT],

5030 stream_.deviceBuffer,

5031 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );

5036 // if there is no capture stream, set callbackPulled flag

5037 callbackPulled = true;

5040 // Execute Callback

5041 // ================

5042 // 1. Execute user callback method

5043 // 2. Handle return value from callback

5045 // if callback has not requested the stream to stop

5046 if ( callbackPulled && !callbackStopped ) {

5047 // Execute user callback method

5048 callbackResult = callback( stream_.userBuffer[OUTPUT],

5049 stream_.userBuffer[INPUT],

5050 stream_.bufferSize,

5052 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,

5053 stream_.callbackInfo.userData );

5055 // Handle return value from callback
// callbackResult 1 => drain-and-stop; 2 => abort. In both cases the stop
// is delegated to a fresh helper thread so this audio thread can keep
// running until the state machine flips to STREAM_STOPPING.

5056 if ( callbackResult == 1 ) {

5057 // instantiate a thread to stop this thread

5058 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );

5059 if ( !threadHandle ) {

5060 errorType = RtAudioError::THREAD_ERROR;

5061 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";

5064 else if ( !CloseHandle( threadHandle ) ) {

5065 errorType = RtAudioError::THREAD_ERROR;

5066 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";

5070 callbackStopped = true;

5072 else if ( callbackResult == 2 ) {

5073 // instantiate a thread to stop this thread

5074 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );

5075 if ( !threadHandle ) {

5076 errorType = RtAudioError::THREAD_ERROR;

5077 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";

5080 else if ( !CloseHandle( threadHandle ) ) {

5081 errorType = RtAudioError::THREAD_ERROR;

5082 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";

5086 callbackStopped = true;

5091 // Callback Output

5092 // ===============

5093 // 1. Convert callback buffer to stream format

5094 // 2. Convert callback buffer to stream sample rate and channel count

5095 // 3. Push callback buffer into outputBuffer

5097 if ( renderAudioClient && callbackPulled ) {

5098 if ( stream_.doConvertBuffer[OUTPUT] ) {

5099 // Convert callback buffer to stream format

5100 convertBuffer( stream_.deviceBuffer,

5101 stream_.userBuffer[OUTPUT],

5102 stream_.convertInfo[OUTPUT] );

5106 // Convert callback buffer to stream sample rate

5107 convertBufferWasapi( convBuffer,

5108 stream_.deviceBuffer,

5109 stream_.nDeviceChannels[OUTPUT],

5110 stream_.sampleRate,

5111 renderFormat->nSamplesPerSec,

5112 stream_.bufferSize,

5114 stream_.deviceFormat[OUTPUT] );

5116 // Push callback buffer into outputBuffer
// NOTE(review): 'convBufferSize' (a FRAME count, distinct from the byte
// count 'convBuffSize' above) is presumably written by convertBufferWasapi
// through an out-parameter on an elided line — confirm.

5117 callbackPushed = renderBuffer.pushBuffer( convBuffer,

5118 convBufferSize * stream_.nDeviceChannels[OUTPUT],

5119 stream_.deviceFormat[OUTPUT] );

5122 // if there is no render stream, set callbackPushed flag

5123 callbackPushed = true;

5128 // 1. Get capture buffer from stream

5129 // 2. Push capture buffer into inputBuffer

5130 // 3. If 2. was successful: Release capture buffer

5132 if ( captureAudioClient ) {

5133 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event

5134 if ( !callbackPulled ) {

5135 WaitForSingleObject( captureEvent, INFINITE );

5138 // Get capture buffer from stream

5139 hr = captureClient->GetBuffer( &streamBuffer,

5140 &bufferFrameCount,

5141 &captureFlags, NULL, NULL );

5142 if ( FAILED( hr ) ) {

5143 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";

5147 if ( bufferFrameCount != 0 ) {

5148 // Push capture buffer into inputBuffer

5149 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,

5150 bufferFrameCount * stream_.nDeviceChannels[INPUT],

5151 stream_.deviceFormat[INPUT] ) )

5153 // Release capture buffer

5154 hr = captureClient->ReleaseBuffer( bufferFrameCount );

5155 if ( FAILED( hr ) ) {

5156 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5162 // Inform WASAPI that capture was unsuccessful

5163 hr = captureClient->ReleaseBuffer( 0 );

5164 if ( FAILED( hr ) ) {

5165 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5172 // Inform WASAPI that capture was unsuccessful

5173 hr = captureClient->ReleaseBuffer( 0 );

5174 if ( FAILED( hr ) ) {

5175 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";

5183 // 1. Get render buffer from stream

5184 // 2. Pull next buffer from outputBuffer

5185 // 3. If 2. was successful: Fill render buffer with next buffer

5186 // Release render buffer

5188 if ( renderAudioClient ) {

5189 // if the callback output buffer was not pushed to renderBuffer, wait for next render event

5190 if ( callbackPulled && !callbackPushed ) {

5191 WaitForSingleObject( renderEvent, INFINITE );

5194 // Get render buffer from stream

5195 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );

5196 if ( FAILED( hr ) ) {

5197 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";

5201 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );

5202 if ( FAILED( hr ) ) {

5203 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";

// Writable space = total buffer size minus frames still queued (padding).
5207 bufferFrameCount -= numFramesPadding;

5209 if ( bufferFrameCount != 0 ) {

5210 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );

5211 if ( FAILED( hr ) ) {

5212 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";

5216 // Pull next buffer from outputBuffer

5217 // Fill render buffer with next buffer

5218 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,

5219 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],

5220 stream_.deviceFormat[OUTPUT] ) )

5222 // Release render buffer

5223 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );

5224 if ( FAILED( hr ) ) {

5225 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5231 // Inform WASAPI that render was unsuccessful

5232 hr = renderClient->ReleaseBuffer( 0, 0 );

5233 if ( FAILED( hr ) ) {

5234 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5241 // Inform WASAPI that render was unsuccessful

5242 hr = renderClient->ReleaseBuffer( 0, 0 );

5243 if ( FAILED( hr ) ) {

5244 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";

5250 // if the callback buffer was pushed renderBuffer reset callbackPulled flag

5251 if ( callbackPushed ) {

5252 callbackPulled = false;

5253 // tick stream time

5254 RtApi::tickStreamTime();

// Exit path cleanup: release COM-allocated formats and the conversion buffer.
// NOTE(review): numbering gap here — elided lines presumably include further
// cleanup and a CoUninitialize() balancing the CoInitialize() at entry; confirm.
5261 CoTaskMemFree( captureFormat );

5262 CoTaskMemFree( renderFormat );

5264 free ( convBuffer );

5268 // update stream state

5269 stream_.state = STREAM_STOPPED;

// If error text accumulated, raise it; the elided branch for the empty
// case presumably just returns — confirm.
5271 if ( errorText_.empty() )

5274 error( errorType );
\r
5277 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5281 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5283 // Modified by Robin Davies, October 2005
\r
5284 // - Improvements to DirectX pointer chasing.
\r
5285 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5286 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5287 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5288 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5290 #include <dsound.h>
\r
5291 #include <assert.h>
\r
5292 #include <algorithm>
\r
5294 #if defined(__MINGW32__)
\r
5295 // missing from latest mingw winapi
\r
5296 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5297 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5298 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5299 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5302 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5304 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5305 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
// Returns nonzero when 'pointer' lies in the half-open circular range
// [earlierPointer, laterPointer) on a DirectSound buffer of 'bufferSize'
// bytes. Wrap-around is handled by unwrapping: pointers that have wrapped
// past the end of the buffer are shifted up by bufferSize before the
// ordinary linear comparison.
5308 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )

5310 if ( pointer > bufferSize ) pointer -= bufferSize;

5311 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;

5312 if ( pointer < earlierPointer ) pointer += bufferSize;

5313 return pointer >= earlierPointer && pointer < laterPointer;
\r
5316 // A structure to hold various information related to the DirectSound
5317 // API implementation.
// (The struct header and several members are elided in this excerpt.)

5319 unsigned int drainCounter; // Tracks callback counts when draining

5320 bool internalDrain; // Indicates if stop is initiated from callback or not.

// Two-element arrays hold per-direction state; by the [2]-array convention
// used throughout this file one slot is playback and one is capture —
// TODO confirm which index is which.
5324 UINT bufferPointer[2];

5325 DWORD dsBufferSize[2];

5326 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.

// Default member-initializer: zero all counters, ids, buffers and flags.
5330 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }

5333 // Declarations for utility functions, callbacks, and structures
5334 // specific to the DirectSound implementation.

5335 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,

5336 LPCTSTR description,

5338 LPVOID lpContext );

5340 static const char* getErrorString( int code );

5342 static unsigned __stdcall callbackHandler( void *ptr );

// DsDevice member-initializer (struct header elided): device not yet
// matched by an enumeration pass; neither GUID slot validated.
5351 : found(false) { validId[0] = false; validId[1] = false; }

// Context handed to deviceQueryCallback during device enumeration.
5354 struct DsProbeData {

5356 std::vector<struct DsDevice>* dsDevices;
\r
//! Constructor: initialize COM for this thread and remember whether this
//! object owns the CoInitialize so the destructor can balance it with
//! CoUninitialize.
5359 RtApiDs :: RtApiDs()

5361 // Dsound will run both-threaded. If CoInitialize fails, then just

5362 // accept whatever the mainline chose for a threading model.

5363 coInitialized_ = false;

5364 HRESULT hr = CoInitialize( NULL );

5365 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
//! Destructor: close any still-open stream, then undo the constructor's
//! CoInitialize if it succeeded.
5368 RtApiDs :: ~RtApiDs()

5370 if ( stream_.state != STREAM_CLOSED ) closeStream();

5371 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5374 // The DirectSound default output is always the first device.
// (Body elided in this excerpt; per the comment above the default is
// index 0 — confirm against full source.)
5375 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5380 // The DirectSound default input is always the first input device,
5381 // which is the first capture device enumerated.
// (Body elided in this excerpt; per the comment above the default is the
// first enumerated capture device — confirm against full source.)
5382 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
//! Re-enumerates DirectSound output and capture devices into dsDevices,
//! prunes entries that have disappeared since the last scan, and returns
//! the resulting device count. Enumeration failures are reported as
//! WARNINGs and do not abort the query.
5387 unsigned int RtApiDs :: getDeviceCount( void )

5389 // Set query flag for previously found devices to false, so that we

5390 // can check for any devices that have disappeared.

5391 for ( unsigned int i=0; i<dsDevices.size(); i++ )

5392 dsDevices[i].found = false;

5394 // Query DirectSound devices.

5395 struct DsProbeData probeInfo;

5396 probeInfo.isInput = false;

5397 probeInfo.dsDevices = &dsDevices;

5398 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );

5399 if ( FAILED( result ) ) {

5400 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";

5401 errorText_ = errorStream_.str();

5402 error( RtAudioError::WARNING );

5405 // Query DirectSoundCapture devices.

5406 probeInfo.isInput = true;

5407 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );

5408 if ( FAILED( result ) ) {

5409 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";

5410 errorText_ = errorStream_.str();

5411 error( RtAudioError::WARNING );

5414 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note the loop header does not increment i: erasure shifts the next
// element into slot i, and the (elided) else-branch presumably advances
// i only when no erase occurred — confirm.

5415 for ( unsigned int i=0; i<dsDevices.size(); ) {

5416 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );

5420 return static_cast<unsigned int>(dsDevices.size());
\r
// Probe DirectSound device index 'device' and fill an RtAudio::DeviceInfo
// with its channel counts, supported sample rates and native data formats.
// On any DirectSound failure a WARNING is raised via error() and the
// partially-filled info is returned with info.probed left false.
// NOTE(review): this extract is damaged -- the integer at the start of each
// line is the ORIGINAL file's line number, and wherever that numbering
// skips (e.g. 5434-5437, 5441-5444, 5454-5456) source lines are missing
// (closing braces, 'HRESULT result;' / 'DSCAPS outCaps;' declarations,
// early returns). Code is left byte-identical; only comments were added.
5423 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5425 RtAudio::DeviceInfo info;
\r
5426 info.probed = false;
\r
// Lazily (re)enumerate devices if nothing has been probed yet; if the
// re-query still finds nothing, flag invalid use.
5428 if ( dsDevices.size() == 0 ) {
\r
5429 // Force a query of all devices
\r
5431 if ( dsDevices.size() == 0 ) {
\r
5432 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5433 error( RtAudioError::INVALID_USE );
\r
// Validate the caller-supplied index against the enumerated list.
5438 if ( device >= dsDevices.size() ) {
\r
5439 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5440 error( RtAudioError::INVALID_USE );
\r
// No output (render) GUID for this device: skip straight to capture probing.
5445 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5447 LPDIRECTSOUND output;
\r
5449 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5450 if ( FAILED( result ) ) {
\r
5451 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5452 errorText_ = errorStream_.str();
\r
5453 error( RtAudioError::WARNING );
\r
// Query the render object's capabilities (outCaps declaration is in one of
// the missing lines above -- original lines 5454-5456).
5457 outCaps.dwSize = sizeof( outCaps );
\r
5458 result = output->GetCaps( &outCaps );
\r
5459 if ( FAILED( result ) ) {
\r
5460 output->Release();
\r
5461 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5462 errorText_ = errorStream_.str();
\r
5463 error( RtAudioError::WARNING );
\r
5467 // Get output channel information.
\r
5468 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5470 // Get sample rate information.
\r
5471 info.sampleRates.clear();
\r
// Keep every candidate rate that falls inside the secondary-buffer range;
// prefer the highest supported rate that is <= 48 kHz.
5472 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5473 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5474 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5475 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5477 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5478 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5482 // Get format information.
\r
5483 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5484 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5486 output->Release();
\r
5488 if ( getDefaultOutputDevice() == device )
\r
5489 info.isDefaultOutput = true;
\r
// Output-only device (no capture GUID): the probe is complete here.
5491 if ( dsDevices[ device ].validId[1] == false ) {
\r
5492 info.name = dsDevices[ device ].name;
\r
5493 info.probed = true;
\r
// --- capture-side probe (the 'probeInput:' label sits in the missing
// lines 5494-5498) ---
5499 LPDIRECTSOUNDCAPTURE input;
\r
5500 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5501 if ( FAILED( result ) ) {
\r
5502 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5503 errorText_ = errorStream_.str();
\r
5504 error( RtAudioError::WARNING );
\r
5509 inCaps.dwSize = sizeof( inCaps );
\r
5510 result = input->GetCaps( &inCaps );
\r
5511 if ( FAILED( result ) ) {
\r
5513 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5514 errorText_ = errorStream_.str();
\r
5515 error( RtAudioError::WARNING );
\r
5519 // Get input channel information.
\r
5520 info.inputChannels = inCaps.dwChannels;
\r
5522 // Get sample rate and format information.
\r
5523 std::vector<unsigned int> rates;
\r
// Stereo capture: dwFormats is a bitmask of WAVE_FORMAT_* capabilities;
// derive native formats first, then the rates the chosen format supports.
5524 if ( inCaps.dwChannels >= 2 ) {
\r
5525 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5526 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5527 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5528 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5529 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5530 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5531 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5532 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5534 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5535 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5536 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5537 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5538 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5540 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5541 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5542 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5543 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5544 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
// Mono capture: same derivation using the WAVE_FORMAT_*M* (mono) bits.
5547 else if ( inCaps.dwChannels == 1 ) {
\r
5548 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5549 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5550 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5551 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5552 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5553 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5554 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5555 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5557 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5558 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5559 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5560 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5561 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5563 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5564 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5565 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5566 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5567 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5570 else info.inputChannels = 0; // technically, this would be an error
\r
5574 if ( info.inputChannels == 0 ) return info;
\r
5576 // Copy the supported rates to the info structure but avoid duplication.
\r
// ('found' is declared in a missing line -- original 5579.)
5578 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5580 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5581 if ( rates[i] == info.sampleRates[j] ) {
\r
5586 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5588 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5590 // If device opens for both playback and capture, we determine the channels.
\r
5591 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5592 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
// DirectSound always lists the default capture device first.
5594 if ( device == 0 ) info.isDefaultInput = true;
\r
5596 // Copy name and return.
\r
5597 info.name = dsDevices[ device ].name;
\r
5598 info.probed = true;
\r
// Open DirectSound playback (mode == OUTPUT) or capture (mode == INPUT)
// on 'device': create the DS object, set the format on the primary buffer,
// create + zero the secondary/capture buffer, allocate user/device
// conversion buffers, build the shared DsHandle, and spin up the callback
// thread for the first mode opened.  Returns true on success (the returns
// and the 'error:' cleanup label sit partly in missing lines -- see note).
// NOTE(review): damaged extract -- the leading integer on each line is the
// original file's line number; where that numbering skips, lines are
// missing (closing braces, 'return FAILURE;' statements, declarations such
// as 'HRESULT result;', 'DSCAPS outCaps;', 'DSBCAPS dsbcaps;',
// 'LPVOID audioPtr;', 'DWORD dataLen;', 'int nBuffers = 0;').
// Code is left byte-identical; only comments were added.
5602 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5603 unsigned int firstChannel, unsigned int sampleRate,
\r
5604 RtAudioFormat format, unsigned int *bufferSize,
\r
5605 RtAudio::StreamOptions *options )
\r
// DirectSound is limited to mono/stereo per device.
5607 if ( channels + firstChannel > 2 ) {
\r
5608 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
// Sanity-check the device list and index (normally already validated).
5612 size_t nDevices = dsDevices.size();
\r
5613 if ( nDevices == 0 ) {
\r
5614 // This should not happen because a check is made before this function is called.
\r
5615 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5619 if ( device >= nDevices ) {
\r
5620 // This should not happen because a check is made before this function is called.
\r
5621 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
// The device must expose a GUID for the requested direction.
5625 if ( mode == OUTPUT ) {
\r
5626 if ( dsDevices[ device ].validId[0] == false ) {
\r
5627 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5628 errorText_ = errorStream_.str();
\r
5632 else { // mode == INPUT
\r
5633 if ( dsDevices[ device ].validId[1] == false ) {
\r
5634 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5635 errorText_ = errorStream_.str();
\r
5640 // According to a note in PortAudio, using GetDesktopWindow()
\r
5641 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5642 // that occur when the application's window is not the foreground
\r
5643 // window. Also, if the application window closes before the
\r
5644 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5645 // problems when using GetDesktopWindow() but it seems fine now
\r
5646 // (January 2010). I'll leave it commented here.
\r
5647 // HWND hWnd = GetForegroundWindow();
\r
5648 HWND hWnd = GetDesktopWindow();
\r
5650 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5651 // two. This is a judgement call and a value of two is probably too
\r
5652 // low for capture, but it should work for playback.
\r
5654 if ( options ) nBuffers = options->numberOfBuffers;
\r
5655 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5656 if ( nBuffers < 2 ) nBuffers = 3;
\r
5658 // Check the lower range of the user-specified buffer size and set
\r
5659 // (arbitrarily) to a lower bound of 32.
\r
5660 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5662 // Create the wave format structure. The data format setting will
\r
5663 // be determined later.
\r
5664 WAVEFORMATEX waveFormat;
\r
5665 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5666 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5667 waveFormat.nChannels = channels + firstChannel;
\r
5668 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5670 // Determine the device buffer size. By default, we'll use the value
\r
5671 // defined above (32K), but we will grow it to make allowances for
\r
5672 // very large software buffer sizes.
\r
5673 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5674 DWORD dsPointerLeadTime = 0;
\r
// ohandle/bhandle carry the DS object and DS buffer into the shared
// DsHandle below.
5676 void *ohandle = 0, *bhandle = 0;
\r
// ----- playback path -----
5678 if ( mode == OUTPUT ) {
\r
5680 LPDIRECTSOUND output;
\r
5681 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5682 if ( FAILED( result ) ) {
\r
5683 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5684 errorText_ = errorStream_.str();
\r
5689 outCaps.dwSize = sizeof( outCaps );
\r
5690 result = output->GetCaps( &outCaps );
\r
5691 if ( FAILED( result ) ) {
\r
5692 output->Release();
\r
5693 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5694 errorText_ = errorStream_.str();
\r
5698 // Check channel information.
\r
5699 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5700 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5701 errorText_ = errorStream_.str();
\r
5705 // Check format information. Use 16-bit format unless not
\r
5706 // supported or user requests 8-bit.
\r
5707 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5708 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5709 waveFormat.wBitsPerSample = 16;
\r
5710 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5713 waveFormat.wBitsPerSample = 8;
\r
5714 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5716 stream_.userFormat = format;
\r
5718 // Update wave format structure and buffer information.
\r
5719 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5720 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5721 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5723 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5724 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5725 dsBufferSize *= 2;
\r
5727 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5728 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5729 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5730 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5731 if ( FAILED( result ) ) {
\r
5732 output->Release();
\r
5733 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5734 errorText_ = errorStream_.str();
\r
5738 // Even though we will write to the secondary buffer, we need to
\r
5739 // access the primary buffer to set the correct output format
\r
5740 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5741 // buffer description.
\r
5742 DSBUFFERDESC bufferDescription;
\r
5743 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5744 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5745 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5747 // Obtain the primary buffer
\r
5748 LPDIRECTSOUNDBUFFER buffer;
\r
5749 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5750 if ( FAILED( result ) ) {
\r
5751 output->Release();
\r
5752 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5753 errorText_ = errorStream_.str();
\r
5757 // Set the primary DS buffer sound format.
\r
5758 result = buffer->SetFormat( &waveFormat );
\r
5759 if ( FAILED( result ) ) {
\r
5760 output->Release();
\r
5761 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5762 errorText_ = errorStream_.str();
\r
5766 // Setup the secondary DS buffer description.
\r
5767 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5768 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5769 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5770 DSBCAPS_GLOBALFOCUS |
\r
5771 DSBCAPS_GETCURRENTPOSITION2 |
\r
5772 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5773 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5774 bufferDescription.lpwfxFormat = &waveFormat;
\r
5776 // Try to create the secondary DS buffer. If that doesn't work,
\r
5777 // try to use software mixing. Otherwise, there's a problem.
\r
5778 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5779 if ( FAILED( result ) ) {
\r
// Fallback: same flags but DSBCAPS_LOCSOFTWARE instead of LOCHARDWARE.
5780 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5781 DSBCAPS_GLOBALFOCUS |
\r
5782 DSBCAPS_GETCURRENTPOSITION2 |
\r
5783 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5784 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5785 if ( FAILED( result ) ) {
\r
5786 output->Release();
\r
5787 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5788 errorText_ = errorStream_.str();
\r
5793 // Get the buffer size ... might be different from what we specified.
\r
5795 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5796 result = buffer->GetCaps( &dsbcaps );
\r
5797 if ( FAILED( result ) ) {
\r
5798 output->Release();
\r
5799 buffer->Release();
\r
5800 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5801 errorText_ = errorStream_.str();
\r
5805 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5807 // Lock the DS buffer
\r
// (audioPtr/dataLen declarations are in the missing lines 5808-5809.)
5810 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5811 if ( FAILED( result ) ) {
\r
5812 output->Release();
\r
5813 buffer->Release();
\r
5814 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5815 errorText_ = errorStream_.str();
\r
5819 // Zero the DS buffer
\r
5820 ZeroMemory( audioPtr, dataLen );
\r
5822 // Unlock the DS buffer
\r
5823 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5824 if ( FAILED( result ) ) {
\r
5825 output->Release();
\r
5826 buffer->Release();
\r
5827 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5828 errorText_ = errorStream_.str();
\r
5832 ohandle = (void *) output;
\r
5833 bhandle = (void *) buffer;
\r
// ----- capture path -----
5836 if ( mode == INPUT ) {
\r
5838 LPDIRECTSOUNDCAPTURE input;
\r
5839 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5840 if ( FAILED( result ) ) {
\r
5841 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5842 errorText_ = errorStream_.str();
\r
5847 inCaps.dwSize = sizeof( inCaps );
\r
5848 result = input->GetCaps( &inCaps );
\r
5849 if ( FAILED( result ) ) {
\r
5851 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5852 errorText_ = errorStream_.str();
\r
5856 // Check channel information.
\r
5857 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5858 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5862 // Check format information. Use 16-bit format unless user
\r
5863 // requests 8-bit.
\r
5864 DWORD deviceFormats;
\r
5865 if ( channels + firstChannel == 2 ) {
\r
5866 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5867 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5868 waveFormat.wBitsPerSample = 8;
\r
5869 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5871 else { // assume 16-bit is supported
\r
5872 waveFormat.wBitsPerSample = 16;
\r
5873 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5876 else { // channel == 1
\r
5877 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5878 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5879 waveFormat.wBitsPerSample = 8;
\r
5880 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5882 else { // assume 16-bit is supported
\r
5883 waveFormat.wBitsPerSample = 16;
\r
5884 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5887 stream_.userFormat = format;
\r
5889 // Update wave format structure and buffer information.
\r
5890 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5891 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5892 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5894 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5895 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5896 dsBufferSize *= 2;
\r
5898 // Setup the secondary DS buffer description.
\r
5899 DSCBUFFERDESC bufferDescription;
\r
5900 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5901 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5902 bufferDescription.dwFlags = 0;
\r
5903 bufferDescription.dwReserved = 0;
\r
5904 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5905 bufferDescription.lpwfxFormat = &waveFormat;
\r
5907 // Create the capture buffer.
\r
5908 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5909 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5910 if ( FAILED( result ) ) {
\r
5912 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5913 errorText_ = errorStream_.str();
\r
5917 // Get the buffer size ... might be different from what we specified.
\r
5918 DSCBCAPS dscbcaps;
\r
5919 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5920 result = buffer->GetCaps( &dscbcaps );
\r
5921 if ( FAILED( result ) ) {
\r
5923 buffer->Release();
\r
5924 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5925 errorText_ = errorStream_.str();
\r
5929 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5931 // NOTE: We could have a problem here if this is a duplex stream
\r
5932 // and the play and capture hardware buffer sizes are different
\r
5933 // (I'm actually not sure if that is a problem or not).
\r
5934 // Currently, we are not verifying that.
\r
5936 // Lock the capture buffer
\r
5939 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5940 if ( FAILED( result ) ) {
\r
5942 buffer->Release();
\r
5943 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5944 errorText_ = errorStream_.str();
\r
5948 // Zero the buffer
\r
5949 ZeroMemory( audioPtr, dataLen );
\r
5951 // Unlock the buffer
\r
5952 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5953 if ( FAILED( result ) ) {
\r
5955 buffer->Release();
\r
5956 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5957 errorText_ = errorStream_.str();
\r
5961 ohandle = (void *) input;
\r
5962 bhandle = (void *) buffer;
\r
5965 // Set various stream parameters
\r
5966 DsHandle *handle = 0;
\r
5967 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5968 stream_.nUserChannels[mode] = channels;
\r
5969 stream_.bufferSize = *bufferSize;
\r
5970 stream_.channelOffset[mode] = firstChannel;
\r
5971 stream_.deviceInterleaved[mode] = true;
\r
5972 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5973 else stream_.userInterleaved = true;
\r
5975 // Set flag for buffer conversion
\r
// Conversion is needed when channel count, sample format or interleaving
// differs between the user side and the device side.
5976 stream_.doConvertBuffer[mode] = false;
\r
5977 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5978 stream_.doConvertBuffer[mode] = true;
\r
5979 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5980 stream_.doConvertBuffer[mode] = true;
\r
5981 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5982 stream_.nUserChannels[mode] > 1 )
\r
5983 stream_.doConvertBuffer[mode] = true;
\r
5985 // Allocate necessary internal buffers
\r
5986 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5987 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5988 if ( stream_.userBuffer[mode] == NULL ) {
\r
5989 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5993 if ( stream_.doConvertBuffer[mode] ) {
\r
5995 bool makeBuffer = true;
\r
5996 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
// For duplex, reuse the output-side device buffer when it is big enough.
5997 if ( mode == INPUT ) {
\r
5998 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5999 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6000 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
6004 if ( makeBuffer ) {
\r
6005 bufferBytes *= *bufferSize;
\r
6006 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6007 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6008 if ( stream_.deviceBuffer == NULL ) {
\r
6009 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
6015 // Allocate our DsHandle structures for the stream.
\r
6016 if ( stream_.apiHandle == 0 ) {
\r
6018 handle = new DsHandle;
\r
6020 catch ( std::bad_alloc& ) {
\r
// NOTE(review): message says "AsioHandle" but this is the DS backend --
// looks like a copy/paste slip in the original text; left byte-identical.
6021 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
6025 // Create a manual-reset event.
\r
6026 handle->condition = CreateEvent( NULL, // no security
\r
6027 TRUE, // manual-reset
\r
6028 FALSE, // non-signaled initially
\r
6029 NULL ); // unnamed
\r
6030 stream_.apiHandle = (void *) handle;
\r
6033 handle = (DsHandle *) stream_.apiHandle;
\r
6034 handle->id[mode] = ohandle;
\r
6035 handle->buffer[mode] = bhandle;
\r
6036 handle->dsBufferSize[mode] = dsBufferSize;
\r
6037 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
6039 stream_.device[mode] = device;
\r
6040 stream_.state = STREAM_STOPPED;
\r
6041 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
6042 // We had already set up an output stream.
\r
6043 stream_.mode = DUPLEX;
\r
6045 stream_.mode = mode;
\r
6046 stream_.nBuffers = nBuffers;
\r
6047 stream_.sampleRate = sampleRate;
\r
6049 // Setup the buffer conversion information structure.
\r
6050 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6052 // Setup the callback thread.
\r
// Only started once; a duplex open reuses the thread from the first mode.
6053 if ( stream_.callbackInfo.isRunning == false ) {
\r
6054 unsigned threadId;
\r
6055 stream_.callbackInfo.isRunning = true;
\r
6056 stream_.callbackInfo.object = (void *) this;
\r
6057 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
6058 &stream_.callbackInfo, 0, &threadId );
\r
6059 if ( stream_.callbackInfo.thread == 0 ) {
\r
6060 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
6064 // Boost DS thread priority
\r
6065 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
// ----- error cleanup (the 'error:' label is in the missing lines
// 6066-6070): release any DS objects/buffers, close the event, free all
// user/device buffers and mark the stream closed -----
6071 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6072 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6073 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6074 if ( buffer ) buffer->Release();
\r
6075 object->Release();
\r
6077 if ( handle->buffer[1] ) {
\r
6078 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6079 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6080 if ( buffer ) buffer->Release();
\r
6081 object->Release();
\r
6083 CloseHandle( handle->condition );
\r
6085 stream_.apiHandle = 0;
\r
6088 for ( int i=0; i<2; i++ ) {
\r
6089 if ( stream_.userBuffer[i] ) {
\r
6090 free( stream_.userBuffer[i] );
\r
6091 stream_.userBuffer[i] = 0;
\r
6095 if ( stream_.deviceBuffer ) {
\r
6096 free( stream_.deviceBuffer );
\r
6097 stream_.deviceBuffer = 0;
\r
6100 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, close the signalling
// event, free user/device buffers, and reset mode/state.
// NOTE(review): damaged extract -- leading integers are the original
// file's line numbers; skips in that numbering mean missing lines (early
// return after the WARNING, closing braces, null-check on 'buffer').
// Code is left byte-identical; only comments were added.
6104 void RtApiDs :: closeStream()
\r
6106 if ( stream_.state == STREAM_CLOSED ) {
\r
6107 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
6108 error( RtAudioError::WARNING );
\r
6112 // Stop the callback thread.
\r
// Clearing isRunning makes the callback thread exit; wait for it, then
// reclaim its handle.
6113 stream_.callbackInfo.isRunning = false;
\r
6114 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
6115 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
6117 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// Release playback-side DS buffer and object, if present.
6119 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6120 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6121 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6124 buffer->Release();
\r
6126 object->Release();
\r
// Release capture-side DS buffer and object, if present.
6128 if ( handle->buffer[1] ) {
\r
6129 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6130 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6133 buffer->Release();
\r
6135 object->Release();
\r
6137 CloseHandle( handle->condition );
\r
6139 stream_.apiHandle = 0;
\r
// Free the per-direction user buffers and the shared device buffer.
6142 for ( int i=0; i<2; i++ ) {
\r
6143 if ( stream_.userBuffer[i] ) {
\r
6144 free( stream_.userBuffer[i] );
\r
6145 stream_.userBuffer[i] = 0;
\r
6149 if ( stream_.deviceBuffer ) {
\r
6150 free( stream_.deviceBuffer );
\r
6151 stream_.deviceBuffer = 0;
\r
6154 stream_.mode = UNINITIALIZED;
\r
6155 stream_.state = STREAM_CLOSED;
\r
// Start the stream: raise timer resolution, compute the duplex preroll,
// start the DS playback buffer (looping) and/or capture buffer, then reset
// the drain state and mark the stream running.
// NOTE(review): damaged extract -- leading integers are the original
// file's line numbers; skips indicate missing lines (early return after
// the WARNING, closing braces, goto-unlock on failure paths).
// Code is left byte-identical; only comments were added.
6158 void RtApiDs :: startStream()
\r
6161 if ( stream_.state == STREAM_RUNNING ) {
\r
6162 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6163 error( RtAudioError::WARNING );
\r
6167 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6169 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6170 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6171 // this is already in effect.
\r
6172 timeBeginPeriod( 1 );
\r
6174 buffersRolling = false;
\r
6175 duplexPrerollBytes = 0;
\r
6177 if ( stream_.mode == DUPLEX ) {
\r
6178 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6179 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6182 HRESULT result = 0;
\r
// Start the looping playback buffer for OUTPUT/DUPLEX streams.
6183 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6185 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6186 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6187 if ( FAILED( result ) ) {
\r
6188 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6189 errorText_ = errorStream_.str();
\r
// Start the looping capture buffer for INPUT/DUPLEX streams.
6194 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6196 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6197 result = buffer->Start( DSCBSTART_LOOPING );
\r
6198 if ( FAILED( result ) ) {
\r
6199 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6200 errorText_ = errorStream_.str();
\r
// Reset drain bookkeeping and the stop-signalling event before running.
6205 handle->drainCounter = 0;
\r
6206 handle->internalDrain = false;
\r
6207 ResetEvent( handle->condition );
\r
6208 stream_.state = STREAM_RUNNING;
\r
6211 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop the stream: for output, optionally wait for the callback to drain
// pending data (signalled via handle->condition), then Stop() each DS
// buffer, zero it (Lock/ZeroMemory/Unlock) so a restart does not replay
// stale audio, rewind the software buffer pointers, restore the scheduler
// period and release the stream mutex.
// NOTE(review): damaged extract -- leading integers are the original
// file's line numbers; skips indicate missing lines (early return after
// the WARNING, 'LPVOID audioPtr;'/'DWORD dataLen;' declarations, closing
// braces, goto-unlock on failure paths).
// Code is left byte-identical; only comments were added.
6214 void RtApiDs :: stopStream()
\r
6217 if ( stream_.state == STREAM_STOPPED ) {
\r
6218 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6219 error( RtAudioError::WARNING );
\r
6223 HRESULT result = 0;
\r
6226 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
// ----- playback side -----
6227 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// If no drain is in progress, request one (drainCounter = 2) and block
// until the callback signals the condition event.
6228 if ( handle->drainCounter == 0 ) {
\r
6229 handle->drainCounter = 2;
\r
6230 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6233 stream_.state = STREAM_STOPPED;
\r
6235 MUTEX_LOCK( &stream_.mutex );
\r
6237 // Stop the buffer and clear memory
\r
6238 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6239 result = buffer->Stop();
\r
6240 if ( FAILED( result ) ) {
\r
6241 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6242 errorText_ = errorStream_.str();
\r
6246 // Lock the buffer and clear it so that if we start to play again,
\r
6247 // we won't have old data playing.
\r
6248 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6249 if ( FAILED( result ) ) {
\r
6250 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6251 errorText_ = errorStream_.str();
\r
6255 // Zero the DS buffer
\r
6256 ZeroMemory( audioPtr, dataLen );
\r
6258 // Unlock the DS buffer
\r
6259 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6260 if ( FAILED( result ) ) {
\r
6261 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6262 errorText_ = errorStream_.str();
\r
6266 // If we start playing again, we must begin at beginning of buffer.
\r
6267 handle->bufferPointer[0] = 0;
\r
// ----- capture side -----
6270 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6271 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6275 stream_.state = STREAM_STOPPED;
\r
// In DUPLEX mode the mutex was already taken on the playback side above.
6277 if ( stream_.mode != DUPLEX )
\r
6278 MUTEX_LOCK( &stream_.mutex );
\r
6280 result = buffer->Stop();
\r
6281 if ( FAILED( result ) ) {
\r
6282 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6283 errorText_ = errorStream_.str();
\r
6287 // Lock the buffer and clear it so that if we start to play again,
\r
6288 // we won't have old data playing.
\r
6289 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6290 if ( FAILED( result ) ) {
\r
6291 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6292 errorText_ = errorStream_.str();
\r
6296 // Zero the DS buffer
\r
6297 ZeroMemory( audioPtr, dataLen );
\r
6299 // Unlock the DS buffer
\r
6300 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6301 if ( FAILED( result ) ) {
\r
6302 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6303 errorText_ = errorStream_.str();
\r
6307 // If we start recording again, we must begin at beginning of buffer.
\r
6308 handle->bufferPointer[1] = 0;
\r
6312 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6313 MUTEX_UNLOCK( &stream_.mutex );
\r
6315 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6318 void RtApiDs :: abortStream()
\r
6321 if ( stream_.state == STREAM_STOPPED ) {
\r
6322 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6323 error( RtAudioError::WARNING );
\r
6327 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6328 handle->drainCounter = 2;
\r
6333 void RtApiDs :: callbackEvent()
\r
6335 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6336 Sleep( 50 ); // sleep 50 milliseconds
\r
6340 if ( stream_.state == STREAM_CLOSED ) {
\r
6341 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6342 error( RtAudioError::WARNING );
\r
6346 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6347 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6349 // Check if we were draining the stream and signal is finished.
\r
6350 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6352 stream_.state = STREAM_STOPPING;
\r
6353 if ( handle->internalDrain == false )
\r
6354 SetEvent( handle->condition );
\r
6360 // Invoke user callback to get fresh output data UNLESS we are
\r
6361 // draining stream.
\r
6362 if ( handle->drainCounter == 0 ) {
\r
6363 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6364 double streamTime = getStreamTime();
\r
6365 RtAudioStreamStatus status = 0;
\r
6366 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6367 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6368 handle->xrun[0] = false;
\r
6370 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6371 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6372 handle->xrun[1] = false;
\r
6374 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6375 stream_.bufferSize, streamTime, status, info->userData );
\r
6376 if ( cbReturnValue == 2 ) {
\r
6377 stream_.state = STREAM_STOPPING;
\r
6378 handle->drainCounter = 2;
\r
6382 else if ( cbReturnValue == 1 ) {
\r
6383 handle->drainCounter = 1;
\r
6384 handle->internalDrain = true;
\r
6389 DWORD currentWritePointer, safeWritePointer;
\r
6390 DWORD currentReadPointer, safeReadPointer;
\r
6391 UINT nextWritePointer;
\r
6393 LPVOID buffer1 = NULL;
\r
6394 LPVOID buffer2 = NULL;
\r
6395 DWORD bufferSize1 = 0;
\r
6396 DWORD bufferSize2 = 0;
\r
6401 MUTEX_LOCK( &stream_.mutex );
\r
6402 if ( stream_.state == STREAM_STOPPED ) {
\r
6403 MUTEX_UNLOCK( &stream_.mutex );
\r
6407 if ( buffersRolling == false ) {
\r
6408 if ( stream_.mode == DUPLEX ) {
\r
6409 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6411 // It takes a while for the devices to get rolling. As a result,
\r
6412 // there's no guarantee that the capture and write device pointers
\r
6413 // will move in lockstep. Wait here for both devices to start
\r
6414 // rolling, and then set our buffer pointers accordingly.
\r
6415 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6416 // bytes later than the write buffer.
\r
6418 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6419 // take place between the two GetCurrentPosition calls... but I'm
\r
6420 // really not sure how to solve the problem. Temporarily boost to
\r
6421 // Realtime priority, maybe; but I'm not sure what priority the
\r
6422 // DirectSound service threads run at. We *should* be roughly
\r
6423 // within a ms or so of correct.
\r
6425 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6426 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6428 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6430 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6431 if ( FAILED( result ) ) {
\r
6432 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6433 errorText_ = errorStream_.str();
\r
6434 MUTEX_UNLOCK( &stream_.mutex );
\r
6435 error( RtAudioError::SYSTEM_ERROR );
\r
6438 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6439 if ( FAILED( result ) ) {
\r
6440 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6441 errorText_ = errorStream_.str();
\r
6442 MUTEX_UNLOCK( &stream_.mutex );
\r
6443 error( RtAudioError::SYSTEM_ERROR );
\r
6447 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6448 if ( FAILED( result ) ) {
\r
6449 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6450 errorText_ = errorStream_.str();
\r
6451 MUTEX_UNLOCK( &stream_.mutex );
\r
6452 error( RtAudioError::SYSTEM_ERROR );
\r
6455 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6456 if ( FAILED( result ) ) {
\r
6457 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6458 errorText_ = errorStream_.str();
\r
6459 MUTEX_UNLOCK( &stream_.mutex );
\r
6460 error( RtAudioError::SYSTEM_ERROR );
\r
6463 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6467 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6469 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6470 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6471 handle->bufferPointer[1] = safeReadPointer;
\r
6473 else if ( stream_.mode == OUTPUT ) {
\r
6475 // Set the proper nextWritePosition after initial startup.
\r
6476 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6477 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6478 if ( FAILED( result ) ) {
\r
6479 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6480 errorText_ = errorStream_.str();
\r
6481 MUTEX_UNLOCK( &stream_.mutex );
\r
6482 error( RtAudioError::SYSTEM_ERROR );
\r
6485 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6486 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6489 buffersRolling = true;
\r
6492 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6494 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6496 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6497 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6498 bufferBytes *= formatBytes( stream_.userFormat );
\r
6499 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6502 // Setup parameters and do buffer conversion if necessary.
\r
6503 if ( stream_.doConvertBuffer[0] ) {
\r
6504 buffer = stream_.deviceBuffer;
\r
6505 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6506 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6507 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6510 buffer = stream_.userBuffer[0];
\r
6511 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6512 bufferBytes *= formatBytes( stream_.userFormat );
\r
6515 // No byte swapping necessary in DirectSound implementation.
\r
6517 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6518 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6520 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6521 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6523 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6524 nextWritePointer = handle->bufferPointer[0];
\r
6526 DWORD endWrite, leadPointer;
\r
6528 // Find out where the read and "safe write" pointers are.
\r
6529 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6530 if ( FAILED( result ) ) {
\r
6531 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6532 errorText_ = errorStream_.str();
\r
6533 MUTEX_UNLOCK( &stream_.mutex );
\r
6534 error( RtAudioError::SYSTEM_ERROR );
\r
6538 // We will copy our output buffer into the region between
\r
6539 // safeWritePointer and leadPointer. If leadPointer is not
\r
6540 // beyond the next endWrite position, wait until it is.
\r
6541 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6542 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6543 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6544 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6545 endWrite = nextWritePointer + bufferBytes;
\r
6547 // Check whether the entire write region is behind the play pointer.
\r
6548 if ( leadPointer >= endWrite ) break;
\r
6550 // If we are here, then we must wait until the leadPointer advances
\r
6551 // beyond the end of our next write region. We use the
\r
6552 // Sleep() function to suspend operation until that happens.
\r
6553 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6554 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6555 if ( millis < 1.0 ) millis = 1.0;
\r
6556 Sleep( (DWORD) millis );
\r
6559 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6560 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6561 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6562 handle->xrun[0] = true;
\r
6563 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6564 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6565 handle->bufferPointer[0] = nextWritePointer;
\r
6566 endWrite = nextWritePointer + bufferBytes;
\r
6569 // Lock free space in the buffer
\r
6570 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6571 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6572 if ( FAILED( result ) ) {
\r
6573 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6574 errorText_ = errorStream_.str();
\r
6575 MUTEX_UNLOCK( &stream_.mutex );
\r
6576 error( RtAudioError::SYSTEM_ERROR );
\r
6580 // Copy our buffer into the DS buffer
\r
6581 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6582 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6584 // Update our buffer offset and unlock sound buffer
\r
6585 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6586 if ( FAILED( result ) ) {
\r
6587 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6588 errorText_ = errorStream_.str();
\r
6589 MUTEX_UNLOCK( &stream_.mutex );
\r
6590 error( RtAudioError::SYSTEM_ERROR );
\r
6593 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6594 handle->bufferPointer[0] = nextWritePointer;
\r
6597 // Don't bother draining input
\r
6598 if ( handle->drainCounter ) {
\r
6599 handle->drainCounter++;
\r
6603 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6605 // Setup parameters.
\r
6606 if ( stream_.doConvertBuffer[1] ) {
\r
6607 buffer = stream_.deviceBuffer;
\r
6608 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6609 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6612 buffer = stream_.userBuffer[1];
\r
6613 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6614 bufferBytes *= formatBytes( stream_.userFormat );
\r
6617 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6618 long nextReadPointer = handle->bufferPointer[1];
\r
6619 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6621 // Find out where the write and "safe read" pointers are.
\r
6622 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6623 if ( FAILED( result ) ) {
\r
6624 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6625 errorText_ = errorStream_.str();
\r
6626 MUTEX_UNLOCK( &stream_.mutex );
\r
6627 error( RtAudioError::SYSTEM_ERROR );
\r
6631 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6632 DWORD endRead = nextReadPointer + bufferBytes;
\r
6634 // Handling depends on whether we are INPUT or DUPLEX.
\r
6635 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6636 // then a wait here will drag the write pointers into the forbidden zone.
\r
6638 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6639 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6640 // practical way to sync up the read and write pointers reliably, given the
\r
6641 // the very complex relationship between phase and increment of the read and write
\r
6644 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6645 // provide a pre-roll period of 0.5 seconds in which we return
\r
6646 // zeros from the read buffer while the pointers sync up.
\r
6648 if ( stream_.mode == DUPLEX ) {
\r
6649 if ( safeReadPointer < endRead ) {
\r
6650 if ( duplexPrerollBytes <= 0 ) {
\r
6651 // Pre-roll time over. Be more agressive.
\r
6652 int adjustment = endRead-safeReadPointer;
\r
6654 handle->xrun[1] = true;
\r
6656 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6657 // and perform fine adjustments later.
\r
6658 // - small adjustments: back off by twice as much.
\r
6659 if ( adjustment >= 2*bufferBytes )
\r
6660 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6662 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6664 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6668 // In pre=roll time. Just do it.
\r
6669 nextReadPointer = safeReadPointer - bufferBytes;
\r
6670 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6672 endRead = nextReadPointer + bufferBytes;
\r
6675 else { // mode == INPUT
\r
6676 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6677 // See comments for playback.
\r
6678 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6679 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6680 if ( millis < 1.0 ) millis = 1.0;
\r
6681 Sleep( (DWORD) millis );
\r
6683 // Wake up and find out where we are now.
\r
6684 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6685 if ( FAILED( result ) ) {
\r
6686 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6687 errorText_ = errorStream_.str();
\r
6688 MUTEX_UNLOCK( &stream_.mutex );
\r
6689 error( RtAudioError::SYSTEM_ERROR );
\r
6693 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6697 // Lock free space in the buffer
\r
6698 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6699 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6700 if ( FAILED( result ) ) {
\r
6701 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6702 errorText_ = errorStream_.str();
\r
6703 MUTEX_UNLOCK( &stream_.mutex );
\r
6704 error( RtAudioError::SYSTEM_ERROR );
\r
6708 if ( duplexPrerollBytes <= 0 ) {
\r
6709 // Copy our buffer into the DS buffer
\r
6710 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6711 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6714 memset( buffer, 0, bufferSize1 );
\r
6715 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6716 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6719 // Update our buffer offset and unlock sound buffer
\r
6720 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6721 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6722 if ( FAILED( result ) ) {
\r
6723 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6724 errorText_ = errorStream_.str();
\r
6725 MUTEX_UNLOCK( &stream_.mutex );
\r
6726 error( RtAudioError::SYSTEM_ERROR );
\r
6729 handle->bufferPointer[1] = nextReadPointer;
\r
6731 // No byte swapping necessary in DirectSound implementation.
\r
6733 // If necessary, convert 8-bit data from unsigned to signed.
\r
6734 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6735 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6737 // Do buffer conversion if necessary.
\r
6738 if ( stream_.doConvertBuffer[1] )
\r
6739 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6743 MUTEX_UNLOCK( &stream_.mutex );
\r
6744 RtApi::tickStreamTime();
\r
6747 // Definitions for utility functions and callbacks
\r
6748 // specific to the DirectSound implementation.
\r
6750 static unsigned __stdcall callbackHandler( void *ptr )
\r
6752 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6753 RtApiDs *object = (RtApiDs *) info->object;
\r
6754 bool* isRunning = &info->isRunning;
\r
6756 while ( *isRunning == true ) {
\r
6757 object->callbackEvent();
\r
6760 _endthreadex( 0 );
\r
6764 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6765 LPCTSTR description,
\r
6766 LPCTSTR /*module*/,
\r
6767 LPVOID lpContext )
\r
6769 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6770 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6773 bool validDevice = false;
\r
6774 if ( probeInfo.isInput == true ) {
\r
6776 LPDIRECTSOUNDCAPTURE object;
\r
6778 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6779 if ( hr != DS_OK ) return TRUE;
\r
6781 caps.dwSize = sizeof(caps);
\r
6782 hr = object->GetCaps( &caps );
\r
6783 if ( hr == DS_OK ) {
\r
6784 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6785 validDevice = true;
\r
6787 object->Release();
\r
6791 LPDIRECTSOUND object;
\r
6792 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6793 if ( hr != DS_OK ) return TRUE;
\r
6795 caps.dwSize = sizeof(caps);
\r
6796 hr = object->GetCaps( &caps );
\r
6797 if ( hr == DS_OK ) {
\r
6798 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6799 validDevice = true;
\r
6801 object->Release();
\r
6804 // If good device, then save its name and guid.
\r
6805 std::string name = convertCharPointerToStdString( description );
\r
6806 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6807 if ( lpguid == NULL )
\r
6808 name = "Default Device";
\r
6809 if ( validDevice ) {
\r
6810 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6811 if ( dsDevices[i].name == name ) {
\r
6812 dsDevices[i].found = true;
\r
6813 if ( probeInfo.isInput ) {
\r
6814 dsDevices[i].id[1] = lpguid;
\r
6815 dsDevices[i].validId[1] = true;
\r
6818 dsDevices[i].id[0] = lpguid;
\r
6819 dsDevices[i].validId[0] = true;
\r
6826 device.name = name;
\r
6827 device.found = true;
\r
6828 if ( probeInfo.isInput ) {
\r
6829 device.id[1] = lpguid;
\r
6830 device.validId[1] = true;
\r
6833 device.id[0] = lpguid;
\r
6834 device.validId[0] = true;
\r
6836 dsDevices.push_back( device );
\r
6842 static const char* getErrorString( int code )
\r
6846 case DSERR_ALLOCATED:
\r
6847 return "Already allocated";
\r
6849 case DSERR_CONTROLUNAVAIL:
\r
6850 return "Control unavailable";
\r
6852 case DSERR_INVALIDPARAM:
\r
6853 return "Invalid parameter";
\r
6855 case DSERR_INVALIDCALL:
\r
6856 return "Invalid call";
\r
6858 case DSERR_GENERIC:
\r
6859 return "Generic error";
\r
6861 case DSERR_PRIOLEVELNEEDED:
\r
6862 return "Priority level needed";
\r
6864 case DSERR_OUTOFMEMORY:
\r
6865 return "Out of memory";
\r
6867 case DSERR_BADFORMAT:
\r
6868 return "The sample rate or the channel format is not supported";
\r
6870 case DSERR_UNSUPPORTED:
\r
6871 return "Not supported";
\r
6873 case DSERR_NODRIVER:
\r
6874 return "No driver";
\r
6876 case DSERR_ALREADYINITIALIZED:
\r
6877 return "Already initialized";
\r
6879 case DSERR_NOAGGREGATION:
\r
6880 return "No aggregation";
\r
6882 case DSERR_BUFFERLOST:
\r
6883 return "Buffer lost";
\r
6885 case DSERR_OTHERAPPHASPRIO:
\r
6886 return "Another application already has priority";
\r
6888 case DSERR_UNINITIALIZED:
\r
6889 return "Uninitialized";
\r
6892 return "DirectSound unknown error";
\r
6895 //******************** End of __WINDOWS_DS__ *********************//
\r
6899 #if defined(__LINUX_ALSA__)
\r
6901 #include <alsa/asoundlib.h>
\r
6902 #include <unistd.h>
\r
6904 // A structure to hold various information related to the ALSA API
\r
6905 // implementation.
\r
6906 struct AlsaHandle {
\r
6907 snd_pcm_t *handles[2];
\r
6908 bool synchronized;
\r
6910 pthread_cond_t runnable_cv;
\r
6914 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6917 static void *alsaCallbackHandler( void * ptr );
\r
6919 RtApiAlsa :: RtApiAlsa()
\r
6921 // Nothing to do here.
\r
6924 RtApiAlsa :: ~RtApiAlsa()
\r
6926 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6929 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6931 unsigned nDevices = 0;
\r
6932 int result, subdevice, card;
\r
6934 snd_ctl_t *handle;
\r
6936 // Count cards and devices
\r
6938 snd_card_next( &card );
\r
6939 while ( card >= 0 ) {
\r
6940 sprintf( name, "hw:%d", card );
\r
6941 result = snd_ctl_open( &handle, name, 0 );
\r
6942 if ( result < 0 ) {
\r
6943 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6944 errorText_ = errorStream_.str();
\r
6945 error( RtAudioError::WARNING );
\r
6950 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6951 if ( result < 0 ) {
\r
6952 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6953 errorText_ = errorStream_.str();
\r
6954 error( RtAudioError::WARNING );
\r
6957 if ( subdevice < 0 )
\r
6962 snd_ctl_close( handle );
\r
6963 snd_card_next( &card );
\r
6966 result = snd_ctl_open( &handle, "default", 0 );
\r
6967 if (result == 0) {
\r
6969 snd_ctl_close( handle );
\r
6975 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6977 RtAudio::DeviceInfo info;
\r
6978 info.probed = false;
\r
6980 unsigned nDevices = 0;
\r
6981 int result, subdevice, card;
\r
6983 snd_ctl_t *chandle;
\r
6985 // Count cards and devices
\r
6988 snd_card_next( &card );
\r
6989 while ( card >= 0 ) {
\r
6990 sprintf( name, "hw:%d", card );
\r
6991 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6992 if ( result < 0 ) {
\r
6993 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6994 errorText_ = errorStream_.str();
\r
6995 error( RtAudioError::WARNING );
\r
7000 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7001 if ( result < 0 ) {
\r
7002 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7003 errorText_ = errorStream_.str();
\r
7004 error( RtAudioError::WARNING );
\r
7007 if ( subdevice < 0 ) break;
\r
7008 if ( nDevices == device ) {
\r
7009 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7015 snd_ctl_close( chandle );
\r
7016 snd_card_next( &card );
\r
7019 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7020 if ( result == 0 ) {
\r
7021 if ( nDevices == device ) {
\r
7022 strcpy( name, "default" );
\r
7028 if ( nDevices == 0 ) {
\r
7029 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
7030 error( RtAudioError::INVALID_USE );
\r
7034 if ( device >= nDevices ) {
\r
7035 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
7036 error( RtAudioError::INVALID_USE );
\r
7042 // If a stream is already open, we cannot probe the stream devices.
\r
7043 // Thus, use the saved results.
\r
7044 if ( stream_.state != STREAM_CLOSED &&
\r
7045 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
7046 snd_ctl_close( chandle );
\r
7047 if ( device >= devices_.size() ) {
\r
7048 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
7049 error( RtAudioError::WARNING );
\r
7052 return devices_[ device ];
\r
7055 int openMode = SND_PCM_ASYNC;
\r
7056 snd_pcm_stream_t stream;
\r
7057 snd_pcm_info_t *pcminfo;
\r
7058 snd_pcm_info_alloca( &pcminfo );
\r
7059 snd_pcm_t *phandle;
\r
7060 snd_pcm_hw_params_t *params;
\r
7061 snd_pcm_hw_params_alloca( ¶ms );
\r
7063 // First try for playback unless default device (which has subdev -1)
\r
7064 stream = SND_PCM_STREAM_PLAYBACK;
\r
7065 snd_pcm_info_set_stream( pcminfo, stream );
\r
7066 if ( subdevice != -1 ) {
\r
7067 snd_pcm_info_set_device( pcminfo, subdevice );
\r
7068 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
7070 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7071 if ( result < 0 ) {
\r
7072 // Device probably doesn't support playback.
\r
7073 goto captureProbe;
\r
7077 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
7078 if ( result < 0 ) {
\r
7079 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7080 errorText_ = errorStream_.str();
\r
7081 error( RtAudioError::WARNING );
\r
7082 goto captureProbe;
\r
7085 // The device is open ... fill the parameter structure.
\r
7086 result = snd_pcm_hw_params_any( phandle, params );
\r
7087 if ( result < 0 ) {
\r
7088 snd_pcm_close( phandle );
\r
7089 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7090 errorText_ = errorStream_.str();
\r
7091 error( RtAudioError::WARNING );
\r
7092 goto captureProbe;
\r
7095 // Get output channel information.
\r
7096 unsigned int value;
\r
7097 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7098 if ( result < 0 ) {
\r
7099 snd_pcm_close( phandle );
\r
7100 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7101 errorText_ = errorStream_.str();
\r
7102 error( RtAudioError::WARNING );
\r
7103 goto captureProbe;
\r
7105 info.outputChannels = value;
\r
7106 snd_pcm_close( phandle );
\r
7109 stream = SND_PCM_STREAM_CAPTURE;
\r
7110 snd_pcm_info_set_stream( pcminfo, stream );
\r
7112 // Now try for capture unless default device (with subdev = -1)
\r
7113 if ( subdevice != -1 ) {
\r
7114 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7115 snd_ctl_close( chandle );
\r
7116 if ( result < 0 ) {
\r
7117 // Device probably doesn't support capture.
\r
7118 if ( info.outputChannels == 0 ) return info;
\r
7119 goto probeParameters;
\r
7123 snd_ctl_close( chandle );
\r
7125 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7126 if ( result < 0 ) {
\r
7127 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7128 errorText_ = errorStream_.str();
\r
7129 error( RtAudioError::WARNING );
\r
7130 if ( info.outputChannels == 0 ) return info;
\r
7131 goto probeParameters;
\r
7134 // The device is open ... fill the parameter structure.
\r
7135 result = snd_pcm_hw_params_any( phandle, params );
\r
7136 if ( result < 0 ) {
\r
7137 snd_pcm_close( phandle );
\r
7138 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7139 errorText_ = errorStream_.str();
\r
7140 error( RtAudioError::WARNING );
\r
7141 if ( info.outputChannels == 0 ) return info;
\r
7142 goto probeParameters;
\r
7145 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7146 if ( result < 0 ) {
\r
7147 snd_pcm_close( phandle );
\r
7148 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7149 errorText_ = errorStream_.str();
\r
7150 error( RtAudioError::WARNING );
\r
7151 if ( info.outputChannels == 0 ) return info;
\r
7152 goto probeParameters;
\r
7154 info.inputChannels = value;
\r
7155 snd_pcm_close( phandle );
\r
7157 // If device opens for both playback and capture, we determine the channels.
\r
7158 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7159 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7161 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7162 if ( device == 0 && info.outputChannels > 0 )
\r
7163 info.isDefaultOutput = true;
\r
7164 if ( device == 0 && info.inputChannels > 0 )
\r
7165 info.isDefaultInput = true;
\r
7168 // At this point, we just need to figure out the supported data
\r
7169 // formats and sample rates. We'll proceed by opening the device in
\r
7170 // the direction with the maximum number of channels, or playback if
\r
7171 // they are equal. This might limit our sample rate options, but so
\r
7174 if ( info.outputChannels >= info.inputChannels )
\r
7175 stream = SND_PCM_STREAM_PLAYBACK;
\r
7177 stream = SND_PCM_STREAM_CAPTURE;
\r
7178 snd_pcm_info_set_stream( pcminfo, stream );
\r
7180 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7181 if ( result < 0 ) {
\r
7182 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7183 errorText_ = errorStream_.str();
\r
7184 error( RtAudioError::WARNING );
\r
7188 // The device is open ... fill the parameter structure.
\r
7189 result = snd_pcm_hw_params_any( phandle, params );
\r
7190 if ( result < 0 ) {
\r
7191 snd_pcm_close( phandle );
\r
7192 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7193 errorText_ = errorStream_.str();
\r
7194 error( RtAudioError::WARNING );
\r
7198 // Test our discrete set of sample rate values.
\r
7199 info.sampleRates.clear();
\r
7200 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7201 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7202 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7204 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7205 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7208 if ( info.sampleRates.size() == 0 ) {
\r
7209 snd_pcm_close( phandle );
\r
7210 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7211 errorText_ = errorStream_.str();
\r
7212 error( RtAudioError::WARNING );
\r
7216 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7217 snd_pcm_format_t format;
\r
7218 info.nativeFormats = 0;
\r
7219 format = SND_PCM_FORMAT_S8;
\r
7220 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7221 info.nativeFormats |= RTAUDIO_SINT8;
\r
7222 format = SND_PCM_FORMAT_S16;
\r
7223 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7224 info.nativeFormats |= RTAUDIO_SINT16;
\r
7225 format = SND_PCM_FORMAT_S24;
\r
7226 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7227 info.nativeFormats |= RTAUDIO_SINT24;
\r
7228 format = SND_PCM_FORMAT_S32;
\r
7229 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7230 info.nativeFormats |= RTAUDIO_SINT32;
\r
7231 format = SND_PCM_FORMAT_FLOAT;
\r
7232 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7233 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7234 format = SND_PCM_FORMAT_FLOAT64;
\r
7235 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7236 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7238 // Check that we have at least one supported format
\r
7239 if ( info.nativeFormats == 0 ) {
\r
7240 snd_pcm_close( phandle );
\r
7241 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7242 errorText_ = errorStream_.str();
\r
7243 error( RtAudioError::WARNING );
\r
7247 // Get the device name
\r
7249 result = snd_card_get_name( card, &cardname );
\r
7250 if ( result >= 0 ) {
\r
7251 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7256 // That's all ... close the device and return
\r
7257 snd_pcm_close( phandle );
\r
7258 info.probed = true;
\r
7262 void RtApiAlsa :: saveDeviceInfo( void )
\r
7266 unsigned int nDevices = getDeviceCount();
\r
7267 devices_.resize( nDevices );
\r
7268 for ( unsigned int i=0; i<nDevices; i++ )
\r
7269 devices_[i] = getDeviceInfo( i );
\r
7272 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7273 unsigned int firstChannel, unsigned int sampleRate,
\r
7274 RtAudioFormat format, unsigned int *bufferSize,
\r
7275 RtAudio::StreamOptions *options )
\r
7278 #if defined(__RTAUDIO_DEBUG__)
\r
7279 snd_output_t *out;
\r
7280 snd_output_stdio_attach(&out, stderr, 0);
\r
7283 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7285 unsigned nDevices = 0;
\r
7286 int result, subdevice, card;
\r
7288 snd_ctl_t *chandle;
\r
7290 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7291 snprintf(name, sizeof(name), "%s", "default");
\r
7293 // Count cards and devices
\r
7295 snd_card_next( &card );
\r
7296 while ( card >= 0 ) {
\r
7297 sprintf( name, "hw:%d", card );
\r
7298 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7299 if ( result < 0 ) {
\r
7300 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7301 errorText_ = errorStream_.str();
\r
7306 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7307 if ( result < 0 ) break;
\r
7308 if ( subdevice < 0 ) break;
\r
7309 if ( nDevices == device ) {
\r
7310 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7311 snd_ctl_close( chandle );
\r
7316 snd_ctl_close( chandle );
\r
7317 snd_card_next( &card );
\r
7320 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7321 if ( result == 0 ) {
\r
7322 if ( nDevices == device ) {
\r
7323 strcpy( name, "default" );
\r
7329 if ( nDevices == 0 ) {
\r
7330 // This should not happen because a check is made before this function is called.
\r
7331 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7335 if ( device >= nDevices ) {
\r
7336 // This should not happen because a check is made before this function is called.
\r
7337 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7344 // The getDeviceInfo() function will not work for a device that is
\r
7345 // already open. Thus, we'll probe the system before opening a
\r
7346 // stream and save the results for use by getDeviceInfo().
\r
7347 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7348 this->saveDeviceInfo();
\r
7350 snd_pcm_stream_t stream;
\r
7351 if ( mode == OUTPUT )
\r
7352 stream = SND_PCM_STREAM_PLAYBACK;
\r
7354 stream = SND_PCM_STREAM_CAPTURE;
\r
7356 snd_pcm_t *phandle;
\r
7357 int openMode = SND_PCM_ASYNC;
\r
7358 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7359 if ( result < 0 ) {
\r
7360 if ( mode == OUTPUT )
\r
7361 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7363 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7364 errorText_ = errorStream_.str();
\r
7368 // Fill the parameter structure.
\r
7369 snd_pcm_hw_params_t *hw_params;
\r
7370 snd_pcm_hw_params_alloca( &hw_params );
\r
7371 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7372 if ( result < 0 ) {
\r
7373 snd_pcm_close( phandle );
\r
7374 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7375 errorText_ = errorStream_.str();
\r
7379 #if defined(__RTAUDIO_DEBUG__)
\r
7380 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7381 snd_pcm_hw_params_dump( hw_params, out );
\r
7384 // Set access ... check user preference.
\r
7385 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7386 stream_.userInterleaved = false;
\r
7387 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7388 if ( result < 0 ) {
\r
7389 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7390 stream_.deviceInterleaved[mode] = true;
\r
7393 stream_.deviceInterleaved[mode] = false;
\r
7396 stream_.userInterleaved = true;
\r
7397 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7398 if ( result < 0 ) {
\r
7399 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7400 stream_.deviceInterleaved[mode] = false;
\r
7403 stream_.deviceInterleaved[mode] = true;
\r
7406 if ( result < 0 ) {
\r
7407 snd_pcm_close( phandle );
\r
7408 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7409 errorText_ = errorStream_.str();
\r
7413 // Determine how to set the device format.
\r
7414 stream_.userFormat = format;
\r
7415 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7417 if ( format == RTAUDIO_SINT8 )
\r
7418 deviceFormat = SND_PCM_FORMAT_S8;
\r
7419 else if ( format == RTAUDIO_SINT16 )
\r
7420 deviceFormat = SND_PCM_FORMAT_S16;
\r
7421 else if ( format == RTAUDIO_SINT24 )
\r
7422 deviceFormat = SND_PCM_FORMAT_S24;
\r
7423 else if ( format == RTAUDIO_SINT32 )
\r
7424 deviceFormat = SND_PCM_FORMAT_S32;
\r
7425 else if ( format == RTAUDIO_FLOAT32 )
\r
7426 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7427 else if ( format == RTAUDIO_FLOAT64 )
\r
7428 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7430 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7431 stream_.deviceFormat[mode] = format;
\r
7435 // The user requested format is not natively supported by the device.
\r
7436 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7437 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7438 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7442 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7443 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7444 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7448 deviceFormat = SND_PCM_FORMAT_S32;
\r
7449 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7450 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7454 deviceFormat = SND_PCM_FORMAT_S24;
\r
7455 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7456 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7460 deviceFormat = SND_PCM_FORMAT_S16;
\r
7461 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7462 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7466 deviceFormat = SND_PCM_FORMAT_S8;
\r
7467 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7468 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7472 // If we get here, no supported format was found.
\r
7473 snd_pcm_close( phandle );
\r
7474 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7475 errorText_ = errorStream_.str();
\r
7479 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7480 if ( result < 0 ) {
\r
7481 snd_pcm_close( phandle );
\r
7482 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7483 errorText_ = errorStream_.str();
\r
7487 // Determine whether byte-swaping is necessary.
\r
7488 stream_.doByteSwap[mode] = false;
\r
7489 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7490 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7491 if ( result == 0 )
\r
7492 stream_.doByteSwap[mode] = true;
\r
7493 else if (result < 0) {
\r
7494 snd_pcm_close( phandle );
\r
7495 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7496 errorText_ = errorStream_.str();
\r
7501 // Set the sample rate.
\r
7502 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7503 if ( result < 0 ) {
\r
7504 snd_pcm_close( phandle );
\r
7505 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7506 errorText_ = errorStream_.str();
\r
7510 // Determine the number of channels for this device. We support a possible
\r
7511 // minimum device channel number > than the value requested by the user.
\r
7512 stream_.nUserChannels[mode] = channels;
\r
7513 unsigned int value;
\r
7514 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7515 unsigned int deviceChannels = value;
\r
7516 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7517 snd_pcm_close( phandle );
\r
7518 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7519 errorText_ = errorStream_.str();
\r
7523 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7524 if ( result < 0 ) {
\r
7525 snd_pcm_close( phandle );
\r
7526 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7527 errorText_ = errorStream_.str();
\r
7530 deviceChannels = value;
\r
7531 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7532 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7534 // Set the device channels.
\r
7535 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7536 if ( result < 0 ) {
\r
7537 snd_pcm_close( phandle );
\r
7538 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7539 errorText_ = errorStream_.str();
\r
7543 // Set the buffer (or period) size.
\r
7545 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7546 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7547 if ( result < 0 ) {
\r
7548 snd_pcm_close( phandle );
\r
7549 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7550 errorText_ = errorStream_.str();
\r
7553 *bufferSize = periodSize;
\r
7555 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7556 unsigned int periods = 0;
\r
7557 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7558 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7559 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7560 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7561 if ( result < 0 ) {
\r
7562 snd_pcm_close( phandle );
\r
7563 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7564 errorText_ = errorStream_.str();
\r
7568 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7569 // MUST be the same in both directions!
\r
7570 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7571 snd_pcm_close( phandle );
\r
7572 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7573 errorText_ = errorStream_.str();
\r
7577 stream_.bufferSize = *bufferSize;
\r
7579 // Install the hardware configuration
\r
7580 result = snd_pcm_hw_params( phandle, hw_params );
\r
7581 if ( result < 0 ) {
\r
7582 snd_pcm_close( phandle );
\r
7583 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7584 errorText_ = errorStream_.str();
\r
7588 #if defined(__RTAUDIO_DEBUG__)
\r
7589 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7590 snd_pcm_hw_params_dump( hw_params, out );
\r
7593 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7594 snd_pcm_sw_params_t *sw_params = NULL;
\r
7595 snd_pcm_sw_params_alloca( &sw_params );
\r
7596 snd_pcm_sw_params_current( phandle, sw_params );
\r
7597 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7598 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7599 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7601 // The following two settings were suggested by Theo Veenker
\r
7602 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7603 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7605 // here are two options for a fix
\r
7606 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7607 snd_pcm_uframes_t val;
\r
7608 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7609 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7611 result = snd_pcm_sw_params( phandle, sw_params );
\r
7612 if ( result < 0 ) {
\r
7613 snd_pcm_close( phandle );
\r
7614 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7615 errorText_ = errorStream_.str();
\r
7619 #if defined(__RTAUDIO_DEBUG__)
\r
7620 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7621 snd_pcm_sw_params_dump( sw_params, out );
\r
7624 // Set flags for buffer conversion
\r
7625 stream_.doConvertBuffer[mode] = false;
\r
7626 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7627 stream_.doConvertBuffer[mode] = true;
\r
7628 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7629 stream_.doConvertBuffer[mode] = true;
\r
7630 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7631 stream_.nUserChannels[mode] > 1 )
\r
7632 stream_.doConvertBuffer[mode] = true;
\r
7634 // Allocate the ApiHandle if necessary and then save.
\r
7635 AlsaHandle *apiInfo = 0;
\r
7636 if ( stream_.apiHandle == 0 ) {
\r
7638 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7640 catch ( std::bad_alloc& ) {
\r
7641 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7645 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7646 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7650 stream_.apiHandle = (void *) apiInfo;
\r
7651 apiInfo->handles[0] = 0;
\r
7652 apiInfo->handles[1] = 0;
\r
7655 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7657 apiInfo->handles[mode] = phandle;
\r
7660 // Allocate necessary internal buffers.
\r
7661 unsigned long bufferBytes;
\r
7662 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7663 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7664 if ( stream_.userBuffer[mode] == NULL ) {
\r
7665 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7669 if ( stream_.doConvertBuffer[mode] ) {
\r
7671 bool makeBuffer = true;
\r
7672 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7673 if ( mode == INPUT ) {
\r
7674 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7675 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7676 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7680 if ( makeBuffer ) {
\r
7681 bufferBytes *= *bufferSize;
\r
7682 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7683 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7684 if ( stream_.deviceBuffer == NULL ) {
\r
7685 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7691 stream_.sampleRate = sampleRate;
\r
7692 stream_.nBuffers = periods;
\r
7693 stream_.device[mode] = device;
\r
7694 stream_.state = STREAM_STOPPED;
\r
7696 // Setup the buffer conversion information structure.
\r
7697 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7699 // Setup thread if necessary.
\r
7700 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7701 // We had already set up an output stream.
\r
7702 stream_.mode = DUPLEX;
\r
7703 // Link the streams if possible.
\r
7704 apiInfo->synchronized = false;
\r
7705 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7706 apiInfo->synchronized = true;
\r
7708 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7709 error( RtAudioError::WARNING );
\r
7713 stream_.mode = mode;
\r
7715 // Setup callback thread.
\r
7716 stream_.callbackInfo.object = (void *) this;
\r
7718 // Set the thread attributes for joinable and realtime scheduling
\r
7719 // priority (optional). The higher priority will only take affect
\r
7720 // if the program is run as root or suid. Note, under Linux
\r
7721 // processes with CAP_SYS_NICE privilege, a user can change
\r
7722 // scheduling policy and priority (thus need not be root). See
\r
7723 // POSIX "capabilities".
\r
7724 pthread_attr_t attr;
\r
7725 pthread_attr_init( &attr );
\r
7726 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7728 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7729 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7730 // We previously attempted to increase the audio callback priority
\r
7731 // to SCHED_RR here via the attributes. However, while no errors
\r
7732 // were reported in doing so, it did not work. So, now this is
\r
7733 // done in the alsaCallbackHandler function.
\r
7734 stream_.callbackInfo.doRealtime = true;
\r
7735 int priority = options->priority;
\r
7736 int min = sched_get_priority_min( SCHED_RR );
\r
7737 int max = sched_get_priority_max( SCHED_RR );
\r
7738 if ( priority < min ) priority = min;
\r
7739 else if ( priority > max ) priority = max;
\r
7740 stream_.callbackInfo.priority = priority;
\r
7744 stream_.callbackInfo.isRunning = true;
\r
7745 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7746 pthread_attr_destroy( &attr );
\r
7748 stream_.callbackInfo.isRunning = false;
\r
7749 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7758 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7759 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7760 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7762 stream_.apiHandle = 0;
\r
7765 if ( phandle) snd_pcm_close( phandle );
\r
7767 for ( int i=0; i<2; i++ ) {
\r
7768 if ( stream_.userBuffer[i] ) {
\r
7769 free( stream_.userBuffer[i] );
\r
7770 stream_.userBuffer[i] = 0;
\r
7774 if ( stream_.deviceBuffer ) {
\r
7775 free( stream_.deviceBuffer );
\r
7776 stream_.deviceBuffer = 0;
\r
7779 stream_.state = STREAM_CLOSED;
\r
7783 void RtApiAlsa :: closeStream()
\r
7785 if ( stream_.state == STREAM_CLOSED ) {
\r
7786 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7787 error( RtAudioError::WARNING );
\r
7791 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7792 stream_.callbackInfo.isRunning = false;
\r
7793 MUTEX_LOCK( &stream_.mutex );
\r
7794 if ( stream_.state == STREAM_STOPPED ) {
\r
7795 apiInfo->runnable = true;
\r
7796 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7798 MUTEX_UNLOCK( &stream_.mutex );
\r
7799 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7801 if ( stream_.state == STREAM_RUNNING ) {
\r
7802 stream_.state = STREAM_STOPPED;
\r
7803 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7804 snd_pcm_drop( apiInfo->handles[0] );
\r
7805 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7806 snd_pcm_drop( apiInfo->handles[1] );
\r
7810 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7811 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7812 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7814 stream_.apiHandle = 0;
\r
7817 for ( int i=0; i<2; i++ ) {
\r
7818 if ( stream_.userBuffer[i] ) {
\r
7819 free( stream_.userBuffer[i] );
\r
7820 stream_.userBuffer[i] = 0;
\r
7824 if ( stream_.deviceBuffer ) {
\r
7825 free( stream_.deviceBuffer );
\r
7826 stream_.deviceBuffer = 0;
\r
7829 stream_.mode = UNINITIALIZED;
\r
7830 stream_.state = STREAM_CLOSED;
\r
7833 void RtApiAlsa :: startStream()
\r
7835 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7838 if ( stream_.state == STREAM_RUNNING ) {
\r
7839 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7840 error( RtAudioError::WARNING );
\r
7844 MUTEX_LOCK( &stream_.mutex );
\r
7847 snd_pcm_state_t state;
\r
7848 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7849 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7850 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7851 state = snd_pcm_state( handle[0] );
\r
7852 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7853 result = snd_pcm_prepare( handle[0] );
\r
7854 if ( result < 0 ) {
\r
7855 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7856 errorText_ = errorStream_.str();
\r
7862 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7863 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7864 state = snd_pcm_state( handle[1] );
\r
7865 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7866 result = snd_pcm_prepare( handle[1] );
\r
7867 if ( result < 0 ) {
\r
7868 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7869 errorText_ = errorStream_.str();
\r
7875 stream_.state = STREAM_RUNNING;
\r
7878 apiInfo->runnable = true;
\r
7879 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7880 MUTEX_UNLOCK( &stream_.mutex );
\r
7882 if ( result >= 0 ) return;
\r
7883 error( RtAudioError::SYSTEM_ERROR );
\r
7886 void RtApiAlsa :: stopStream()
\r
7889 if ( stream_.state == STREAM_STOPPED ) {
\r
7890 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7891 error( RtAudioError::WARNING );
\r
7895 stream_.state = STREAM_STOPPED;
\r
7896 MUTEX_LOCK( &stream_.mutex );
\r
7899 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7900 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7901 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7902 if ( apiInfo->synchronized )
\r
7903 result = snd_pcm_drop( handle[0] );
\r
7905 result = snd_pcm_drain( handle[0] );
\r
7906 if ( result < 0 ) {
\r
7907 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7908 errorText_ = errorStream_.str();
\r
7913 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7914 result = snd_pcm_drop( handle[1] );
\r
7915 if ( result < 0 ) {
\r
7916 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7917 errorText_ = errorStream_.str();
\r
7923 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7924 MUTEX_UNLOCK( &stream_.mutex );
\r
7926 if ( result >= 0 ) return;
\r
7927 error( RtAudioError::SYSTEM_ERROR );
\r
7930 void RtApiAlsa :: abortStream()
\r
7933 if ( stream_.state == STREAM_STOPPED ) {
\r
7934 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7935 error( RtAudioError::WARNING );
\r
7939 stream_.state = STREAM_STOPPED;
\r
7940 MUTEX_LOCK( &stream_.mutex );
\r
7943 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7944 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7945 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7946 result = snd_pcm_drop( handle[0] );
\r
7947 if ( result < 0 ) {
\r
7948 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7949 errorText_ = errorStream_.str();
\r
7954 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7955 result = snd_pcm_drop( handle[1] );
\r
7956 if ( result < 0 ) {
\r
7957 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7958 errorText_ = errorStream_.str();
\r
7964 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7965 MUTEX_UNLOCK( &stream_.mutex );
\r
7967 if ( result >= 0 ) return;
\r
7968 error( RtAudioError::SYSTEM_ERROR );
\r
7971 void RtApiAlsa :: callbackEvent()
\r
7973 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7974 if ( stream_.state == STREAM_STOPPED ) {
\r
7975 MUTEX_LOCK( &stream_.mutex );
\r
7976 while ( !apiInfo->runnable )
\r
7977 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7979 if ( stream_.state != STREAM_RUNNING ) {
\r
7980 MUTEX_UNLOCK( &stream_.mutex );
\r
7983 MUTEX_UNLOCK( &stream_.mutex );
\r
7986 if ( stream_.state == STREAM_CLOSED ) {
\r
7987 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7988 error( RtAudioError::WARNING );
\r
7992 int doStopStream = 0;
\r
7993 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7994 double streamTime = getStreamTime();
\r
7995 RtAudioStreamStatus status = 0;
\r
7996 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7997 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7998 apiInfo->xrun[0] = false;
\r
8000 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
8001 status |= RTAUDIO_INPUT_OVERFLOW;
\r
8002 apiInfo->xrun[1] = false;
\r
8004 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
8005 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
8007 if ( doStopStream == 2 ) {
\r
8012 MUTEX_LOCK( &stream_.mutex );
\r
8014 // The state might change while waiting on a mutex.
\r
8015 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
8020 snd_pcm_t **handle;
\r
8021 snd_pcm_sframes_t frames;
\r
8022 RtAudioFormat format;
\r
8023 handle = (snd_pcm_t **) apiInfo->handles;
\r
8025 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
8027 // Setup parameters.
\r
8028 if ( stream_.doConvertBuffer[1] ) {
\r
8029 buffer = stream_.deviceBuffer;
\r
8030 channels = stream_.nDeviceChannels[1];
\r
8031 format = stream_.deviceFormat[1];
\r
8034 buffer = stream_.userBuffer[1];
\r
8035 channels = stream_.nUserChannels[1];
\r
8036 format = stream_.userFormat;
\r
8039 // Read samples from device in interleaved/non-interleaved format.
\r
8040 if ( stream_.deviceInterleaved[1] )
\r
8041 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
8043 void *bufs[channels];
\r
8044 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8045 for ( int i=0; i<channels; i++ )
\r
8046 bufs[i] = (void *) (buffer + (i * offset));
\r
8047 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
8050 if ( result < (int) stream_.bufferSize ) {
\r
8051 // Either an error or overrun occured.
\r
8052 if ( result == -EPIPE ) {
\r
8053 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
8054 if ( state == SND_PCM_STATE_XRUN ) {
\r
8055 apiInfo->xrun[1] = true;
\r
8056 result = snd_pcm_prepare( handle[1] );
\r
8057 if ( result < 0 ) {
\r
8058 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
8059 errorText_ = errorStream_.str();
\r
8063 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8064 errorText_ = errorStream_.str();
\r
8068 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
8069 errorText_ = errorStream_.str();
\r
8071 error( RtAudioError::WARNING );
\r
8075 // Do byte swapping if necessary.
\r
8076 if ( stream_.doByteSwap[1] )
\r
8077 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
8079 // Do buffer conversion if necessary.
\r
8080 if ( stream_.doConvertBuffer[1] )
\r
8081 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
8083 // Check stream latency
\r
8084 result = snd_pcm_delay( handle[1], &frames );
\r
8085 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
8090 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8092 // Setup parameters and do buffer conversion if necessary.
\r
8093 if ( stream_.doConvertBuffer[0] ) {
\r
8094 buffer = stream_.deviceBuffer;
\r
8095 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8096 channels = stream_.nDeviceChannels[0];
\r
8097 format = stream_.deviceFormat[0];
\r
8100 buffer = stream_.userBuffer[0];
\r
8101 channels = stream_.nUserChannels[0];
\r
8102 format = stream_.userFormat;
\r
8105 // Do byte swapping if necessary.
\r
8106 if ( stream_.doByteSwap[0] )
\r
8107 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8109 // Write samples to device in interleaved/non-interleaved format.
\r
8110 if ( stream_.deviceInterleaved[0] )
\r
8111 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8113 void *bufs[channels];
\r
8114 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8115 for ( int i=0; i<channels; i++ )
\r
8116 bufs[i] = (void *) (buffer + (i * offset));
\r
8117 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8120 if ( result < (int) stream_.bufferSize ) {
\r
8121 // Either an error or underrun occured.
\r
8122 if ( result == -EPIPE ) {
\r
8123 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8124 if ( state == SND_PCM_STATE_XRUN ) {
\r
8125 apiInfo->xrun[0] = true;
\r
8126 result = snd_pcm_prepare( handle[0] );
\r
8127 if ( result < 0 ) {
\r
8128 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8129 errorText_ = errorStream_.str();
\r
8132 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8135 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8136 errorText_ = errorStream_.str();
\r
8140 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8141 errorText_ = errorStream_.str();
\r
8143 error( RtAudioError::WARNING );
\r
8147 // Check stream latency
\r
8148 result = snd_pcm_delay( handle[0], &frames );
\r
8149 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8153 MUTEX_UNLOCK( &stream_.mutex );
\r
8155 RtApi::tickStreamTime();
\r
8156 if ( doStopStream == 1 ) this->stopStream();
\r
8159 static void *alsaCallbackHandler( void *ptr )
\r
8161 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8162 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8163 bool *isRunning = &info->isRunning;
\r
8165 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8166 if ( info->doRealtime ) {
\r
8167 pthread_t tID = pthread_self(); // ID of this thread
\r
8168 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8169 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8173 while ( *isRunning == true ) {
\r
8174 pthread_testcancel();
\r
8175 object->callbackEvent();
\r
8178 pthread_exit( NULL );
\r
8181 //******************** End of __LINUX_ALSA__ *********************//
\r
8184 #if defined(__LINUX_PULSE__)
\r
8186 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8187 // and Tristan Matthews.
\r
8189 #include <pulse/error.h>
\r
8190 #include <pulse/simple.h>
\r
// Sample rates the PulseAudio backend will accept, zero-terminated so it
// can be walked with a simple `for ( ...; *sr; ++sr )` loop.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
\r
8196 struct rtaudio_pa_format_mapping_t {
\r
8197 RtAudioFormat rtaudio_format;
\r
8198 pa_sample_format_t pa_format;
\r
8201 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8202 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8203 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8204 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8205 {0, PA_SAMPLE_INVALID}};
\r
8207 struct PulseAudioHandle {
\r
8208 pa_simple *s_play;
\r
8211 pthread_cond_t runnable_cv;
\r
8213 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8216 RtApiPulse::~RtApiPulse()
\r
8218 if ( stream_.state != STREAM_CLOSED )
\r
8222 unsigned int RtApiPulse::getDeviceCount( void )
\r
8227 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8229 RtAudio::DeviceInfo info;
\r
8230 info.probed = true;
\r
8231 info.name = "PulseAudio";
\r
8232 info.outputChannels = 2;
\r
8233 info.inputChannels = 2;
\r
8234 info.duplexChannels = 2;
\r
8235 info.isDefaultOutput = true;
\r
8236 info.isDefaultInput = true;
\r
8238 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8239 info.sampleRates.push_back( *sr );
\r
8241 info.preferredSampleRate = 48000;
\r
8242 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8247 static void *pulseaudio_callback( void * user )
\r
8249 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8250 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8251 volatile bool *isRunning = &cbi->isRunning;
\r
8253 while ( *isRunning ) {
\r
8254 pthread_testcancel();
\r
8255 context->callbackEvent();
\r
8258 pthread_exit( NULL );
\r
8261 void RtApiPulse::closeStream( void )
\r
8263 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8265 stream_.callbackInfo.isRunning = false;
\r
8267 MUTEX_LOCK( &stream_.mutex );
\r
8268 if ( stream_.state == STREAM_STOPPED ) {
\r
8269 pah->runnable = true;
\r
8270 pthread_cond_signal( &pah->runnable_cv );
\r
8272 MUTEX_UNLOCK( &stream_.mutex );
\r
8274 pthread_join( pah->thread, 0 );
\r
8275 if ( pah->s_play ) {
\r
8276 pa_simple_flush( pah->s_play, NULL );
\r
8277 pa_simple_free( pah->s_play );
\r
8280 pa_simple_free( pah->s_rec );
\r
8282 pthread_cond_destroy( &pah->runnable_cv );
\r
8284 stream_.apiHandle = 0;
\r
8287 if ( stream_.userBuffer[0] ) {
\r
8288 free( stream_.userBuffer[0] );
\r
8289 stream_.userBuffer[0] = 0;
\r
8291 if ( stream_.userBuffer[1] ) {
\r
8292 free( stream_.userBuffer[1] );
\r
8293 stream_.userBuffer[1] = 0;
\r
8296 stream_.state = STREAM_CLOSED;
\r
8297 stream_.mode = UNINITIALIZED;
\r
8300 void RtApiPulse::callbackEvent( void )
\r
8302 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8304 if ( stream_.state == STREAM_STOPPED ) {
\r
8305 MUTEX_LOCK( &stream_.mutex );
\r
8306 while ( !pah->runnable )
\r
8307 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8309 if ( stream_.state != STREAM_RUNNING ) {
\r
8310 MUTEX_UNLOCK( &stream_.mutex );
\r
8313 MUTEX_UNLOCK( &stream_.mutex );
\r
8316 if ( stream_.state == STREAM_CLOSED ) {
\r
8317 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8318 "this shouldn't happen!";
\r
8319 error( RtAudioError::WARNING );
\r
8323 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8324 double streamTime = getStreamTime();
\r
8325 RtAudioStreamStatus status = 0;
\r
8326 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8327 stream_.bufferSize, streamTime, status,
\r
8328 stream_.callbackInfo.userData );
\r
8330 if ( doStopStream == 2 ) {
\r
8335 MUTEX_LOCK( &stream_.mutex );
\r
8336 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8337 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8339 if ( stream_.state != STREAM_RUNNING )
\r
8344 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8345 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8346 convertBuffer( stream_.deviceBuffer,
\r
8347 stream_.userBuffer[OUTPUT],
\r
8348 stream_.convertInfo[OUTPUT] );
\r
8349 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8350 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8352 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8353 formatBytes( stream_.userFormat );
\r
8355 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8356 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8357 pa_strerror( pa_error ) << ".";
\r
8358 errorText_ = errorStream_.str();
\r
8359 error( RtAudioError::WARNING );
\r
8363 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8364 if ( stream_.doConvertBuffer[INPUT] )
\r
8365 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8366 formatBytes( stream_.deviceFormat[INPUT] );
\r
8368 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8369 formatBytes( stream_.userFormat );
\r
8371 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8372 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8373 pa_strerror( pa_error ) << ".";
\r
8374 errorText_ = errorStream_.str();
\r
8375 error( RtAudioError::WARNING );
\r
8377 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8378 convertBuffer( stream_.userBuffer[INPUT],
\r
8379 stream_.deviceBuffer,
\r
8380 stream_.convertInfo[INPUT] );
\r
8385 MUTEX_UNLOCK( &stream_.mutex );
\r
8386 RtApi::tickStreamTime();
\r
8388 if ( doStopStream == 1 )
\r
8392 void RtApiPulse::startStream( void )
\r
8394 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8396 if ( stream_.state == STREAM_CLOSED ) {
\r
8397 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8398 error( RtAudioError::INVALID_USE );
\r
8401 if ( stream_.state == STREAM_RUNNING ) {
\r
8402 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8403 error( RtAudioError::WARNING );
\r
8407 MUTEX_LOCK( &stream_.mutex );
\r
8409 stream_.state = STREAM_RUNNING;
\r
8411 pah->runnable = true;
\r
8412 pthread_cond_signal( &pah->runnable_cv );
\r
8413 MUTEX_UNLOCK( &stream_.mutex );
\r
8416 void RtApiPulse::stopStream( void )
\r
8418 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8420 if ( stream_.state == STREAM_CLOSED ) {
\r
8421 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8422 error( RtAudioError::INVALID_USE );
\r
8425 if ( stream_.state == STREAM_STOPPED ) {
\r
8426 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8427 error( RtAudioError::WARNING );
\r
8431 stream_.state = STREAM_STOPPED;
\r
8432 MUTEX_LOCK( &stream_.mutex );
\r
8434 if ( pah && pah->s_play ) {
\r
8436 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8437 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8438 pa_strerror( pa_error ) << ".";
\r
8439 errorText_ = errorStream_.str();
\r
8440 MUTEX_UNLOCK( &stream_.mutex );
\r
8441 error( RtAudioError::SYSTEM_ERROR );
\r
8446 stream_.state = STREAM_STOPPED;
\r
8447 MUTEX_UNLOCK( &stream_.mutex );
\r
8450 void RtApiPulse::abortStream( void )
\r
8452 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8454 if ( stream_.state == STREAM_CLOSED ) {
\r
8455 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8456 error( RtAudioError::INVALID_USE );
\r
8459 if ( stream_.state == STREAM_STOPPED ) {
\r
8460 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8461 error( RtAudioError::WARNING );
\r
8465 stream_.state = STREAM_STOPPED;
\r
8466 MUTEX_LOCK( &stream_.mutex );
\r
8468 if ( pah && pah->s_play ) {
\r
8470 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8471 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8472 pa_strerror( pa_error ) << ".";
\r
8473 errorText_ = errorStream_.str();
\r
8474 MUTEX_UNLOCK( &stream_.mutex );
\r
8475 error( RtAudioError::SYSTEM_ERROR );
\r
8480 stream_.state = STREAM_STOPPED;
\r
8481 MUTEX_UNLOCK( &stream_.mutex );
\r
8484 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8485 unsigned int channels, unsigned int firstChannel,
\r
8486 unsigned int sampleRate, RtAudioFormat format,
\r
8487 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8489 PulseAudioHandle *pah = 0;
\r
8490 unsigned long bufferBytes = 0;
\r
8491 pa_sample_spec ss;
\r
8493 if ( device != 0 ) return false;
\r
8494 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8495 if ( channels != 1 && channels != 2 ) {
\r
8496 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8499 ss.channels = channels;
\r
8501 if ( firstChannel != 0 ) return false;
\r
8503 bool sr_found = false;
\r
8504 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8505 if ( sampleRate == *sr ) {
\r
8507 stream_.sampleRate = sampleRate;
\r
8508 ss.rate = sampleRate;
\r
8512 if ( !sr_found ) {
\r
8513 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8517 bool sf_found = 0;
\r
8518 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8519 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8520 if ( format == sf->rtaudio_format ) {
\r
8522 stream_.userFormat = sf->rtaudio_format;
\r
8523 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8524 ss.format = sf->pa_format;
\r
8528 if ( !sf_found ) { // Use internal data format conversion.
\r
8529 stream_.userFormat = format;
\r
8530 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8531 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8534 // Set other stream parameters.
\r
8535 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8536 else stream_.userInterleaved = true;
\r
8537 stream_.deviceInterleaved[mode] = true;
\r
8538 stream_.nBuffers = 1;
\r
8539 stream_.doByteSwap[mode] = false;
\r
8540 stream_.nUserChannels[mode] = channels;
\r
8541 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8542 stream_.channelOffset[mode] = 0;
\r
8543 std::string streamName = "RtAudio";
\r
8545 // Set flags for buffer conversion.
\r
8546 stream_.doConvertBuffer[mode] = false;
\r
8547 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8548 stream_.doConvertBuffer[mode] = true;
\r
8549 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8550 stream_.doConvertBuffer[mode] = true;
\r
8552 // Allocate necessary internal buffers.
\r
8553 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8554 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8555 if ( stream_.userBuffer[mode] == NULL ) {
\r
8556 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8559 stream_.bufferSize = *bufferSize;
\r
8561 if ( stream_.doConvertBuffer[mode] ) {
\r
8563 bool makeBuffer = true;
\r
8564 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8565 if ( mode == INPUT ) {
\r
8566 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8567 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8568 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8572 if ( makeBuffer ) {
\r
8573 bufferBytes *= *bufferSize;
\r
8574 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8575 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8576 if ( stream_.deviceBuffer == NULL ) {
\r
8577 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8583 stream_.device[mode] = device;
\r
8585 // Setup the buffer conversion information structure.
\r
8586 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8588 if ( !stream_.apiHandle ) {
\r
8589 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8591 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8595 stream_.apiHandle = pah;
\r
8596 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8597 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8601 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8604 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8607 pa_buffer_attr buffer_attr;
\r
8608 buffer_attr.fragsize = bufferBytes;
\r
8609 buffer_attr.maxlength = -1;
\r
8611 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8612 if ( !pah->s_rec ) {
\r
8613 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8618 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8619 if ( !pah->s_play ) {
\r
8620 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8628 if ( stream_.mode == UNINITIALIZED )
\r
8629 stream_.mode = mode;
\r
8630 else if ( stream_.mode == mode )
\r
8633 stream_.mode = DUPLEX;
\r
8635 if ( !stream_.callbackInfo.isRunning ) {
\r
8636 stream_.callbackInfo.object = this;
\r
8637 stream_.callbackInfo.isRunning = true;
\r
8638 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8639 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8644 stream_.state = STREAM_STOPPED;
\r
8648 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8649 pthread_cond_destroy( &pah->runnable_cv );
\r
8651 stream_.apiHandle = 0;
\r
8654 for ( int i=0; i<2; i++ ) {
\r
8655 if ( stream_.userBuffer[i] ) {
\r
8656 free( stream_.userBuffer[i] );
\r
8657 stream_.userBuffer[i] = 0;
\r
8661 if ( stream_.deviceBuffer ) {
\r
8662 free( stream_.deviceBuffer );
\r
8663 stream_.deviceBuffer = 0;
\r
8669 //******************** End of __LINUX_PULSE__ *********************//
\r
8672 #if defined(__LINUX_OSS__)
\r
8674 #include <unistd.h>
\r
8675 #include <sys/ioctl.h>
\r
8676 #include <unistd.h>
\r
8677 #include <fcntl.h>
\r
8678 #include <sys/soundcard.h>
\r
8679 #include <errno.h>
\r
8682 static void *ossCallbackHandler(void * ptr);
\r
8684 // A structure to hold various information related to the OSS API
\r
8685 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];             // device ids (output, input)
  bool xrun[2];          // over/underrun flags per direction
  bool triggered;        // whether playback/capture has been triggered
  pthread_cond_t runnable;

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8696 RtApiOss :: RtApiOss()
\r
8698 // Nothing to do here.
\r
8701 RtApiOss :: ~RtApiOss()
\r
8703 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8706 unsigned int RtApiOss :: getDeviceCount( void )
\r
8708 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8709 if ( mixerfd == -1 ) {
\r
8710 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8711 error( RtAudioError::WARNING );
\r
8715 oss_sysinfo sysinfo;
\r
8716 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8718 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8719 error( RtAudioError::WARNING );
\r
8724 return sysinfo.numaudios;
\r
8727 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8729 RtAudio::DeviceInfo info;
\r
8730 info.probed = false;
\r
8732 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8733 if ( mixerfd == -1 ) {
\r
8734 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8735 error( RtAudioError::WARNING );
\r
8739 oss_sysinfo sysinfo;
\r
8740 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8741 if ( result == -1 ) {
\r
8743 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8744 error( RtAudioError::WARNING );
\r
8748 unsigned nDevices = sysinfo.numaudios;
\r
8749 if ( nDevices == 0 ) {
\r
8751 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8752 error( RtAudioError::INVALID_USE );
\r
8756 if ( device >= nDevices ) {
\r
8758 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8759 error( RtAudioError::INVALID_USE );
\r
8763 oss_audioinfo ainfo;
\r
8764 ainfo.dev = device;
\r
8765 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8767 if ( result == -1 ) {
\r
8768 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8769 errorText_ = errorStream_.str();
\r
8770 error( RtAudioError::WARNING );
\r
8775 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8776 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8777 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8778 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8779 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8782 // Probe data formats ... do for input
\r
8783 unsigned long mask = ainfo.iformats;
\r
8784 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8785 info.nativeFormats |= RTAUDIO_SINT16;
\r
8786 if ( mask & AFMT_S8 )
\r
8787 info.nativeFormats |= RTAUDIO_SINT8;
\r
8788 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8789 info.nativeFormats |= RTAUDIO_SINT32;
\r
8791 if ( mask & AFMT_FLOAT )
\r
8792 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8794 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8795 info.nativeFormats |= RTAUDIO_SINT24;
\r
8797 // Check that we have at least one supported format
\r
8798 if ( info.nativeFormats == 0 ) {
\r
8799 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8800 errorText_ = errorStream_.str();
\r
8801 error( RtAudioError::WARNING );
\r
8805 // Probe the supported sample rates.
\r
8806 info.sampleRates.clear();
\r
8807 if ( ainfo.nrates ) {
\r
8808 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8809 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8810 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8811 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8813 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8814 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8822 // Check min and max rate values;
\r
8823 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8824 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8825 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8827 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8828 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8833 if ( info.sampleRates.size() == 0 ) {
\r
8834 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8835 errorText_ = errorStream_.str();
\r
8836 error( RtAudioError::WARNING );
\r
8839 info.probed = true;
\r
8840 info.name = ainfo.name;
\r
8847 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8848 unsigned int firstChannel, unsigned int sampleRate,
\r
8849 RtAudioFormat format, unsigned int *bufferSize,
\r
8850 RtAudio::StreamOptions *options )
\r
8852 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8853 if ( mixerfd == -1 ) {
\r
8854 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8858 oss_sysinfo sysinfo;
\r
8859 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8860 if ( result == -1 ) {
\r
8862 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8866 unsigned nDevices = sysinfo.numaudios;
\r
8867 if ( nDevices == 0 ) {
\r
8868 // This should not happen because a check is made before this function is called.
\r
8870 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8874 if ( device >= nDevices ) {
\r
8875 // This should not happen because a check is made before this function is called.
\r
8877 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8881 oss_audioinfo ainfo;
\r
8882 ainfo.dev = device;
\r
8883 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8885 if ( result == -1 ) {
\r
8886 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8887 errorText_ = errorStream_.str();
\r
8891 // Check if device supports input or output
\r
8892 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8893 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8894 if ( mode == OUTPUT )
\r
8895 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8897 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8898 errorText_ = errorStream_.str();
\r
8903 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8904 if ( mode == OUTPUT )
\r
8905 flags |= O_WRONLY;
\r
8906 else { // mode == INPUT
\r
8907 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8908 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8909 close( handle->id[0] );
\r
8910 handle->id[0] = 0;
\r
8911 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8912 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8913 errorText_ = errorStream_.str();
\r
8916 // Check that the number previously set channels is the same.
\r
8917 if ( stream_.nUserChannels[0] != channels ) {
\r
8918 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8919 errorText_ = errorStream_.str();
\r
8925 flags |= O_RDONLY;
\r
8928 // Set exclusive access if specified.
\r
8929 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8931 // Try to open the device.
\r
8933 fd = open( ainfo.devnode, flags, 0 );
\r
8935 if ( errno == EBUSY )
\r
8936 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8938 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8939 errorText_ = errorStream_.str();
\r
8943 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8945 if ( flags | O_RDWR ) {
\r
8946 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8947 if ( result == -1) {
\r
8948 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8949 errorText_ = errorStream_.str();
\r
8955 // Check the device channel support.
\r
8956 stream_.nUserChannels[mode] = channels;
\r
8957 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8959 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8960 errorText_ = errorStream_.str();
\r
8964 // Set the number of channels.
\r
8965 int deviceChannels = channels + firstChannel;
\r
8966 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8967 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8969 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8970 errorText_ = errorStream_.str();
\r
8973 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8975 // Get the data format mask
\r
8977 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8978 if ( result == -1 ) {
\r
8980 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8981 errorText_ = errorStream_.str();
\r
8985 // Determine how to set the device format.
\r
8986 stream_.userFormat = format;
\r
8987 int deviceFormat = -1;
\r
8988 stream_.doByteSwap[mode] = false;
\r
8989 if ( format == RTAUDIO_SINT8 ) {
\r
8990 if ( mask & AFMT_S8 ) {
\r
8991 deviceFormat = AFMT_S8;
\r
8992 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8995 else if ( format == RTAUDIO_SINT16 ) {
\r
8996 if ( mask & AFMT_S16_NE ) {
\r
8997 deviceFormat = AFMT_S16_NE;
\r
8998 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9000 else if ( mask & AFMT_S16_OE ) {
\r
9001 deviceFormat = AFMT_S16_OE;
\r
9002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9003 stream_.doByteSwap[mode] = true;
\r
9006 else if ( format == RTAUDIO_SINT24 ) {
\r
9007 if ( mask & AFMT_S24_NE ) {
\r
9008 deviceFormat = AFMT_S24_NE;
\r
9009 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9011 else if ( mask & AFMT_S24_OE ) {
\r
9012 deviceFormat = AFMT_S24_OE;
\r
9013 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9014 stream_.doByteSwap[mode] = true;
\r
9017 else if ( format == RTAUDIO_SINT32 ) {
\r
9018 if ( mask & AFMT_S32_NE ) {
\r
9019 deviceFormat = AFMT_S32_NE;
\r
9020 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9022 else if ( mask & AFMT_S32_OE ) {
\r
9023 deviceFormat = AFMT_S32_OE;
\r
9024 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9025 stream_.doByteSwap[mode] = true;
\r
9029 if ( deviceFormat == -1 ) {
\r
9030 // The user requested format is not natively supported by the device.
\r
9031 if ( mask & AFMT_S16_NE ) {
\r
9032 deviceFormat = AFMT_S16_NE;
\r
9033 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9035 else if ( mask & AFMT_S32_NE ) {
\r
9036 deviceFormat = AFMT_S32_NE;
\r
9037 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9039 else if ( mask & AFMT_S24_NE ) {
\r
9040 deviceFormat = AFMT_S24_NE;
\r
9041 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9043 else if ( mask & AFMT_S16_OE ) {
\r
9044 deviceFormat = AFMT_S16_OE;
\r
9045 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9046 stream_.doByteSwap[mode] = true;
\r
9048 else if ( mask & AFMT_S32_OE ) {
\r
9049 deviceFormat = AFMT_S32_OE;
\r
9050 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9051 stream_.doByteSwap[mode] = true;
\r
9053 else if ( mask & AFMT_S24_OE ) {
\r
9054 deviceFormat = AFMT_S24_OE;
\r
9055 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9056 stream_.doByteSwap[mode] = true;
\r
9058 else if ( mask & AFMT_S8) {
\r
9059 deviceFormat = AFMT_S8;
\r
9060 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
9064 if ( stream_.deviceFormat[mode] == 0 ) {
\r
9065 // This really shouldn't happen ...
\r
9067 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
9068 errorText_ = errorStream_.str();
\r
9072 // Set the data format.
\r
9073 int temp = deviceFormat;
\r
9074 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
9075 if ( result == -1 || deviceFormat != temp ) {
\r
9077 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
9078 errorText_ = errorStream_.str();
\r
9082 // Attempt to set the buffer size. According to OSS, the minimum
\r
9083 // number of buffers is two. The supposed minimum buffer size is 16
\r
9084 // bytes, so that will be our lower bound. The argument to this
\r
9085 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
9086 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
9087 // We'll check the actual value used near the end of the setup
\r
9089 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
9090 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
9092 if ( options ) buffers = options->numberOfBuffers;
\r
9093 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
9094 if ( buffers < 2 ) buffers = 3;
\r
9095 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
9096 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
9097 if ( result == -1 ) {
\r
9099 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
9100 errorText_ = errorStream_.str();
\r
9103 stream_.nBuffers = buffers;
\r
9105 // Save buffer size (in sample frames).
\r
9106 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
9107 stream_.bufferSize = *bufferSize;
\r
9109 // Set the sample rate.
\r
9110 int srate = sampleRate;
\r
9111 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
9112 if ( result == -1 ) {
\r
9114 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
9115 errorText_ = errorStream_.str();
\r
9119 // Verify the sample rate setup worked.
\r
9120 if ( abs( srate - (int)sampleRate ) > 100 ) {
\r
9122 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9123 errorText_ = errorStream_.str();
\r
9126 stream_.sampleRate = sampleRate;
\r
9128 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9129 // We're doing duplex setup here.
\r
9130 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9131 stream_.nDeviceChannels[0] = deviceChannels;
\r
9134 // Set interleaving parameters.
\r
9135 stream_.userInterleaved = true;
\r
9136 stream_.deviceInterleaved[mode] = true;
\r
9137 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9138 stream_.userInterleaved = false;
\r
9140 // Set flags for buffer conversion
\r
9141 stream_.doConvertBuffer[mode] = false;
\r
9142 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9143 stream_.doConvertBuffer[mode] = true;
\r
9144 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9145 stream_.doConvertBuffer[mode] = true;
\r
9146 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9147 stream_.nUserChannels[mode] > 1 )
\r
9148 stream_.doConvertBuffer[mode] = true;
\r
9150 // Allocate the stream handles if necessary and then save.
\r
9151 if ( stream_.apiHandle == 0 ) {
\r
9153 handle = new OssHandle;
\r
9155 catch ( std::bad_alloc& ) {
\r
9156 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9160 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9161 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9165 stream_.apiHandle = (void *) handle;
\r
9168 handle = (OssHandle *) stream_.apiHandle;
\r
9170 handle->id[mode] = fd;
\r
9172 // Allocate necessary internal buffers.
\r
9173 unsigned long bufferBytes;
\r
9174 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9175 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9176 if ( stream_.userBuffer[mode] == NULL ) {
\r
9177 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9181 if ( stream_.doConvertBuffer[mode] ) {
\r
9183 bool makeBuffer = true;
\r
9184 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9185 if ( mode == INPUT ) {
\r
9186 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9187 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9188 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9192 if ( makeBuffer ) {
\r
9193 bufferBytes *= *bufferSize;
\r
9194 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9195 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9196 if ( stream_.deviceBuffer == NULL ) {
\r
9197 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9203 stream_.device[mode] = device;
\r
9204 stream_.state = STREAM_STOPPED;
\r
9206 // Setup the buffer conversion information structure.
\r
9207 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9209 // Setup thread if necessary.
\r
9210 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9211 // We had already set up an output stream.
\r
9212 stream_.mode = DUPLEX;
\r
9213 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9216 stream_.mode = mode;
\r
9218 // Setup callback thread.
\r
9219 stream_.callbackInfo.object = (void *) this;
\r
9221 // Set the thread attributes for joinable and realtime scheduling
\r
9222 // priority. The higher priority will only take affect if the
\r
9223 // program is run as root or suid.
\r
9224 pthread_attr_t attr;
\r
9225 pthread_attr_init( &attr );
\r
9226 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9227 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9228 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9229 struct sched_param param;
\r
9230 int priority = options->priority;
\r
9231 int min = sched_get_priority_min( SCHED_RR );
\r
9232 int max = sched_get_priority_max( SCHED_RR );
\r
9233 if ( priority < min ) priority = min;
\r
9234 else if ( priority > max ) priority = max;
\r
9235 param.sched_priority = priority;
\r
9236 pthread_attr_setschedparam( &attr, ¶m );
\r
9237 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9240 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9242 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9245 stream_.callbackInfo.isRunning = true;
\r
9246 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9247 pthread_attr_destroy( &attr );
\r
9249 stream_.callbackInfo.isRunning = false;
\r
9250 errorText_ = "RtApiOss::error creating callback thread!";
\r
9259 pthread_cond_destroy( &handle->runnable );
\r
9260 if ( handle->id[0] ) close( handle->id[0] );
\r
9261 if ( handle->id[1] ) close( handle->id[1] );
\r
9263 stream_.apiHandle = 0;
\r
9266 for ( int i=0; i<2; i++ ) {
\r
9267 if ( stream_.userBuffer[i] ) {
\r
9268 free( stream_.userBuffer[i] );
\r
9269 stream_.userBuffer[i] = 0;
\r
9273 if ( stream_.deviceBuffer ) {
\r
9274 free( stream_.deviceBuffer );
\r
9275 stream_.deviceBuffer = 0;
\r
9281 void RtApiOss :: closeStream()
\r
9283 if ( stream_.state == STREAM_CLOSED ) {
\r
9284 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9285 error( RtAudioError::WARNING );
\r
9289 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9290 stream_.callbackInfo.isRunning = false;
\r
9291 MUTEX_LOCK( &stream_.mutex );
\r
9292 if ( stream_.state == STREAM_STOPPED )
\r
9293 pthread_cond_signal( &handle->runnable );
\r
9294 MUTEX_UNLOCK( &stream_.mutex );
\r
9295 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9297 if ( stream_.state == STREAM_RUNNING ) {
\r
9298 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9299 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9301 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9302 stream_.state = STREAM_STOPPED;
\r
9306 pthread_cond_destroy( &handle->runnable );
\r
9307 if ( handle->id[0] ) close( handle->id[0] );
\r
9308 if ( handle->id[1] ) close( handle->id[1] );
\r
9310 stream_.apiHandle = 0;
\r
9313 for ( int i=0; i<2; i++ ) {
\r
9314 if ( stream_.userBuffer[i] ) {
\r
9315 free( stream_.userBuffer[i] );
\r
9316 stream_.userBuffer[i] = 0;
\r
9320 if ( stream_.deviceBuffer ) {
\r
9321 free( stream_.deviceBuffer );
\r
9322 stream_.deviceBuffer = 0;
\r
9325 stream_.mode = UNINITIALIZED;
\r
9326 stream_.state = STREAM_CLOSED;
\r
9329 void RtApiOss :: startStream()
\r
9332 if ( stream_.state == STREAM_RUNNING ) {
\r
9333 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9334 error( RtAudioError::WARNING );
\r
9338 MUTEX_LOCK( &stream_.mutex );
\r
9340 stream_.state = STREAM_RUNNING;
\r
9342 // No need to do anything else here ... OSS automatically starts
\r
9343 // when fed samples.
\r
9345 MUTEX_UNLOCK( &stream_.mutex );
\r
9347 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9348 pthread_cond_signal( &handle->runnable );
\r
9351 void RtApiOss :: stopStream()
\r
9354 if ( stream_.state == STREAM_STOPPED ) {
\r
9355 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9356 error( RtAudioError::WARNING );
\r
9360 MUTEX_LOCK( &stream_.mutex );
\r
9362 // The state might change while waiting on a mutex.
\r
9363 if ( stream_.state == STREAM_STOPPED ) {
\r
9364 MUTEX_UNLOCK( &stream_.mutex );
\r
9369 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9370 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9372 // Flush the output with zeros a few times.
\r
9375 RtAudioFormat format;
\r
9377 if ( stream_.doConvertBuffer[0] ) {
\r
9378 buffer = stream_.deviceBuffer;
\r
9379 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9380 format = stream_.deviceFormat[0];
\r
9383 buffer = stream_.userBuffer[0];
\r
9384 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9385 format = stream_.userFormat;
\r
9388 memset( buffer, 0, samples * formatBytes(format) );
\r
9389 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9390 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9391 if ( result == -1 ) {
\r
9392 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9393 error( RtAudioError::WARNING );
\r
9397 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9398 if ( result == -1 ) {
\r
9399 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9400 errorText_ = errorStream_.str();
\r
9403 handle->triggered = false;
\r
9406 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9407 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9408 if ( result == -1 ) {
\r
9409 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9410 errorText_ = errorStream_.str();
\r
9416 stream_.state = STREAM_STOPPED;
\r
9417 MUTEX_UNLOCK( &stream_.mutex );
\r
9419 if ( result != -1 ) return;
\r
9420 error( RtAudioError::SYSTEM_ERROR );
\r
9423 void RtApiOss :: abortStream()
\r
9426 if ( stream_.state == STREAM_STOPPED ) {
\r
9427 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9428 error( RtAudioError::WARNING );
\r
9432 MUTEX_LOCK( &stream_.mutex );
\r
9434 // The state might change while waiting on a mutex.
\r
9435 if ( stream_.state == STREAM_STOPPED ) {
\r
9436 MUTEX_UNLOCK( &stream_.mutex );
\r
9441 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9442 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9443 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9444 if ( result == -1 ) {
\r
9445 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9446 errorText_ = errorStream_.str();
\r
9449 handle->triggered = false;
\r
9452 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9453 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9454 if ( result == -1 ) {
\r
9455 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9456 errorText_ = errorStream_.str();
\r
9462 stream_.state = STREAM_STOPPED;
\r
9463 MUTEX_UNLOCK( &stream_.mutex );
\r
9465 if ( result != -1 ) return;
\r
9466 error( RtAudioError::SYSTEM_ERROR );
\r
9469 void RtApiOss :: callbackEvent()
\r
9471 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9472 if ( stream_.state == STREAM_STOPPED ) {
\r
9473 MUTEX_LOCK( &stream_.mutex );
\r
9474 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9475 if ( stream_.state != STREAM_RUNNING ) {
\r
9476 MUTEX_UNLOCK( &stream_.mutex );
\r
9479 MUTEX_UNLOCK( &stream_.mutex );
\r
9482 if ( stream_.state == STREAM_CLOSED ) {
\r
9483 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9484 error( RtAudioError::WARNING );
\r
9488 // Invoke user callback to get fresh output data.
\r
9489 int doStopStream = 0;
\r
9490 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9491 double streamTime = getStreamTime();
\r
9492 RtAudioStreamStatus status = 0;
\r
9493 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9494 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9495 handle->xrun[0] = false;
\r
9497 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9498 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9499 handle->xrun[1] = false;
\r
9501 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9502 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9503 if ( doStopStream == 2 ) {
\r
9504 this->abortStream();
\r
9508 MUTEX_LOCK( &stream_.mutex );
\r
9510 // The state might change while waiting on a mutex.
\r
9511 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9516 RtAudioFormat format;
\r
9518 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9520 // Setup parameters and do buffer conversion if necessary.
\r
9521 if ( stream_.doConvertBuffer[0] ) {
\r
9522 buffer = stream_.deviceBuffer;
\r
9523 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9524 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9525 format = stream_.deviceFormat[0];
\r
9528 buffer = stream_.userBuffer[0];
\r
9529 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9530 format = stream_.userFormat;
\r
9533 // Do byte swapping if necessary.
\r
9534 if ( stream_.doByteSwap[0] )
\r
9535 byteSwapBuffer( buffer, samples, format );
\r
9537 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9539 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9540 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9541 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9542 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9543 handle->triggered = true;
\r
9546 // Write samples to device.
\r
9547 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9549 if ( result == -1 ) {
\r
9550 // We'll assume this is an underrun, though there isn't a
\r
9551 // specific means for determining that.
\r
9552 handle->xrun[0] = true;
\r
9553 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9554 error( RtAudioError::WARNING );
\r
9555 // Continue on to input section.
\r
9559 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9561 // Setup parameters.
\r
9562 if ( stream_.doConvertBuffer[1] ) {
\r
9563 buffer = stream_.deviceBuffer;
\r
9564 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9565 format = stream_.deviceFormat[1];
\r
9568 buffer = stream_.userBuffer[1];
\r
9569 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9570 format = stream_.userFormat;
\r
9573 // Read samples from device.
\r
9574 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9576 if ( result == -1 ) {
\r
9577 // We'll assume this is an overrun, though there isn't a
\r
9578 // specific means for determining that.
\r
9579 handle->xrun[1] = true;
\r
9580 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9581 error( RtAudioError::WARNING );
\r
9585 // Do byte swapping if necessary.
\r
9586 if ( stream_.doByteSwap[1] )
\r
9587 byteSwapBuffer( buffer, samples, format );
\r
9589 // Do buffer conversion if necessary.
\r
9590 if ( stream_.doConvertBuffer[1] )
\r
9591 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9595 MUTEX_UNLOCK( &stream_.mutex );
\r
9597 RtApi::tickStreamTime();
\r
9598 if ( doStopStream == 1 ) this->stopStream();
\r
9601 static void *ossCallbackHandler( void *ptr )
\r
9603 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9604 RtApiOss *object = (RtApiOss *) info->object;
\r
9605 bool *isRunning = &info->isRunning;
\r
9607 while ( *isRunning == true ) {
\r
9608 pthread_testcancel();
\r
9609 object->callbackEvent();
\r
9612 pthread_exit( NULL );
\r
//******************** End of __LINUX_OSS__ *********************//

// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //

// This method can be modified to control the behavior of error
// message printing.
\r
9627 void RtApi :: error( RtAudioError::Type type )
\r
9629 errorStream_.str(""); // clear the ostringstream
\r
9631 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9632 if ( errorCallback ) {
\r
9633 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9635 if ( firstErrorOccurred_ )
\r
9638 firstErrorOccurred_ = true;
\r
9639 const std::string errorMessage = errorText_;
\r
9641 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9642 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9646 errorCallback( type, errorMessage );
\r
9647 firstErrorOccurred_ = false;
\r
9651 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9652 std::cerr << '\n' << errorText_ << "\n\n";
\r
9653 else if ( type != RtAudioError::WARNING )
\r
9654 throw( RtAudioError( errorText_, type ) );
\r
9657 void RtApi :: verifyStream()
\r
9659 if ( stream_.state == STREAM_CLOSED ) {
\r
9660 errorText_ = "RtApi:: a stream is not open!";
\r
9661 error( RtAudioError::INVALID_USE );
\r
9665 void RtApi :: clearStreamInfo()
\r
9667 stream_.mode = UNINITIALIZED;
\r
9668 stream_.state = STREAM_CLOSED;
\r
9669 stream_.sampleRate = 0;
\r
9670 stream_.bufferSize = 0;
\r
9671 stream_.nBuffers = 0;
\r
9672 stream_.userFormat = 0;
\r
9673 stream_.userInterleaved = true;
\r
9674 stream_.streamTime = 0.0;
\r
9675 stream_.apiHandle = 0;
\r
9676 stream_.deviceBuffer = 0;
\r
9677 stream_.callbackInfo.callback = 0;
\r
9678 stream_.callbackInfo.userData = 0;
\r
9679 stream_.callbackInfo.isRunning = false;
\r
9680 stream_.callbackInfo.errorCallback = 0;
\r
9681 for ( int i=0; i<2; i++ ) {
\r
9682 stream_.device[i] = 11111;
\r
9683 stream_.doConvertBuffer[i] = false;
\r
9684 stream_.deviceInterleaved[i] = true;
\r
9685 stream_.doByteSwap[i] = false;
\r
9686 stream_.nUserChannels[i] = 0;
\r
9687 stream_.nDeviceChannels[i] = 0;
\r
9688 stream_.channelOffset[i] = 0;
\r
9689 stream_.deviceFormat[i] = 0;
\r
9690 stream_.latency[i] = 0;
\r
9691 stream_.userBuffer[i] = 0;
\r
9692 stream_.convertInfo[i].channels = 0;
\r
9693 stream_.convertInfo[i].inJump = 0;
\r
9694 stream_.convertInfo[i].outJump = 0;
\r
9695 stream_.convertInfo[i].inFormat = 0;
\r
9696 stream_.convertInfo[i].outFormat = 0;
\r
9697 stream_.convertInfo[i].inOffset.clear();
\r
9698 stream_.convertInfo[i].outOffset.clear();
\r
9702 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9704 if ( format == RTAUDIO_SINT16 )
\r
9706 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9708 else if ( format == RTAUDIO_FLOAT64 )
\r
9710 else if ( format == RTAUDIO_SINT24 )
\r
9712 else if ( format == RTAUDIO_SINT8 )
\r
9715 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9716 error( RtAudioError::WARNING );
\r
9721 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9723 if ( mode == INPUT ) { // convert device to user buffer
\r
9724 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9725 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9726 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9727 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9729 else { // convert user to device buffer
\r
9730 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9731 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9732 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9733 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9736 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9737 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9739 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9741 // Set up the interleave/deinterleave offsets.
\r
9742 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9743 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9744 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9745 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9746 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9747 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9748 stream_.convertInfo[mode].inJump = 1;
\r
9752 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9753 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9754 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9755 stream_.convertInfo[mode].outJump = 1;
\r
9759 else { // no (de)interleaving
\r
9760 if ( stream_.userInterleaved ) {
\r
9761 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9762 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9763 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9767 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9768 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9769 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9770 stream_.convertInfo[mode].inJump = 1;
\r
9771 stream_.convertInfo[mode].outJump = 1;
\r
9776 // Add channel offset.
\r
9777 if ( firstChannel > 0 ) {
\r
9778 if ( stream_.deviceInterleaved[mode] ) {
\r
9779 if ( mode == OUTPUT ) {
\r
9780 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9781 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9784 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9785 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9789 if ( mode == OUTPUT ) {
\r
9790 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9791 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9794 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9795 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9801 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9803 // This function does format conversion, input/output channel compensation, and
\r
9804 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9805 // the lower three bytes of a 32-bit integer.
\r
9807 // Clear our device buffer when in/out duplex device channels are different
\r
9808 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9809 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9810 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9813 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9815 Float64 *out = (Float64 *)outBuffer;
\r
9817 if (info.inFormat == RTAUDIO_SINT8) {
\r
9818 signed char *in = (signed char *)inBuffer;
\r
9819 scale = 1.0 / 127.5;
\r
9820 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9821 for (j=0; j<info.channels; j++) {
\r
9822 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9823 out[info.outOffset[j]] += 0.5;
\r
9824 out[info.outOffset[j]] *= scale;
\r
9826 in += info.inJump;
\r
9827 out += info.outJump;
\r
9830 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9831 Int16 *in = (Int16 *)inBuffer;
\r
9832 scale = 1.0 / 32767.5;
\r
9833 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9834 for (j=0; j<info.channels; j++) {
\r
9835 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9836 out[info.outOffset[j]] += 0.5;
\r
9837 out[info.outOffset[j]] *= scale;
\r
9839 in += info.inJump;
\r
9840 out += info.outJump;
\r
9843 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9844 Int24 *in = (Int24 *)inBuffer;
\r
9845 scale = 1.0 / 8388607.5;
\r
9846 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9847 for (j=0; j<info.channels; j++) {
\r
9848 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9849 out[info.outOffset[j]] += 0.5;
\r
9850 out[info.outOffset[j]] *= scale;
\r
9852 in += info.inJump;
\r
9853 out += info.outJump;
\r
9856 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9857 Int32 *in = (Int32 *)inBuffer;
\r
9858 scale = 1.0 / 2147483647.5;
\r
9859 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9860 for (j=0; j<info.channels; j++) {
\r
9861 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9862 out[info.outOffset[j]] += 0.5;
\r
9863 out[info.outOffset[j]] *= scale;
\r
9865 in += info.inJump;
\r
9866 out += info.outJump;
\r
9869 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9870 Float32 *in = (Float32 *)inBuffer;
\r
9871 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9872 for (j=0; j<info.channels; j++) {
\r
9873 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9875 in += info.inJump;
\r
9876 out += info.outJump;
\r
9879 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9880 // Channel compensation and/or (de)interleaving only.
\r
9881 Float64 *in = (Float64 *)inBuffer;
\r
9882 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9883 for (j=0; j<info.channels; j++) {
\r
9884 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9886 in += info.inJump;
\r
9887 out += info.outJump;
\r
9891 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9893 Float32 *out = (Float32 *)outBuffer;
\r
9895 if (info.inFormat == RTAUDIO_SINT8) {
\r
9896 signed char *in = (signed char *)inBuffer;
\r
9897 scale = (Float32) ( 1.0 / 127.5 );
\r
9898 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9899 for (j=0; j<info.channels; j++) {
\r
9900 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9901 out[info.outOffset[j]] += 0.5;
\r
9902 out[info.outOffset[j]] *= scale;
\r
9904 in += info.inJump;
\r
9905 out += info.outJump;
\r
9908 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9909 Int16 *in = (Int16 *)inBuffer;
\r
9910 scale = (Float32) ( 1.0 / 32767.5 );
\r
9911 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9912 for (j=0; j<info.channels; j++) {
\r
9913 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9914 out[info.outOffset[j]] += 0.5;
\r
9915 out[info.outOffset[j]] *= scale;
\r
9917 in += info.inJump;
\r
9918 out += info.outJump;
\r
9921 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9922 Int24 *in = (Int24 *)inBuffer;
\r
9923 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9924 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9925 for (j=0; j<info.channels; j++) {
\r
9926 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9927 out[info.outOffset[j]] += 0.5;
\r
9928 out[info.outOffset[j]] *= scale;
\r
9930 in += info.inJump;
\r
9931 out += info.outJump;
\r
9934 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9935 Int32 *in = (Int32 *)inBuffer;
\r
9936 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9937 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9938 for (j=0; j<info.channels; j++) {
\r
9939 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9940 out[info.outOffset[j]] += 0.5;
\r
9941 out[info.outOffset[j]] *= scale;
\r
9943 in += info.inJump;
\r
9944 out += info.outJump;
\r
9947 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9948 // Channel compensation and/or (de)interleaving only.
\r
9949 Float32 *in = (Float32 *)inBuffer;
\r
9950 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9951 for (j=0; j<info.channels; j++) {
\r
9952 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9954 in += info.inJump;
\r
9955 out += info.outJump;
\r
9958 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9959 Float64 *in = (Float64 *)inBuffer;
\r
9960 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9961 for (j=0; j<info.channels; j++) {
\r
9962 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9964 in += info.inJump;
\r
9965 out += info.outJump;
\r
9969 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9970 Int32 *out = (Int32 *)outBuffer;
\r
9971 if (info.inFormat == RTAUDIO_SINT8) {
\r
9972 signed char *in = (signed char *)inBuffer;
\r
9973 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9974 for (j=0; j<info.channels; j++) {
\r
9975 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9976 out[info.outOffset[j]] <<= 24;
\r
9978 in += info.inJump;
\r
9979 out += info.outJump;
\r
9982 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9983 Int16 *in = (Int16 *)inBuffer;
\r
9984 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9985 for (j=0; j<info.channels; j++) {
\r
9986 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9987 out[info.outOffset[j]] <<= 16;
\r
9989 in += info.inJump;
\r
9990 out += info.outJump;
\r
9993 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9994 Int24 *in = (Int24 *)inBuffer;
\r
9995 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9996 for (j=0; j<info.channels; j++) {
\r
9997 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9998 out[info.outOffset[j]] <<= 8;
\r
10000 in += info.inJump;
\r
10001 out += info.outJump;
\r
10004 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10005 // Channel compensation and/or (de)interleaving only.
\r
10006 Int32 *in = (Int32 *)inBuffer;
\r
10007 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10008 for (j=0; j<info.channels; j++) {
\r
10009 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10011 in += info.inJump;
\r
10012 out += info.outJump;
\r
10015 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10016 Float32 *in = (Float32 *)inBuffer;
\r
10017 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10018 for (j=0; j<info.channels; j++) {
\r
10019 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10021 in += info.inJump;
\r
10022 out += info.outJump;
\r
10025 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10026 Float64 *in = (Float64 *)inBuffer;
\r
10027 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10028 for (j=0; j<info.channels; j++) {
\r
10029 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10031 in += info.inJump;
\r
10032 out += info.outJump;
\r
10036 else if (info.outFormat == RTAUDIO_SINT24) {
\r
10037 Int24 *out = (Int24 *)outBuffer;
\r
10038 if (info.inFormat == RTAUDIO_SINT8) {
\r
10039 signed char *in = (signed char *)inBuffer;
\r
10040 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10041 for (j=0; j<info.channels; j++) {
\r
10042 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
10043 //out[info.outOffset[j]] <<= 16;
\r
10045 in += info.inJump;
\r
10046 out += info.outJump;
\r
10049 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10050 Int16 *in = (Int16 *)inBuffer;
\r
10051 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10052 for (j=0; j<info.channels; j++) {
\r
10053 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
10054 //out[info.outOffset[j]] <<= 8;
\r
10056 in += info.inJump;
\r
10057 out += info.outJump;
\r
10060 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10061 // Channel compensation and/or (de)interleaving only.
\r
10062 Int24 *in = (Int24 *)inBuffer;
\r
10063 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10064 for (j=0; j<info.channels; j++) {
\r
10065 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10067 in += info.inJump;
\r
10068 out += info.outJump;
\r
10071 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10072 Int32 *in = (Int32 *)inBuffer;
\r
10073 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10074 for (j=0; j<info.channels; j++) {
\r
10075 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
10076 //out[info.outOffset[j]] >>= 8;
\r
10078 in += info.inJump;
\r
10079 out += info.outJump;
\r
10082 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10083 Float32 *in = (Float32 *)inBuffer;
\r
10084 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10085 for (j=0; j<info.channels; j++) {
\r
10086 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10088 in += info.inJump;
\r
10089 out += info.outJump;
\r
10092 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10093 Float64 *in = (Float64 *)inBuffer;
\r
10094 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10095 for (j=0; j<info.channels; j++) {
\r
10096 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10098 in += info.inJump;
\r
10099 out += info.outJump;
\r
10103 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10104 Int16 *out = (Int16 *)outBuffer;
\r
10105 if (info.inFormat == RTAUDIO_SINT8) {
\r
10106 signed char *in = (signed char *)inBuffer;
\r
10107 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10108 for (j=0; j<info.channels; j++) {
\r
10109 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10110 out[info.outOffset[j]] <<= 8;
\r
10112 in += info.inJump;
\r
10113 out += info.outJump;
\r
10116 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10117 // Channel compensation and/or (de)interleaving only.
\r
10118 Int16 *in = (Int16 *)inBuffer;
\r
10119 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10120 for (j=0; j<info.channels; j++) {
\r
10121 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10123 in += info.inJump;
\r
10124 out += info.outJump;
\r
10127 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10128 Int24 *in = (Int24 *)inBuffer;
\r
10129 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10130 for (j=0; j<info.channels; j++) {
\r
10131 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10133 in += info.inJump;
\r
10134 out += info.outJump;
\r
10137 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10138 Int32 *in = (Int32 *)inBuffer;
\r
10139 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10140 for (j=0; j<info.channels; j++) {
\r
10141 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10143 in += info.inJump;
\r
10144 out += info.outJump;
\r
10147 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10148 Float32 *in = (Float32 *)inBuffer;
\r
10149 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10150 for (j=0; j<info.channels; j++) {
\r
10151 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10153 in += info.inJump;
\r
10154 out += info.outJump;
\r
10157 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10158 Float64 *in = (Float64 *)inBuffer;
\r
10159 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10160 for (j=0; j<info.channels; j++) {
\r
10161 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10163 in += info.inJump;
\r
10164 out += info.outJump;
\r
10168 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10169 signed char *out = (signed char *)outBuffer;
\r
10170 if (info.inFormat == RTAUDIO_SINT8) {
\r
10171 // Channel compensation and/or (de)interleaving only.
\r
10172 signed char *in = (signed char *)inBuffer;
\r
10173 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10174 for (j=0; j<info.channels; j++) {
\r
10175 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10177 in += info.inJump;
\r
10178 out += info.outJump;
\r
10181 if (info.inFormat == RTAUDIO_SINT16) {
\r
10182 Int16 *in = (Int16 *)inBuffer;
\r
10183 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10184 for (j=0; j<info.channels; j++) {
\r
10185 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10187 in += info.inJump;
\r
10188 out += info.outJump;
\r
10191 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10192 Int24 *in = (Int24 *)inBuffer;
\r
10193 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10194 for (j=0; j<info.channels; j++) {
\r
10195 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10197 in += info.inJump;
\r
10198 out += info.outJump;
\r
10201 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10202 Int32 *in = (Int32 *)inBuffer;
\r
10203 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10204 for (j=0; j<info.channels; j++) {
\r
10205 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10207 in += info.inJump;
\r
10208 out += info.outJump;
\r
10211 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10212 Float32 *in = (Float32 *)inBuffer;
\r
10213 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10214 for (j=0; j<info.channels; j++) {
\r
10215 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10217 in += info.inJump;
\r
10218 out += info.outJump;
\r
10221 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10222 Float64 *in = (Float64 *)inBuffer;
\r
10223 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10224 for (j=0; j<info.channels; j++) {
\r
10225 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10227 in += info.inJump;
\r
10228 out += info.outJump;
\r
10234 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10235 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10236 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10238 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10244 if ( format == RTAUDIO_SINT16 ) {
\r
10245 for ( unsigned int i=0; i<samples; i++ ) {
\r
10246 // Swap 1st and 2nd bytes.
\r
10248 *(ptr) = *(ptr+1);
\r
10251 // Increment 2 bytes.
\r
10255 else if ( format == RTAUDIO_SINT32 ||
\r
10256 format == RTAUDIO_FLOAT32 ) {
\r
10257 for ( unsigned int i=0; i<samples; i++ ) {
\r
10258 // Swap 1st and 4th bytes.
\r
10260 *(ptr) = *(ptr+3);
\r
10263 // Swap 2nd and 3rd bytes.
\r
10266 *(ptr) = *(ptr+1);
\r
10269 // Increment 3 more bytes.
\r
10273 else if ( format == RTAUDIO_SINT24 ) {
\r
10274 for ( unsigned int i=0; i<samples; i++ ) {
\r
10275 // Swap 1st and 3rd bytes.
\r
10277 *(ptr) = *(ptr+2);
\r
10280 // Increment 2 more bytes.
\r
10284 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10285 for ( unsigned int i=0; i<samples; i++ ) {
\r
10286 // Swap 1st and 8th bytes
\r
10288 *(ptr) = *(ptr+7);
\r
10291 // Swap 2nd and 7th bytes
\r
10294 *(ptr) = *(ptr+5);
\r
10297 // Swap 3rd and 6th bytes
\r
10300 *(ptr) = *(ptr+3);
\r
10303 // Swap 4th and 5th bytes
\r
10306 *(ptr) = *(ptr+1);
\r
10309 // Increment 5 more bytes.
\r
10315 // Indentation settings for Vim and Emacs
\r
10317 // Local Variables:
\r
10318 // c-basic-offset: 2
\r
10319 // indent-tabs-mode: nil
\r
10322 // vim: et sts=2 sw=2
\r