1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2016 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.2
\r
43 #include "RtAudio.h"
\r
48 #include <algorithm>
\r
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
57 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
\r
58 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
\r
59 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
\r
60 #define MUTEX_LOCK(A) EnterCriticalSection(A)
\r
61 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
\r
// Trivial overload: a narrow C string converts directly to std::string.
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}
\r
70 static std::string convertCharPointerToStdString(const wchar_t *text)
\r
72 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
\r
73 std::string s( length-1, '\0' );
\r
74 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
\r
78 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
\r
80 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
\r
81 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
\r
82 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
\r
83 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
\r
85 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
\r
86 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
\r
89 // *************************************************** //
\r
91 // RtAudio definitions.
\r
93 // *************************************************** //
\r
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
231 // *************************************************** //
\r
233 // Public RtApi definitions (see end of file for
\r
234 // private or protected utility functions).
\r
236 // *************************************************** //
\r
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
440 // *************************************************** //
\r
442 // OS/API-specific methods.
\r
444 // *************************************************** //
\r
446 #if defined(__MACOSX_CORE__)
\r
448 // The OS X CoreAudio API is designed to use a separate callback
\r
449 // procedure for each of its audio devices. A single RtAudio duplex
\r
450 // stream using two different devices is supported here, though it
\r
451 // cannot be guaranteed to always behave correctly because we cannot
\r
452 // synchronize these two callbacks.
\r
454 // A property listener is installed for over/underrun information.
\r
455 // However, no functionality is currently provided to allow property
\r
456 // listeners to trigger user handlers because it is unclear what could
\r
457 // be done if a critical stream parameter (buffer size, sample rate,
\r
458 // device disconnect) notification arrived. The listeners entail
\r
459 // quite a bit of extra code and most likely, a user program wouldn't
\r
460 // be prepared for the result anyway. However, we do provide a flag
\r
461 // to the client callback function to inform of an over/underrun.
\r
463 // A structure to hold various information related to the CoreAudio API
\r
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
881 unsigned int firstChannel, unsigned int sampleRate,
\r
882 RtAudioFormat format, unsigned int *bufferSize,
\r
883 RtAudio::StreamOptions *options )
\r
886 unsigned int nDevices = getDeviceCount();
\r
887 if ( nDevices == 0 ) {
\r
888 // This should not happen because a check is made before this function is called.
\r
889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
893 if ( device >= nDevices ) {
\r
894 // This should not happen because a check is made before this function is called.
\r
895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
899 AudioDeviceID deviceList[ nDevices ];
\r
900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
902 kAudioObjectPropertyScopeGlobal,
\r
903 kAudioObjectPropertyElementMaster };
\r
904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
905 0, NULL, &dataSize, (void *) &deviceList );
\r
906 if ( result != noErr ) {
\r
907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
911 AudioDeviceID id = deviceList[ device ];
\r
913 // Setup for stream mode.
\r
914 bool isInput = false;
\r
915 if ( mode == INPUT ) {
\r
917 property.mScope = kAudioDevicePropertyScopeInput;
\r
920 property.mScope = kAudioDevicePropertyScopeOutput;
\r
922 // Get the stream "configuration".
\r
923 AudioBufferList *bufferList = nil;
\r
925 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
927 if ( result != noErr || dataSize == 0 ) {
\r
928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
929 errorText_ = errorStream_.str();
\r
933 // Allocate the AudioBufferList.
\r
934 bufferList = (AudioBufferList *) malloc( dataSize );
\r
935 if ( bufferList == NULL ) {
\r
936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
941 if (result != noErr || dataSize == 0) {
\r
942 free( bufferList );
\r
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
944 errorText_ = errorStream_.str();
\r
948 // Search for one or more streams that contain the desired number of
\r
949 // channels. CoreAudio devices can have an arbitrary number of
\r
950 // streams and each stream can have an arbitrary number of channels.
\r
951 // For each stream, a single buffer of interleaved samples is
\r
952 // provided. RtAudio prefers the use of one stream of interleaved
\r
953 // data or multiple consecutive single-channel streams. However, we
\r
954 // now support multiple consecutive multi-channel streams of
\r
955 // interleaved data as well.
\r
956 UInt32 iStream, offsetCounter = firstChannel;
\r
957 UInt32 nStreams = bufferList->mNumberBuffers;
\r
958 bool monoMode = false;
\r
959 bool foundStream = false;
\r
961 // First check that the device supports the requested number of
\r
963 UInt32 deviceChannels = 0;
\r
964 for ( iStream=0; iStream<nStreams; iStream++ )
\r
965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
967 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
968 free( bufferList );
\r
969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
970 errorText_ = errorStream_.str();
\r
974 // Look for a single stream meeting our needs.
\r
975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
976 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
978 if ( streamChannels >= channels + offsetCounter ) {
\r
979 firstStream = iStream;
\r
980 channelOffset = offsetCounter;
\r
981 foundStream = true;
\r
984 if ( streamChannels > offsetCounter ) break;
\r
985 offsetCounter -= streamChannels;
\r
988 // If we didn't find a single stream above, then we should be able
\r
989 // to meet the channel specification with multiple streams.
\r
990 if ( foundStream == false ) {
\r
992 offsetCounter = firstChannel;
\r
993 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
995 if ( streamChannels > offsetCounter ) break;
\r
996 offsetCounter -= streamChannels;
\r
999 firstStream = iStream;
\r
1000 channelOffset = offsetCounter;
\r
1001 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1003 if ( streamChannels > 1 ) monoMode = false;
\r
1004 while ( channelCounter > 0 ) {
\r
1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1006 if ( streamChannels > 1 ) monoMode = false;
\r
1007 channelCounter -= streamChannels;
\r
1012 free( bufferList );
\r
1014 // Determine the buffer size.
\r
1015 AudioValueRange bufferRange;
\r
1016 dataSize = sizeof( AudioValueRange );
\r
1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1020 if ( result != noErr ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1030 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1031 // need to make this setting for the master channel.
\r
1032 UInt32 theSize = (UInt32) *bufferSize;
\r
1033 dataSize = sizeof( UInt32 );
\r
1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1037 if ( result != noErr ) {
\r
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1039 errorText_ = errorStream_.str();
\r
1043 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1044 // MUST be the same in both directions!
\r
1045 *bufferSize = theSize;
\r
1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1048 errorText_ = errorStream_.str();
\r
1052 stream_.bufferSize = *bufferSize;
\r
1053 stream_.nBuffers = 1;
\r
1055 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1058 dataSize = sizeof( hog_pid );
\r
1059 property.mSelector = kAudioDevicePropertyHogMode;
\r
1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1061 if ( result != noErr ) {
\r
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1063 errorText_ = errorStream_.str();
\r
1067 if ( hog_pid != getpid() ) {
\r
1068 hog_pid = getpid();
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1070 if ( result != noErr ) {
\r
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1072 errorText_ = errorStream_.str();
\r
1078 // Check and if necessary, change the sample rate for the device.
\r
1079 Float64 nominalRate;
\r
1080 dataSize = sizeof( Float64 );
\r
1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1083 if ( result != noErr ) {
\r
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1085 errorText_ = errorStream_.str();
\r
1089 // Only change the sample rate if off by more than 1 Hz.
\r
1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1092 // Set a property listener for the sample rate change
\r
1093 Float64 reportedRate = 0.0;
\r
1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1096 if ( result != noErr ) {
\r
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1098 errorText_ = errorStream_.str();
\r
1102 nominalRate = (Float64) sampleRate;
\r
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1104 if ( result != noErr ) {
\r
1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1107 errorText_ = errorStream_.str();
\r
1111 // Now wait until the reported nominal rate is what we just set.
\r
1112 UInt32 microCounter = 0;
\r
1113 while ( reportedRate != nominalRate ) {
\r
1114 microCounter += 5000;
\r
1115 if ( microCounter > 5000000 ) break;
\r
1119 // Remove the property listener.
\r
1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1122 if ( microCounter > 5000000 ) {
\r
1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1124 errorText_ = errorStream_.str();
\r
1129 // Now set the stream format for all streams. Also, check the
\r
1130 // physical format of the device and change that if necessary.
\r
1131 AudioStreamBasicDescription description;
\r
1132 dataSize = sizeof( AudioStreamBasicDescription );
\r
1133 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1135 if ( result != noErr ) {
\r
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1137 errorText_ = errorStream_.str();
\r
1141 // Set the sample rate and data format id. However, only make the
\r
1142 // change if the sample rate is not within 1.0 of the desired
\r
1143 // rate and the format is not linear pcm.
\r
1144 bool updateFormat = false;
\r
1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1146 description.mSampleRate = (Float64) sampleRate;
\r
1147 updateFormat = true;
\r
1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1151 description.mFormatID = kAudioFormatLinearPCM;
\r
1152 updateFormat = true;
\r
1155 if ( updateFormat ) {
\r
1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1157 if ( result != noErr ) {
\r
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1159 errorText_ = errorStream_.str();
\r
1164 // Now check the physical format.
\r
1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1167 if ( result != noErr ) {
\r
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1169 errorText_ = errorStream_.str();
\r
1173 //std::cout << "Current physical stream format:" << std::endl;
\r
1174 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1175 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1176 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1177 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1180 description.mFormatID = kAudioFormatLinearPCM;
\r
1181 //description.mSampleRate = (Float64) sampleRate;
\r
1182 AudioStreamBasicDescription testDescription = description;
\r
1183 UInt32 formatFlags;
\r
1185 // We'll try higher bit rates first and then work our way down.
\r
1186 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1200 bool setPhysicalFormat = false;
\r
1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1202 testDescription = description;
\r
1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1204 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1206 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1208 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1211 if ( result == noErr ) {
\r
1212 setPhysicalFormat = true;
\r
1213 //std::cout << "Updated physical stream format:" << std::endl;
\r
1214 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1215 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1216 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1217 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1222 if ( !setPhysicalFormat ) {
\r
1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1224 errorText_ = errorStream_.str();
\r
1227 } // done setting virtual/physical formats.
\r
1229 // Get the stream / device latency.
\r
1231 dataSize = sizeof( UInt32 );
\r
1232 property.mSelector = kAudioDevicePropertyLatency;
\r
1233 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1238 errorText_ = errorStream_.str();
\r
1239 error( RtAudioError::WARNING );
\r
1243 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1244 // always be presented in native-endian format, so we should never
\r
1245 // need to byte swap.
\r
1246 stream_.doByteSwap[mode] = false;
\r
1248 // From the CoreAudio documentation, PCM data must be supplied as
\r
1250 stream_.userFormat = format;
\r
1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1253 if ( streamCount == 1 )
\r
1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1255 else // multiple streams
\r
1256 stream_.nDeviceChannels[mode] = channels;
\r
1257 stream_.nUserChannels[mode] = channels;
\r
1258 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1260 else stream_.userInterleaved = true;
\r
1261 stream_.deviceInterleaved[mode] = true;
\r
1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1264 // Set flags for buffer conversion.
\r
1265 stream_.doConvertBuffer[mode] = false;
\r
1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1267 stream_.doConvertBuffer[mode] = true;
\r
1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1269 stream_.doConvertBuffer[mode] = true;
\r
1270 if ( streamCount == 1 ) {
\r
1271 if ( stream_.nUserChannels[mode] > 1 &&
\r
1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1273 stream_.doConvertBuffer[mode] = true;
\r
1275 else if ( monoMode && stream_.userInterleaved )
\r
1276 stream_.doConvertBuffer[mode] = true;
\r
1278 // Allocate our CoreHandle structure for the stream.
\r
1279 CoreHandle *handle = 0;
\r
1280 if ( stream_.apiHandle == 0 ) {
\r
1282 handle = new CoreHandle;
\r
1284 catch ( std::bad_alloc& ) {
\r
1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1293 stream_.apiHandle = (void *) handle;
\r
1296 handle = (CoreHandle *) stream_.apiHandle;
\r
1297 handle->iStream[mode] = firstStream;
\r
1298 handle->nStreams[mode] = streamCount;
\r
1299 handle->id[mode] = id;
\r
1301 // Allocate necessary internal buffers.
\r
1302 unsigned long bufferBytes;
\r
1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1304 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1307 if ( stream_.userBuffer[mode] == NULL ) {
\r
1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1312 // If possible, we will make use of the CoreAudio stream buffers as
\r
1313 // "device buffers". However, we can't do this if using multiple
\r
1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1317 bool makeBuffer = true;
\r
1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1319 if ( mode == INPUT ) {
\r
1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1326 if ( makeBuffer ) {
\r
1327 bufferBytes *= *bufferSize;
\r
1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1330 if ( stream_.deviceBuffer == NULL ) {
\r
1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1337 stream_.sampleRate = sampleRate;
\r
1338 stream_.device[mode] = device;
\r
1339 stream_.state = STREAM_STOPPED;
\r
1340 stream_.callbackInfo.object = (void *) this;
\r
1342 // Setup the buffer conversion information structure.
\r
1343 if ( stream_.doConvertBuffer[mode] ) {
\r
1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1345 else setConvertInfo( mode, channelOffset );
\r
1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1349 // Only one callback procedure per device.
\r
1350 stream_.mode = DUPLEX;
\r
1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1355 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1358 if ( result != noErr ) {
\r
1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1360 errorText_ = errorStream_.str();
\r
1363 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1364 stream_.mode = DUPLEX;
\r
1366 stream_.mode = mode;
\r
1369 // Setup the device property listener for over/underload.
\r
1370 property.mSelector = kAudioDeviceProcessorOverload;
\r
1371 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1378 pthread_cond_destroy( &handle->condition );
\r
1380 stream_.apiHandle = 0;
\r
1383 for ( int i=0; i<2; i++ ) {
\r
1384 if ( stream_.userBuffer[i] ) {
\r
1385 free( stream_.userBuffer[i] );
\r
1386 stream_.userBuffer[i] = 0;
\r
1390 if ( stream_.deviceBuffer ) {
\r
1391 free( stream_.deviceBuffer );
\r
1392 stream_.deviceBuffer = 0;
\r
1395 stream_.state = STREAM_CLOSED;
\r
1399 void RtApiCore :: closeStream( void )
\r
1401 if ( stream_.state == STREAM_CLOSED ) {
\r
1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1403 error( RtAudioError::WARNING );
\r
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1410 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1411 kAudioObjectPropertyScopeGlobal,
\r
1412 kAudioObjectPropertyElementMaster };
\r
1414 property.mSelector = kAudioDeviceProcessorOverload;
\r
1415 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1416 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
\r
1417 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1418 error( RtAudioError::WARNING );
\r
1421 if ( stream_.state == STREAM_RUNNING )
\r
1422 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1423 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1424 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1426 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1427 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1431 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1433 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
1434 kAudioObjectPropertyScopeGlobal,
\r
1435 kAudioObjectPropertyElementMaster };
\r
1437 property.mSelector = kAudioDeviceProcessorOverload;
\r
1438 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1439 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
\r
1440 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
\r
1441 error( RtAudioError::WARNING );
\r
1444 if ( stream_.state == STREAM_RUNNING )
\r
1445 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1446 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1447 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1449 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1450 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1454 for ( int i=0; i<2; i++ ) {
\r
1455 if ( stream_.userBuffer[i] ) {
\r
1456 free( stream_.userBuffer[i] );
\r
1457 stream_.userBuffer[i] = 0;
\r
1461 if ( stream_.deviceBuffer ) {
\r
1462 free( stream_.deviceBuffer );
\r
1463 stream_.deviceBuffer = 0;
\r
1466 // Destroy pthread condition variable.
\r
1467 pthread_cond_destroy( &handle->condition );
\r
1469 stream_.apiHandle = 0;
\r
1471 stream_.mode = UNINITIALIZED;
\r
1472 stream_.state = STREAM_CLOSED;
\r
1475 void RtApiCore :: startStream( void )
\r
1478 if ( stream_.state == STREAM_RUNNING ) {
\r
1479 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1480 error( RtAudioError::WARNING );
\r
1484 OSStatus result = noErr;
\r
1485 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1486 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1488 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1489 if ( result != noErr ) {
\r
1490 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1491 errorText_ = errorStream_.str();
\r
1496 if ( stream_.mode == INPUT ||
\r
1497 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1499 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1500 if ( result != noErr ) {
\r
1501 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1502 errorText_ = errorStream_.str();
\r
1507 handle->drainCounter = 0;
\r
1508 handle->internalDrain = false;
\r
1509 stream_.state = STREAM_RUNNING;
\r
1512 if ( result == noErr ) return;
\r
1513 error( RtAudioError::SYSTEM_ERROR );
\r
1516 void RtApiCore :: stopStream( void )
\r
1519 if ( stream_.state == STREAM_STOPPED ) {
\r
1520 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1521 error( RtAudioError::WARNING );
\r
1525 OSStatus result = noErr;
\r
1526 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1527 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1529 if ( handle->drainCounter == 0 ) {
\r
1530 handle->drainCounter = 2;
\r
1531 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1534 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1535 if ( result != noErr ) {
\r
1536 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1537 errorText_ = errorStream_.str();
\r
1542 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1544 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1545 if ( result != noErr ) {
\r
1546 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1547 errorText_ = errorStream_.str();
\r
1552 stream_.state = STREAM_STOPPED;
\r
1555 if ( result == noErr ) return;
\r
1556 error( RtAudioError::SYSTEM_ERROR );
\r
1559 void RtApiCore :: abortStream( void )
\r
1562 if ( stream_.state == STREAM_STOPPED ) {
\r
1563 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1564 error( RtAudioError::WARNING );
\r
1568 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1569 handle->drainCounter = 2;
\r
1574 // This function will be called by a spawned thread when the user
\r
1575 // callback function signals that the stream should be stopped or
\r
1576 // aborted. It is better to handle it this way because the
\r
1577 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1578 // function is called.
\r
1579 static void *coreStopStream( void *ptr )
\r
1581 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1582 RtApiCore *object = (RtApiCore *) info->object;
\r
1584 object->stopStream();
\r
1585 pthread_exit( NULL );
\r
1588 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1589 const AudioBufferList *inBufferList,
\r
1590 const AudioBufferList *outBufferList )
\r
1592 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1593 if ( stream_.state == STREAM_CLOSED ) {
\r
1594 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1595 error( RtAudioError::WARNING );
\r
1599 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1600 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1602 // Check if we were draining the stream and signal is finished.
\r
1603 if ( handle->drainCounter > 3 ) {
\r
1604 ThreadHandle threadId;
\r
1606 stream_.state = STREAM_STOPPING;
\r
1607 if ( handle->internalDrain == true )
\r
1608 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1609 else // external call to stopStream()
\r
1610 pthread_cond_signal( &handle->condition );
\r
1614 AudioDeviceID outputDevice = handle->id[0];
\r
1616 // Invoke user callback to get fresh output data UNLESS we are
\r
1617 // draining stream or duplex mode AND the input/output devices are
\r
1618 // different AND this function is called for the input device.
\r
1619 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1620 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1621 double streamTime = getStreamTime();
\r
1622 RtAudioStreamStatus status = 0;
\r
1623 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1624 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1625 handle->xrun[0] = false;
\r
1627 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1628 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1629 handle->xrun[1] = false;
\r
1632 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1633 stream_.bufferSize, streamTime, status, info->userData );
\r
1634 if ( cbReturnValue == 2 ) {
\r
1635 stream_.state = STREAM_STOPPING;
\r
1636 handle->drainCounter = 2;
\r
1640 else if ( cbReturnValue == 1 ) {
\r
1641 handle->drainCounter = 1;
\r
1642 handle->internalDrain = true;
\r
1646 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1648 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1650 if ( handle->nStreams[0] == 1 ) {
\r
1651 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1653 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1655 else { // fill multiple streams with zeros
\r
1656 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1657 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1659 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1663 else if ( handle->nStreams[0] == 1 ) {
\r
1664 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1665 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1666 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1668 else { // copy from user buffer
\r
1669 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1670 stream_.userBuffer[0],
\r
1671 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1674 else { // fill multiple streams
\r
1675 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1676 if ( stream_.doConvertBuffer[0] ) {
\r
1677 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1678 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1681 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1682 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1683 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1684 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1685 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1688 else { // fill multiple multi-channel streams with interleaved data
\r
1689 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1690 Float32 *out, *in;
\r
1692 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1693 UInt32 inChannels = stream_.nUserChannels[0];
\r
1694 if ( stream_.doConvertBuffer[0] ) {
\r
1695 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1696 inChannels = stream_.nDeviceChannels[0];
\r
1699 if ( inInterleaved ) inOffset = 1;
\r
1700 else inOffset = stream_.bufferSize;
\r
1702 channelsLeft = inChannels;
\r
1703 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1705 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1706 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1709 // Account for possible channel offset in first stream
\r
1710 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1711 streamChannels -= stream_.channelOffset[0];
\r
1712 outJump = stream_.channelOffset[0];
\r
1716 // Account for possible unfilled channels at end of the last stream
\r
1717 if ( streamChannels > channelsLeft ) {
\r
1718 outJump = streamChannels - channelsLeft;
\r
1719 streamChannels = channelsLeft;
\r
1722 // Determine input buffer offsets and skips
\r
1723 if ( inInterleaved ) {
\r
1724 inJump = inChannels;
\r
1725 in += inChannels - channelsLeft;
\r
1729 in += (inChannels - channelsLeft) * inOffset;
\r
1732 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1733 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1734 *out++ = in[j*inOffset];
\r
1739 channelsLeft -= streamChannels;
\r
1745 // Don't bother draining input
\r
1746 if ( handle->drainCounter ) {
\r
1747 handle->drainCounter++;
\r
1751 AudioDeviceID inputDevice;
\r
1752 inputDevice = handle->id[1];
\r
1753 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1755 if ( handle->nStreams[1] == 1 ) {
\r
1756 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1757 convertBuffer( stream_.userBuffer[1],
\r
1758 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1759 stream_.convertInfo[1] );
\r
1761 else { // copy to user buffer
\r
1762 memcpy( stream_.userBuffer[1],
\r
1763 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1764 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1767 else { // read from multiple streams
\r
1768 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1769 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1771 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1772 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1773 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1774 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1775 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1778 else { // read from multiple multi-channel streams
\r
1779 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1780 Float32 *out, *in;
\r
1782 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1783 UInt32 outChannels = stream_.nUserChannels[1];
\r
1784 if ( stream_.doConvertBuffer[1] ) {
\r
1785 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1786 outChannels = stream_.nDeviceChannels[1];
\r
1789 if ( outInterleaved ) outOffset = 1;
\r
1790 else outOffset = stream_.bufferSize;
\r
1792 channelsLeft = outChannels;
\r
1793 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1795 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1796 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1799 // Account for possible channel offset in first stream
\r
1800 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1801 streamChannels -= stream_.channelOffset[1];
\r
1802 inJump = stream_.channelOffset[1];
\r
1806 // Account for possible unread channels at end of the last stream
\r
1807 if ( streamChannels > channelsLeft ) {
\r
1808 inJump = streamChannels - channelsLeft;
\r
1809 streamChannels = channelsLeft;
\r
1812 // Determine output buffer offsets and skips
\r
1813 if ( outInterleaved ) {
\r
1814 outJump = outChannels;
\r
1815 out += outChannels - channelsLeft;
\r
1819 out += (outChannels - channelsLeft) * outOffset;
\r
1822 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1823 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1824 out[j*outOffset] = *in++;
\r
1829 channelsLeft -= streamChannels;
\r
1833 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1834 convertBuffer( stream_.userBuffer[1],
\r
1835 stream_.deviceBuffer,
\r
1836 stream_.convertInfo[1] );
\r
1842 //MUTEX_UNLOCK( &stream_.mutex );
\r
1844 RtApi::tickStreamTime();
\r
1848 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1852 case kAudioHardwareNotRunningError:
\r
1853 return "kAudioHardwareNotRunningError";
\r
1855 case kAudioHardwareUnspecifiedError:
\r
1856 return "kAudioHardwareUnspecifiedError";
\r
1858 case kAudioHardwareUnknownPropertyError:
\r
1859 return "kAudioHardwareUnknownPropertyError";
\r
1861 case kAudioHardwareBadPropertySizeError:
\r
1862 return "kAudioHardwareBadPropertySizeError";
\r
1864 case kAudioHardwareIllegalOperationError:
\r
1865 return "kAudioHardwareIllegalOperationError";
\r
1867 case kAudioHardwareBadObjectError:
\r
1868 return "kAudioHardwareBadObjectError";
\r
1870 case kAudioHardwareBadDeviceError:
\r
1871 return "kAudioHardwareBadDeviceError";
\r
1873 case kAudioHardwareBadStreamError:
\r
1874 return "kAudioHardwareBadStreamError";
\r
1876 case kAudioHardwareUnsupportedOperationError:
\r
1877 return "kAudioHardwareUnsupportedOperationError";
\r
1879 case kAudioDeviceUnsupportedFormatError:
\r
1880 return "kAudioDeviceUnsupportedFormatError";
\r
1882 case kAudioDevicePermissionsError:
\r
1883 return "kAudioDevicePermissionsError";
\r
1886 return "CoreAudio unknown error";
\r
1890 //******************** End of __MACOSX_CORE__ *********************//
\r
1893 #if defined(__UNIX_JACK__)
\r
1895 // JACK is a low-latency audio server, originally written for the
\r
1896 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1897 // connect a number of different applications to an audio device, as
\r
1898 // well as allowing them to share audio between themselves.
\r
1900 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1901 // have ports connected to the server. The JACK server is typically
\r
1902 // started in a terminal as follows:
\r
1904 // .jackd -d alsa -d hw:0
\r
1906 // or through an interface program such as qjackctl. Many of the
\r
1907 // parameters normally set for a stream are fixed by the JACK server
\r
1908 // and can be specified when the JACK server is started. In
\r
1911 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1913 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1914 // frames, and number of buffers = 4. Once the server is running, it
\r
1915 // is not possible to override these values. If the values are not
\r
1916 // specified in the command-line, the JACK server uses default values.
\r
1918 // The JACK server does not have to be running when an instance of
\r
1919 // RtApiJack is created, though the function getDeviceCount() will
\r
1920 // report 0 devices found until JACK has been started. When no
\r
1921 // devices are available (i.e., the JACK server is not running), a
\r
1922 // stream cannot be opened.
\r
1924 #include <jack/jack.h>
\r
1925 #include <unistd.h>
\r
1928 // A structure to hold various information related to the Jack API
\r
1929 // implementation.
\r
1930 struct JackHandle {
\r
1931 jack_client_t *client;
\r
1932 jack_port_t **ports[2];
\r
1933 std::string deviceName[2];
\r
1935 pthread_cond_t condition;
\r
1936 int drainCounter; // Tracks callback counts when draining
\r
1937 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1940 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error handler installed via jack_set_error_function() to
// suppress JACK's internal error chatter in non-debug builds.
static void jackSilentError( const char * ) {}
\r
1945 RtApiJack :: RtApiJack()
\r
1947 // Nothing to do here.
\r
1948 #if !defined(__RTAUDIO_DEBUG__)
\r
1949 // Turn off Jack's internal error reporting.
\r
1950 jack_set_error_function( &jackSilentError );
\r
1954 RtApiJack :: ~RtApiJack()
\r
1956 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1959 unsigned int RtApiJack :: getDeviceCount( void )
\r
1961 // See if we can become a jack client.
\r
1962 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1963 jack_status_t *status = NULL;
\r
1964 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1965 if ( client == 0 ) return 0;
\r
1967 const char **ports;
\r
1968 std::string port, previousPort;
\r
1969 unsigned int nChannels = 0, nDevices = 0;
\r
1970 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1972 // Parse the port names up to the first colon (:).
\r
1973 size_t iColon = 0;
\r
1975 port = (char *) ports[ nChannels ];
\r
1976 iColon = port.find(":");
\r
1977 if ( iColon != std::string::npos ) {
\r
1978 port = port.substr( 0, iColon + 1 );
\r
1979 if ( port != previousPort ) {
\r
1981 previousPort = port;
\r
1984 } while ( ports[++nChannels] );
\r
1988 jack_client_close( client );
\r
1992 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1994 RtAudio::DeviceInfo info;
\r
1995 info.probed = false;
\r
1997 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1998 jack_status_t *status = NULL;
\r
1999 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
2000 if ( client == 0 ) {
\r
2001 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
2002 error( RtAudioError::WARNING );
\r
2006 const char **ports;
\r
2007 std::string port, previousPort;
\r
2008 unsigned int nPorts = 0, nDevices = 0;
\r
2009 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2011 // Parse the port names up to the first colon (:).
\r
2012 size_t iColon = 0;
\r
2014 port = (char *) ports[ nPorts ];
\r
2015 iColon = port.find(":");
\r
2016 if ( iColon != std::string::npos ) {
\r
2017 port = port.substr( 0, iColon );
\r
2018 if ( port != previousPort ) {
\r
2019 if ( nDevices == device ) info.name = port;
\r
2021 previousPort = port;
\r
2024 } while ( ports[++nPorts] );
\r
2028 if ( device >= nDevices ) {
\r
2029 jack_client_close( client );
\r
2030 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2031 error( RtAudioError::INVALID_USE );
\r
2035 // Get the current jack server sample rate.
\r
2036 info.sampleRates.clear();
\r
2038 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2039 info.sampleRates.push_back( info.preferredSampleRate );
\r
2041 // Count the available ports containing the client name as device
\r
2042 // channels. Jack "input ports" equal RtAudio output channels.
\r
2043 unsigned int nChannels = 0;
\r
2044 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2046 while ( ports[ nChannels ] ) nChannels++;
\r
2048 info.outputChannels = nChannels;
\r
2051 // Jack "output ports" equal RtAudio input channels.
\r
2053 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2055 while ( ports[ nChannels ] ) nChannels++;
\r
2057 info.inputChannels = nChannels;
\r
2060 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2061 jack_client_close(client);
\r
2062 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2063 error( RtAudioError::WARNING );
\r
2067 // If device opens for both playback and capture, we determine the channels.
\r
2068 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2069 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2071 // Jack always uses 32-bit floats.
\r
2072 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2074 // Jack doesn't provide default devices so we'll use the first available one.
\r
2075 if ( device == 0 && info.outputChannels > 0 )
\r
2076 info.isDefaultOutput = true;
\r
2077 if ( device == 0 && info.inputChannels > 0 )
\r
2078 info.isDefaultInput = true;
\r
2080 jack_client_close(client);
\r
2081 info.probed = true;
\r
2085 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2087 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2089 RtApiJack *object = (RtApiJack *) info->object;
\r
2090 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2095 // This function will be called by a spawned thread when the Jack
\r
2096 // server signals that it is shutting down. It is necessary to handle
\r
2097 // it this way because the jackShutdown() function must return before
\r
2098 // the jack_deactivate() function (in closeStream()) will return.
\r
2099 static void *jackCloseStream( void *ptr )
\r
2101 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2102 RtApiJack *object = (RtApiJack *) info->object;
\r
2104 object->closeStream();
\r
2106 pthread_exit( NULL );
\r
2108 static void jackShutdown( void *infoPointer )
\r
2110 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2111 RtApiJack *object = (RtApiJack *) info->object;
\r
2113 // Check current stream state. If stopped, then we'll assume this
\r
2114 // was called as a result of a call to RtApiJack::stopStream (the
\r
2115 // deactivation of a client handle causes this function to be called).
\r
2116 // If not, we'll assume the Jack server is shutting down or some
\r
2117 // other problem occurred and we should close the stream.
\r
2118 if ( object->isStreamRunning() == false ) return;
\r
2120 ThreadHandle threadId;
\r
2121 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2122 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2125 static int jackXrun( void *infoPointer )
\r
2127 JackHandle *handle = (JackHandle *) infoPointer;
\r
2129 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2130 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2135 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2136 unsigned int firstChannel, unsigned int sampleRate,
\r
2137 RtAudioFormat format, unsigned int *bufferSize,
\r
2138 RtAudio::StreamOptions *options )
\r
2140 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2142 // Look for jack server and try to become a client (only do once per stream).
\r
2143 jack_client_t *client = 0;
\r
2144 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2145 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2146 jack_status_t *status = NULL;
\r
2147 if ( options && !options->streamName.empty() )
\r
2148 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2150 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2151 if ( client == 0 ) {
\r
2152 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2153 error( RtAudioError::WARNING );
\r
2158 // The handle must have been created on an earlier pass.
\r
2159 client = handle->client;
\r
2162 const char **ports;
\r
2163 std::string port, previousPort, deviceName;
\r
2164 unsigned int nPorts = 0, nDevices = 0;
\r
2165 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2167 // Parse the port names up to the first colon (:).
\r
2168 size_t iColon = 0;
\r
2170 port = (char *) ports[ nPorts ];
\r
2171 iColon = port.find(":");
\r
2172 if ( iColon != std::string::npos ) {
\r
2173 port = port.substr( 0, iColon );
\r
2174 if ( port != previousPort ) {
\r
2175 if ( nDevices == device ) deviceName = port;
\r
2177 previousPort = port;
\r
2180 } while ( ports[++nPorts] );
\r
2184 if ( device >= nDevices ) {
\r
2185 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2189 // Count the available ports containing the client name as device
\r
2190 // channels. Jack "input ports" equal RtAudio output channels.
\r
2191 unsigned int nChannels = 0;
\r
2192 unsigned long flag = JackPortIsInput;
\r
2193 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2194 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2196 while ( ports[ nChannels ] ) nChannels++;
\r
2200 // Compare the jack ports for specified client to the requested number of channels.
\r
2201 if ( nChannels < (channels + firstChannel) ) {
\r
2202 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2203 errorText_ = errorStream_.str();
\r
2207 // Check the jack server sample rate.
\r
2208 unsigned int jackRate = jack_get_sample_rate( client );
\r
2209 if ( sampleRate != jackRate ) {
\r
2210 jack_client_close( client );
\r
2211 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2212 errorText_ = errorStream_.str();
\r
2215 stream_.sampleRate = jackRate;
\r
2217 // Get the latency of the JACK port.
\r
2218 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2219 if ( ports[ firstChannel ] ) {
\r
2220 // Added by Ge Wang
\r
2221 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2222 // the range (usually the min and max are equal)
\r
2223 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2224 // get the latency range
\r
2225 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2226 // be optimistic, use the min!
\r
2227 stream_.latency[mode] = latrange.min;
\r
2228 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2232 // The jack server always uses 32-bit floating-point data.
\r
2233 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2234 stream_.userFormat = format;
\r
2236 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2237 else stream_.userInterleaved = true;
\r
2239 // Jack always uses non-interleaved buffers.
\r
2240 stream_.deviceInterleaved[mode] = false;
\r
2242 // Jack always provides host byte-ordered data.
\r
2243 stream_.doByteSwap[mode] = false;
\r
2245 // Get the buffer size. The buffer size and number of buffers
\r
2246 // (periods) is set when the jack server is started.
\r
2247 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2248 *bufferSize = stream_.bufferSize;
\r
2250 stream_.nDeviceChannels[mode] = channels;
\r
2251 stream_.nUserChannels[mode] = channels;
\r
2253 // Set flags for buffer conversion.
\r
2254 stream_.doConvertBuffer[mode] = false;
\r
2255 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2256 stream_.doConvertBuffer[mode] = true;
\r
2257 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2258 stream_.nUserChannels[mode] > 1 )
\r
2259 stream_.doConvertBuffer[mode] = true;
\r
2261 // Allocate our JackHandle structure for the stream.
\r
2262 if ( handle == 0 ) {
\r
2264 handle = new JackHandle;
\r
2266 catch ( std::bad_alloc& ) {
\r
2267 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2271 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2272 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2275 stream_.apiHandle = (void *) handle;
\r
2276 handle->client = client;
\r
2278 handle->deviceName[mode] = deviceName;
\r
2280 // Allocate necessary internal buffers.
\r
2281 unsigned long bufferBytes;
\r
2282 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2283 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2284 if ( stream_.userBuffer[mode] == NULL ) {
\r
2285 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2289 if ( stream_.doConvertBuffer[mode] ) {
\r
2291 bool makeBuffer = true;
\r
2292 if ( mode == OUTPUT )
\r
2293 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2294 else { // mode == INPUT
\r
2295 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2296 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2297 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2298 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2302 if ( makeBuffer ) {
\r
2303 bufferBytes *= *bufferSize;
\r
2304 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2305 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2306 if ( stream_.deviceBuffer == NULL ) {
\r
2307 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2313 // Allocate memory for the Jack ports (channels) identifiers.
\r
2314 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2315 if ( handle->ports[mode] == NULL ) {
\r
2316 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2320 stream_.device[mode] = device;
\r
2321 stream_.channelOffset[mode] = firstChannel;
\r
2322 stream_.state = STREAM_STOPPED;
\r
2323 stream_.callbackInfo.object = (void *) this;
\r
2325 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2326 // We had already set up the stream for output.
\r
2327 stream_.mode = DUPLEX;
\r
2329 stream_.mode = mode;
\r
2330 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2331 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2332 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2335 // Register our ports.
\r
2337 if ( mode == OUTPUT ) {
\r
2338 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2339 snprintf( label, 64, "outport %d", i );
\r
2340 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2341 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2345 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2346 snprintf( label, 64, "inport %d", i );
\r
2347 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2348 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2352 // Setup the buffer conversion information structure. We don't use
\r
2353 // buffers to do channel offsets, so we override that parameter
\r
2355 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2361 pthread_cond_destroy( &handle->condition );
\r
2362 jack_client_close( handle->client );
\r
2364 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2365 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2368 stream_.apiHandle = 0;
\r
2371 for ( int i=0; i<2; i++ ) {
\r
2372 if ( stream_.userBuffer[i] ) {
\r
2373 free( stream_.userBuffer[i] );
\r
2374 stream_.userBuffer[i] = 0;
\r
2378 if ( stream_.deviceBuffer ) {
\r
2379 free( stream_.deviceBuffer );
\r
2380 stream_.deviceBuffer = 0;
\r
2386 void RtApiJack :: closeStream( void )
\r
2388 if ( stream_.state == STREAM_CLOSED ) {
\r
2389 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2390 error( RtAudioError::WARNING );
\r
2394 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2397 if ( stream_.state == STREAM_RUNNING )
\r
2398 jack_deactivate( handle->client );
\r
2400 jack_client_close( handle->client );
\r
2404 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2405 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2406 pthread_cond_destroy( &handle->condition );
\r
2408 stream_.apiHandle = 0;
\r
2411 for ( int i=0; i<2; i++ ) {
\r
2412 if ( stream_.userBuffer[i] ) {
\r
2413 free( stream_.userBuffer[i] );
\r
2414 stream_.userBuffer[i] = 0;
\r
2418 if ( stream_.deviceBuffer ) {
\r
2419 free( stream_.deviceBuffer );
\r
2420 stream_.deviceBuffer = 0;
\r
2423 stream_.mode = UNINITIALIZED;
\r
2424 stream_.state = STREAM_CLOSED;
\r
2427 void RtApiJack :: startStream( void )
\r
2430 if ( stream_.state == STREAM_RUNNING ) {
\r
2431 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2432 error( RtAudioError::WARNING );
\r
2436 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2437 int result = jack_activate( handle->client );
\r
2439 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2443 const char **ports;
\r
2445 // Get the list of available ports.
\r
2446 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2448 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2449 if ( ports == NULL) {
\r
2450 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2454 // Now make the port connections. Since RtAudio wasn't designed to
\r
2455 // allow the user to select particular channels of a device, we'll
\r
2456 // just open the first "nChannels" ports with offset.
\r
2457 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2459 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2460 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2463 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2470 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2472 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2473 if ( ports == NULL) {
\r
2474 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2478 // Now make the port connections. See note above.
\r
2479 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2481 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2482 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2485 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2492 handle->drainCounter = 0;
\r
2493 handle->internalDrain = false;
\r
2494 stream_.state = STREAM_RUNNING;
\r
2497 if ( result == 0 ) return;
\r
2498 error( RtAudioError::SYSTEM_ERROR );
\r
2501 void RtApiJack :: stopStream( void )
\r
2504 if ( stream_.state == STREAM_STOPPED ) {
\r
2505 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2506 error( RtAudioError::WARNING );
\r
2510 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2511 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2513 if ( handle->drainCounter == 0 ) {
\r
2514 handle->drainCounter = 2;
\r
2515 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2519 jack_deactivate( handle->client );
\r
2520 stream_.state = STREAM_STOPPED;
\r
2523 void RtApiJack :: abortStream( void )
\r
2526 if ( stream_.state == STREAM_STOPPED ) {
\r
2527 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2528 error( RtAudioError::WARNING );
\r
2532 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2533 handle->drainCounter = 2;
\r
2538 // This function will be called by a spawned thread when the user
\r
2539 // callback function signals that the stream should be stopped or
\r
2540 // aborted. It is necessary to handle it this way because the
\r
2541 // callbackEvent() function must return before the jack_deactivate()
\r
2542 // function will return.
\r
2543 static void *jackStopStream( void *ptr )
\r
2545 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2546 RtApiJack *object = (RtApiJack *) info->object;
\r
2548 object->stopStream();
\r
2549 pthread_exit( NULL );
\r
2552 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2554 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2555 if ( stream_.state == STREAM_CLOSED ) {
\r
2556 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2557 error( RtAudioError::WARNING );
\r
2560 if ( stream_.bufferSize != nframes ) {
\r
2561 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2562 error( RtAudioError::WARNING );
\r
2566 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2567 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2569 // Check if we were draining the stream and signal is finished.
\r
2570 if ( handle->drainCounter > 3 ) {
\r
2571 ThreadHandle threadId;
\r
2573 stream_.state = STREAM_STOPPING;
\r
2574 if ( handle->internalDrain == true )
\r
2575 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2577 pthread_cond_signal( &handle->condition );
\r
2581 // Invoke user callback first, to get fresh output data.
\r
2582 if ( handle->drainCounter == 0 ) {
\r
2583 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2584 double streamTime = getStreamTime();
\r
2585 RtAudioStreamStatus status = 0;
\r
2586 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2587 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2588 handle->xrun[0] = false;
\r
2590 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2591 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2592 handle->xrun[1] = false;
\r
2594 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2595 stream_.bufferSize, streamTime, status, info->userData );
\r
2596 if ( cbReturnValue == 2 ) {
\r
2597 stream_.state = STREAM_STOPPING;
\r
2598 handle->drainCounter = 2;
\r
2600 pthread_create( &id, NULL, jackStopStream, info );
\r
2603 else if ( cbReturnValue == 1 ) {
\r
2604 handle->drainCounter = 1;
\r
2605 handle->internalDrain = true;
\r
2609 jack_default_audio_sample_t *jackbuffer;
\r
2610 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2611 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2613 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2615 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2616 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2617 memset( jackbuffer, 0, bufferBytes );
\r
2621 else if ( stream_.doConvertBuffer[0] ) {
\r
2623 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2627 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2630 else { // no buffer conversion
\r
2631 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2632 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2633 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2638 // Don't bother draining input
\r
2639 if ( handle->drainCounter ) {
\r
2640 handle->drainCounter++;
\r
2644 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2646 if ( stream_.doConvertBuffer[1] ) {
\r
2647 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2648 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2649 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2651 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2653 else { // no buffer conversion
\r
2654 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2655 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2656 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2662 RtApi::tickStreamTime();
\r
2665 //******************** End of __UNIX_JACK__ *********************//
\r
2668 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2670 // The ASIO API is designed around a callback scheme, so this
\r
2671 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2672 // Jack. The primary constraint with ASIO is that it only allows
\r
2673 // access to a single driver at a time. Thus, it is not possible to
\r
2674 // have more than one simultaneous RtAudio stream.
\r
2676 // This implementation also requires a number of external ASIO files
\r
2677 // and a few global variables. The ASIO callback scheme does not
\r
2678 // allow for the passing of user data, so we must create a global
\r
2679 // pointer to our callbackInfo structure.
\r
2681 // On unix systems, we make use of a pthread condition variable.
\r
2682 // Since there is no equivalent in Windows, I hacked something based
\r
2683 // on information found in
\r
2684 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
#include "asiosys.h"
#include "asio.h"
#include "iasiothiscallresolver.h"
#include "asiodrivers.h"
#include <cmath>
\r
// The ASIO API allows only one driver to be loaded at a time, and its
// callbacks carry no user-data pointer, so this state is file-global
// and shared by all RtApiAsio instances.
static AsioDrivers drivers;             // driver enumeration / loading helper
static ASIOCallbacks asioCallbacks;     // callback table handed to ASIOCreateBuffers()
static ASIODriverInfo driverInfo;       // filled in by ASIOInit()
static CallbackInfo *asioCallbackInfo;  // lets static callbacks reach the RtApiAsio object
static bool asioXRun;                   // presumably set by asioMessages() on xrun notifications — the setter is outside this chunk
\r
2698 struct AsioHandle {
\r
2699 int drainCounter; // Tracks callback counts when draining
\r
2700 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2701 ASIOBufferInfo *bufferInfos;
\r
2705 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
// Function declarations (definitions at end of section).
static const char* getAsioErrorString( ASIOError result );
static void sampleRateChanged( ASIOSampleRate sRate );
static long asioMessages( long selector, long value, void* message, double* opt );
\r
2713 RtApiAsio :: RtApiAsio()
\r
2715 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2716 // CoInitialize beforehand, but it must be for appartment threading
\r
2717 // (in which case, CoInitilialize will return S_FALSE here).
\r
2718 coInitialized_ = false;
\r
2719 HRESULT hr = CoInitialize( NULL );
\r
2720 if ( FAILED(hr) ) {
\r
2721 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2722 error( RtAudioError::WARNING );
\r
2724 coInitialized_ = true;
\r
2726 drivers.removeCurrentDriver();
\r
2727 driverInfo.asioVersion = 2;
\r
2729 // See note in DirectSound implementation about GetDesktopWindow().
\r
2730 driverInfo.sysRef = GetForegroundWindow();
\r
2733 RtApiAsio :: ~RtApiAsio()
\r
2735 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2736 if ( coInitialized_ ) CoUninitialize();
\r
2739 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2741 return (unsigned int) drivers.asioGetNumDev();
\r
2744 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2746 RtAudio::DeviceInfo info;
\r
2747 info.probed = false;
\r
2750 unsigned int nDevices = getDeviceCount();
\r
2751 if ( nDevices == 0 ) {
\r
2752 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2753 error( RtAudioError::INVALID_USE );
\r
2757 if ( device >= nDevices ) {
\r
2758 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2759 error( RtAudioError::INVALID_USE );
\r
2763 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2764 if ( stream_.state != STREAM_CLOSED ) {
\r
2765 if ( device >= devices_.size() ) {
\r
2766 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2767 error( RtAudioError::WARNING );
\r
2770 return devices_[ device ];
\r
2773 char driverName[32];
\r
2774 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2775 if ( result != ASE_OK ) {
\r
2776 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2777 errorText_ = errorStream_.str();
\r
2778 error( RtAudioError::WARNING );
\r
2782 info.name = driverName;
\r
2784 if ( !drivers.loadDriver( driverName ) ) {
\r
2785 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2786 errorText_ = errorStream_.str();
\r
2787 error( RtAudioError::WARNING );
\r
2791 result = ASIOInit( &driverInfo );
\r
2792 if ( result != ASE_OK ) {
\r
2793 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2794 errorText_ = errorStream_.str();
\r
2795 error( RtAudioError::WARNING );
\r
2799 // Determine the device channel information.
\r
2800 long inputChannels, outputChannels;
\r
2801 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2802 if ( result != ASE_OK ) {
\r
2803 drivers.removeCurrentDriver();
\r
2804 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2805 errorText_ = errorStream_.str();
\r
2806 error( RtAudioError::WARNING );
\r
2810 info.outputChannels = outputChannels;
\r
2811 info.inputChannels = inputChannels;
\r
2812 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2813 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2815 // Determine the supported sample rates.
\r
2816 info.sampleRates.clear();
\r
2817 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2818 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2819 if ( result == ASE_OK ) {
\r
2820 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2822 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2823 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2827 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2828 ASIOChannelInfo channelInfo;
\r
2829 channelInfo.channel = 0;
\r
2830 channelInfo.isInput = true;
\r
2831 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2832 result = ASIOGetChannelInfo( &channelInfo );
\r
2833 if ( result != ASE_OK ) {
\r
2834 drivers.removeCurrentDriver();
\r
2835 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2836 errorText_ = errorStream_.str();
\r
2837 error( RtAudioError::WARNING );
\r
2841 info.nativeFormats = 0;
\r
2842 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2843 info.nativeFormats |= RTAUDIO_SINT16;
\r
2844 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2845 info.nativeFormats |= RTAUDIO_SINT32;
\r
2846 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2847 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2848 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2849 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2850 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2851 info.nativeFormats |= RTAUDIO_SINT24;
\r
2853 if ( info.outputChannels > 0 )
\r
2854 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2855 if ( info.inputChannels > 0 )
\r
2856 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2858 info.probed = true;
\r
2859 drivers.removeCurrentDriver();
\r
2863 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2865 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2866 object->callbackEvent( index );
\r
2869 void RtApiAsio :: saveDeviceInfo( void )
\r
2873 unsigned int nDevices = getDeviceCount();
\r
2874 devices_.resize( nDevices );
\r
2875 for ( unsigned int i=0; i<nDevices; i++ )
\r
2876 devices_[i] = getDeviceInfo( i );
\r
2879 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2880 unsigned int firstChannel, unsigned int sampleRate,
\r
2881 RtAudioFormat format, unsigned int *bufferSize,
\r
2882 RtAudio::StreamOptions *options )
\r
2883 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2885 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2887 // For ASIO, a duplex stream MUST use the same driver.
\r
2888 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2889 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2893 char driverName[32];
\r
2894 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2895 if ( result != ASE_OK ) {
\r
2896 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2897 errorText_ = errorStream_.str();
\r
2901 // Only load the driver once for duplex stream.
\r
2902 if ( !isDuplexInput ) {
\r
2903 // The getDeviceInfo() function will not work when a stream is open
\r
2904 // because ASIO does not allow multiple devices to run at the same
\r
2905 // time. Thus, we'll probe the system before opening a stream and
\r
2906 // save the results for use by getDeviceInfo().
\r
2907 this->saveDeviceInfo();
\r
2909 if ( !drivers.loadDriver( driverName ) ) {
\r
2910 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2911 errorText_ = errorStream_.str();
\r
2915 result = ASIOInit( &driverInfo );
\r
2916 if ( result != ASE_OK ) {
\r
2917 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2918 errorText_ = errorStream_.str();
\r
2923 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2924 bool buffersAllocated = false;
\r
2925 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2926 unsigned int nChannels;
\r
2929 // Check the device channel count.
\r
2930 long inputChannels, outputChannels;
\r
2931 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2932 if ( result != ASE_OK ) {
\r
2933 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2934 errorText_ = errorStream_.str();
\r
2938 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2939 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2940 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2941 errorText_ = errorStream_.str();
\r
2944 stream_.nDeviceChannels[mode] = channels;
\r
2945 stream_.nUserChannels[mode] = channels;
\r
2946 stream_.channelOffset[mode] = firstChannel;
\r
2948 // Verify the sample rate is supported.
\r
2949 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2950 if ( result != ASE_OK ) {
\r
2951 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2952 errorText_ = errorStream_.str();
\r
2956 // Get the current sample rate
\r
2957 ASIOSampleRate currentRate;
\r
2958 result = ASIOGetSampleRate( ¤tRate );
\r
2959 if ( result != ASE_OK ) {
\r
2960 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2961 errorText_ = errorStream_.str();
\r
2965 // Set the sample rate only if necessary
\r
2966 if ( currentRate != sampleRate ) {
\r
2967 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2968 if ( result != ASE_OK ) {
\r
2969 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2970 errorText_ = errorStream_.str();
\r
2975 // Determine the driver data type.
\r
2976 ASIOChannelInfo channelInfo;
\r
2977 channelInfo.channel = 0;
\r
2978 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2979 else channelInfo.isInput = true;
\r
2980 result = ASIOGetChannelInfo( &channelInfo );
\r
2981 if ( result != ASE_OK ) {
\r
2982 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2983 errorText_ = errorStream_.str();
\r
2987 // Assuming WINDOWS host is always little-endian.
\r
2988 stream_.doByteSwap[mode] = false;
\r
2989 stream_.userFormat = format;
\r
2990 stream_.deviceFormat[mode] = 0;
\r
2991 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2992 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2993 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2995 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2996 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2997 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2999 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
3000 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
3001 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
3003 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
3004 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
3005 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
3007 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
3008 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
3009 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
3012 if ( stream_.deviceFormat[mode] == 0 ) {
\r
3013 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
3014 errorText_ = errorStream_.str();
\r
3018 // Set the buffer size. For a duplex stream, this will end up
\r
3019 // setting the buffer size based on the input constraints, which
\r
3021 long minSize, maxSize, preferSize, granularity;
\r
3022 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
3023 if ( result != ASE_OK ) {
\r
3024 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3025 errorText_ = errorStream_.str();
\r
3029 if ( isDuplexInput ) {
\r
3030 // When this is the duplex input (output was opened before), then we have to use the same
\r
3031 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3032 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3033 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3034 // to the "bufferSize" param as usual to set up processing buffers.
\r
3036 *bufferSize = stream_.bufferSize;
\r
3039 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3040 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3041 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3042 else if ( granularity == -1 ) {
\r
3043 // Make sure bufferSize is a power of two.
\r
3044 int log2_of_min_size = 0;
\r
3045 int log2_of_max_size = 0;
\r
3047 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3048 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3049 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3052 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3053 int min_delta_num = log2_of_min_size;
\r
3055 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3056 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3057 if (current_delta < min_delta) {
\r
3058 min_delta = current_delta;
\r
3059 min_delta_num = i;
\r
3063 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3064 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3065 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3067 else if ( granularity != 0 ) {
\r
3068 // Set to an even multiple of granularity, rounding up.
\r
3069 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3074 // we don't use it anymore, see above!
\r
3075 // Just left it here for the case...
\r
3076 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3077 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3082 stream_.bufferSize = *bufferSize;
\r
3083 stream_.nBuffers = 2;
\r
3085 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3086 else stream_.userInterleaved = true;
\r
3088 // ASIO always uses non-interleaved buffers.
\r
3089 stream_.deviceInterleaved[mode] = false;
\r
3091 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3092 if ( handle == 0 ) {
\r
3094 handle = new AsioHandle;
\r
3096 catch ( std::bad_alloc& ) {
\r
3097 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3100 handle->bufferInfos = 0;
\r
3102 // Create a manual-reset event.
\r
3103 handle->condition = CreateEvent( NULL, // no security
\r
3104 TRUE, // manual-reset
\r
3105 FALSE, // non-signaled initially
\r
3106 NULL ); // unnamed
\r
3107 stream_.apiHandle = (void *) handle;
\r
3110 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3111 // and output separately, we'll have to dispose of previously
\r
3112 // created output buffers for a duplex stream.
\r
3113 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3114 ASIODisposeBuffers();
\r
3115 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3118 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3120 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3121 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3122 if ( handle->bufferInfos == NULL ) {
\r
3123 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3124 errorText_ = errorStream_.str();
\r
3128 ASIOBufferInfo *infos;
\r
3129 infos = handle->bufferInfos;
\r
3130 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3131 infos->isInput = ASIOFalse;
\r
3132 infos->channelNum = i + stream_.channelOffset[0];
\r
3133 infos->buffers[0] = infos->buffers[1] = 0;
\r
3135 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3136 infos->isInput = ASIOTrue;
\r
3137 infos->channelNum = i + stream_.channelOffset[1];
\r
3138 infos->buffers[0] = infos->buffers[1] = 0;
\r
3141 // prepare for callbacks
\r
3142 stream_.sampleRate = sampleRate;
\r
3143 stream_.device[mode] = device;
\r
3144 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3146 // store this class instance before registering callbacks, that are going to use it
\r
3147 asioCallbackInfo = &stream_.callbackInfo;
\r
3148 stream_.callbackInfo.object = (void *) this;
\r
3150 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3151 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3152 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3153 asioCallbacks.asioMessage = &asioMessages;
\r
3154 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3155 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3156 if ( result != ASE_OK ) {
\r
3157 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3158 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3159 // in that case, let's be naïve and try that instead
\r
3160 *bufferSize = preferSize;
\r
3161 stream_.bufferSize = *bufferSize;
\r
3162 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3165 if ( result != ASE_OK ) {
\r
3166 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3167 errorText_ = errorStream_.str();
\r
3170 buffersAllocated = true;
\r
3171 stream_.state = STREAM_STOPPED;
\r
3173 // Set flags for buffer conversion.
\r
3174 stream_.doConvertBuffer[mode] = false;
\r
3175 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3176 stream_.doConvertBuffer[mode] = true;
\r
3177 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3178 stream_.nUserChannels[mode] > 1 )
\r
3179 stream_.doConvertBuffer[mode] = true;
\r
3181 // Allocate necessary internal buffers
\r
3182 unsigned long bufferBytes;
\r
3183 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3184 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3185 if ( stream_.userBuffer[mode] == NULL ) {
\r
3186 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3190 if ( stream_.doConvertBuffer[mode] ) {
\r
3192 bool makeBuffer = true;
\r
3193 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3194 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3195 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3196 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3199 if ( makeBuffer ) {
\r
3200 bufferBytes *= *bufferSize;
\r
3201 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3202 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3203 if ( stream_.deviceBuffer == NULL ) {
\r
3204 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3210 // Determine device latencies
\r
3211 long inputLatency, outputLatency;
\r
3212 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3213 if ( result != ASE_OK ) {
\r
3214 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3215 errorText_ = errorStream_.str();
\r
3216 error( RtAudioError::WARNING); // warn but don't fail
\r
3219 stream_.latency[0] = outputLatency;
\r
3220 stream_.latency[1] = inputLatency;
\r
3223 // Setup the buffer conversion information structure. We don't use
\r
3224 // buffers to do channel offsets, so we override that parameter
\r
3226 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3231 if ( !isDuplexInput ) {
\r
3232 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3233 // So we clean up for single channel only
\r
3235 if ( buffersAllocated )
\r
3236 ASIODisposeBuffers();
\r
3238 drivers.removeCurrentDriver();
\r
3241 CloseHandle( handle->condition );
\r
3242 if ( handle->bufferInfos )
\r
3243 free( handle->bufferInfos );
\r
3246 stream_.apiHandle = 0;
\r
3250 if ( stream_.userBuffer[mode] ) {
\r
3251 free( stream_.userBuffer[mode] );
\r
3252 stream_.userBuffer[mode] = 0;
\r
3255 if ( stream_.deviceBuffer ) {
\r
3256 free( stream_.deviceBuffer );
\r
3257 stream_.deviceBuffer = 0;
\r
3262 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3264 void RtApiAsio :: closeStream()
\r
3266 if ( stream_.state == STREAM_CLOSED ) {
\r
3267 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3268 error( RtAudioError::WARNING );
\r
3272 if ( stream_.state == STREAM_RUNNING ) {
\r
3273 stream_.state = STREAM_STOPPED;
\r
3276 ASIODisposeBuffers();
\r
3277 drivers.removeCurrentDriver();
\r
3279 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3281 CloseHandle( handle->condition );
\r
3282 if ( handle->bufferInfos )
\r
3283 free( handle->bufferInfos );
\r
3285 stream_.apiHandle = 0;
\r
3288 for ( int i=0; i<2; i++ ) {
\r
3289 if ( stream_.userBuffer[i] ) {
\r
3290 free( stream_.userBuffer[i] );
\r
3291 stream_.userBuffer[i] = 0;
\r
3295 if ( stream_.deviceBuffer ) {
\r
3296 free( stream_.deviceBuffer );
\r
3297 stream_.deviceBuffer = 0;
\r
3300 stream_.mode = UNINITIALIZED;
\r
3301 stream_.state = STREAM_CLOSED;
\r
// Cleared by startStream(); presumably set when an asynchronous stop has
// already been requested, to avoid a duplicate stop — the setter/reader
// is outside this chunk, so confirm against the rest of the file.
bool stopThreadCalled = false;
\r
3306 void RtApiAsio :: startStream()
\r
3309 if ( stream_.state == STREAM_RUNNING ) {
\r
3310 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3311 error( RtAudioError::WARNING );
\r
3315 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3316 ASIOError result = ASIOStart();
\r
3317 if ( result != ASE_OK ) {
\r
3318 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3319 errorText_ = errorStream_.str();
\r
3323 handle->drainCounter = 0;
\r
3324 handle->internalDrain = false;
\r
3325 ResetEvent( handle->condition );
\r
3326 stream_.state = STREAM_RUNNING;
\r
3330 stopThreadCalled = false;
\r
3332 if ( result == ASE_OK ) return;
\r
3333 error( RtAudioError::SYSTEM_ERROR );
\r
3336 void RtApiAsio :: stopStream()
\r
3339 if ( stream_.state == STREAM_STOPPED ) {
\r
3340 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3341 error( RtAudioError::WARNING );
\r
3345 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3346 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3347 if ( handle->drainCounter == 0 ) {
\r
3348 handle->drainCounter = 2;
\r
3349 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3353 stream_.state = STREAM_STOPPED;
\r
3355 ASIOError result = ASIOStop();
\r
3356 if ( result != ASE_OK ) {
\r
3357 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3358 errorText_ = errorStream_.str();
\r
3361 if ( result == ASE_OK ) return;
\r
3362 error( RtAudioError::SYSTEM_ERROR );
\r
3365 void RtApiAsio :: abortStream()
\r
3368 if ( stream_.state == STREAM_STOPPED ) {
\r
3369 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3370 error( RtAudioError::WARNING );
\r
3374 // The following lines were commented-out because some behavior was
\r
3375 // noted where the device buffers need to be zeroed to avoid
\r
3376 // continuing sound, even when the device buffers are completely
\r
3377 // disposed. So now, calling abort is the same as calling stop.
\r
3378 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3379 // handle->drainCounter = 2;
\r
3383 // This function will be called by a spawned thread when the user
\r
3384 // callback function signals that the stream should be stopped or
\r
3385 // aborted. It is necessary to handle it this way because the
\r
3386 // callbackEvent() function must return before the ASIOStop()
\r
3387 // function will return.
\r
3388 static unsigned __stdcall asioStopStream( void *ptr )
\r
3390 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3391 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3393 object->stopStream();
\r
3394 _endthreadex( 0 );
\r
3398 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3400 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3401 if ( stream_.state == STREAM_CLOSED ) {
\r
3402 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3403 error( RtAudioError::WARNING );
\r
3407 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3408 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3410 // Check if we were draining the stream and signal if finished.
\r
3411 if ( handle->drainCounter > 3 ) {
\r
3413 stream_.state = STREAM_STOPPING;
\r
3414 if ( handle->internalDrain == false )
\r
3415 SetEvent( handle->condition );
\r
3416 else { // spawn a thread to stop the stream
\r
3417 unsigned threadId;
\r
3418 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3419 &stream_.callbackInfo, 0, &threadId );
\r
3424 // Invoke user callback to get fresh output data UNLESS we are
\r
3425 // draining stream.
\r
3426 if ( handle->drainCounter == 0 ) {
\r
3427 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3428 double streamTime = getStreamTime();
\r
3429 RtAudioStreamStatus status = 0;
\r
3430 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3431 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3434 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3435 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3438 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3439 stream_.bufferSize, streamTime, status, info->userData );
\r
3440 if ( cbReturnValue == 2 ) {
\r
3441 stream_.state = STREAM_STOPPING;
\r
3442 handle->drainCounter = 2;
\r
3443 unsigned threadId;
\r
3444 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3445 &stream_.callbackInfo, 0, &threadId );
\r
3448 else if ( cbReturnValue == 1 ) {
\r
3449 handle->drainCounter = 1;
\r
3450 handle->internalDrain = true;
\r
3454 unsigned int nChannels, bufferBytes, i, j;
\r
3455 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3456 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3458 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3460 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3462 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3463 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3464 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3468 else if ( stream_.doConvertBuffer[0] ) {
\r
3470 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3471 if ( stream_.doByteSwap[0] )
\r
3472 byteSwapBuffer( stream_.deviceBuffer,
\r
3473 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3474 stream_.deviceFormat[0] );
\r
3476 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3477 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3478 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3479 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3485 if ( stream_.doByteSwap[0] )
\r
3486 byteSwapBuffer( stream_.userBuffer[0],
\r
3487 stream_.bufferSize * stream_.nUserChannels[0],
\r
3488 stream_.userFormat );
\r
3490 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3491 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3492 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3493 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3499 // Don't bother draining input
\r
3500 if ( handle->drainCounter ) {
\r
3501 handle->drainCounter++;
\r
3505 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3507 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3509 if (stream_.doConvertBuffer[1]) {
\r
3511 // Always interleave ASIO input data.
\r
3512 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3513 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3514 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3515 handle->bufferInfos[i].buffers[bufferIndex],
\r
3519 if ( stream_.doByteSwap[1] )
\r
3520 byteSwapBuffer( stream_.deviceBuffer,
\r
3521 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3522 stream_.deviceFormat[1] );
\r
3523 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3527 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3528 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3529 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3530 handle->bufferInfos[i].buffers[bufferIndex],
\r
3535 if ( stream_.doByteSwap[1] )
\r
3536 byteSwapBuffer( stream_.userBuffer[1],
\r
3537 stream_.bufferSize * stream_.nUserChannels[1],
\r
3538 stream_.userFormat );
\r
3543 // The following call was suggested by Malte Clasen. While the API
\r
3544 // documentation indicates it should not be required, some device
\r
3545 // drivers apparently do not function correctly without it.
\r
3546 ASIOOutputReady();
\r
3548 RtApi::tickStreamTime();
\r
3552 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3554 // The ASIO documentation says that this usually only happens during
\r
3555 // external sync. Audio processing is not stopped by the driver,
\r
3556 // actual sample rate might not have even changed, maybe only the
\r
3557 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3560 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3562 object->stopStream();
\r
3564 catch ( RtAudioError &exception ) {
\r
3565 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3569 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3572 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3576 switch( selector ) {
\r
3577 case kAsioSelectorSupported:
\r
3578 if ( value == kAsioResetRequest
\r
3579 || value == kAsioEngineVersion
\r
3580 || value == kAsioResyncRequest
\r
3581 || value == kAsioLatenciesChanged
\r
3582 // The following three were added for ASIO 2.0, you don't
\r
3583 // necessarily have to support them.
\r
3584 || value == kAsioSupportsTimeInfo
\r
3585 || value == kAsioSupportsTimeCode
\r
3586 || value == kAsioSupportsInputMonitor)
\r
3589 case kAsioResetRequest:
\r
3590 // Defer the task and perform the reset of the driver during the
\r
3591 // next "safe" situation. You cannot reset the driver right now,
\r
3592 // as this code is called from the driver. Reset the driver is
\r
3593 // done by completely destruct is. I.e. ASIOStop(),
\r
3594 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3596 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3599 case kAsioResyncRequest:
\r
3600 // This informs the application that the driver encountered some
\r
3601 // non-fatal data loss. It is used for synchronization purposes
\r
3602 // of different media. Added mainly to work around the Win16Mutex
\r
3603 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3604 // which could lose data because the Mutex was held too long by
\r
3605 // another thread. However a driver can issue it in other
\r
3606 // situations, too.
\r
3607 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3611 case kAsioLatenciesChanged:
\r
3612 // This will inform the host application that the drivers were
\r
3613 // latencies changed. Beware, it this does not mean that the
\r
3614 // buffer sizes have changed! You might need to update internal
\r
3616 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3619 case kAsioEngineVersion:
\r
3620 // Return the supported ASIO version of the host application. If
\r
3621 // a host application does not implement this selector, ASIO 1.0
\r
3622 // is assumed by the driver.
\r
3625 case kAsioSupportsTimeInfo:
\r
3626 // Informs the driver whether the
\r
3627 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3628 // For compatibility with ASIO 1.0 drivers the host application
\r
3629 // should always support the "old" bufferSwitch method, too.
\r
3632 case kAsioSupportsTimeCode:
\r
3633 // Informs the driver whether application is interested in time
\r
3634 // code info. If an application does not need to know about time
\r
3635 // code, the driver has less work to do.
\r
3642 static const char* getAsioErrorString( ASIOError result )
\r
3647 const char*message;
\r
3650 static const Messages m[] =
\r
3652 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3653 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3654 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3655 { ASE_InvalidMode, "Invalid mode." },
\r
3656 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3657 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3658 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3661 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3662 if ( m[i].value == result ) return m[i].message;
\r
3664 return "Unknown error.";
\r
3667 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3671 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3673 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3674 // - Introduces support for the Windows WASAPI API
\r
3675 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3676 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3677 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3682 #include <audioclient.h>
\r
3684 #include <mmdeviceapi.h>
\r
3685 #include <functiondiscoverykeys_devpkey.h>
\r
3687 //=============================================================================
\r
3689 #define SAFE_RELEASE( objectPtr )\
\r
3692 objectPtr->Release();\
\r
3693 objectPtr = NULL;\
\r
3696 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3698 //-----------------------------------------------------------------------------
\r
3700 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3701 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3702 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3703 // provide intermediate storage for read / write synchronization.
\r
3704 class WasapiBuffer
\r
3708 : buffer_( NULL ),
\r
3717 // sets the length of the internal ring buffer
\r
3718 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3721 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3723 bufferSize_ = bufferSize;
\r
3728 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3729 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3731 if ( !buffer || // incoming buffer is NULL
\r
3732 bufferSize == 0 || // incoming buffer has no data
\r
3733 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3738 unsigned int relOutIndex = outIndex_;
\r
3739 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3740 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3741 relOutIndex += bufferSize_;
\r
3744 // "in" index can end on the "out" index but cannot begin at it
\r
3745 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3746 return false; // not enough space between "in" index and "out" index
\r
3749 // copy buffer from external to internal
\r
3750 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3751 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3752 int fromInSize = bufferSize - fromZeroSize;
\r
3756 case RTAUDIO_SINT8:
\r
3757 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3758 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3760 case RTAUDIO_SINT16:
\r
3761 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3762 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3764 case RTAUDIO_SINT24:
\r
3765 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3766 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3768 case RTAUDIO_SINT32:
\r
3769 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3770 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3772 case RTAUDIO_FLOAT32:
\r
3773 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3774 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3776 case RTAUDIO_FLOAT64:
\r
3777 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3778 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3782 // update "in" index
\r
3783 inIndex_ += bufferSize;
\r
3784 inIndex_ %= bufferSize_;
\r
3789 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3790 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3792 if ( !buffer || // incoming buffer is NULL
\r
3793 bufferSize == 0 || // incoming buffer has no data
\r
3794 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3799 unsigned int relInIndex = inIndex_;
\r
3800 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3801 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3802 relInIndex += bufferSize_;
\r
3805 // "out" index can begin at and end on the "in" index
\r
3806 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3807 return false; // not enough space between "out" index and "in" index
\r
3810 // copy buffer from internal to external
\r
3811 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3812 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3813 int fromOutSize = bufferSize - fromZeroSize;
\r
3817 case RTAUDIO_SINT8:
\r
3818 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3819 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3821 case RTAUDIO_SINT16:
\r
3822 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3823 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3825 case RTAUDIO_SINT24:
\r
3826 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3827 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3829 case RTAUDIO_SINT32:
\r
3830 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3831 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3833 case RTAUDIO_FLOAT32:
\r
3834 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3835 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3837 case RTAUDIO_FLOAT64:
\r
3838 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3839 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3843 // update "out" index
\r
3844 outIndex_ += bufferSize;
\r
3845 outIndex_ %= bufferSize_;
\r
3852 unsigned int bufferSize_;
\r
3853 unsigned int inIndex_;
\r
3854 unsigned int outIndex_;
\r
3857 //-----------------------------------------------------------------------------
\r
3859 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3860 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3861 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3862 // This sample rate converter works best with conversions between one rate and its multiple.
\r
3863 void convertBufferWasapi( char* outBuffer,
\r
3864 const char* inBuffer,
\r
3865 const unsigned int& channelCount,
\r
3866 const unsigned int& inSampleRate,
\r
3867 const unsigned int& outSampleRate,
\r
3868 const unsigned int& inSampleCount,
\r
3869 unsigned int& outSampleCount,
\r
3870 const RtAudioFormat& format )
\r
3872 // calculate the new outSampleCount and relative sampleStep
\r
3873 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3874 float sampleRatioInv = ( float ) 1 / sampleRatio;
\r
3875 float sampleStep = 1.0f / sampleRatio;
\r
3876 float inSampleFraction = 0.0f;
\r
3878 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3880 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
\r
3881 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
\r
3883 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3884 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3886 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3890 case RTAUDIO_SINT8:
\r
3891 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3893 case RTAUDIO_SINT16:
\r
3894 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3896 case RTAUDIO_SINT24:
\r
3897 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3899 case RTAUDIO_SINT32:
\r
3900 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3902 case RTAUDIO_FLOAT32:
\r
3903 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3905 case RTAUDIO_FLOAT64:
\r
3906 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3910 // jump to next in sample
\r
3911 inSampleFraction += sampleStep;
\r
3914 else // else interpolate
\r
3916 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3917 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3919 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3923 case RTAUDIO_SINT8:
\r
3925 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3927 char fromSample = ( ( char* ) inBuffer )[ ( inSample * channelCount ) + channel ];
\r
3928 char toSample = ( ( char* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ];
\r
3929 float sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - inSample );
\r
3930 ( ( char* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + ( char ) sampleDiff;
\r
3934 case RTAUDIO_SINT16:
\r
3936 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3938 short fromSample = ( ( short* ) inBuffer )[ ( inSample * channelCount ) + channel ];
\r
3939 short toSample = ( ( short* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ];
\r
3940 float sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - inSample );
\r
3941 ( ( short* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + ( short ) sampleDiff;
\r
3945 case RTAUDIO_SINT24:
\r
3947 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3949 int fromSample = ( ( S24* ) inBuffer )[ ( inSample * channelCount ) + channel ].asInt();
\r
3950 int toSample = ( ( S24* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ].asInt();
\r
3951 float sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - inSample );
\r
3952 ( ( S24* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + ( int ) sampleDiff;
\r
3956 case RTAUDIO_SINT32:
\r
3958 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3960 int fromSample = ( ( int* ) inBuffer )[ ( inSample * channelCount ) + channel ];
\r
3961 int toSample = ( ( int* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ];
\r
3962 float sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - inSample );
\r
3963 ( ( int* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + ( int ) sampleDiff;
\r
3967 case RTAUDIO_FLOAT32:
\r
3969 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3971 float fromSample = ( ( float* ) inBuffer )[ ( inSample * channelCount ) + channel ];
\r
3972 float toSample = ( ( float* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ];
\r
3973 float sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - inSample );
\r
3974 ( ( float* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + sampleDiff;
\r
3978 case RTAUDIO_FLOAT64:
\r
3980 for ( unsigned int channel = 0; channel < channelCount; channel++ )
\r
3982 double fromSample = ( ( double* ) inBuffer )[ ( inSample * channelCount ) + channel ];
\r
3983 double toSample = ( ( double* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ];
\r
3984 double sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - inSample );
\r
3985 ( ( double* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + sampleDiff;
\r
3991 // jump to next in sample
\r
3992 inSampleFraction += sampleStep;
\r
3997 //-----------------------------------------------------------------------------
\r
3999 // A structure to hold various information related to the WASAPI implementation.
\r
4000 struct WasapiHandle
\r
4002 IAudioClient* captureAudioClient;
\r
4003 IAudioClient* renderAudioClient;
\r
4004 IAudioCaptureClient* captureClient;
\r
4005 IAudioRenderClient* renderClient;
\r
4006 HANDLE captureEvent;
\r
4007 HANDLE renderEvent;
\r
4010 : captureAudioClient( NULL ),
\r
4011 renderAudioClient( NULL ),
\r
4012 captureClient( NULL ),
\r
4013 renderClient( NULL ),
\r
4014 captureEvent( NULL ),
\r
4015 renderEvent( NULL ) {}
\r
4018 //=============================================================================
\r
4020 RtApiWasapi::RtApiWasapi()
\r
4021 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
4023 // WASAPI can run either apartment or multi-threaded
\r
4024 HRESULT hr = CoInitialize( NULL );
\r
4025 if ( !FAILED( hr ) )
\r
4026 coInitialized_ = true;
\r
4028 // Instantiate device enumerator
\r
4029 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
4030 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
4031 ( void** ) &deviceEnumerator_ );
\r
4033 if ( FAILED( hr ) ) {
\r
4034 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
4035 error( RtAudioError::DRIVER_ERROR );
\r
4039 //-----------------------------------------------------------------------------
\r
4041 RtApiWasapi::~RtApiWasapi()
\r
4043 if ( stream_.state != STREAM_CLOSED )
\r
4046 SAFE_RELEASE( deviceEnumerator_ );
\r
4048 // If this object previously called CoInitialize()
\r
4049 if ( coInitialized_ )
\r
4053 //=============================================================================
\r
4055 unsigned int RtApiWasapi::getDeviceCount( void )
\r
4057 unsigned int captureDeviceCount = 0;
\r
4058 unsigned int renderDeviceCount = 0;
\r
4060 IMMDeviceCollection* captureDevices = NULL;
\r
4061 IMMDeviceCollection* renderDevices = NULL;
\r
4063 // Count capture devices
\r
4064 errorText_.clear();
\r
4065 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4066 if ( FAILED( hr ) ) {
\r
4067 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
4071 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4072 if ( FAILED( hr ) ) {
\r
4073 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
4077 // Count render devices
\r
4078 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4079 if ( FAILED( hr ) ) {
\r
4080 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
4084 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4085 if ( FAILED( hr ) ) {
\r
4086 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
4091 // release all references
\r
4092 SAFE_RELEASE( captureDevices );
\r
4093 SAFE_RELEASE( renderDevices );
\r
4095 if ( errorText_.empty() )
\r
4096 return captureDeviceCount + renderDeviceCount;
\r
4098 error( RtAudioError::DRIVER_ERROR );
\r
4102 //-----------------------------------------------------------------------------
\r
4104 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
4106 RtAudio::DeviceInfo info;
\r
4107 unsigned int captureDeviceCount = 0;
\r
4108 unsigned int renderDeviceCount = 0;
\r
4109 std::string defaultDeviceName;
\r
4110 bool isCaptureDevice = false;
\r
4112 PROPVARIANT deviceNameProp;
\r
4113 PROPVARIANT defaultDeviceNameProp;
\r
4115 IMMDeviceCollection* captureDevices = NULL;
\r
4116 IMMDeviceCollection* renderDevices = NULL;
\r
4117 IMMDevice* devicePtr = NULL;
\r
4118 IMMDevice* defaultDevicePtr = NULL;
\r
4119 IAudioClient* audioClient = NULL;
\r
4120 IPropertyStore* devicePropStore = NULL;
\r
4121 IPropertyStore* defaultDevicePropStore = NULL;
\r
4123 WAVEFORMATEX* deviceFormat = NULL;
\r
4124 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4127 info.probed = false;
\r
4129 // Count capture devices
\r
4130 errorText_.clear();
\r
4131 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4132 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4133 if ( FAILED( hr ) ) {
\r
4134 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4138 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4139 if ( FAILED( hr ) ) {
\r
4140 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4144 // Count render devices
\r
4145 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4146 if ( FAILED( hr ) ) {
\r
4147 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4151 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4152 if ( FAILED( hr ) ) {
\r
4153 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4157 // validate device index
\r
4158 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4159 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4160 errorType = RtAudioError::INVALID_USE;
\r
4164 // determine whether index falls within capture or render devices
\r
4165 if ( device >= renderDeviceCount ) {
\r
4166 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4167 if ( FAILED( hr ) ) {
\r
4168 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4171 isCaptureDevice = true;
\r
4174 hr = renderDevices->Item( device, &devicePtr );
\r
4175 if ( FAILED( hr ) ) {
\r
4176 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4179 isCaptureDevice = false;
\r
4182 // get default device name
\r
4183 if ( isCaptureDevice ) {
\r
4184 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4185 if ( FAILED( hr ) ) {
\r
4186 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4191 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4192 if ( FAILED( hr ) ) {
\r
4193 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4198 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4199 if ( FAILED( hr ) ) {
\r
4200 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4203 PropVariantInit( &defaultDeviceNameProp );
\r
4205 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4206 if ( FAILED( hr ) ) {
\r
4207 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4211 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4214 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4215 if ( FAILED( hr ) ) {
\r
4216 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4220 PropVariantInit( &deviceNameProp );
\r
4222 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4223 if ( FAILED( hr ) ) {
\r
4224 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4228 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4231 if ( isCaptureDevice ) {
\r
4232 info.isDefaultInput = info.name == defaultDeviceName;
\r
4233 info.isDefaultOutput = false;
\r
4236 info.isDefaultInput = false;
\r
4237 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4241 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4242 if ( FAILED( hr ) ) {
\r
4243 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4247 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4248 if ( FAILED( hr ) ) {
\r
4249 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4253 if ( isCaptureDevice ) {
\r
4254 info.inputChannels = deviceFormat->nChannels;
\r
4255 info.outputChannels = 0;
\r
4256 info.duplexChannels = 0;
\r
4259 info.inputChannels = 0;
\r
4260 info.outputChannels = deviceFormat->nChannels;
\r
4261 info.duplexChannels = 0;
\r
4265 info.sampleRates.clear();
\r
4267 // allow support for all sample rates as we have a built-in sample rate converter
\r
4268 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4269 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4271 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4274 info.nativeFormats = 0;
\r
4276 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4277 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4278 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4280 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4281 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4283 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4284 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4287 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4288 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4289 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4291 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4292 info.nativeFormats |= RTAUDIO_SINT8;
\r
4294 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4295 info.nativeFormats |= RTAUDIO_SINT16;
\r
4297 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4298 info.nativeFormats |= RTAUDIO_SINT24;
\r
4300 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4301 info.nativeFormats |= RTAUDIO_SINT32;
\r
4306 info.probed = true;
\r
4309 // release all references
\r
4310 PropVariantClear( &deviceNameProp );
\r
4311 PropVariantClear( &defaultDeviceNameProp );
\r
4313 SAFE_RELEASE( captureDevices );
\r
4314 SAFE_RELEASE( renderDevices );
\r
4315 SAFE_RELEASE( devicePtr );
\r
4316 SAFE_RELEASE( defaultDevicePtr );
\r
4317 SAFE_RELEASE( audioClient );
\r
4318 SAFE_RELEASE( devicePropStore );
\r
4319 SAFE_RELEASE( defaultDevicePropStore );
\r
4321 CoTaskMemFree( deviceFormat );
\r
4322 CoTaskMemFree( closestMatchFormat );
\r
4324 if ( !errorText_.empty() )
\r
4325 error( errorType );
\r
4329 //-----------------------------------------------------------------------------
\r
4331 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4333 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4334 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4342 //-----------------------------------------------------------------------------
\r
4344 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4346 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4347 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4355 //-----------------------------------------------------------------------------
\r
4357 void RtApiWasapi::closeStream( void )
\r
4359 if ( stream_.state == STREAM_CLOSED ) {
\r
4360 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4361 error( RtAudioError::WARNING );
\r
4365 if ( stream_.state != STREAM_STOPPED )
\r
4368 // clean up stream memory
\r
4369 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4370 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4372 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4373 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4375 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4376 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4378 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4379 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4381 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4382 stream_.apiHandle = NULL;
\r
4384 for ( int i = 0; i < 2; i++ ) {
\r
4385 if ( stream_.userBuffer[i] ) {
\r
4386 free( stream_.userBuffer[i] );
\r
4387 stream_.userBuffer[i] = 0;
\r
4391 if ( stream_.deviceBuffer ) {
\r
4392 free( stream_.deviceBuffer );
\r
4393 stream_.deviceBuffer = 0;
\r
4396 // update stream state
\r
4397 stream_.state = STREAM_CLOSED;
\r
4400 //-----------------------------------------------------------------------------
\r
4402 void RtApiWasapi::startStream( void )
\r
4406 if ( stream_.state == STREAM_RUNNING ) {
\r
4407 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4408 error( RtAudioError::WARNING );
\r
4412 // update stream state
\r
4413 stream_.state = STREAM_RUNNING;
\r
4415 // create WASAPI stream thread
\r
4416 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4418 if ( !stream_.callbackInfo.thread ) {
\r
4419 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4420 error( RtAudioError::THREAD_ERROR );
\r
4423 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4424 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4428 //-----------------------------------------------------------------------------
\r
4430 void RtApiWasapi::stopStream( void )
\r
4434 if ( stream_.state == STREAM_STOPPED ) {
\r
4435 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4436 error( RtAudioError::WARNING );
\r
4440 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4441 stream_.state = STREAM_STOPPING;
\r
4443 // wait until stream thread is stopped
\r
4444 while( stream_.state != STREAM_STOPPED ) {
\r
4448 // Wait for the last buffer to play before stopping.
\r
4449 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4451 // stop capture client if applicable
\r
4452 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4453 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4454 if ( FAILED( hr ) ) {
\r
4455 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4456 error( RtAudioError::DRIVER_ERROR );
\r
4461 // stop render client if applicable
\r
4462 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4463 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4464 if ( FAILED( hr ) ) {
\r
4465 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4466 error( RtAudioError::DRIVER_ERROR );
\r
4471 // close thread handle
\r
4472 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4473 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4474 error( RtAudioError::THREAD_ERROR );
\r
4478 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4481 //-----------------------------------------------------------------------------
\r
4483 void RtApiWasapi::abortStream( void )
\r
4487 if ( stream_.state == STREAM_STOPPED ) {
\r
4488 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4489 error( RtAudioError::WARNING );
\r
4493 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4494 stream_.state = STREAM_STOPPING;
\r
4496 // wait until stream thread is stopped
\r
4497 while ( stream_.state != STREAM_STOPPED ) {
\r
4501 // stop capture client if applicable
\r
4502 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4503 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4504 if ( FAILED( hr ) ) {
\r
4505 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4506 error( RtAudioError::DRIVER_ERROR );
\r
4511 // stop render client if applicable
\r
4512 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4513 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4514 if ( FAILED( hr ) ) {
\r
4515 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4516 error( RtAudioError::DRIVER_ERROR );
\r
4521 // close thread handle
\r
4522 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4523 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4524 error( RtAudioError::THREAD_ERROR );
\r
4528 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4531 //-----------------------------------------------------------------------------
\r
4533 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4534 unsigned int firstChannel, unsigned int sampleRate,
\r
4535 RtAudioFormat format, unsigned int* bufferSize,
\r
4536 RtAudio::StreamOptions* options )
\r
4538 bool methodResult = FAILURE;
\r
4539 unsigned int captureDeviceCount = 0;
\r
4540 unsigned int renderDeviceCount = 0;
\r
4542 IMMDeviceCollection* captureDevices = NULL;
\r
4543 IMMDeviceCollection* renderDevices = NULL;
\r
4544 IMMDevice* devicePtr = NULL;
\r
4545 WAVEFORMATEX* deviceFormat = NULL;
\r
4546 unsigned int bufferBytes;
\r
4547 stream_.state = STREAM_STOPPED;
\r
4549 // create API Handle if not already created
\r
4550 if ( !stream_.apiHandle )
\r
4551 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4553 // Count capture devices
\r
4554 errorText_.clear();
\r
4555 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4556 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4557 if ( FAILED( hr ) ) {
\r
4558 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4562 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4563 if ( FAILED( hr ) ) {
\r
4564 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4568 // Count render devices
\r
4569 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4570 if ( FAILED( hr ) ) {
\r
4571 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4575 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4576 if ( FAILED( hr ) ) {
\r
4577 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4581 // validate device index
\r
4582 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4583 errorType = RtAudioError::INVALID_USE;
\r
4584 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4588 // determine whether index falls within capture or render devices
\r
4589 if ( device >= renderDeviceCount ) {
\r
4590 if ( mode != INPUT ) {
\r
4591 errorType = RtAudioError::INVALID_USE;
\r
4592 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4596 // retrieve captureAudioClient from devicePtr
\r
4597 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4599 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4600 if ( FAILED( hr ) ) {
\r
4601 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4605 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4606 NULL, ( void** ) &captureAudioClient );
\r
4607 if ( FAILED( hr ) ) {
\r
4608 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4612 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4613 if ( FAILED( hr ) ) {
\r
4614 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4618 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4619 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4622 if ( mode != OUTPUT ) {
\r
4623 errorType = RtAudioError::INVALID_USE;
\r
4624 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4628 // retrieve renderAudioClient from devicePtr
\r
4629 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4631 hr = renderDevices->Item( device, &devicePtr );
\r
4632 if ( FAILED( hr ) ) {
\r
4633 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4637 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4638 NULL, ( void** ) &renderAudioClient );
\r
4639 if ( FAILED( hr ) ) {
\r
4640 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4644 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4645 if ( FAILED( hr ) ) {
\r
4646 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4650 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4651 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4654 // fill stream data
\r
4655 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4656 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4657 stream_.mode = DUPLEX;
\r
4660 stream_.mode = mode;
\r
4663 stream_.device[mode] = device;
\r
4664 stream_.doByteSwap[mode] = false;
\r
4665 stream_.sampleRate = sampleRate;
\r
4666 stream_.bufferSize = *bufferSize;
\r
4667 stream_.nBuffers = 1;
\r
4668 stream_.nUserChannels[mode] = channels;
\r
4669 stream_.channelOffset[mode] = firstChannel;
\r
4670 stream_.userFormat = format;
\r
4671 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4673 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4674 stream_.userInterleaved = false;
\r
4676 stream_.userInterleaved = true;
\r
4677 stream_.deviceInterleaved[mode] = true;
\r
4679 // Set flags for buffer conversion.
\r
4680 stream_.doConvertBuffer[mode] = false;
\r
4681 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4682 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4683 stream_.doConvertBuffer[mode] = true;
\r
4684 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4685 stream_.nUserChannels[mode] > 1 )
\r
4686 stream_.doConvertBuffer[mode] = true;
\r
4688 if ( stream_.doConvertBuffer[mode] )
\r
4689 setConvertInfo( mode, 0 );
\r
4691 // Allocate necessary internal buffers
\r
4692 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4694 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4695 if ( !stream_.userBuffer[mode] ) {
\r
4696 errorType = RtAudioError::MEMORY_ERROR;
\r
4697 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4701 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4702 stream_.callbackInfo.priority = 15;
\r
4704 stream_.callbackInfo.priority = 0;
\r
4706 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4707 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4709 methodResult = SUCCESS;
\r
4713 SAFE_RELEASE( captureDevices );
\r
4714 SAFE_RELEASE( renderDevices );
\r
4715 SAFE_RELEASE( devicePtr );
\r
4716 CoTaskMemFree( deviceFormat );
\r
4718 // if method failed, close the stream
\r
4719 if ( methodResult == FAILURE )
\r
4722 if ( !errorText_.empty() )
\r
4723 error( errorType );
\r
4724 return methodResult;
\r
4727 //=============================================================================
\r
4729 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4732 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
4737 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4740 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
4745 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4748 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4753 //-----------------------------------------------------------------------------
\r
4755 void RtApiWasapi::wasapiThread()
\r
4757 // as this is a new thread, we must CoInitialize it
\r
4758 CoInitialize( NULL );
\r
4762 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4763 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4764 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4765 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4766 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4767 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4769 WAVEFORMATEX* captureFormat = NULL;
\r
4770 WAVEFORMATEX* renderFormat = NULL;
\r
4771 float captureSrRatio = 0.0f;
\r
4772 float renderSrRatio = 0.0f;
\r
4773 WasapiBuffer captureBuffer;
\r
4774 WasapiBuffer renderBuffer;
\r
4776 // declare local stream variables
\r
4777 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4778 BYTE* streamBuffer = NULL;
\r
4779 unsigned long captureFlags = 0;
\r
4780 unsigned int bufferFrameCount = 0;
\r
4781 unsigned int numFramesPadding = 0;
\r
4782 unsigned int convBufferSize = 0;
\r
4783 bool callbackPushed = false;
\r
4784 bool callbackPulled = false;
\r
4785 bool callbackStopped = false;
\r
4786 int callbackResult = 0;
\r
4788 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4789 char* convBuffer = NULL;
\r
4790 unsigned int convBuffSize = 0;
\r
4791 unsigned int deviceBuffSize = 0;
\r
4793 errorText_.clear();
\r
4794 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4796 // Attempt to assign "Pro Audio" characteristic to thread
\r
4797 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4799 DWORD taskIndex = 0;
\r
4800 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4801 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4802 FreeLibrary( AvrtDll );
\r
4805 // start capture stream if applicable
\r
4806 if ( captureAudioClient ) {
\r
4807 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4808 if ( FAILED( hr ) ) {
\r
4809 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4813 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4815 // initialize capture stream according to desire buffer size
\r
4816 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4817 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4819 if ( !captureClient ) {
\r
4820 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4821 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4822 desiredBufferPeriod,
\r
4823 desiredBufferPeriod,
\r
4826 if ( FAILED( hr ) ) {
\r
4827 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4831 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4832 ( void** ) &captureClient );
\r
4833 if ( FAILED( hr ) ) {
\r
4834 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4838 // configure captureEvent to trigger on every available capture buffer
\r
4839 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4840 if ( !captureEvent ) {
\r
4841 errorType = RtAudioError::SYSTEM_ERROR;
\r
4842 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4846 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4847 if ( FAILED( hr ) ) {
\r
4848 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4852 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4853 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4856 unsigned int inBufferSize = 0;
\r
4857 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4858 if ( FAILED( hr ) ) {
\r
4859 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4863 // scale outBufferSize according to stream->user sample rate ratio
\r
4864 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4865 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4867 // set captureBuffer size
\r
4868 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4870 // reset the capture stream
\r
4871 hr = captureAudioClient->Reset();
\r
4872 if ( FAILED( hr ) ) {
\r
4873 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4877 // start the capture stream
\r
4878 hr = captureAudioClient->Start();
\r
4879 if ( FAILED( hr ) ) {
\r
4880 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4885 // start render stream if applicable
\r
4886 if ( renderAudioClient ) {
\r
4887 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4888 if ( FAILED( hr ) ) {
\r
4889 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4893 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4895 // initialize render stream according to desire buffer size
\r
4896 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4897 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4899 if ( !renderClient ) {
\r
4900 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4901 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4902 desiredBufferPeriod,
\r
4903 desiredBufferPeriod,
\r
4906 if ( FAILED( hr ) ) {
\r
4907 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4911 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4912 ( void** ) &renderClient );
\r
4913 if ( FAILED( hr ) ) {
\r
4914 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4918 // configure renderEvent to trigger on every available render buffer
\r
4919 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4920 if ( !renderEvent ) {
\r
4921 errorType = RtAudioError::SYSTEM_ERROR;
\r
4922 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4926 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4927 if ( FAILED( hr ) ) {
\r
4928 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4932 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4933 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4936 unsigned int outBufferSize = 0;
\r
4937 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4938 if ( FAILED( hr ) ) {
\r
4939 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4943 // scale inBufferSize according to user->stream sample rate ratio
\r
4944 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4945 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4947 // set renderBuffer size
\r
4948 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4950 // reset the render stream
\r
4951 hr = renderAudioClient->Reset();
\r
4952 if ( FAILED( hr ) ) {
\r
4953 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4957 // start the render stream
\r
4958 hr = renderAudioClient->Start();
\r
4959 if ( FAILED( hr ) ) {
\r
4960 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4965 if ( stream_.mode == INPUT ) {
\r
4966 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4967 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4969 else if ( stream_.mode == OUTPUT ) {
\r
4970 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4971 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4973 else if ( stream_.mode == DUPLEX ) {
\r
4974 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4975 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4976 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4977 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4980 convBuffer = ( char* ) malloc( convBuffSize );
\r
4981 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4982 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4983 errorType = RtAudioError::MEMORY_ERROR;
\r
4984 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4988 // stream process loop
\r
4989 while ( stream_.state != STREAM_STOPPING ) {
\r
4990 if ( !callbackPulled ) {
\r
4993 // 1. Pull callback buffer from inputBuffer
\r
4994 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4995 // Convert callback buffer to user format
\r
4997 if ( captureAudioClient ) {
\r
4998 // Pull callback buffer from inputBuffer
\r
4999 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
5000 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
5001 stream_.deviceFormat[INPUT] );
\r
5003 if ( callbackPulled ) {
\r
5004 // Convert callback buffer to user sample rate
\r
5005 convertBufferWasapi( stream_.deviceBuffer,
\r
5007 stream_.nDeviceChannels[INPUT],
\r
5008 captureFormat->nSamplesPerSec,
\r
5009 stream_.sampleRate,
\r
5010 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
5012 stream_.deviceFormat[INPUT] );
\r
5014 if ( stream_.doConvertBuffer[INPUT] ) {
\r
5015 // Convert callback buffer to user format
\r
5016 convertBuffer( stream_.userBuffer[INPUT],
\r
5017 stream_.deviceBuffer,
\r
5018 stream_.convertInfo[INPUT] );
\r
5021 // no further conversion, simple copy deviceBuffer to userBuffer
\r
5022 memcpy( stream_.userBuffer[INPUT],
\r
5023 stream_.deviceBuffer,
\r
5024 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
5029 // if there is no capture stream, set callbackPulled flag
\r
5030 callbackPulled = true;
\r
5033 // Execute Callback
\r
5034 // ================
\r
5035 // 1. Execute user callback method
\r
5036 // 2. Handle return value from callback
\r
5038 // if callback has not requested the stream to stop
\r
5039 if ( callbackPulled && !callbackStopped ) {
\r
5040 // Execute user callback method
\r
5041 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
5042 stream_.userBuffer[INPUT],
\r
5043 stream_.bufferSize,
\r
5045 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
5046 stream_.callbackInfo.userData );
\r
5048 // Handle return value from callback
\r
5049 if ( callbackResult == 1 ) {
\r
5050 // instantiate a thread to stop this thread
\r
5051 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
5052 if ( !threadHandle ) {
\r
5053 errorType = RtAudioError::THREAD_ERROR;
\r
5054 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
5057 else if ( !CloseHandle( threadHandle ) ) {
\r
5058 errorType = RtAudioError::THREAD_ERROR;
\r
5059 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
5063 callbackStopped = true;
\r
5065 else if ( callbackResult == 2 ) {
\r
5066 // instantiate a thread to stop this thread
\r
5067 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
5068 if ( !threadHandle ) {
\r
5069 errorType = RtAudioError::THREAD_ERROR;
\r
5070 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
5073 else if ( !CloseHandle( threadHandle ) ) {
\r
5074 errorType = RtAudioError::THREAD_ERROR;
\r
5075 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
5079 callbackStopped = true;
\r
5084 // Callback Output
\r
5085 // ===============
\r
5086 // 1. Convert callback buffer to stream format
\r
5087 // 2. Convert callback buffer to stream sample rate and channel count
\r
5088 // 3. Push callback buffer into outputBuffer
\r
5090 if ( renderAudioClient && callbackPulled ) {
\r
5091 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
5092 // Convert callback buffer to stream format
\r
5093 convertBuffer( stream_.deviceBuffer,
\r
5094 stream_.userBuffer[OUTPUT],
\r
5095 stream_.convertInfo[OUTPUT] );
\r
5099 // Convert callback buffer to stream sample rate
\r
5100 convertBufferWasapi( convBuffer,
\r
5101 stream_.deviceBuffer,
\r
5102 stream_.nDeviceChannels[OUTPUT],
\r
5103 stream_.sampleRate,
\r
5104 renderFormat->nSamplesPerSec,
\r
5105 stream_.bufferSize,
\r
5107 stream_.deviceFormat[OUTPUT] );
\r
5109 // Push callback buffer into outputBuffer
\r
5110 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
5111 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
5112 stream_.deviceFormat[OUTPUT] );
\r
5115 // if there is no render stream, set callbackPushed flag
\r
5116 callbackPushed = true;
\r
5121 // 1. Get capture buffer from stream
\r
5122 // 2. Push capture buffer into inputBuffer
\r
5123 // 3. If 2. was successful: Release capture buffer
\r
5125 if ( captureAudioClient ) {
\r
5126 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
5127 if ( !callbackPulled ) {
\r
5128 WaitForSingleObject( captureEvent, INFINITE );
\r
5131 // Get capture buffer from stream
\r
5132 hr = captureClient->GetBuffer( &streamBuffer,
\r
5133 &bufferFrameCount,
\r
5134 &captureFlags, NULL, NULL );
\r
5135 if ( FAILED( hr ) ) {
\r
5136 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
5140 if ( bufferFrameCount != 0 ) {
\r
5141 // Push capture buffer into inputBuffer
\r
5142 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
5143 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
5144 stream_.deviceFormat[INPUT] ) )
\r
5146 // Release capture buffer
\r
5147 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
5148 if ( FAILED( hr ) ) {
\r
5149 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5155 // Inform WASAPI that capture was unsuccessful
\r
5156 hr = captureClient->ReleaseBuffer( 0 );
\r
5157 if ( FAILED( hr ) ) {
\r
5158 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5165 // Inform WASAPI that capture was unsuccessful
\r
5166 hr = captureClient->ReleaseBuffer( 0 );
\r
5167 if ( FAILED( hr ) ) {
\r
5168 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5176 // 1. Get render buffer from stream
\r
5177 // 2. Pull next buffer from outputBuffer
\r
5178 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5179 // Release render buffer
\r
5181 if ( renderAudioClient ) {
\r
5182 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5183 if ( callbackPulled && !callbackPushed ) {
\r
5184 WaitForSingleObject( renderEvent, INFINITE );
\r
5187 // Get render buffer from stream
\r
5188 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5189 if ( FAILED( hr ) ) {
\r
5190 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5194 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5195 if ( FAILED( hr ) ) {
\r
5196 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5200 bufferFrameCount -= numFramesPadding;
\r
5202 if ( bufferFrameCount != 0 ) {
\r
5203 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5204 if ( FAILED( hr ) ) {
\r
5205 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5209 // Pull next buffer from outputBuffer
\r
5210 // Fill render buffer with next buffer
\r
5211 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5212 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5213 stream_.deviceFormat[OUTPUT] ) )
\r
5215 // Release render buffer
\r
5216 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5217 if ( FAILED( hr ) ) {
\r
5218 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5224 // Inform WASAPI that render was unsuccessful
\r
5225 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5226 if ( FAILED( hr ) ) {
\r
5227 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5234 // Inform WASAPI that render was unsuccessful
\r
5235 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5236 if ( FAILED( hr ) ) {
\r
5237 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5243 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5244 if ( callbackPushed ) {
\r
5245 callbackPulled = false;
\r
5246 // tick stream time
\r
5247 RtApi::tickStreamTime();
\r
5254 CoTaskMemFree( captureFormat );
\r
5255 CoTaskMemFree( renderFormat );
\r
5257 free ( convBuffer );
\r
5261 // update stream state
\r
5262 stream_.state = STREAM_STOPPED;
\r
5264 if ( errorText_.empty() )
\r
5267 error( errorType );
\r
5270 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5274 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5276 // Modified by Robin Davies, October 2005
\r
5277 // - Improvements to DirectX pointer chasing.
\r
5278 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5279 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5280 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5281 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5283 #include <dsound.h>
\r
5284 #include <assert.h>
\r
5285 #include <algorithm>
\r
5287 #if defined(__MINGW32__)
\r
5288 // missing from latest mingw winapi
\r
5289 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5290 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5291 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5292 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5295 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5297 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5298 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5301 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5303 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5304 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5305 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5306 return pointer >= earlierPointer && pointer < laterPointer;
\r
5309 // A structure to hold various information related to the DirectSound
\r
5310 // API implementation.
\r
5312 unsigned int drainCounter; // Tracks callback counts when draining
\r
5313 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5317 UINT bufferPointer[2];
\r
5318 DWORD dsBufferSize[2];
\r
5319 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5323 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5326 // Declarations for utility functions, callbacks, and structures
\r
5327 // specific to the DirectSound implementation.
\r
5328 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5329 LPCTSTR description,
\r
5331 LPVOID lpContext );
\r
5333 static const char* getErrorString( int code );
\r
5335 static unsigned __stdcall callbackHandler( void *ptr );
\r
5344 : found(false) { validId[0] = false; validId[1] = false; }
\r
5347 struct DsProbeData {
\r
5349 std::vector<struct DsDevice>* dsDevices;
\r
5352 RtApiDs :: RtApiDs()
\r
5354 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5355 // accept whatever the mainline chose for a threading model.
\r
5356 coInitialized_ = false;
\r
5357 HRESULT hr = CoInitialize( NULL );
\r
5358 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
5361 RtApiDs :: ~RtApiDs()
\r
5363 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5364 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5367 // The DirectSound default output is always the first device.
\r
5368 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5373 // The DirectSound default input is always the first input device,
\r
5374 // which is the first capture device enumerated.
\r
5375 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
5380 unsigned int RtApiDs :: getDeviceCount( void )
\r
5382 // Set query flag for previously found devices to false, so that we
\r
5383 // can check for any devices that have disappeared.
\r
5384 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5385 dsDevices[i].found = false;
\r
5387 // Query DirectSound devices.
\r
5388 struct DsProbeData probeInfo;
\r
5389 probeInfo.isInput = false;
\r
5390 probeInfo.dsDevices = &dsDevices;
\r
5391 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5392 if ( FAILED( result ) ) {
\r
5393 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5394 errorText_ = errorStream_.str();
\r
5395 error( RtAudioError::WARNING );
\r
5398 // Query DirectSoundCapture devices.
\r
5399 probeInfo.isInput = true;
\r
5400 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5401 if ( FAILED( result ) ) {
\r
5402 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5403 errorText_ = errorStream_.str();
\r
5404 error( RtAudioError::WARNING );
\r
5407 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
\r
5408 for ( unsigned int i=0; i<dsDevices.size(); ) {
\r
5409 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
\r
5413 return static_cast<unsigned int>(dsDevices.size());
\r
5416 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5418 RtAudio::DeviceInfo info;
\r
5419 info.probed = false;
\r
5421 if ( dsDevices.size() == 0 ) {
\r
5422 // Force a query of all devices
\r
5424 if ( dsDevices.size() == 0 ) {
\r
5425 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5426 error( RtAudioError::INVALID_USE );
\r
5431 if ( device >= dsDevices.size() ) {
\r
5432 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5433 error( RtAudioError::INVALID_USE );
\r
5438 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5440 LPDIRECTSOUND output;
\r
5442 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5443 if ( FAILED( result ) ) {
\r
5444 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5445 errorText_ = errorStream_.str();
\r
5446 error( RtAudioError::WARNING );
\r
5450 outCaps.dwSize = sizeof( outCaps );
\r
5451 result = output->GetCaps( &outCaps );
\r
5452 if ( FAILED( result ) ) {
\r
5453 output->Release();
\r
5454 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5455 errorText_ = errorStream_.str();
\r
5456 error( RtAudioError::WARNING );
\r
5460 // Get output channel information.
\r
5461 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5463 // Get sample rate information.
\r
5464 info.sampleRates.clear();
\r
5465 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5466 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5467 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5468 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5470 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5471 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5475 // Get format information.
\r
5476 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5477 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5479 output->Release();
\r
5481 if ( getDefaultOutputDevice() == device )
\r
5482 info.isDefaultOutput = true;
\r
5484 if ( dsDevices[ device ].validId[1] == false ) {
\r
5485 info.name = dsDevices[ device ].name;
\r
5486 info.probed = true;
\r
5492 LPDIRECTSOUNDCAPTURE input;
\r
5493 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5494 if ( FAILED( result ) ) {
\r
5495 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5496 errorText_ = errorStream_.str();
\r
5497 error( RtAudioError::WARNING );
\r
5502 inCaps.dwSize = sizeof( inCaps );
\r
5503 result = input->GetCaps( &inCaps );
\r
5504 if ( FAILED( result ) ) {
\r
5506 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5507 errorText_ = errorStream_.str();
\r
5508 error( RtAudioError::WARNING );
\r
5512 // Get input channel information.
\r
5513 info.inputChannels = inCaps.dwChannels;
\r
5515 // Get sample rate and format information.
\r
5516 std::vector<unsigned int> rates;
\r
5517 if ( inCaps.dwChannels >= 2 ) {
\r
5518 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5519 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5520 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5521 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5522 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5523 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5524 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5525 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5527 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5528 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5529 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5530 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5531 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5533 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5534 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5535 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5536 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5537 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5540 else if ( inCaps.dwChannels == 1 ) {
\r
5541 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5542 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5543 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5544 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5545 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5546 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5547 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5548 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5550 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5551 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5552 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5553 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5554 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5556 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5557 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5558 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5559 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5560 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5563 else info.inputChannels = 0; // technically, this would be an error
\r
5567 if ( info.inputChannels == 0 ) return info;
\r
5569 // Copy the supported rates to the info structure but avoid duplication.
\r
5571 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5573 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5574 if ( rates[i] == info.sampleRates[j] ) {
\r
5579 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5581 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5583 // If device opens for both playback and capture, we determine the channels.
\r
5584 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5585 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5587 if ( device == 0 ) info.isDefaultInput = true;
\r
5589 // Copy name and return.
\r
5590 info.name = dsDevices[ device ].name;
\r
5591 info.probed = true;
\r
5595 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5596 unsigned int firstChannel, unsigned int sampleRate,
\r
5597 RtAudioFormat format, unsigned int *bufferSize,
\r
5598 RtAudio::StreamOptions *options )
\r
5600 if ( channels + firstChannel > 2 ) {
\r
5601 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
5605 size_t nDevices = dsDevices.size();
\r
5606 if ( nDevices == 0 ) {
\r
5607 // This should not happen because a check is made before this function is called.
\r
5608 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
5612 if ( device >= nDevices ) {
\r
5613 // This should not happen because a check is made before this function is called.
\r
5614 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
5618 if ( mode == OUTPUT ) {
\r
5619 if ( dsDevices[ device ].validId[0] == false ) {
\r
5620 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
5621 errorText_ = errorStream_.str();
\r
5625 else { // mode == INPUT
\r
5626 if ( dsDevices[ device ].validId[1] == false ) {
\r
5627 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
5628 errorText_ = errorStream_.str();
\r
5633 // According to a note in PortAudio, using GetDesktopWindow()
\r
5634 // instead of GetForegroundWindow() is supposed to avoid problems
\r
5635 // that occur when the application's window is not the foreground
\r
5636 // window. Also, if the application window closes before the
\r
5637 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
5638 // problems when using GetDesktopWindow() but it seems fine now
\r
5639 // (January 2010). I'll leave it commented here.
\r
5640 // HWND hWnd = GetForegroundWindow();
\r
5641 HWND hWnd = GetDesktopWindow();
\r
5643 // Check the numberOfBuffers parameter and limit the lowest value to
\r
5644 // two. This is a judgement call and a value of two is probably too
\r
5645 // low for capture, but it should work for playback.
\r
5647 if ( options ) nBuffers = options->numberOfBuffers;
\r
5648 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
5649 if ( nBuffers < 2 ) nBuffers = 3;
\r
5651 // Check the lower range of the user-specified buffer size and set
\r
5652 // (arbitrarily) to a lower bound of 32.
\r
5653 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
5655 // Create the wave format structure. The data format setting will
\r
5656 // be determined later.
\r
5657 WAVEFORMATEX waveFormat;
\r
5658 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
5659 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
5660 waveFormat.nChannels = channels + firstChannel;
\r
5661 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
5663 // Determine the device buffer size. By default, we'll use the value
\r
5664 // defined above (32K), but we will grow it to make allowances for
\r
5665 // very large software buffer sizes.
\r
5666 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
\r
5667 DWORD dsPointerLeadTime = 0;
\r
5669 void *ohandle = 0, *bhandle = 0;
\r
5671 if ( mode == OUTPUT ) {
\r
5673 LPDIRECTSOUND output;
\r
5674 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5675 if ( FAILED( result ) ) {
\r
5676 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5677 errorText_ = errorStream_.str();
\r
5682 outCaps.dwSize = sizeof( outCaps );
\r
5683 result = output->GetCaps( &outCaps );
\r
5684 if ( FAILED( result ) ) {
\r
5685 output->Release();
\r
5686 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
5687 errorText_ = errorStream_.str();
\r
5691 // Check channel information.
\r
5692 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
5693 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
5694 errorText_ = errorStream_.str();
\r
5698 // Check format information. Use 16-bit format unless not
\r
5699 // supported or user requests 8-bit.
\r
5700 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
5701 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
5702 waveFormat.wBitsPerSample = 16;
\r
5703 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5706 waveFormat.wBitsPerSample = 8;
\r
5707 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5709 stream_.userFormat = format;
\r
5711 // Update wave format structure and buffer information.
\r
5712 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5713 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5714 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5716 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5717 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5718 dsBufferSize *= 2;
\r
5720 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
5721 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
5722 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
5723 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
5724 if ( FAILED( result ) ) {
\r
5725 output->Release();
\r
5726 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
5727 errorText_ = errorStream_.str();
\r
5731 // Even though we will write to the secondary buffer, we need to
\r
5732 // access the primary buffer to set the correct output format
\r
5733 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
5734 // buffer description.
\r
5735 DSBUFFERDESC bufferDescription;
\r
5736 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5737 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5738 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
5740 // Obtain the primary buffer
\r
5741 LPDIRECTSOUNDBUFFER buffer;
\r
5742 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5743 if ( FAILED( result ) ) {
\r
5744 output->Release();
\r
5745 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
5746 errorText_ = errorStream_.str();
\r
5750 // Set the primary DS buffer sound format.
\r
5751 result = buffer->SetFormat( &waveFormat );
\r
5752 if ( FAILED( result ) ) {
\r
5753 output->Release();
\r
5754 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
5755 errorText_ = errorStream_.str();
\r
5759 // Setup the secondary DS buffer description.
\r
5760 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
5761 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
5762 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5763 DSBCAPS_GLOBALFOCUS |
\r
5764 DSBCAPS_GETCURRENTPOSITION2 |
\r
5765 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
5766 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5767 bufferDescription.lpwfxFormat = &waveFormat;
\r
5769 // Try to create the secondary DS buffer. If that doesn't work,
\r
5770 // try to use software mixing. Otherwise, there's a problem.
\r
5771 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5772 if ( FAILED( result ) ) {
\r
5773 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
5774 DSBCAPS_GLOBALFOCUS |
\r
5775 DSBCAPS_GETCURRENTPOSITION2 |
\r
5776 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
5777 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
5778 if ( FAILED( result ) ) {
\r
5779 output->Release();
\r
5780 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
5781 errorText_ = errorStream_.str();
\r
5786 // Get the buffer size ... might be different from what we specified.
\r
5788 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
5789 result = buffer->GetCaps( &dsbcaps );
\r
5790 if ( FAILED( result ) ) {
\r
5791 output->Release();
\r
5792 buffer->Release();
\r
5793 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5794 errorText_ = errorStream_.str();
\r
5798 dsBufferSize = dsbcaps.dwBufferBytes;
\r
5800 // Lock the DS buffer
\r
5803 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5804 if ( FAILED( result ) ) {
\r
5805 output->Release();
\r
5806 buffer->Release();
\r
5807 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
5808 errorText_ = errorStream_.str();
\r
5812 // Zero the DS buffer
\r
5813 ZeroMemory( audioPtr, dataLen );
\r
5815 // Unlock the DS buffer
\r
5816 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5817 if ( FAILED( result ) ) {
\r
5818 output->Release();
\r
5819 buffer->Release();
\r
5820 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
5821 errorText_ = errorStream_.str();
\r
5825 ohandle = (void *) output;
\r
5826 bhandle = (void *) buffer;
\r
5829 if ( mode == INPUT ) {
\r
5831 LPDIRECTSOUNDCAPTURE input;
\r
5832 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5833 if ( FAILED( result ) ) {
\r
5834 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5835 errorText_ = errorStream_.str();
\r
5840 inCaps.dwSize = sizeof( inCaps );
\r
5841 result = input->GetCaps( &inCaps );
\r
5842 if ( FAILED( result ) ) {
\r
5844 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
5845 errorText_ = errorStream_.str();
\r
5849 // Check channel information.
\r
5850 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
5851 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
5855 // Check format information. Use 16-bit format unless user
\r
5856 // requests 8-bit.
\r
5857 DWORD deviceFormats;
\r
5858 if ( channels + firstChannel == 2 ) {
\r
5859 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
5860 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5861 waveFormat.wBitsPerSample = 8;
\r
5862 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5864 else { // assume 16-bit is supported
\r
5865 waveFormat.wBitsPerSample = 16;
\r
5866 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5869 else { // channel == 1
\r
5870 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
5871 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
5872 waveFormat.wBitsPerSample = 8;
\r
5873 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5875 else { // assume 16-bit is supported
\r
5876 waveFormat.wBitsPerSample = 16;
\r
5877 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5880 stream_.userFormat = format;
\r
5882 // Update wave format structure and buffer information.
\r
5883 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
5884 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
5885 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
5887 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
5888 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
5889 dsBufferSize *= 2;
\r
5891 // Setup the secondary DS buffer description.
\r
5892 DSCBUFFERDESC bufferDescription;
\r
5893 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
5894 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
5895 bufferDescription.dwFlags = 0;
\r
5896 bufferDescription.dwReserved = 0;
\r
5897 bufferDescription.dwBufferBytes = dsBufferSize;
\r
5898 bufferDescription.lpwfxFormat = &waveFormat;
\r
5900 // Create the capture buffer.
\r
5901 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
5902 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
5903 if ( FAILED( result ) ) {
\r
5905 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
5906 errorText_ = errorStream_.str();
\r
5910 // Get the buffer size ... might be different from what we specified.
\r
5911 DSCBCAPS dscbcaps;
\r
5912 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
5913 result = buffer->GetCaps( &dscbcaps );
\r
5914 if ( FAILED( result ) ) {
\r
5916 buffer->Release();
\r
5917 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
5918 errorText_ = errorStream_.str();
\r
5922 dsBufferSize = dscbcaps.dwBufferBytes;
\r
5924 // NOTE: We could have a problem here if this is a duplex stream
\r
5925 // and the play and capture hardware buffer sizes are different
\r
5926 // (I'm actually not sure if that is a problem or not).
\r
5927 // Currently, we are not verifying that.
\r
5929 // Lock the capture buffer
\r
5932 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
5933 if ( FAILED( result ) ) {
\r
5935 buffer->Release();
\r
5936 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
5937 errorText_ = errorStream_.str();
\r
5941 // Zero the buffer
\r
5942 ZeroMemory( audioPtr, dataLen );
\r
5944 // Unlock the buffer
\r
5945 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
5946 if ( FAILED( result ) ) {
\r
5948 buffer->Release();
\r
5949 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
5950 errorText_ = errorStream_.str();
\r
5954 ohandle = (void *) input;
\r
5955 bhandle = (void *) buffer;
\r
5958 // Set various stream parameters
\r
5959 DsHandle *handle = 0;
\r
5960 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
5961 stream_.nUserChannels[mode] = channels;
\r
5962 stream_.bufferSize = *bufferSize;
\r
5963 stream_.channelOffset[mode] = firstChannel;
\r
5964 stream_.deviceInterleaved[mode] = true;
\r
5965 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
5966 else stream_.userInterleaved = true;
\r
5968 // Set flag for buffer conversion
\r
5969 stream_.doConvertBuffer[mode] = false;
\r
5970 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
5971 stream_.doConvertBuffer[mode] = true;
\r
5972 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
5973 stream_.doConvertBuffer[mode] = true;
\r
5974 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5975 stream_.nUserChannels[mode] > 1 )
\r
5976 stream_.doConvertBuffer[mode] = true;
\r
5978 // Allocate necessary internal buffers
\r
5979 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5980 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5981 if ( stream_.userBuffer[mode] == NULL ) {
\r
5982 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
5986 if ( stream_.doConvertBuffer[mode] ) {
\r
5988 bool makeBuffer = true;
\r
5989 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5990 if ( mode == INPUT ) {
\r
5991 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5992 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5993 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
5997 if ( makeBuffer ) {
\r
5998 bufferBytes *= *bufferSize;
\r
5999 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6000 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6001 if ( stream_.deviceBuffer == NULL ) {
\r
6002 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
6008 // Allocate our DsHandle structures for the stream.
\r
6009 if ( stream_.apiHandle == 0 ) {
\r
6011 handle = new DsHandle;
\r
6013 catch ( std::bad_alloc& ) {
\r
6014 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
6018 // Create a manual-reset event.
\r
6019 handle->condition = CreateEvent( NULL, // no security
\r
6020 TRUE, // manual-reset
\r
6021 FALSE, // non-signaled initially
\r
6022 NULL ); // unnamed
\r
6023 stream_.apiHandle = (void *) handle;
\r
6026 handle = (DsHandle *) stream_.apiHandle;
\r
6027 handle->id[mode] = ohandle;
\r
6028 handle->buffer[mode] = bhandle;
\r
6029 handle->dsBufferSize[mode] = dsBufferSize;
\r
6030 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
6032 stream_.device[mode] = device;
\r
6033 stream_.state = STREAM_STOPPED;
\r
6034 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
6035 // We had already set up an output stream.
\r
6036 stream_.mode = DUPLEX;
\r
6038 stream_.mode = mode;
\r
6039 stream_.nBuffers = nBuffers;
\r
6040 stream_.sampleRate = sampleRate;
\r
6042 // Setup the buffer conversion information structure.
\r
6043 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6045 // Setup the callback thread.
\r
6046 if ( stream_.callbackInfo.isRunning == false ) {
\r
6047 unsigned threadId;
\r
6048 stream_.callbackInfo.isRunning = true;
\r
6049 stream_.callbackInfo.object = (void *) this;
\r
6050 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
6051 &stream_.callbackInfo, 0, &threadId );
\r
6052 if ( stream_.callbackInfo.thread == 0 ) {
\r
6053 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
6057 // Boost DS thread priority
\r
6058 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
6064 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6065 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6066 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6067 if ( buffer ) buffer->Release();
\r
6068 object->Release();
\r
6070 if ( handle->buffer[1] ) {
\r
6071 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6072 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6073 if ( buffer ) buffer->Release();
\r
6074 object->Release();
\r
6076 CloseHandle( handle->condition );
\r
6078 stream_.apiHandle = 0;
\r
6081 for ( int i=0; i<2; i++ ) {
\r
6082 if ( stream_.userBuffer[i] ) {
\r
6083 free( stream_.userBuffer[i] );
\r
6084 stream_.userBuffer[i] = 0;
\r
6088 if ( stream_.deviceBuffer ) {
\r
6089 free( stream_.deviceBuffer );
\r
6090 stream_.deviceBuffer = 0;
\r
6093 stream_.state = STREAM_CLOSED;
\r
6097 void RtApiDs :: closeStream()
\r
6099 if ( stream_.state == STREAM_CLOSED ) {
\r
6100 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
6101 error( RtAudioError::WARNING );
\r
6105 // Stop the callback thread.
\r
6106 stream_.callbackInfo.isRunning = false;
\r
6107 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
6108 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
6110 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6112 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
6113 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
6114 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6117 buffer->Release();
\r
6119 object->Release();
\r
6121 if ( handle->buffer[1] ) {
\r
6122 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
6123 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6126 buffer->Release();
\r
6128 object->Release();
\r
6130 CloseHandle( handle->condition );
\r
6132 stream_.apiHandle = 0;
\r
6135 for ( int i=0; i<2; i++ ) {
\r
6136 if ( stream_.userBuffer[i] ) {
\r
6137 free( stream_.userBuffer[i] );
\r
6138 stream_.userBuffer[i] = 0;
\r
6142 if ( stream_.deviceBuffer ) {
\r
6143 free( stream_.deviceBuffer );
\r
6144 stream_.deviceBuffer = 0;
\r
6147 stream_.mode = UNINITIALIZED;
\r
6148 stream_.state = STREAM_CLOSED;
\r
6151 void RtApiDs :: startStream()
\r
6154 if ( stream_.state == STREAM_RUNNING ) {
\r
6155 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
6156 error( RtAudioError::WARNING );
\r
6160 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6162 // Increase scheduler frequency on lesser windows (a side-effect of
\r
6163 // increasing timer accuracy). On greater windows (Win2K or later),
\r
6164 // this is already in effect.
\r
6165 timeBeginPeriod( 1 );
\r
6167 buffersRolling = false;
\r
6168 duplexPrerollBytes = 0;
\r
6170 if ( stream_.mode == DUPLEX ) {
\r
6171 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
6172 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
6175 HRESULT result = 0;
\r
6176 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6178 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6179 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
6180 if ( FAILED( result ) ) {
\r
6181 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
6182 errorText_ = errorStream_.str();
\r
6187 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6189 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6190 result = buffer->Start( DSCBSTART_LOOPING );
\r
6191 if ( FAILED( result ) ) {
\r
6192 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
6193 errorText_ = errorStream_.str();
\r
6198 handle->drainCounter = 0;
\r
6199 handle->internalDrain = false;
\r
6200 ResetEvent( handle->condition );
\r
6201 stream_.state = STREAM_RUNNING;
\r
6204 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6207 void RtApiDs :: stopStream()
\r
6210 if ( stream_.state == STREAM_STOPPED ) {
\r
6211 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
\r
6212 error( RtAudioError::WARNING );
\r
6216 HRESULT result = 0;
\r
6219 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6220 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6221 if ( handle->drainCounter == 0 ) {
\r
6222 handle->drainCounter = 2;
\r
6223 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
6226 stream_.state = STREAM_STOPPED;
\r
6228 MUTEX_LOCK( &stream_.mutex );
\r
6230 // Stop the buffer and clear memory
\r
6231 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6232 result = buffer->Stop();
\r
6233 if ( FAILED( result ) ) {
\r
6234 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
\r
6235 errorText_ = errorStream_.str();
\r
6239 // Lock the buffer and clear it so that if we start to play again,
\r
6240 // we won't have old data playing.
\r
6241 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6242 if ( FAILED( result ) ) {
\r
6243 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
\r
6244 errorText_ = errorStream_.str();
\r
6248 // Zero the DS buffer
\r
6249 ZeroMemory( audioPtr, dataLen );
\r
6251 // Unlock the DS buffer
\r
6252 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6253 if ( FAILED( result ) ) {
\r
6254 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
\r
6255 errorText_ = errorStream_.str();
\r
6259 // If we start playing again, we must begin at beginning of buffer.
\r
6260 handle->bufferPointer[0] = 0;
\r
6263 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6264 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6268 stream_.state = STREAM_STOPPED;
\r
6270 if ( stream_.mode != DUPLEX )
\r
6271 MUTEX_LOCK( &stream_.mutex );
\r
6273 result = buffer->Stop();
\r
6274 if ( FAILED( result ) ) {
\r
6275 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
\r
6276 errorText_ = errorStream_.str();
\r
6280 // Lock the buffer and clear it so that if we start to play again,
\r
6281 // we won't have old data playing.
\r
6282 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
\r
6283 if ( FAILED( result ) ) {
\r
6284 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
\r
6285 errorText_ = errorStream_.str();
\r
6289 // Zero the DS buffer
\r
6290 ZeroMemory( audioPtr, dataLen );
\r
6292 // Unlock the DS buffer
\r
6293 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
6294 if ( FAILED( result ) ) {
\r
6295 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
\r
6296 errorText_ = errorStream_.str();
\r
6300 // If we start recording again, we must begin at beginning of buffer.
\r
6301 handle->bufferPointer[1] = 0;
\r
6305 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
\r
6306 MUTEX_UNLOCK( &stream_.mutex );
\r
6308 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
6311 void RtApiDs :: abortStream()
\r
6314 if ( stream_.state == STREAM_STOPPED ) {
\r
6315 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
6316 error( RtAudioError::WARNING );
\r
6320 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6321 handle->drainCounter = 2;
\r
6326 void RtApiDs :: callbackEvent()
\r
6328 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6329 Sleep( 50 ); // sleep 50 milliseconds
\r
6333 if ( stream_.state == STREAM_CLOSED ) {
\r
6334 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6335 error( RtAudioError::WARNING );
\r
6339 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6340 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6342 // Check if we were draining the stream and signal is finished.
\r
6343 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6345 stream_.state = STREAM_STOPPING;
\r
6346 if ( handle->internalDrain == false )
\r
6347 SetEvent( handle->condition );
\r
6353 // Invoke user callback to get fresh output data UNLESS we are
\r
6354 // draining stream.
\r
6355 if ( handle->drainCounter == 0 ) {
\r
6356 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6357 double streamTime = getStreamTime();
\r
6358 RtAudioStreamStatus status = 0;
\r
6359 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6360 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6361 handle->xrun[0] = false;
\r
6363 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6364 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6365 handle->xrun[1] = false;
\r
6367 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6368 stream_.bufferSize, streamTime, status, info->userData );
\r
6369 if ( cbReturnValue == 2 ) {
\r
6370 stream_.state = STREAM_STOPPING;
\r
6371 handle->drainCounter = 2;
\r
6375 else if ( cbReturnValue == 1 ) {
\r
6376 handle->drainCounter = 1;
\r
6377 handle->internalDrain = true;
\r
6382 DWORD currentWritePointer, safeWritePointer;
\r
6383 DWORD currentReadPointer, safeReadPointer;
\r
6384 UINT nextWritePointer;
\r
6386 LPVOID buffer1 = NULL;
\r
6387 LPVOID buffer2 = NULL;
\r
6388 DWORD bufferSize1 = 0;
\r
6389 DWORD bufferSize2 = 0;
\r
6394 MUTEX_LOCK( &stream_.mutex );
\r
6395 if ( stream_.state == STREAM_STOPPED ) {
\r
6396 MUTEX_UNLOCK( &stream_.mutex );
\r
6400 if ( buffersRolling == false ) {
\r
6401 if ( stream_.mode == DUPLEX ) {
\r
6402 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6404 // It takes a while for the devices to get rolling. As a result,
\r
6405 // there's no guarantee that the capture and write device pointers
\r
6406 // will move in lockstep. Wait here for both devices to start
\r
6407 // rolling, and then set our buffer pointers accordingly.
\r
6408 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6409 // bytes later than the write buffer.
\r
6411 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6412 // take place between the two GetCurrentPosition calls... but I'm
\r
6413 // really not sure how to solve the problem. Temporarily boost to
\r
6414 // Realtime priority, maybe; but I'm not sure what priority the
\r
6415 // DirectSound service threads run at. We *should* be roughly
\r
6416 // within a ms or so of correct.
\r
6418 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6419 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6421 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6423 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6424 if ( FAILED( result ) ) {
\r
6425 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6426 errorText_ = errorStream_.str();
\r
6427 MUTEX_UNLOCK( &stream_.mutex );
\r
6428 error( RtAudioError::SYSTEM_ERROR );
\r
6431 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6432 if ( FAILED( result ) ) {
\r
6433 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6434 errorText_ = errorStream_.str();
\r
6435 MUTEX_UNLOCK( &stream_.mutex );
\r
6436 error( RtAudioError::SYSTEM_ERROR );
\r
6440 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6441 if ( FAILED( result ) ) {
\r
6442 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6443 errorText_ = errorStream_.str();
\r
6444 MUTEX_UNLOCK( &stream_.mutex );
\r
6445 error( RtAudioError::SYSTEM_ERROR );
\r
6448 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6449 if ( FAILED( result ) ) {
\r
6450 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6451 errorText_ = errorStream_.str();
\r
6452 MUTEX_UNLOCK( &stream_.mutex );
\r
6453 error( RtAudioError::SYSTEM_ERROR );
\r
6456 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6460 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6462 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6463 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6464 handle->bufferPointer[1] = safeReadPointer;
\r
6466 else if ( stream_.mode == OUTPUT ) {
\r
6468 // Set the proper nextWritePosition after initial startup.
\r
6469 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6470 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6471 if ( FAILED( result ) ) {
\r
6472 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6473 errorText_ = errorStream_.str();
\r
6474 MUTEX_UNLOCK( &stream_.mutex );
\r
6475 error( RtAudioError::SYSTEM_ERROR );
\r
6478 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6479 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6482 buffersRolling = true;
\r
6485 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6487 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6489 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6490 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6491 bufferBytes *= formatBytes( stream_.userFormat );
\r
6492 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6495 // Setup parameters and do buffer conversion if necessary.
\r
6496 if ( stream_.doConvertBuffer[0] ) {
\r
6497 buffer = stream_.deviceBuffer;
\r
6498 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6499 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6500 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6503 buffer = stream_.userBuffer[0];
\r
6504 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6505 bufferBytes *= formatBytes( stream_.userFormat );
\r
6508 // No byte swapping necessary in DirectSound implementation.
\r
6510 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6511 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6513 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6514 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6516 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6517 nextWritePointer = handle->bufferPointer[0];
\r
6519 DWORD endWrite, leadPointer;
\r
6521 // Find out where the read and "safe write" pointers are.
\r
6522 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6523 if ( FAILED( result ) ) {
\r
6524 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6525 errorText_ = errorStream_.str();
\r
6526 MUTEX_UNLOCK( &stream_.mutex );
\r
6527 error( RtAudioError::SYSTEM_ERROR );
\r
6531 // We will copy our output buffer into the region between
\r
6532 // safeWritePointer and leadPointer. If leadPointer is not
\r
6533 // beyond the next endWrite position, wait until it is.
\r
6534 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6535 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6536 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6537 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6538 endWrite = nextWritePointer + bufferBytes;
\r
6540 // Check whether the entire write region is behind the play pointer.
\r
6541 if ( leadPointer >= endWrite ) break;
\r
6543 // If we are here, then we must wait until the leadPointer advances
\r
6544 // beyond the end of our next write region. We use the
\r
6545 // Sleep() function to suspend operation until that happens.
\r
6546 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6547 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6548 if ( millis < 1.0 ) millis = 1.0;
\r
6549 Sleep( (DWORD) millis );
\r
6552 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6553 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6554 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6555 handle->xrun[0] = true;
\r
6556 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6557 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6558 handle->bufferPointer[0] = nextWritePointer;
\r
6559 endWrite = nextWritePointer + bufferBytes;
\r
6562 // Lock free space in the buffer
\r
6563 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6564 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6565 if ( FAILED( result ) ) {
\r
6566 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6567 errorText_ = errorStream_.str();
\r
6568 MUTEX_UNLOCK( &stream_.mutex );
\r
6569 error( RtAudioError::SYSTEM_ERROR );
\r
6573 // Copy our buffer into the DS buffer
\r
6574 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6575 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6577 // Update our buffer offset and unlock sound buffer
\r
6578 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6579 if ( FAILED( result ) ) {
\r
6580 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6581 errorText_ = errorStream_.str();
\r
6582 MUTEX_UNLOCK( &stream_.mutex );
\r
6583 error( RtAudioError::SYSTEM_ERROR );
\r
6586 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6587 handle->bufferPointer[0] = nextWritePointer;
\r
6590 // Don't bother draining input
\r
6591 if ( handle->drainCounter ) {
\r
6592 handle->drainCounter++;
\r
6596 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6598 // Setup parameters.
\r
6599 if ( stream_.doConvertBuffer[1] ) {
\r
6600 buffer = stream_.deviceBuffer;
\r
6601 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6602 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6605 buffer = stream_.userBuffer[1];
\r
6606 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6607 bufferBytes *= formatBytes( stream_.userFormat );
\r
6610 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6611 long nextReadPointer = handle->bufferPointer[1];
\r
6612 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6614 // Find out where the write and "safe read" pointers are.
\r
6615 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6616 if ( FAILED( result ) ) {
\r
6617 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6618 errorText_ = errorStream_.str();
\r
6619 MUTEX_UNLOCK( &stream_.mutex );
\r
6620 error( RtAudioError::SYSTEM_ERROR );
\r
6624 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6625 DWORD endRead = nextReadPointer + bufferBytes;
\r
6627 // Handling depends on whether we are INPUT or DUPLEX.
\r
6628 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6629 // then a wait here will drag the write pointers into the forbidden zone.
\r
6631 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6632 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6633 // practical way to sync up the read and write pointers reliably, given the
\r
6634 // the very complex relationship between phase and increment of the read and write
\r
6637 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6638 // provide a pre-roll period of 0.5 seconds in which we return
\r
6639 // zeros from the read buffer while the pointers sync up.
\r
6641 if ( stream_.mode == DUPLEX ) {
\r
6642 if ( safeReadPointer < endRead ) {
\r
6643 if ( duplexPrerollBytes <= 0 ) {
\r
6644 // Pre-roll time over. Be more agressive.
\r
6645 int adjustment = endRead-safeReadPointer;
\r
6647 handle->xrun[1] = true;
\r
6649 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6650 // and perform fine adjustments later.
\r
6651 // - small adjustments: back off by twice as much.
\r
6652 if ( adjustment >= 2*bufferBytes )
\r
6653 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6655 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6657 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6661 // In pre=roll time. Just do it.
\r
6662 nextReadPointer = safeReadPointer - bufferBytes;
\r
6663 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6665 endRead = nextReadPointer + bufferBytes;
\r
6668 else { // mode == INPUT
\r
6669 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6670 // See comments for playback.
\r
6671 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6672 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6673 if ( millis < 1.0 ) millis = 1.0;
\r
6674 Sleep( (DWORD) millis );
\r
6676 // Wake up and find out where we are now.
\r
6677 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6678 if ( FAILED( result ) ) {
\r
6679 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6680 errorText_ = errorStream_.str();
\r
6681 MUTEX_UNLOCK( &stream_.mutex );
\r
6682 error( RtAudioError::SYSTEM_ERROR );
\r
6686 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6690 // Lock free space in the buffer
\r
6691 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6692 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6693 if ( FAILED( result ) ) {
\r
6694 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6695 errorText_ = errorStream_.str();
\r
6696 MUTEX_UNLOCK( &stream_.mutex );
\r
6697 error( RtAudioError::SYSTEM_ERROR );
\r
6701 if ( duplexPrerollBytes <= 0 ) {
\r
6702 // Copy our buffer into the DS buffer
\r
6703 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6704 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6707 memset( buffer, 0, bufferSize1 );
\r
6708 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6709 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6712 // Update our buffer offset and unlock sound buffer
\r
6713 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6714 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6715 if ( FAILED( result ) ) {
\r
6716 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6717 errorText_ = errorStream_.str();
\r
6718 MUTEX_UNLOCK( &stream_.mutex );
\r
6719 error( RtAudioError::SYSTEM_ERROR );
\r
6722 handle->bufferPointer[1] = nextReadPointer;
\r
6724 // No byte swapping necessary in DirectSound implementation.
\r
6726 // If necessary, convert 8-bit data from unsigned to signed.
\r
6727 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6728 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6730 // Do buffer conversion if necessary.
\r
6731 if ( stream_.doConvertBuffer[1] )
\r
6732 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6736 MUTEX_UNLOCK( &stream_.mutex );
\r
6737 RtApi::tickStreamTime();
\r
6740 // Definitions for utility functions and callbacks
\r
6741 // specific to the DirectSound implementation.
\r
6743 static unsigned __stdcall callbackHandler( void *ptr )
\r
6745 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6746 RtApiDs *object = (RtApiDs *) info->object;
\r
6747 bool* isRunning = &info->isRunning;
\r
6749 while ( *isRunning == true ) {
\r
6750 object->callbackEvent();
\r
6753 _endthreadex( 0 );
\r
6757 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6758 LPCTSTR description,
\r
6759 LPCTSTR /*module*/,
\r
6760 LPVOID lpContext )
\r
6762 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6763 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6766 bool validDevice = false;
\r
6767 if ( probeInfo.isInput == true ) {
\r
6769 LPDIRECTSOUNDCAPTURE object;
\r
6771 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6772 if ( hr != DS_OK ) return TRUE;
\r
6774 caps.dwSize = sizeof(caps);
\r
6775 hr = object->GetCaps( &caps );
\r
6776 if ( hr == DS_OK ) {
\r
6777 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6778 validDevice = true;
\r
6780 object->Release();
\r
6784 LPDIRECTSOUND object;
\r
6785 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6786 if ( hr != DS_OK ) return TRUE;
\r
6788 caps.dwSize = sizeof(caps);
\r
6789 hr = object->GetCaps( &caps );
\r
6790 if ( hr == DS_OK ) {
\r
6791 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6792 validDevice = true;
\r
6794 object->Release();
\r
6797 // If good device, then save its name and guid.
\r
6798 std::string name = convertCharPointerToStdString( description );
\r
6799 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6800 if ( lpguid == NULL )
\r
6801 name = "Default Device";
\r
6802 if ( validDevice ) {
\r
6803 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6804 if ( dsDevices[i].name == name ) {
\r
6805 dsDevices[i].found = true;
\r
6806 if ( probeInfo.isInput ) {
\r
6807 dsDevices[i].id[1] = lpguid;
\r
6808 dsDevices[i].validId[1] = true;
\r
6811 dsDevices[i].id[0] = lpguid;
\r
6812 dsDevices[i].validId[0] = true;
\r
6819 device.name = name;
\r
6820 device.found = true;
\r
6821 if ( probeInfo.isInput ) {
\r
6822 device.id[1] = lpguid;
\r
6823 device.validId[1] = true;
\r
6826 device.id[0] = lpguid;
\r
6827 device.validId[0] = true;
\r
6829 dsDevices.push_back( device );
\r
6835 static const char* getErrorString( int code )
\r
6839 case DSERR_ALLOCATED:
\r
6840 return "Already allocated";
\r
6842 case DSERR_CONTROLUNAVAIL:
\r
6843 return "Control unavailable";
\r
6845 case DSERR_INVALIDPARAM:
\r
6846 return "Invalid parameter";
\r
6848 case DSERR_INVALIDCALL:
\r
6849 return "Invalid call";
\r
6851 case DSERR_GENERIC:
\r
6852 return "Generic error";
\r
6854 case DSERR_PRIOLEVELNEEDED:
\r
6855 return "Priority level needed";
\r
6857 case DSERR_OUTOFMEMORY:
\r
6858 return "Out of memory";
\r
6860 case DSERR_BADFORMAT:
\r
6861 return "The sample rate or the channel format is not supported";
\r
6863 case DSERR_UNSUPPORTED:
\r
6864 return "Not supported";
\r
6866 case DSERR_NODRIVER:
\r
6867 return "No driver";
\r
6869 case DSERR_ALREADYINITIALIZED:
\r
6870 return "Already initialized";
\r
6872 case DSERR_NOAGGREGATION:
\r
6873 return "No aggregation";
\r
6875 case DSERR_BUFFERLOST:
\r
6876 return "Buffer lost";
\r
6878 case DSERR_OTHERAPPHASPRIO:
\r
6879 return "Another application already has priority";
\r
6881 case DSERR_UNINITIALIZED:
\r
6882 return "Uninitialized";
\r
6885 return "DirectSound unknown error";
\r
6888 //******************** End of __WINDOWS_DS__ *********************//
\r
6892 #if defined(__LINUX_ALSA__)
\r
6894 #include <alsa/asoundlib.h>
\r
6895 #include <unistd.h>
\r
6897 // A structure to hold various information related to the ALSA API
\r
6898 // implementation.
\r
6899 struct AlsaHandle {
\r
6900 snd_pcm_t *handles[2];
\r
6901 bool synchronized;
\r
6903 pthread_cond_t runnable_cv;
\r
6907 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6910 static void *alsaCallbackHandler( void * ptr );
\r
6912 RtApiAlsa :: RtApiAlsa()
\r
6914 // Nothing to do here.
\r
6917 RtApiAlsa :: ~RtApiAlsa()
\r
6919 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6922 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6924 unsigned nDevices = 0;
\r
6925 int result, subdevice, card;
\r
6927 snd_ctl_t *handle;
\r
6929 // Count cards and devices
\r
6931 snd_card_next( &card );
\r
6932 while ( card >= 0 ) {
\r
6933 sprintf( name, "hw:%d", card );
\r
6934 result = snd_ctl_open( &handle, name, 0 );
\r
6935 if ( result < 0 ) {
\r
6936 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6937 errorText_ = errorStream_.str();
\r
6938 error( RtAudioError::WARNING );
\r
6943 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6944 if ( result < 0 ) {
\r
6945 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6946 errorText_ = errorStream_.str();
\r
6947 error( RtAudioError::WARNING );
\r
6950 if ( subdevice < 0 )
\r
6955 snd_ctl_close( handle );
\r
6956 snd_card_next( &card );
\r
6959 result = snd_ctl_open( &handle, "default", 0 );
\r
6960 if (result == 0) {
\r
6962 snd_ctl_close( handle );
\r
6968 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6970 RtAudio::DeviceInfo info;
\r
6971 info.probed = false;
\r
6973 unsigned nDevices = 0;
\r
6974 int result, subdevice, card;
\r
6976 snd_ctl_t *chandle;
\r
6978 // Count cards and devices
\r
6981 snd_card_next( &card );
\r
6982 while ( card >= 0 ) {
\r
6983 sprintf( name, "hw:%d", card );
\r
6984 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6985 if ( result < 0 ) {
\r
6986 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6987 errorText_ = errorStream_.str();
\r
6988 error( RtAudioError::WARNING );
\r
6993 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6994 if ( result < 0 ) {
\r
6995 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6996 errorText_ = errorStream_.str();
\r
6997 error( RtAudioError::WARNING );
\r
7000 if ( subdevice < 0 ) break;
\r
7001 if ( nDevices == device ) {
\r
7002 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7008 snd_ctl_close( chandle );
\r
7009 snd_card_next( &card );
\r
7012 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7013 if ( result == 0 ) {
\r
7014 if ( nDevices == device ) {
\r
7015 strcpy( name, "default" );
\r
7021 if ( nDevices == 0 ) {
\r
7022 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
7023 error( RtAudioError::INVALID_USE );
\r
7027 if ( device >= nDevices ) {
\r
7028 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
7029 error( RtAudioError::INVALID_USE );
\r
7035 // If a stream is already open, we cannot probe the stream devices.
\r
7036 // Thus, use the saved results.
\r
7037 if ( stream_.state != STREAM_CLOSED &&
\r
7038 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
7039 snd_ctl_close( chandle );
\r
7040 if ( device >= devices_.size() ) {
\r
7041 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
7042 error( RtAudioError::WARNING );
\r
7045 return devices_[ device ];
\r
7048 int openMode = SND_PCM_ASYNC;
\r
7049 snd_pcm_stream_t stream;
\r
7050 snd_pcm_info_t *pcminfo;
\r
7051 snd_pcm_info_alloca( &pcminfo );
\r
7052 snd_pcm_t *phandle;
\r
7053 snd_pcm_hw_params_t *params;
\r
7054 snd_pcm_hw_params_alloca( ¶ms );
\r
7056 // First try for playback unless default device (which has subdev -1)
\r
7057 stream = SND_PCM_STREAM_PLAYBACK;
\r
7058 snd_pcm_info_set_stream( pcminfo, stream );
\r
7059 if ( subdevice != -1 ) {
\r
7060 snd_pcm_info_set_device( pcminfo, subdevice );
\r
7061 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
7063 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7064 if ( result < 0 ) {
\r
7065 // Device probably doesn't support playback.
\r
7066 goto captureProbe;
\r
7070 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
7071 if ( result < 0 ) {
\r
7072 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7073 errorText_ = errorStream_.str();
\r
7074 error( RtAudioError::WARNING );
\r
7075 goto captureProbe;
\r
7078 // The device is open ... fill the parameter structure.
\r
7079 result = snd_pcm_hw_params_any( phandle, params );
\r
7080 if ( result < 0 ) {
\r
7081 snd_pcm_close( phandle );
\r
7082 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7083 errorText_ = errorStream_.str();
\r
7084 error( RtAudioError::WARNING );
\r
7085 goto captureProbe;
\r
7088 // Get output channel information.
\r
7089 unsigned int value;
\r
7090 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7091 if ( result < 0 ) {
\r
7092 snd_pcm_close( phandle );
\r
7093 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
7094 errorText_ = errorStream_.str();
\r
7095 error( RtAudioError::WARNING );
\r
7096 goto captureProbe;
\r
7098 info.outputChannels = value;
\r
7099 snd_pcm_close( phandle );
\r
7102 stream = SND_PCM_STREAM_CAPTURE;
\r
7103 snd_pcm_info_set_stream( pcminfo, stream );
\r
7105 // Now try for capture unless default device (with subdev = -1)
\r
7106 if ( subdevice != -1 ) {
\r
7107 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
7108 snd_ctl_close( chandle );
\r
7109 if ( result < 0 ) {
\r
7110 // Device probably doesn't support capture.
\r
7111 if ( info.outputChannels == 0 ) return info;
\r
7112 goto probeParameters;
\r
7116 snd_ctl_close( chandle );
\r
7118 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7119 if ( result < 0 ) {
\r
7120 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7121 errorText_ = errorStream_.str();
\r
7122 error( RtAudioError::WARNING );
\r
7123 if ( info.outputChannels == 0 ) return info;
\r
7124 goto probeParameters;
\r
7127 // The device is open ... fill the parameter structure.
\r
7128 result = snd_pcm_hw_params_any( phandle, params );
\r
7129 if ( result < 0 ) {
\r
7130 snd_pcm_close( phandle );
\r
7131 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7132 errorText_ = errorStream_.str();
\r
7133 error( RtAudioError::WARNING );
\r
7134 if ( info.outputChannels == 0 ) return info;
\r
7135 goto probeParameters;
\r
7138 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7139 if ( result < 0 ) {
\r
7140 snd_pcm_close( phandle );
\r
7141 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7142 errorText_ = errorStream_.str();
\r
7143 error( RtAudioError::WARNING );
\r
7144 if ( info.outputChannels == 0 ) return info;
\r
7145 goto probeParameters;
\r
7147 info.inputChannels = value;
\r
7148 snd_pcm_close( phandle );
\r
7150 // If device opens for both playback and capture, we determine the channels.
\r
7151 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7152 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7154 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7155 if ( device == 0 && info.outputChannels > 0 )
\r
7156 info.isDefaultOutput = true;
\r
7157 if ( device == 0 && info.inputChannels > 0 )
\r
7158 info.isDefaultInput = true;
\r
7161 // At this point, we just need to figure out the supported data
\r
7162 // formats and sample rates. We'll proceed by opening the device in
\r
7163 // the direction with the maximum number of channels, or playback if
\r
7164 // they are equal. This might limit our sample rate options, but so
\r
7167 if ( info.outputChannels >= info.inputChannels )
\r
7168 stream = SND_PCM_STREAM_PLAYBACK;
\r
7170 stream = SND_PCM_STREAM_CAPTURE;
\r
7171 snd_pcm_info_set_stream( pcminfo, stream );
\r
7173 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7174 if ( result < 0 ) {
\r
7175 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7176 errorText_ = errorStream_.str();
\r
7177 error( RtAudioError::WARNING );
\r
7181 // The device is open ... fill the parameter structure.
\r
7182 result = snd_pcm_hw_params_any( phandle, params );
\r
7183 if ( result < 0 ) {
\r
7184 snd_pcm_close( phandle );
\r
7185 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7186 errorText_ = errorStream_.str();
\r
7187 error( RtAudioError::WARNING );
\r
7191 // Test our discrete set of sample rate values.
\r
7192 info.sampleRates.clear();
\r
7193 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7194 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7195 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7197 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7198 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7201 if ( info.sampleRates.size() == 0 ) {
\r
7202 snd_pcm_close( phandle );
\r
7203 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7204 errorText_ = errorStream_.str();
\r
7205 error( RtAudioError::WARNING );
\r
7209 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7210 snd_pcm_format_t format;
\r
7211 info.nativeFormats = 0;
\r
7212 format = SND_PCM_FORMAT_S8;
\r
7213 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7214 info.nativeFormats |= RTAUDIO_SINT8;
\r
7215 format = SND_PCM_FORMAT_S16;
\r
7216 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7217 info.nativeFormats |= RTAUDIO_SINT16;
\r
7218 format = SND_PCM_FORMAT_S24;
\r
7219 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7220 info.nativeFormats |= RTAUDIO_SINT24;
\r
7221 format = SND_PCM_FORMAT_S32;
\r
7222 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7223 info.nativeFormats |= RTAUDIO_SINT32;
\r
7224 format = SND_PCM_FORMAT_FLOAT;
\r
7225 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7226 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7227 format = SND_PCM_FORMAT_FLOAT64;
\r
7228 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7229 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7231 // Check that we have at least one supported format
\r
7232 if ( info.nativeFormats == 0 ) {
\r
7233 snd_pcm_close( phandle );
\r
7234 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7235 errorText_ = errorStream_.str();
\r
7236 error( RtAudioError::WARNING );
\r
7240 // Get the device name
\r
7242 result = snd_card_get_name( card, &cardname );
\r
7243 if ( result >= 0 ) {
\r
7244 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7249 // That's all ... close the device and return
\r
7250 snd_pcm_close( phandle );
\r
7251 info.probed = true;
\r
7255 void RtApiAlsa :: saveDeviceInfo( void )
\r
7259 unsigned int nDevices = getDeviceCount();
\r
7260 devices_.resize( nDevices );
\r
7261 for ( unsigned int i=0; i<nDevices; i++ )
\r
7262 devices_[i] = getDeviceInfo( i );
\r
7265 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7266 unsigned int firstChannel, unsigned int sampleRate,
\r
7267 RtAudioFormat format, unsigned int *bufferSize,
\r
7268 RtAudio::StreamOptions *options )
\r
7271 #if defined(__RTAUDIO_DEBUG__)
\r
7272 snd_output_t *out;
\r
7273 snd_output_stdio_attach(&out, stderr, 0);
\r
7276 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7278 unsigned nDevices = 0;
\r
7279 int result, subdevice, card;
\r
7281 snd_ctl_t *chandle;
\r
7283 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7284 snprintf(name, sizeof(name), "%s", "default");
\r
7286 // Count cards and devices
\r
7288 snd_card_next( &card );
\r
7289 while ( card >= 0 ) {
\r
7290 sprintf( name, "hw:%d", card );
\r
7291 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7292 if ( result < 0 ) {
\r
7293 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7294 errorText_ = errorStream_.str();
\r
7299 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7300 if ( result < 0 ) break;
\r
7301 if ( subdevice < 0 ) break;
\r
7302 if ( nDevices == device ) {
\r
7303 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7304 snd_ctl_close( chandle );
\r
7309 snd_ctl_close( chandle );
\r
7310 snd_card_next( &card );
\r
7313 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7314 if ( result == 0 ) {
\r
7315 if ( nDevices == device ) {
\r
7316 strcpy( name, "default" );
\r
7322 if ( nDevices == 0 ) {
\r
7323 // This should not happen because a check is made before this function is called.
\r
7324 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7328 if ( device >= nDevices ) {
\r
7329 // This should not happen because a check is made before this function is called.
\r
7330 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7337 // The getDeviceInfo() function will not work for a device that is
\r
7338 // already open. Thus, we'll probe the system before opening a
\r
7339 // stream and save the results for use by getDeviceInfo().
\r
7340 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7341 this->saveDeviceInfo();
\r
7343 snd_pcm_stream_t stream;
\r
7344 if ( mode == OUTPUT )
\r
7345 stream = SND_PCM_STREAM_PLAYBACK;
\r
7347 stream = SND_PCM_STREAM_CAPTURE;
\r
7349 snd_pcm_t *phandle;
\r
7350 int openMode = SND_PCM_ASYNC;
\r
7351 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7352 if ( result < 0 ) {
\r
7353 if ( mode == OUTPUT )
\r
7354 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7356 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7357 errorText_ = errorStream_.str();
\r
7361 // Fill the parameter structure.
\r
7362 snd_pcm_hw_params_t *hw_params;
\r
7363 snd_pcm_hw_params_alloca( &hw_params );
\r
7364 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7365 if ( result < 0 ) {
\r
7366 snd_pcm_close( phandle );
\r
7367 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7368 errorText_ = errorStream_.str();
\r
7372 #if defined(__RTAUDIO_DEBUG__)
\r
7373 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7374 snd_pcm_hw_params_dump( hw_params, out );
\r
7377 // Set access ... check user preference.
\r
7378 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7379 stream_.userInterleaved = false;
\r
7380 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7381 if ( result < 0 ) {
\r
7382 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7383 stream_.deviceInterleaved[mode] = true;
\r
7386 stream_.deviceInterleaved[mode] = false;
\r
7389 stream_.userInterleaved = true;
\r
7390 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7391 if ( result < 0 ) {
\r
7392 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7393 stream_.deviceInterleaved[mode] = false;
\r
7396 stream_.deviceInterleaved[mode] = true;
\r
7399 if ( result < 0 ) {
\r
7400 snd_pcm_close( phandle );
\r
7401 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7402 errorText_ = errorStream_.str();
\r
7406 // Determine how to set the device format.
\r
7407 stream_.userFormat = format;
\r
7408 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7410 if ( format == RTAUDIO_SINT8 )
\r
7411 deviceFormat = SND_PCM_FORMAT_S8;
\r
7412 else if ( format == RTAUDIO_SINT16 )
\r
7413 deviceFormat = SND_PCM_FORMAT_S16;
\r
7414 else if ( format == RTAUDIO_SINT24 )
\r
7415 deviceFormat = SND_PCM_FORMAT_S24;
\r
7416 else if ( format == RTAUDIO_SINT32 )
\r
7417 deviceFormat = SND_PCM_FORMAT_S32;
\r
7418 else if ( format == RTAUDIO_FLOAT32 )
\r
7419 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7420 else if ( format == RTAUDIO_FLOAT64 )
\r
7421 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7423 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7424 stream_.deviceFormat[mode] = format;
\r
7428 // The user requested format is not natively supported by the device.
\r
7429 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7430 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7431 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7435 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7436 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7437 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7441 deviceFormat = SND_PCM_FORMAT_S32;
\r
7442 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7443 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7447 deviceFormat = SND_PCM_FORMAT_S24;
\r
7448 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7449 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7453 deviceFormat = SND_PCM_FORMAT_S16;
\r
7454 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7455 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7459 deviceFormat = SND_PCM_FORMAT_S8;
\r
7460 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7461 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7465 // If we get here, no supported format was found.
\r
7466 snd_pcm_close( phandle );
\r
7467 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7468 errorText_ = errorStream_.str();
\r
7472 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7473 if ( result < 0 ) {
\r
7474 snd_pcm_close( phandle );
\r
7475 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7476 errorText_ = errorStream_.str();
\r
7480 // Determine whether byte-swaping is necessary.
\r
7481 stream_.doByteSwap[mode] = false;
\r
7482 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7483 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7484 if ( result == 0 )
\r
7485 stream_.doByteSwap[mode] = true;
\r
7486 else if (result < 0) {
\r
7487 snd_pcm_close( phandle );
\r
7488 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7489 errorText_ = errorStream_.str();
\r
7494 // Set the sample rate.
\r
7495 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7496 if ( result < 0 ) {
\r
7497 snd_pcm_close( phandle );
\r
7498 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7499 errorText_ = errorStream_.str();
\r
7503 // Determine the number of channels for this device. We support a possible
\r
7504 // minimum device channel number > than the value requested by the user.
\r
7505 stream_.nUserChannels[mode] = channels;
\r
7506 unsigned int value;
\r
7507 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7508 unsigned int deviceChannels = value;
\r
7509 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7510 snd_pcm_close( phandle );
\r
7511 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7512 errorText_ = errorStream_.str();
\r
7516 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7517 if ( result < 0 ) {
\r
7518 snd_pcm_close( phandle );
\r
7519 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7520 errorText_ = errorStream_.str();
\r
7523 deviceChannels = value;
\r
7524 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7525 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7527 // Set the device channels.
\r
7528 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7529 if ( result < 0 ) {
\r
7530 snd_pcm_close( phandle );
\r
7531 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7532 errorText_ = errorStream_.str();
\r
7536 // Set the buffer (or period) size.
\r
7538 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7539 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7540 if ( result < 0 ) {
\r
7541 snd_pcm_close( phandle );
\r
7542 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7543 errorText_ = errorStream_.str();
\r
7546 *bufferSize = periodSize;
\r
7548 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7549 unsigned int periods = 0;
\r
7550 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7551 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7552 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7553 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7554 if ( result < 0 ) {
\r
7555 snd_pcm_close( phandle );
\r
7556 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7557 errorText_ = errorStream_.str();
\r
7561 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7562 // MUST be the same in both directions!
\r
7563 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7564 snd_pcm_close( phandle );
\r
7565 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7566 errorText_ = errorStream_.str();
\r
7570 stream_.bufferSize = *bufferSize;
\r
7572 // Install the hardware configuration
\r
7573 result = snd_pcm_hw_params( phandle, hw_params );
\r
7574 if ( result < 0 ) {
\r
7575 snd_pcm_close( phandle );
\r
7576 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7577 errorText_ = errorStream_.str();
\r
7581 #if defined(__RTAUDIO_DEBUG__)
\r
7582 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7583 snd_pcm_hw_params_dump( hw_params, out );
\r
7586 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7587 snd_pcm_sw_params_t *sw_params = NULL;
\r
7588 snd_pcm_sw_params_alloca( &sw_params );
\r
7589 snd_pcm_sw_params_current( phandle, sw_params );
\r
7590 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7591 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7592 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7594 // The following two settings were suggested by Theo Veenker
\r
7595 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7596 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7598 // here are two options for a fix
\r
7599 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7600 snd_pcm_uframes_t val;
\r
7601 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7602 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7604 result = snd_pcm_sw_params( phandle, sw_params );
\r
7605 if ( result < 0 ) {
\r
7606 snd_pcm_close( phandle );
\r
7607 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7608 errorText_ = errorStream_.str();
\r
7612 #if defined(__RTAUDIO_DEBUG__)
\r
7613 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7614 snd_pcm_sw_params_dump( sw_params, out );
\r
7617 // Set flags for buffer conversion
\r
7618 stream_.doConvertBuffer[mode] = false;
\r
7619 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7620 stream_.doConvertBuffer[mode] = true;
\r
7621 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7622 stream_.doConvertBuffer[mode] = true;
\r
7623 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7624 stream_.nUserChannels[mode] > 1 )
\r
7625 stream_.doConvertBuffer[mode] = true;
\r
7627 // Allocate the ApiHandle if necessary and then save.
\r
7628 AlsaHandle *apiInfo = 0;
\r
7629 if ( stream_.apiHandle == 0 ) {
\r
7631 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7633 catch ( std::bad_alloc& ) {
\r
7634 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7638 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7639 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7643 stream_.apiHandle = (void *) apiInfo;
\r
7644 apiInfo->handles[0] = 0;
\r
7645 apiInfo->handles[1] = 0;
\r
7648 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7650 apiInfo->handles[mode] = phandle;
\r
7653 // Allocate necessary internal buffers.
\r
7654 unsigned long bufferBytes;
\r
7655 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7656 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7657 if ( stream_.userBuffer[mode] == NULL ) {
\r
7658 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7662 if ( stream_.doConvertBuffer[mode] ) {
\r
7664 bool makeBuffer = true;
\r
7665 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7666 if ( mode == INPUT ) {
\r
7667 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7668 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7669 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7673 if ( makeBuffer ) {
\r
7674 bufferBytes *= *bufferSize;
\r
7675 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7676 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7677 if ( stream_.deviceBuffer == NULL ) {
\r
7678 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7684 stream_.sampleRate = sampleRate;
\r
7685 stream_.nBuffers = periods;
\r
7686 stream_.device[mode] = device;
\r
7687 stream_.state = STREAM_STOPPED;
\r
7689 // Setup the buffer conversion information structure.
\r
7690 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7692 // Setup thread if necessary.
\r
7693 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7694 // We had already set up an output stream.
\r
7695 stream_.mode = DUPLEX;
\r
7696 // Link the streams if possible.
\r
7697 apiInfo->synchronized = false;
\r
7698 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7699 apiInfo->synchronized = true;
\r
7701 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7702 error( RtAudioError::WARNING );
\r
7706 stream_.mode = mode;
\r
7708 // Setup callback thread.
\r
7709 stream_.callbackInfo.object = (void *) this;
\r
7711 // Set the thread attributes for joinable and realtime scheduling
\r
7712 // priority (optional). The higher priority will only take affect
\r
7713 // if the program is run as root or suid. Note, under Linux
\r
7714 // processes with CAP_SYS_NICE privilege, a user can change
\r
7715 // scheduling policy and priority (thus need not be root). See
\r
7716 // POSIX "capabilities".
\r
7717 pthread_attr_t attr;
\r
7718 pthread_attr_init( &attr );
\r
7719 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7721 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7722 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7723 // We previously attempted to increase the audio callback priority
\r
7724 // to SCHED_RR here via the attributes. However, while no errors
\r
7725 // were reported in doing so, it did not work. So, now this is
\r
7726 // done in the alsaCallbackHandler function.
\r
7727 stream_.callbackInfo.doRealtime = true;
\r
7728 int priority = options->priority;
\r
7729 int min = sched_get_priority_min( SCHED_RR );
\r
7730 int max = sched_get_priority_max( SCHED_RR );
\r
7731 if ( priority < min ) priority = min;
\r
7732 else if ( priority > max ) priority = max;
\r
7733 stream_.callbackInfo.priority = priority;
\r
7737 stream_.callbackInfo.isRunning = true;
\r
7738 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7739 pthread_attr_destroy( &attr );
\r
7741 stream_.callbackInfo.isRunning = false;
\r
7742 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7751 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7752 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7753 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7755 stream_.apiHandle = 0;
\r
7758 if ( phandle) snd_pcm_close( phandle );
\r
7760 for ( int i=0; i<2; i++ ) {
\r
7761 if ( stream_.userBuffer[i] ) {
\r
7762 free( stream_.userBuffer[i] );
\r
7763 stream_.userBuffer[i] = 0;
\r
7767 if ( stream_.deviceBuffer ) {
\r
7768 free( stream_.deviceBuffer );
\r
7769 stream_.deviceBuffer = 0;
\r
7772 stream_.state = STREAM_CLOSED;
\r
7776 void RtApiAlsa :: closeStream()
\r
7778 if ( stream_.state == STREAM_CLOSED ) {
\r
7779 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7780 error( RtAudioError::WARNING );
\r
7784 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7785 stream_.callbackInfo.isRunning = false;
\r
7786 MUTEX_LOCK( &stream_.mutex );
\r
7787 if ( stream_.state == STREAM_STOPPED ) {
\r
7788 apiInfo->runnable = true;
\r
7789 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7791 MUTEX_UNLOCK( &stream_.mutex );
\r
7792 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7794 if ( stream_.state == STREAM_RUNNING ) {
\r
7795 stream_.state = STREAM_STOPPED;
\r
7796 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7797 snd_pcm_drop( apiInfo->handles[0] );
\r
7798 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7799 snd_pcm_drop( apiInfo->handles[1] );
\r
7803 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7804 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7805 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7807 stream_.apiHandle = 0;
\r
7810 for ( int i=0; i<2; i++ ) {
\r
7811 if ( stream_.userBuffer[i] ) {
\r
7812 free( stream_.userBuffer[i] );
\r
7813 stream_.userBuffer[i] = 0;
\r
7817 if ( stream_.deviceBuffer ) {
\r
7818 free( stream_.deviceBuffer );
\r
7819 stream_.deviceBuffer = 0;
\r
7822 stream_.mode = UNINITIALIZED;
\r
7823 stream_.state = STREAM_CLOSED;
\r
7826 void RtApiAlsa :: startStream()
\r
7828 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7831 if ( stream_.state == STREAM_RUNNING ) {
\r
7832 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7833 error( RtAudioError::WARNING );
\r
7837 MUTEX_LOCK( &stream_.mutex );
\r
7840 snd_pcm_state_t state;
\r
7841 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7842 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7843 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7844 state = snd_pcm_state( handle[0] );
\r
7845 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7846 result = snd_pcm_prepare( handle[0] );
\r
7847 if ( result < 0 ) {
\r
7848 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7849 errorText_ = errorStream_.str();
\r
7855 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7856 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7857 state = snd_pcm_state( handle[1] );
\r
7858 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7859 result = snd_pcm_prepare( handle[1] );
\r
7860 if ( result < 0 ) {
\r
7861 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7862 errorText_ = errorStream_.str();
\r
7868 stream_.state = STREAM_RUNNING;
\r
7871 apiInfo->runnable = true;
\r
7872 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7873 MUTEX_UNLOCK( &stream_.mutex );
\r
7875 if ( result >= 0 ) return;
\r
7876 error( RtAudioError::SYSTEM_ERROR );
\r
7879 void RtApiAlsa :: stopStream()
\r
7882 if ( stream_.state == STREAM_STOPPED ) {
\r
7883 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7884 error( RtAudioError::WARNING );
\r
7888 stream_.state = STREAM_STOPPED;
\r
7889 MUTEX_LOCK( &stream_.mutex );
\r
7892 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7893 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7894 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7895 if ( apiInfo->synchronized )
\r
7896 result = snd_pcm_drop( handle[0] );
\r
7898 result = snd_pcm_drain( handle[0] );
\r
7899 if ( result < 0 ) {
\r
7900 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7901 errorText_ = errorStream_.str();
\r
7906 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7907 result = snd_pcm_drop( handle[1] );
\r
7908 if ( result < 0 ) {
\r
7909 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7910 errorText_ = errorStream_.str();
\r
7916 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7917 MUTEX_UNLOCK( &stream_.mutex );
\r
7919 if ( result >= 0 ) return;
\r
7920 error( RtAudioError::SYSTEM_ERROR );
\r
7923 void RtApiAlsa :: abortStream()
\r
7926 if ( stream_.state == STREAM_STOPPED ) {
\r
7927 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7928 error( RtAudioError::WARNING );
\r
7932 stream_.state = STREAM_STOPPED;
\r
7933 MUTEX_LOCK( &stream_.mutex );
\r
7936 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7937 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7938 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7939 result = snd_pcm_drop( handle[0] );
\r
7940 if ( result < 0 ) {
\r
7941 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7942 errorText_ = errorStream_.str();
\r
7947 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7948 result = snd_pcm_drop( handle[1] );
\r
7949 if ( result < 0 ) {
\r
7950 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7951 errorText_ = errorStream_.str();
\r
7957 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7958 MUTEX_UNLOCK( &stream_.mutex );
\r
7960 if ( result >= 0 ) return;
\r
7961 error( RtAudioError::SYSTEM_ERROR );
\r
7964 void RtApiAlsa :: callbackEvent()
\r
7966 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7967 if ( stream_.state == STREAM_STOPPED ) {
\r
7968 MUTEX_LOCK( &stream_.mutex );
\r
7969 while ( !apiInfo->runnable )
\r
7970 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7972 if ( stream_.state != STREAM_RUNNING ) {
\r
7973 MUTEX_UNLOCK( &stream_.mutex );
\r
7976 MUTEX_UNLOCK( &stream_.mutex );
\r
7979 if ( stream_.state == STREAM_CLOSED ) {
\r
7980 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7981 error( RtAudioError::WARNING );
\r
7985 int doStopStream = 0;
\r
7986 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7987 double streamTime = getStreamTime();
\r
7988 RtAudioStreamStatus status = 0;
\r
7989 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7990 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7991 apiInfo->xrun[0] = false;
\r
7993 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7994 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7995 apiInfo->xrun[1] = false;
\r
7997 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7998 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
8000 if ( doStopStream == 2 ) {
\r
8005 MUTEX_LOCK( &stream_.mutex );
\r
8007 // The state might change while waiting on a mutex.
\r
8008 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
8013 snd_pcm_t **handle;
\r
8014 snd_pcm_sframes_t frames;
\r
8015 RtAudioFormat format;
\r
8016 handle = (snd_pcm_t **) apiInfo->handles;
\r
8018 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
8020 // Setup parameters.
\r
8021 if ( stream_.doConvertBuffer[1] ) {
\r
8022 buffer = stream_.deviceBuffer;
\r
8023 channels = stream_.nDeviceChannels[1];
\r
8024 format = stream_.deviceFormat[1];
\r
8027 buffer = stream_.userBuffer[1];
\r
8028 channels = stream_.nUserChannels[1];
\r
8029 format = stream_.userFormat;
\r
8032 // Read samples from device in interleaved/non-interleaved format.
\r
8033 if ( stream_.deviceInterleaved[1] )
\r
8034 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
8036 void *bufs[channels];
\r
8037 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8038 for ( int i=0; i<channels; i++ )
\r
8039 bufs[i] = (void *) (buffer + (i * offset));
\r
8040 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
8043 if ( result < (int) stream_.bufferSize ) {
\r
8044 // Either an error or overrun occured.
\r
8045 if ( result == -EPIPE ) {
\r
8046 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
8047 if ( state == SND_PCM_STATE_XRUN ) {
\r
8048 apiInfo->xrun[1] = true;
\r
8049 result = snd_pcm_prepare( handle[1] );
\r
8050 if ( result < 0 ) {
\r
8051 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
8052 errorText_ = errorStream_.str();
\r
8056 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8057 errorText_ = errorStream_.str();
\r
8061 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
8062 errorText_ = errorStream_.str();
\r
8064 error( RtAudioError::WARNING );
\r
8068 // Do byte swapping if necessary.
\r
8069 if ( stream_.doByteSwap[1] )
\r
8070 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
8072 // Do buffer conversion if necessary.
\r
8073 if ( stream_.doConvertBuffer[1] )
\r
8074 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
8076 // Check stream latency
\r
8077 result = snd_pcm_delay( handle[1], &frames );
\r
8078 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
8083 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8085 // Setup parameters and do buffer conversion if necessary.
\r
8086 if ( stream_.doConvertBuffer[0] ) {
\r
8087 buffer = stream_.deviceBuffer;
\r
8088 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
8089 channels = stream_.nDeviceChannels[0];
\r
8090 format = stream_.deviceFormat[0];
\r
8093 buffer = stream_.userBuffer[0];
\r
8094 channels = stream_.nUserChannels[0];
\r
8095 format = stream_.userFormat;
\r
8098 // Do byte swapping if necessary.
\r
8099 if ( stream_.doByteSwap[0] )
\r
8100 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
8102 // Write samples to device in interleaved/non-interleaved format.
\r
8103 if ( stream_.deviceInterleaved[0] )
\r
8104 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
8106 void *bufs[channels];
\r
8107 size_t offset = stream_.bufferSize * formatBytes( format );
\r
8108 for ( int i=0; i<channels; i++ )
\r
8109 bufs[i] = (void *) (buffer + (i * offset));
\r
8110 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8113 if ( result < (int) stream_.bufferSize ) {
\r
8114 // Either an error or underrun occured.
\r
8115 if ( result == -EPIPE ) {
\r
8116 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8117 if ( state == SND_PCM_STATE_XRUN ) {
\r
8118 apiInfo->xrun[0] = true;
\r
8119 result = snd_pcm_prepare( handle[0] );
\r
8120 if ( result < 0 ) {
\r
8121 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8122 errorText_ = errorStream_.str();
\r
8125 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
\r
8128 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8129 errorText_ = errorStream_.str();
\r
8133 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8134 errorText_ = errorStream_.str();
\r
8136 error( RtAudioError::WARNING );
\r
8140 // Check stream latency
\r
8141 result = snd_pcm_delay( handle[0], &frames );
\r
8142 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8146 MUTEX_UNLOCK( &stream_.mutex );
\r
8148 RtApi::tickStreamTime();
\r
8149 if ( doStopStream == 1 ) this->stopStream();
\r
8152 static void *alsaCallbackHandler( void *ptr )
\r
8154 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8155 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8156 bool *isRunning = &info->isRunning;
\r
8158 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8159 if ( info->doRealtime ) {
\r
8160 pthread_t tID = pthread_self(); // ID of this thread
\r
8161 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8162 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8166 while ( *isRunning == true ) {
\r
8167 pthread_testcancel();
\r
8168 object->callbackEvent();
\r
8171 pthread_exit( NULL );
\r
8174 //******************** End of __LINUX_ALSA__ *********************//
\r
8177 #if defined(__LINUX_PULSE__)
\r
8179 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8180 // and Tristan Matthews.
\r
8182 #include <pulse/error.h>
\r
8183 #include <pulse/simple.h>
\r
// Sample rates accepted by the PulseAudio backend; zero-terminated list.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
\r
8189 struct rtaudio_pa_format_mapping_t {
\r
8190 RtAudioFormat rtaudio_format;
\r
8191 pa_sample_format_t pa_format;
\r
8194 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8195 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8196 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8197 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8198 {0, PA_SAMPLE_INVALID}};
\r
8200 struct PulseAudioHandle {
\r
8201 pa_simple *s_play;
\r
8204 pthread_cond_t runnable_cv;
\r
8206 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8209 RtApiPulse::~RtApiPulse()
\r
8211 if ( stream_.state != STREAM_CLOSED )
\r
8215 unsigned int RtApiPulse::getDeviceCount( void )
\r
8220 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8222 RtAudio::DeviceInfo info;
\r
8223 info.probed = true;
\r
8224 info.name = "PulseAudio";
\r
8225 info.outputChannels = 2;
\r
8226 info.inputChannels = 2;
\r
8227 info.duplexChannels = 2;
\r
8228 info.isDefaultOutput = true;
\r
8229 info.isDefaultInput = true;
\r
8231 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8232 info.sampleRates.push_back( *sr );
\r
8234 info.preferredSampleRate = 48000;
\r
8235 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8240 static void *pulseaudio_callback( void * user )
\r
8242 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8243 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8244 volatile bool *isRunning = &cbi->isRunning;
\r
8246 while ( *isRunning ) {
\r
8247 pthread_testcancel();
\r
8248 context->callbackEvent();
\r
8251 pthread_exit( NULL );
\r
8254 void RtApiPulse::closeStream( void )
\r
8256 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8258 stream_.callbackInfo.isRunning = false;
\r
8260 MUTEX_LOCK( &stream_.mutex );
\r
8261 if ( stream_.state == STREAM_STOPPED ) {
\r
8262 pah->runnable = true;
\r
8263 pthread_cond_signal( &pah->runnable_cv );
\r
8265 MUTEX_UNLOCK( &stream_.mutex );
\r
8267 pthread_join( pah->thread, 0 );
\r
8268 if ( pah->s_play ) {
\r
8269 pa_simple_flush( pah->s_play, NULL );
\r
8270 pa_simple_free( pah->s_play );
\r
8273 pa_simple_free( pah->s_rec );
\r
8275 pthread_cond_destroy( &pah->runnable_cv );
\r
8277 stream_.apiHandle = 0;
\r
8280 if ( stream_.userBuffer[0] ) {
\r
8281 free( stream_.userBuffer[0] );
\r
8282 stream_.userBuffer[0] = 0;
\r
8284 if ( stream_.userBuffer[1] ) {
\r
8285 free( stream_.userBuffer[1] );
\r
8286 stream_.userBuffer[1] = 0;
\r
8289 stream_.state = STREAM_CLOSED;
\r
8290 stream_.mode = UNINITIALIZED;
\r
8293 void RtApiPulse::callbackEvent( void )
\r
8295 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8297 if ( stream_.state == STREAM_STOPPED ) {
\r
8298 MUTEX_LOCK( &stream_.mutex );
\r
8299 while ( !pah->runnable )
\r
8300 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8302 if ( stream_.state != STREAM_RUNNING ) {
\r
8303 MUTEX_UNLOCK( &stream_.mutex );
\r
8306 MUTEX_UNLOCK( &stream_.mutex );
\r
8309 if ( stream_.state == STREAM_CLOSED ) {
\r
8310 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8311 "this shouldn't happen!";
\r
8312 error( RtAudioError::WARNING );
\r
8316 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8317 double streamTime = getStreamTime();
\r
8318 RtAudioStreamStatus status = 0;
\r
8319 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8320 stream_.bufferSize, streamTime, status,
\r
8321 stream_.callbackInfo.userData );
\r
8323 if ( doStopStream == 2 ) {
\r
8328 MUTEX_LOCK( &stream_.mutex );
\r
8329 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8330 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8332 if ( stream_.state != STREAM_RUNNING )
\r
8337 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8338 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8339 convertBuffer( stream_.deviceBuffer,
\r
8340 stream_.userBuffer[OUTPUT],
\r
8341 stream_.convertInfo[OUTPUT] );
\r
8342 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8343 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8345 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8346 formatBytes( stream_.userFormat );
\r
8348 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8349 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8350 pa_strerror( pa_error ) << ".";
\r
8351 errorText_ = errorStream_.str();
\r
8352 error( RtAudioError::WARNING );
\r
8356 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8357 if ( stream_.doConvertBuffer[INPUT] )
\r
8358 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8359 formatBytes( stream_.deviceFormat[INPUT] );
\r
8361 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8362 formatBytes( stream_.userFormat );
\r
8364 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8365 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8366 pa_strerror( pa_error ) << ".";
\r
8367 errorText_ = errorStream_.str();
\r
8368 error( RtAudioError::WARNING );
\r
8370 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8371 convertBuffer( stream_.userBuffer[INPUT],
\r
8372 stream_.deviceBuffer,
\r
8373 stream_.convertInfo[INPUT] );
\r
8378 MUTEX_UNLOCK( &stream_.mutex );
\r
8379 RtApi::tickStreamTime();
\r
8381 if ( doStopStream == 1 )
\r
8385 void RtApiPulse::startStream( void )
\r
8387 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8389 if ( stream_.state == STREAM_CLOSED ) {
\r
8390 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8391 error( RtAudioError::INVALID_USE );
\r
8394 if ( stream_.state == STREAM_RUNNING ) {
\r
8395 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8396 error( RtAudioError::WARNING );
\r
8400 MUTEX_LOCK( &stream_.mutex );
\r
8402 stream_.state = STREAM_RUNNING;
\r
8404 pah->runnable = true;
\r
8405 pthread_cond_signal( &pah->runnable_cv );
\r
8406 MUTEX_UNLOCK( &stream_.mutex );
\r
8409 void RtApiPulse::stopStream( void )
\r
8411 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8413 if ( stream_.state == STREAM_CLOSED ) {
\r
8414 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8415 error( RtAudioError::INVALID_USE );
\r
8418 if ( stream_.state == STREAM_STOPPED ) {
\r
8419 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8420 error( RtAudioError::WARNING );
\r
8424 stream_.state = STREAM_STOPPED;
\r
8425 MUTEX_LOCK( &stream_.mutex );
\r
8427 if ( pah && pah->s_play ) {
\r
8429 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8430 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8431 pa_strerror( pa_error ) << ".";
\r
8432 errorText_ = errorStream_.str();
\r
8433 MUTEX_UNLOCK( &stream_.mutex );
\r
8434 error( RtAudioError::SYSTEM_ERROR );
\r
8439 stream_.state = STREAM_STOPPED;
\r
8440 MUTEX_UNLOCK( &stream_.mutex );
\r
8443 void RtApiPulse::abortStream( void )
\r
8445 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8447 if ( stream_.state == STREAM_CLOSED ) {
\r
8448 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8449 error( RtAudioError::INVALID_USE );
\r
8452 if ( stream_.state == STREAM_STOPPED ) {
\r
8453 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8454 error( RtAudioError::WARNING );
\r
8458 stream_.state = STREAM_STOPPED;
\r
8459 MUTEX_LOCK( &stream_.mutex );
\r
8461 if ( pah && pah->s_play ) {
\r
8463 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8464 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8465 pa_strerror( pa_error ) << ".";
\r
8466 errorText_ = errorStream_.str();
\r
8467 MUTEX_UNLOCK( &stream_.mutex );
\r
8468 error( RtAudioError::SYSTEM_ERROR );
\r
8473 stream_.state = STREAM_STOPPED;
\r
8474 MUTEX_UNLOCK( &stream_.mutex );
\r
8477 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8478 unsigned int channels, unsigned int firstChannel,
\r
8479 unsigned int sampleRate, RtAudioFormat format,
\r
8480 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8482 PulseAudioHandle *pah = 0;
\r
8483 unsigned long bufferBytes = 0;
\r
8484 pa_sample_spec ss;
\r
8486 if ( device != 0 ) return false;
\r
8487 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8488 if ( channels != 1 && channels != 2 ) {
\r
8489 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8492 ss.channels = channels;
\r
8494 if ( firstChannel != 0 ) return false;
\r
8496 bool sr_found = false;
\r
8497 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8498 if ( sampleRate == *sr ) {
\r
8500 stream_.sampleRate = sampleRate;
\r
8501 ss.rate = sampleRate;
\r
8505 if ( !sr_found ) {
\r
8506 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8510 bool sf_found = 0;
\r
8511 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8512 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8513 if ( format == sf->rtaudio_format ) {
\r
8515 stream_.userFormat = sf->rtaudio_format;
\r
8516 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8517 ss.format = sf->pa_format;
\r
8521 if ( !sf_found ) { // Use internal data format conversion.
\r
8522 stream_.userFormat = format;
\r
8523 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8524 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8527 // Set other stream parameters.
\r
8528 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8529 else stream_.userInterleaved = true;
\r
8530 stream_.deviceInterleaved[mode] = true;
\r
8531 stream_.nBuffers = 1;
\r
8532 stream_.doByteSwap[mode] = false;
\r
8533 stream_.nUserChannels[mode] = channels;
\r
8534 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8535 stream_.channelOffset[mode] = 0;
\r
8536 std::string streamName = "RtAudio";
\r
8538 // Set flags for buffer conversion.
\r
8539 stream_.doConvertBuffer[mode] = false;
\r
8540 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8541 stream_.doConvertBuffer[mode] = true;
\r
8542 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8543 stream_.doConvertBuffer[mode] = true;
\r
8545 // Allocate necessary internal buffers.
\r
8546 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8547 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8548 if ( stream_.userBuffer[mode] == NULL ) {
\r
8549 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8552 stream_.bufferSize = *bufferSize;
\r
8554 if ( stream_.doConvertBuffer[mode] ) {
\r
8556 bool makeBuffer = true;
\r
8557 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8558 if ( mode == INPUT ) {
\r
8559 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8560 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8561 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8565 if ( makeBuffer ) {
\r
8566 bufferBytes *= *bufferSize;
\r
8567 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8568 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8569 if ( stream_.deviceBuffer == NULL ) {
\r
8570 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8576 stream_.device[mode] = device;
\r
8578 // Setup the buffer conversion information structure.
\r
8579 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8581 if ( !stream_.apiHandle ) {
\r
8582 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8584 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8588 stream_.apiHandle = pah;
\r
8589 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8590 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8594 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8597 if ( options && !options->streamName.empty() ) streamName = options->streamName;
\r
8600 pa_buffer_attr buffer_attr;
\r
8601 buffer_attr.fragsize = bufferBytes;
\r
8602 buffer_attr.maxlength = -1;
\r
8604 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8605 if ( !pah->s_rec ) {
\r
8606 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8611 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8612 if ( !pah->s_play ) {
\r
8613 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8621 if ( stream_.mode == UNINITIALIZED )
\r
8622 stream_.mode = mode;
\r
8623 else if ( stream_.mode == mode )
\r
8626 stream_.mode = DUPLEX;
\r
8628 if ( !stream_.callbackInfo.isRunning ) {
\r
8629 stream_.callbackInfo.object = this;
\r
8630 stream_.callbackInfo.isRunning = true;
\r
8631 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8632 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8637 stream_.state = STREAM_STOPPED;
\r
8641 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8642 pthread_cond_destroy( &pah->runnable_cv );
\r
8644 stream_.apiHandle = 0;
\r
8647 for ( int i=0; i<2; i++ ) {
\r
8648 if ( stream_.userBuffer[i] ) {
\r
8649 free( stream_.userBuffer[i] );
\r
8650 stream_.userBuffer[i] = 0;
\r
8654 if ( stream_.deviceBuffer ) {
\r
8655 free( stream_.deviceBuffer );
\r
8656 stream_.deviceBuffer = 0;
\r
8662 //******************** End of __LINUX_PULSE__ *********************//
\r
8665 #if defined(__LINUX_OSS__)
\r
8667 #include <unistd.h>
\r
8668 #include <sys/ioctl.h>
\r
8669 #include <unistd.h>
\r
8670 #include <fcntl.h>
\r
8671 #include <sys/soundcard.h>
\r
8672 #include <errno.h>
\r
8675 static void *ossCallbackHandler(void * ptr);
\r
8677 // A structure to hold various information related to the OSS API
\r
8678 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];    // device ids
  bool xrun[2]; // over/underrun flags per direction
  bool triggered;
  pthread_cond_t runnable;

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8689 RtApiOss :: RtApiOss()
\r
8691 // Nothing to do here.
\r
8694 RtApiOss :: ~RtApiOss()
\r
8696 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8699 unsigned int RtApiOss :: getDeviceCount( void )
\r
8701 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8702 if ( mixerfd == -1 ) {
\r
8703 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8704 error( RtAudioError::WARNING );
\r
8708 oss_sysinfo sysinfo;
\r
8709 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8711 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8712 error( RtAudioError::WARNING );
\r
8717 return sysinfo.numaudios;
\r
8720 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8722 RtAudio::DeviceInfo info;
\r
8723 info.probed = false;
\r
8725 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8726 if ( mixerfd == -1 ) {
\r
8727 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8728 error( RtAudioError::WARNING );
\r
8732 oss_sysinfo sysinfo;
\r
8733 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8734 if ( result == -1 ) {
\r
8736 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8737 error( RtAudioError::WARNING );
\r
8741 unsigned nDevices = sysinfo.numaudios;
\r
8742 if ( nDevices == 0 ) {
\r
8744 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8745 error( RtAudioError::INVALID_USE );
\r
8749 if ( device >= nDevices ) {
\r
8751 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8752 error( RtAudioError::INVALID_USE );
\r
8756 oss_audioinfo ainfo;
\r
8757 ainfo.dev = device;
\r
8758 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8760 if ( result == -1 ) {
\r
8761 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8762 errorText_ = errorStream_.str();
\r
8763 error( RtAudioError::WARNING );
\r
8768 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8769 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8770 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8771 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8772 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8775 // Probe data formats ... do for input
\r
8776 unsigned long mask = ainfo.iformats;
\r
8777 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8778 info.nativeFormats |= RTAUDIO_SINT16;
\r
8779 if ( mask & AFMT_S8 )
\r
8780 info.nativeFormats |= RTAUDIO_SINT8;
\r
8781 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8782 info.nativeFormats |= RTAUDIO_SINT32;
\r
8783 if ( mask & AFMT_FLOAT )
\r
8784 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8785 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8786 info.nativeFormats |= RTAUDIO_SINT24;
\r
8788 // Check that we have at least one supported format
\r
8789 if ( info.nativeFormats == 0 ) {
\r
8790 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8791 errorText_ = errorStream_.str();
\r
8792 error( RtAudioError::WARNING );
\r
8796 // Probe the supported sample rates.
\r
8797 info.sampleRates.clear();
\r
8798 if ( ainfo.nrates ) {
\r
8799 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8801 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8802 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8805 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8813 // Check min and max rate values;
\r
8814 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8815 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
\r
8816 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8818 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
8819 info.preferredSampleRate = SAMPLE_RATES[k];
\r
8824 if ( info.sampleRates.size() == 0 ) {
\r
8825 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8826 errorText_ = errorStream_.str();
\r
8827 error( RtAudioError::WARNING );
\r
8830 info.probed = true;
\r
8831 info.name = ainfo.name;
\r
8838 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8839 unsigned int firstChannel, unsigned int sampleRate,
\r
8840 RtAudioFormat format, unsigned int *bufferSize,
\r
8841 RtAudio::StreamOptions *options )
\r
8843 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8844 if ( mixerfd == -1 ) {
\r
8845 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8849 oss_sysinfo sysinfo;
\r
8850 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8851 if ( result == -1 ) {
\r
8853 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8857 unsigned nDevices = sysinfo.numaudios;
\r
8858 if ( nDevices == 0 ) {
\r
8859 // This should not happen because a check is made before this function is called.
\r
8861 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8865 if ( device >= nDevices ) {
\r
8866 // This should not happen because a check is made before this function is called.
\r
8868 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8872 oss_audioinfo ainfo;
\r
8873 ainfo.dev = device;
\r
8874 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8876 if ( result == -1 ) {
\r
8877 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8878 errorText_ = errorStream_.str();
\r
8882 // Check if device supports input or output
\r
8883 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8884 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8885 if ( mode == OUTPUT )
\r
8886 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8888 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8889 errorText_ = errorStream_.str();
\r
8894 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8895 if ( mode == OUTPUT )
\r
8896 flags |= O_WRONLY;
\r
8897 else { // mode == INPUT
\r
8898 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8899 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8900 close( handle->id[0] );
\r
8901 handle->id[0] = 0;
\r
8902 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8903 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8904 errorText_ = errorStream_.str();
\r
8907 // Check that the number previously set channels is the same.
\r
8908 if ( stream_.nUserChannels[0] != channels ) {
\r
8909 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8910 errorText_ = errorStream_.str();
\r
8916 flags |= O_RDONLY;
\r
8919 // Set exclusive access if specified.
\r
8920 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8922 // Try to open the device.
\r
8924 fd = open( ainfo.devnode, flags, 0 );
\r
8926 if ( errno == EBUSY )
\r
8927 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8929 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8930 errorText_ = errorStream_.str();
\r
8934 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8936 if ( flags | O_RDWR ) {
\r
8937 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8938 if ( result == -1) {
\r
8939 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8940 errorText_ = errorStream_.str();
\r
8946 // Check the device channel support.
\r
8947 stream_.nUserChannels[mode] = channels;
\r
8948 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8950 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8951 errorText_ = errorStream_.str();
\r
8955 // Set the number of channels.
\r
8956 int deviceChannels = channels + firstChannel;
\r
8957 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8958 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8960 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8961 errorText_ = errorStream_.str();
\r
8964 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8966 // Get the data format mask
\r
8968 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8969 if ( result == -1 ) {
\r
8971 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8972 errorText_ = errorStream_.str();
\r
8976 // Determine how to set the device format.
\r
8977 stream_.userFormat = format;
\r
8978 int deviceFormat = -1;
\r
8979 stream_.doByteSwap[mode] = false;
\r
8980 if ( format == RTAUDIO_SINT8 ) {
\r
8981 if ( mask & AFMT_S8 ) {
\r
8982 deviceFormat = AFMT_S8;
\r
8983 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8986 else if ( format == RTAUDIO_SINT16 ) {
\r
8987 if ( mask & AFMT_S16_NE ) {
\r
8988 deviceFormat = AFMT_S16_NE;
\r
8989 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8991 else if ( mask & AFMT_S16_OE ) {
\r
8992 deviceFormat = AFMT_S16_OE;
\r
8993 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8994 stream_.doByteSwap[mode] = true;
\r
8997 else if ( format == RTAUDIO_SINT24 ) {
\r
8998 if ( mask & AFMT_S24_NE ) {
\r
8999 deviceFormat = AFMT_S24_NE;
\r
9000 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9002 else if ( mask & AFMT_S24_OE ) {
\r
9003 deviceFormat = AFMT_S24_OE;
\r
9004 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9005 stream_.doByteSwap[mode] = true;
\r
9008 else if ( format == RTAUDIO_SINT32 ) {
\r
9009 if ( mask & AFMT_S32_NE ) {
\r
9010 deviceFormat = AFMT_S32_NE;
\r
9011 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9013 else if ( mask & AFMT_S32_OE ) {
\r
9014 deviceFormat = AFMT_S32_OE;
\r
9015 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9016 stream_.doByteSwap[mode] = true;
\r
9020 if ( deviceFormat == -1 ) {
\r
9021 // The user requested format is not natively supported by the device.
\r
9022 if ( mask & AFMT_S16_NE ) {
\r
9023 deviceFormat = AFMT_S16_NE;
\r
9024 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9026 else if ( mask & AFMT_S32_NE ) {
\r
9027 deviceFormat = AFMT_S32_NE;
\r
9028 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9030 else if ( mask & AFMT_S24_NE ) {
\r
9031 deviceFormat = AFMT_S24_NE;
\r
9032 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9034 else if ( mask & AFMT_S16_OE ) {
\r
9035 deviceFormat = AFMT_S16_OE;
\r
9036 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
9037 stream_.doByteSwap[mode] = true;
\r
9039 else if ( mask & AFMT_S32_OE ) {
\r
9040 deviceFormat = AFMT_S32_OE;
\r
9041 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
9042 stream_.doByteSwap[mode] = true;
\r
9044 else if ( mask & AFMT_S24_OE ) {
\r
9045 deviceFormat = AFMT_S24_OE;
\r
9046 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
9047 stream_.doByteSwap[mode] = true;
\r
9049 else if ( mask & AFMT_S8) {
\r
9050 deviceFormat = AFMT_S8;
\r
9051 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
9055 if ( stream_.deviceFormat[mode] == 0 ) {
\r
9056 // This really shouldn't happen ...
\r
9058 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
9059 errorText_ = errorStream_.str();
\r
9063 // Set the data format.
\r
9064 int temp = deviceFormat;
\r
9065 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
9066 if ( result == -1 || deviceFormat != temp ) {
\r
9068 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
9069 errorText_ = errorStream_.str();
\r
9073 // Attempt to set the buffer size. According to OSS, the minimum
\r
9074 // number of buffers is two. The supposed minimum buffer size is 16
\r
9075 // bytes, so that will be our lower bound. The argument to this
\r
9076 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
9077 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
9078 // We'll check the actual value used near the end of the setup
\r
9080 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
9081 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
9083 if ( options ) buffers = options->numberOfBuffers;
\r
9084 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
9085 if ( buffers < 2 ) buffers = 3;
\r
9086 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
9087 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
9088 if ( result == -1 ) {
\r
9090 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
9091 errorText_ = errorStream_.str();
\r
9094 stream_.nBuffers = buffers;
\r
9096 // Save buffer size (in sample frames).
\r
9097 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
9098 stream_.bufferSize = *bufferSize;
\r
9100 // Set the sample rate.
\r
9101 int srate = sampleRate;
\r
9102 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
9103 if ( result == -1 ) {
\r
9105 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
9106 errorText_ = errorStream_.str();
\r
9110 // Verify the sample rate setup worked.
\r
9111 if ( abs( srate - sampleRate ) > 100 ) {
\r
9113 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
9114 errorText_ = errorStream_.str();
\r
9117 stream_.sampleRate = sampleRate;
\r
9119 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
9120 // We're doing duplex setup here.
\r
9121 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
9122 stream_.nDeviceChannels[0] = deviceChannels;
\r
9125 // Set interleaving parameters.
\r
9126 stream_.userInterleaved = true;
\r
9127 stream_.deviceInterleaved[mode] = true;
\r
9128 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
9129 stream_.userInterleaved = false;
\r
9131 // Set flags for buffer conversion
\r
9132 stream_.doConvertBuffer[mode] = false;
\r
9133 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
9134 stream_.doConvertBuffer[mode] = true;
\r
9135 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
9136 stream_.doConvertBuffer[mode] = true;
\r
9137 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
9138 stream_.nUserChannels[mode] > 1 )
\r
9139 stream_.doConvertBuffer[mode] = true;
\r
9141 // Allocate the stream handles if necessary and then save.
\r
9142 if ( stream_.apiHandle == 0 ) {
\r
9144 handle = new OssHandle;
\r
9146 catch ( std::bad_alloc& ) {
\r
9147 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
9151 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
9152 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
9156 stream_.apiHandle = (void *) handle;
\r
9159 handle = (OssHandle *) stream_.apiHandle;
\r
9161 handle->id[mode] = fd;
\r
9163 // Allocate necessary internal buffers.
\r
9164 unsigned long bufferBytes;
\r
9165 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
9166 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
9167 if ( stream_.userBuffer[mode] == NULL ) {
\r
9168 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9172 if ( stream_.doConvertBuffer[mode] ) {
\r
9174 bool makeBuffer = true;
\r
9175 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9176 if ( mode == INPUT ) {
\r
9177 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9178 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9179 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9183 if ( makeBuffer ) {
\r
9184 bufferBytes *= *bufferSize;
\r
9185 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9186 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9187 if ( stream_.deviceBuffer == NULL ) {
\r
9188 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9194 stream_.device[mode] = device;
\r
9195 stream_.state = STREAM_STOPPED;
\r
9197 // Setup the buffer conversion information structure.
\r
9198 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9200 // Setup thread if necessary.
\r
9201 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9202 // We had already set up an output stream.
\r
9203 stream_.mode = DUPLEX;
\r
9204 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9207 stream_.mode = mode;
\r
9209 // Setup callback thread.
\r
9210 stream_.callbackInfo.object = (void *) this;
\r
9212 // Set the thread attributes for joinable and realtime scheduling
\r
9213 // priority. The higher priority will only take affect if the
\r
9214 // program is run as root or suid.
\r
9215 pthread_attr_t attr;
\r
9216 pthread_attr_init( &attr );
\r
9217 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9218 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9219 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9220 struct sched_param param;
\r
9221 int priority = options->priority;
\r
9222 int min = sched_get_priority_min( SCHED_RR );
\r
9223 int max = sched_get_priority_max( SCHED_RR );
\r
9224 if ( priority < min ) priority = min;
\r
9225 else if ( priority > max ) priority = max;
\r
9226 param.sched_priority = priority;
\r
9227 pthread_attr_setschedparam( &attr, ¶m );
\r
9228 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9231 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9233 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9236 stream_.callbackInfo.isRunning = true;
\r
9237 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9238 pthread_attr_destroy( &attr );
\r
9240 stream_.callbackInfo.isRunning = false;
\r
9241 errorText_ = "RtApiOss::error creating callback thread!";
\r
9250 pthread_cond_destroy( &handle->runnable );
\r
9251 if ( handle->id[0] ) close( handle->id[0] );
\r
9252 if ( handle->id[1] ) close( handle->id[1] );
\r
9254 stream_.apiHandle = 0;
\r
9257 for ( int i=0; i<2; i++ ) {
\r
9258 if ( stream_.userBuffer[i] ) {
\r
9259 free( stream_.userBuffer[i] );
\r
9260 stream_.userBuffer[i] = 0;
\r
9264 if ( stream_.deviceBuffer ) {
\r
9265 free( stream_.deviceBuffer );
\r
9266 stream_.deviceBuffer = 0;
\r
// Close the open OSS stream: stop the callback thread, halt any active
// device I/O, close device descriptors, and free all stream buffers.
// NOTE(review): this extraction is missing short original lines (braces,
// `return;`) -- comments describe the visible logic only.
9272 void RtApiOss :: closeStream()
\r
9274 if ( stream_.state == STREAM_CLOSED ) {
\r
9275 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9276 error( RtAudioError::WARNING );
\r
9280 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
// Tell the callback thread to exit; if it is parked on the `runnable`
// condition variable (stream stopped), wake it so it can observe the
// flag, then join it before tearing anything down.
9281 stream_.callbackInfo.isRunning = false;
\r
9282 MUTEX_LOCK( &stream_.mutex );
\r
9283 if ( stream_.state == STREAM_STOPPED )
\r
9284 pthread_cond_signal( &handle->runnable );
\r
9285 MUTEX_UNLOCK( &stream_.mutex );
\r
9286 pthread_join( stream_.callbackInfo.thread, NULL );
\r
// If the stream is still running, halt the output (id[0]) and/or input
// (id[1]) DSP devices before closing them.
9288 if ( stream_.state == STREAM_RUNNING ) {
\r
9289 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9290 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9292 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9293 stream_.state = STREAM_STOPPED;
\r
// Release the pthread condition variable, close any open device file
// descriptors, and drop the API handle.
9297 pthread_cond_destroy( &handle->runnable );
\r
9298 if ( handle->id[0] ) close( handle->id[0] );
\r
9299 if ( handle->id[1] ) close( handle->id[1] );
\r
9301 stream_.apiHandle = 0;
\r
// Free the per-direction user buffers and the shared device buffer.
9304 for ( int i=0; i<2; i++ ) {
\r
9305 if ( stream_.userBuffer[i] ) {
\r
9306 free( stream_.userBuffer[i] );
\r
9307 stream_.userBuffer[i] = 0;
\r
9311 if ( stream_.deviceBuffer ) {
\r
9312 free( stream_.deviceBuffer );
\r
9313 stream_.deviceBuffer = 0;
\r
// Reset the stream to its closed, uninitialized state.
9316 stream_.mode = UNINITIALIZED;
\r
9317 stream_.state = STREAM_CLOSED;
\r
// Start the stream. OSS begins streaming on its own once samples are
// written/read, so this only flips the state flag under the stream mutex
// and wakes the callback thread parked on the `runnable` condition.
9320 void RtApiOss :: startStream()
\r
9323 if ( stream_.state == STREAM_RUNNING ) {
\r
9324 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9325 error( RtAudioError::WARNING );
\r
9329 MUTEX_LOCK( &stream_.mutex );
\r
9331 stream_.state = STREAM_RUNNING;
\r
9333 // No need to do anything else here ... OSS automatically starts
\r
9334 // when fed samples.
\r
9336 MUTEX_UNLOCK( &stream_.mutex );
\r
// Wake the callback thread; it blocks in callbackEvent() while stopped.
9338 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9339 pthread_cond_signal( &handle->runnable );
\r
// Stop the stream gracefully: drain the output device by writing a few
// buffers of silence, then halt the output and input DSP devices.
// NOTE(review): short original lines (braces, early `return;`) are
// missing from this extraction.
9342 void RtApiOss :: stopStream()
\r
9345 if ( stream_.state == STREAM_STOPPED ) {
\r
9346 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9347 error( RtAudioError::WARNING );
\r
9351 MUTEX_LOCK( &stream_.mutex );
\r
9353 // The state might change while waiting on a mutex.
\r
9354 if ( stream_.state == STREAM_STOPPED ) {
\r
9355 MUTEX_UNLOCK( &stream_.mutex );
\r
9360 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9361 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9363 // Flush the output with zeros a few times.
\r
9366 RtAudioFormat format;
\r
// Pick whichever buffer actually feeds the device: the conversion
// (device) buffer when format/channel conversion is in use, otherwise
// the user buffer directly.
9368 if ( stream_.doConvertBuffer[0] ) {
\r
9369 buffer = stream_.deviceBuffer;
\r
9370 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9371 format = stream_.deviceFormat[0];
\r
9374 buffer = stream_.userBuffer[0];
\r
9375 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9376 format = stream_.userFormat;
\r
// Zero the buffer and write it nBuffers+1 times so the device's queued
// audio is replaced by silence before halting.
9379 memset( buffer, 0, samples * formatBytes(format) );
\r
9380 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9381 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9382 if ( result == -1 ) {
\r
9383 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9384 error( RtAudioError::WARNING );
\r
// Halt the output device and clear the duplex trigger flag.
9388 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9389 if ( result == -1 ) {
\r
9390 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9391 errorText_ = errorStream_.str();
\r
9394 handle->triggered = false;
\r
// Halt the input device too, unless duplex mode shares one descriptor
// (in which case the halt above already covered it).
9397 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9398 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9399 if ( result == -1 ) {
\r
9400 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9401 errorText_ = errorStream_.str();
\r
9407 stream_.state = STREAM_STOPPED;
\r
9408 MUTEX_UNLOCK( &stream_.mutex );
\r
// Report any ioctl failure recorded above as a system error.
9410 if ( result != -1 ) return;
\r
9411 error( RtAudioError::SYSTEM_ERROR );
\r
// Abort the stream immediately: like stopStream() but without draining
// the output with silence first -- both DSP devices are halted at once.
9414 void RtApiOss :: abortStream()
\r
9417 if ( stream_.state == STREAM_STOPPED ) {
\r
9418 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9419 error( RtAudioError::WARNING );
\r
9423 MUTEX_LOCK( &stream_.mutex );
\r
9425 // The state might change while waiting on a mutex.
\r
9426 if ( stream_.state == STREAM_STOPPED ) {
\r
9427 MUTEX_UNLOCK( &stream_.mutex );
\r
9432 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
// Halt the output device (id[0]) and clear the duplex trigger flag.
9433 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9434 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9435 if ( result == -1 ) {
\r
9436 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9437 errorText_ = errorStream_.str();
\r
9440 handle->triggered = false;
\r
// Halt the input device (id[1]) unless duplex mode shares one
// descriptor with the output.
9443 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9444 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9445 if ( result == -1 ) {
\r
9446 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9447 errorText_ = errorStream_.str();
\r
9453 stream_.state = STREAM_STOPPED;
\r
9454 MUTEX_UNLOCK( &stream_.mutex );
\r
// Report any ioctl failure recorded above as a system error.
9456 if ( result != -1 ) return;
\r
9457 error( RtAudioError::SYSTEM_ERROR );
\r
// One iteration of the OSS callback loop: wait while stopped, invoke the
// user callback to produce/consume a buffer, then perform the actual
// device write (output) and/or read (input), with format conversion and
// byte swapping as configured. Runs on the dedicated callback thread.
9460 void RtApiOss :: callbackEvent()
\r
9462 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
// While stopped, park on the `runnable` condition variable until
// startStream() (or closeStream()) signals us; bail out if the stream
// did not transition to RUNNING.
9463 if ( stream_.state == STREAM_STOPPED ) {
\r
9464 MUTEX_LOCK( &stream_.mutex );
\r
9465 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9466 if ( stream_.state != STREAM_RUNNING ) {
\r
9467 MUTEX_UNLOCK( &stream_.mutex );
\r
9470 MUTEX_UNLOCK( &stream_.mutex );
\r
9473 if ( stream_.state == STREAM_CLOSED ) {
\r
9474 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9475 error( RtAudioError::WARNING );
\r
9479 // Invoke user callback to get fresh output data.
\r
9480 int doStopStream = 0;
\r
9481 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9482 double streamTime = getStreamTime();
\r
// Build the over/underflow status flags from the xrun markers set by
// earlier failed reads/writes, clearing them once reported.
9483 RtAudioStreamStatus status = 0;
\r
9484 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9485 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9486 handle->xrun[0] = false;
\r
9488 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9489 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9490 handle->xrun[1] = false;
\r
// Callback return value: 0 = continue, 1 = stop (drain), 2 = abort now.
9492 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9493 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9494 if ( doStopStream == 2 ) {
\r
9495 this->abortStream();
\r
9499 MUTEX_LOCK( &stream_.mutex );
\r
9501 // The state might change while waiting on a mutex.
\r
9502 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9507 RtAudioFormat format;
\r
// --- Output side: convert/byte-swap the user data if needed, then
// write it to the output device.
9509 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9511 // Setup parameters and do buffer conversion if necessary.
\r
9512 if ( stream_.doConvertBuffer[0] ) {
\r
9513 buffer = stream_.deviceBuffer;
\r
9514 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9515 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9516 format = stream_.deviceFormat[0];
\r
9519 buffer = stream_.userBuffer[0];
\r
9520 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9521 format = stream_.userFormat;
\r
9524 // Do byte swapping if necessary.
\r
9525 if ( stream_.doByteSwap[0] )
\r
9526 byteSwapBuffer( buffer, samples, format );
\r
// First duplex pass: prime the device with one buffer while triggering
// is disabled, then enable input+output triggers simultaneously so both
// directions start in sync.
9528 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9530 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9531 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9532 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9533 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9534 handle->triggered = true;
\r
9537 // Write samples to device.
\r
9538 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9540 if ( result == -1 ) {
\r
9541 // We'll assume this is an underrun, though there isn't a
\r
9542 // specific means for determining that.
\r
9543 handle->xrun[0] = true;
\r
9544 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9545 error( RtAudioError::WARNING );
\r
9546 // Continue on to input section.
\r
// --- Input side: read from the device, then byte-swap and convert into
// the user buffer as configured.
9550 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9552 // Setup parameters.
\r
9553 if ( stream_.doConvertBuffer[1] ) {
\r
9554 buffer = stream_.deviceBuffer;
\r
9555 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9556 format = stream_.deviceFormat[1];
\r
9559 buffer = stream_.userBuffer[1];
\r
9560 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9561 format = stream_.userFormat;
\r
9564 // Read samples from device.
\r
9565 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9567 if ( result == -1 ) {
\r
9568 // We'll assume this is an overrun, though there isn't a
\r
9569 // specific means for determining that.
\r
9570 handle->xrun[1] = true;
\r
9571 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9572 error( RtAudioError::WARNING );
\r
9576 // Do byte swapping if necessary.
\r
9577 if ( stream_.doByteSwap[1] )
\r
9578 byteSwapBuffer( buffer, samples, format );
\r
9580 // Do buffer conversion if necessary.
\r
9581 if ( stream_.doConvertBuffer[1] )
\r
9582 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9586 MUTEX_UNLOCK( &stream_.mutex );
\r
// Advance the stream clock; honor a deferred stop requested by the
// user callback (return value 1).
9588 RtApi::tickStreamTime();
\r
9589 if ( doStopStream == 1 ) this->stopStream();
\r
// Thread entry point for the OSS callback thread. Loops on
// callbackEvent() until CallbackInfo::isRunning is cleared (by
// closeStream() or a fatal error), then exits the thread.
9592 static void *ossCallbackHandler( void *ptr )
\r
9594 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9595 RtApiOss *object = (RtApiOss *) info->object;
\r
// isRunning is read each iteration so another thread can stop the loop.
9596 bool *isRunning = &info->isRunning;
\r
9598 while ( *isRunning == true ) {
\r
9599 pthread_testcancel();
\r
9600 object->callbackEvent();
\r
9603 pthread_exit( NULL );
\r
9606 //******************** End of __LINUX_OSS__ *********************//
\r
9610 // *************************************************** //
\r
9612 // Protected common (OS-independent) RtAudio methods.
\r
9614 // *************************************************** //
\r
9616 // This method can be modified to control the behavior of error
\r
9617 // message printing.
\r
// Central error dispatch: routes errorText_ either to the user-supplied
// error callback (suppressing nested errors via firstErrorOccurred_) or,
// absent a callback, prints warnings to stderr and throws RtAudioError
// for anything more severe.
9618 void RtApi :: error( RtAudioError::Type type )
\r
9620 errorStream_.str(""); // clear the ostringstream
\r
9622 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9623 if ( errorCallback ) {
\r
9624 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9626 if ( firstErrorOccurred_ )
\r
9629 firstErrorOccurred_ = true;
\r
// Copy the message before any re-entrant call can overwrite errorText_.
9630 const std::string errorMessage = errorText_;
\r
// Non-warning errors on an active stream force the callback thread to
// exit before the user callback is informed.
9632 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9633 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9637 errorCallback( type, errorMessage );
\r
9638 firstErrorOccurred_ = false;
\r
// No callback registered: print warnings if enabled, throw otherwise.
9642 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9643 std::cerr << '\n' << errorText_ << "\n\n";
\r
9644 else if ( type != RtAudioError::WARNING )
\r
9645 throw( RtAudioError( errorText_, type ) );
\r
// Guard used by public API entry points: raises an INVALID_USE error
// when called while no stream is open.
9648 void RtApi :: verifyStream()
\r
9650 if ( stream_.state == STREAM_CLOSED ) {
\r
9651 errorText_ = "RtApi:: a stream is not open!";
\r
9652 error( RtAudioError::INVALID_USE );
\r
// Reset every field of the stream_ structure to its pristine/default
// value. Called before opening a stream and after closing one.
9656 void RtApi :: clearStreamInfo()
\r
9658 stream_.mode = UNINITIALIZED;
\r
9659 stream_.state = STREAM_CLOSED;
\r
9660 stream_.sampleRate = 0;
\r
9661 stream_.bufferSize = 0;
\r
9662 stream_.nBuffers = 0;
\r
9663 stream_.userFormat = 0;
\r
9664 stream_.userInterleaved = true;
\r
9665 stream_.streamTime = 0.0;
\r
9666 stream_.apiHandle = 0;
\r
9667 stream_.deviceBuffer = 0;
\r
9668 stream_.callbackInfo.callback = 0;
\r
9669 stream_.callbackInfo.userData = 0;
\r
9670 stream_.callbackInfo.isRunning = false;
\r
9671 stream_.callbackInfo.errorCallback = 0;
\r
// Per-direction fields: index 0 = output, index 1 = input.
9672 for ( int i=0; i<2; i++ ) {
\r
// 11111 acts as a sentinel meaning "no device selected".
9673 stream_.device[i] = 11111;
\r
9674 stream_.doConvertBuffer[i] = false;
\r
9675 stream_.deviceInterleaved[i] = true;
\r
9676 stream_.doByteSwap[i] = false;
\r
9677 stream_.nUserChannels[i] = 0;
\r
9678 stream_.nDeviceChannels[i] = 0;
\r
9679 stream_.channelOffset[i] = 0;
\r
9680 stream_.deviceFormat[i] = 0;
\r
9681 stream_.latency[i] = 0;
\r
9682 stream_.userBuffer[i] = 0;
\r
9683 stream_.convertInfo[i].channels = 0;
\r
9684 stream_.convertInfo[i].inJump = 0;
\r
9685 stream_.convertInfo[i].outJump = 0;
\r
9686 stream_.convertInfo[i].inFormat = 0;
\r
9687 stream_.convertInfo[i].outFormat = 0;
\r
9688 stream_.convertInfo[i].inOffset.clear();
\r
9689 stream_.convertInfo[i].outOffset.clear();
\r
// Map an RtAudioFormat to its sample size in bytes (SINT8=1, SINT16=2,
// SINT24=3, SINT32/FLOAT32=4, FLOAT64=8); unknown formats produce a
// warning.
// NOTE(review): the `return N;` lines of each branch (and the trailing
// `return 0;`) are not visible in this extraction -- presumably dropped
// short lines; verify against the canonical source.
9693 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9695 if ( format == RTAUDIO_SINT16 )
\r
9697 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9699 else if ( format == RTAUDIO_FLOAT64 )
\r
9701 else if ( format == RTAUDIO_SINT24 )
\r
9703 else if ( format == RTAUDIO_SINT8 )
\r
9706 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9707 error( RtAudioError::WARNING );
\r
// Populate stream_.convertInfo[mode] for convertBuffer(): source/target
// formats, channel jumps, and per-channel in/out offsets that implement
// interleaving, deinterleaving, and channel-offset compensation.
9712 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
// Direction of conversion: INPUT converts device->user, OUTPUT (else
// branch) converts user->device.
9714 if ( mode == INPUT ) { // convert device to user buffer
\r
9715 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9716 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9717 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9718 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9720 else { // convert user to device buffer
\r
9721 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9722 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9723 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9724 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
// Convert only as many channels as both sides have (the smaller jump).
9727 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9728 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9730 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9732 // Set up the interleave/deinterleave offsets.
\r
9733 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
// Deinterleaved source -> interleaved destination: source channels are
// planar (stride bufferSize), destination samples are adjacent.
9734 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9735 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9736 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9737 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9738 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9739 stream_.convertInfo[mode].inJump = 1;
\r
// Interleaved source -> deinterleaved (planar) destination.
9743 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9744 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9745 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9746 stream_.convertInfo[mode].outJump = 1;
\r
9750 else { // no (de)interleaving
\r
9751 if ( stream_.userInterleaved ) {
\r
9752 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9753 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9754 stream_.convertInfo[mode].outOffset.push_back( k );
\r
// Both sides non-interleaved: planar offsets on both ends, jump of 1.
9758 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9759 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9760 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9761 stream_.convertInfo[mode].inJump = 1;
\r
9762 stream_.convertInfo[mode].outJump = 1;
\r
9767 // Add channel offset.
\r
// Shift offsets on the device side by firstChannel: by samples when the
// device is interleaved, by whole planes (bufferSize) when it is not.
9768 if ( firstChannel > 0 ) {
\r
9769 if ( stream_.deviceInterleaved[mode] ) {
\r
9770 if ( mode == OUTPUT ) {
\r
9771 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9772 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9775 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9776 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9780 if ( mode == OUTPUT ) {
\r
9781 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9782 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9785 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9786 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
// Convert one buffer between sample formats, compensating channel counts
// and (de)interleaving per `info` (built by setConvertInfo). Structure:
// one branch per OUTPUT format, each with one sub-branch per INPUT
// format; every sub-branch loops over bufferSize frames, copying
// info.channels samples per frame via the in/out offset tables and
// advancing by the in/out jumps.
// Integer->float paths scale into [-1, 1); float->integer paths scale by
// (2^(bits-1) - 0.5) and subtract 0.5; integer->integer paths shift.
9792 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9794 // This function does format conversion, input/output channel compensation, and
\r
9795 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9796 // the lower three bytes of a 32-bit integer.
\r
9798 // Clear our device buffer when in/out duplex device channels are different
\r
9799 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9800 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9801 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
// ---- Destination: 64-bit float ----
9804 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9806 Float64 *out = (Float64 *)outBuffer;
\r
9808 if (info.inFormat == RTAUDIO_SINT8) {
\r
9809 signed char *in = (signed char *)inBuffer;
\r
9810 scale = 1.0 / 127.5;
\r
9811 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9812 for (j=0; j<info.channels; j++) {
\r
9813 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9814 out[info.outOffset[j]] += 0.5;
\r
9815 out[info.outOffset[j]] *= scale;
\r
9817 in += info.inJump;
\r
9818 out += info.outJump;
\r
9821 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9822 Int16 *in = (Int16 *)inBuffer;
\r
9823 scale = 1.0 / 32767.5;
\r
9824 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9825 for (j=0; j<info.channels; j++) {
\r
9826 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9827 out[info.outOffset[j]] += 0.5;
\r
9828 out[info.outOffset[j]] *= scale;
\r
9830 in += info.inJump;
\r
9831 out += info.outJump;
\r
9834 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9835 Int24 *in = (Int24 *)inBuffer;
\r
9836 scale = 1.0 / 8388607.5;
\r
9837 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9838 for (j=0; j<info.channels; j++) {
\r
9839 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9840 out[info.outOffset[j]] += 0.5;
\r
9841 out[info.outOffset[j]] *= scale;
\r
9843 in += info.inJump;
\r
9844 out += info.outJump;
\r
9847 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9848 Int32 *in = (Int32 *)inBuffer;
\r
9849 scale = 1.0 / 2147483647.5;
\r
9850 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9851 for (j=0; j<info.channels; j++) {
\r
9852 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9853 out[info.outOffset[j]] += 0.5;
\r
9854 out[info.outOffset[j]] *= scale;
\r
9856 in += info.inJump;
\r
9857 out += info.outJump;
\r
9860 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9861 Float32 *in = (Float32 *)inBuffer;
\r
9862 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9863 for (j=0; j<info.channels; j++) {
\r
9864 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9866 in += info.inJump;
\r
9867 out += info.outJump;
\r
9870 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9871 // Channel compensation and/or (de)interleaving only.
\r
9872 Float64 *in = (Float64 *)inBuffer;
\r
9873 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9874 for (j=0; j<info.channels; j++) {
\r
9875 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9877 in += info.inJump;
\r
9878 out += info.outJump;
\r
// ---- Destination: 32-bit float ----
9882 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9884 Float32 *out = (Float32 *)outBuffer;
\r
9886 if (info.inFormat == RTAUDIO_SINT8) {
\r
9887 signed char *in = (signed char *)inBuffer;
\r
9888 scale = (Float32) ( 1.0 / 127.5 );
\r
9889 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9890 for (j=0; j<info.channels; j++) {
\r
9891 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9892 out[info.outOffset[j]] += 0.5;
\r
9893 out[info.outOffset[j]] *= scale;
\r
9895 in += info.inJump;
\r
9896 out += info.outJump;
\r
9899 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9900 Int16 *in = (Int16 *)inBuffer;
\r
9901 scale = (Float32) ( 1.0 / 32767.5 );
\r
9902 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9903 for (j=0; j<info.channels; j++) {
\r
9904 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9905 out[info.outOffset[j]] += 0.5;
\r
9906 out[info.outOffset[j]] *= scale;
\r
9908 in += info.inJump;
\r
9909 out += info.outJump;
\r
9912 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9913 Int24 *in = (Int24 *)inBuffer;
\r
9914 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9915 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9916 for (j=0; j<info.channels; j++) {
\r
9917 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9918 out[info.outOffset[j]] += 0.5;
\r
9919 out[info.outOffset[j]] *= scale;
\r
9921 in += info.inJump;
\r
9922 out += info.outJump;
\r
9925 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9926 Int32 *in = (Int32 *)inBuffer;
\r
9927 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9928 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9929 for (j=0; j<info.channels; j++) {
\r
9930 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9931 out[info.outOffset[j]] += 0.5;
\r
9932 out[info.outOffset[j]] *= scale;
\r
9934 in += info.inJump;
\r
9935 out += info.outJump;
\r
9938 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9939 // Channel compensation and/or (de)interleaving only.
\r
9940 Float32 *in = (Float32 *)inBuffer;
\r
9941 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9942 for (j=0; j<info.channels; j++) {
\r
9943 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9945 in += info.inJump;
\r
9946 out += info.outJump;
\r
9949 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9950 Float64 *in = (Float64 *)inBuffer;
\r
9951 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9952 for (j=0; j<info.channels; j++) {
\r
9953 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9955 in += info.inJump;
\r
9956 out += info.outJump;
\r
// ---- Destination: 32-bit signed integer (left-justify narrower ints) ----
9960 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9961 Int32 *out = (Int32 *)outBuffer;
\r
9962 if (info.inFormat == RTAUDIO_SINT8) {
\r
9963 signed char *in = (signed char *)inBuffer;
\r
9964 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9965 for (j=0; j<info.channels; j++) {
\r
9966 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9967 out[info.outOffset[j]] <<= 24;
\r
9969 in += info.inJump;
\r
9970 out += info.outJump;
\r
9973 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9974 Int16 *in = (Int16 *)inBuffer;
\r
9975 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9976 for (j=0; j<info.channels; j++) {
\r
9977 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9978 out[info.outOffset[j]] <<= 16;
\r
9980 in += info.inJump;
\r
9981 out += info.outJump;
\r
9984 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9985 Int24 *in = (Int24 *)inBuffer;
\r
9986 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9987 for (j=0; j<info.channels; j++) {
\r
9988 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9989 out[info.outOffset[j]] <<= 8;
\r
9991 in += info.inJump;
\r
9992 out += info.outJump;
\r
9995 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9996 // Channel compensation and/or (de)interleaving only.
\r
9997 Int32 *in = (Int32 *)inBuffer;
\r
9998 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9999 for (j=0; j<info.channels; j++) {
\r
10000 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10002 in += info.inJump;
\r
10003 out += info.outJump;
\r
10006 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10007 Float32 *in = (Float32 *)inBuffer;
\r
10008 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10009 for (j=0; j<info.channels; j++) {
\r
10010 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10012 in += info.inJump;
\r
10013 out += info.outJump;
\r
10016 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10017 Float64 *in = (Float64 *)inBuffer;
\r
10018 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10019 for (j=0; j<info.channels; j++) {
\r
10020 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
10022 in += info.inJump;
\r
10023 out += info.outJump;
\r
// ---- Destination: 24-bit signed integer (stored in Int24 wrapper) ----
10027 else if (info.outFormat == RTAUDIO_SINT24) {
\r
10028 Int24 *out = (Int24 *)outBuffer;
\r
10029 if (info.inFormat == RTAUDIO_SINT8) {
\r
10030 signed char *in = (signed char *)inBuffer;
\r
10031 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10032 for (j=0; j<info.channels; j++) {
\r
10033 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
10034 //out[info.outOffset[j]] <<= 16;
\r
10036 in += info.inJump;
\r
10037 out += info.outJump;
\r
10040 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10041 Int16 *in = (Int16 *)inBuffer;
\r
10042 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10043 for (j=0; j<info.channels; j++) {
\r
10044 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
10045 //out[info.outOffset[j]] <<= 8;
\r
10047 in += info.inJump;
\r
10048 out += info.outJump;
\r
10051 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10052 // Channel compensation and/or (de)interleaving only.
\r
10053 Int24 *in = (Int24 *)inBuffer;
\r
10054 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10055 for (j=0; j<info.channels; j++) {
\r
10056 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10058 in += info.inJump;
\r
10059 out += info.outJump;
\r
10062 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10063 Int32 *in = (Int32 *)inBuffer;
\r
10064 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10065 for (j=0; j<info.channels; j++) {
\r
10066 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
10067 //out[info.outOffset[j]] >>= 8;
\r
10069 in += info.inJump;
\r
10070 out += info.outJump;
\r
10073 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10074 Float32 *in = (Float32 *)inBuffer;
\r
10075 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10076 for (j=0; j<info.channels; j++) {
\r
10077 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10079 in += info.inJump;
\r
10080 out += info.outJump;
\r
10083 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10084 Float64 *in = (Float64 *)inBuffer;
\r
10085 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10086 for (j=0; j<info.channels; j++) {
\r
10087 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
10089 in += info.inJump;
\r
10090 out += info.outJump;
\r
// ---- Destination: 16-bit signed integer (truncate wider formats) ----
10094 else if (info.outFormat == RTAUDIO_SINT16) {
\r
10095 Int16 *out = (Int16 *)outBuffer;
\r
10096 if (info.inFormat == RTAUDIO_SINT8) {
\r
10097 signed char *in = (signed char *)inBuffer;
\r
10098 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10099 for (j=0; j<info.channels; j++) {
\r
10100 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
10101 out[info.outOffset[j]] <<= 8;
\r
10103 in += info.inJump;
\r
10104 out += info.outJump;
\r
10107 else if (info.inFormat == RTAUDIO_SINT16) {
\r
10108 // Channel compensation and/or (de)interleaving only.
\r
10109 Int16 *in = (Int16 *)inBuffer;
\r
10110 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10111 for (j=0; j<info.channels; j++) {
\r
10112 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10114 in += info.inJump;
\r
10115 out += info.outJump;
\r
10118 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10119 Int24 *in = (Int24 *)inBuffer;
\r
10120 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10121 for (j=0; j<info.channels; j++) {
\r
10122 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10124 in += info.inJump;
\r
10125 out += info.outJump;
\r
10128 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10129 Int32 *in = (Int32 *)inBuffer;
\r
10130 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10131 for (j=0; j<info.channels; j++) {
\r
10132 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10134 in += info.inJump;
\r
10135 out += info.outJump;
\r
10138 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10139 Float32 *in = (Float32 *)inBuffer;
\r
10140 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10141 for (j=0; j<info.channels; j++) {
\r
10142 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10144 in += info.inJump;
\r
10145 out += info.outJump;
\r
10148 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10149 Float64 *in = (Float64 *)inBuffer;
\r
10150 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10151 for (j=0; j<info.channels; j++) {
\r
10152 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10154 in += info.inJump;
\r
10155 out += info.outJump;
\r
// ---- Destination: 8-bit signed integer (keep most-significant byte) ----
10159 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10160 signed char *out = (signed char *)outBuffer;
\r
10161 if (info.inFormat == RTAUDIO_SINT8) {
\r
10162 // Channel compensation and/or (de)interleaving only.
\r
10163 signed char *in = (signed char *)inBuffer;
\r
10164 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10165 for (j=0; j<info.channels; j++) {
\r
10166 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10168 in += info.inJump;
\r
10169 out += info.outJump;
\r
10172 if (info.inFormat == RTAUDIO_SINT16) {
\r
10173 Int16 *in = (Int16 *)inBuffer;
\r
10174 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10175 for (j=0; j<info.channels; j++) {
\r
10176 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10178 in += info.inJump;
\r
10179 out += info.outJump;
\r
10182 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10183 Int24 *in = (Int24 *)inBuffer;
\r
10184 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10185 for (j=0; j<info.channels; j++) {
\r
10186 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10188 in += info.inJump;
\r
10189 out += info.outJump;
\r
10192 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10193 Int32 *in = (Int32 *)inBuffer;
\r
10194 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10195 for (j=0; j<info.channels; j++) {
\r
10196 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10198 in += info.inJump;
\r
10199 out += info.outJump;
\r
10202 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10203 Float32 *in = (Float32 *)inBuffer;
\r
10204 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10205 for (j=0; j<info.channels; j++) {
\r
10206 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10208 in += info.inJump;
\r
10209 out += info.outJump;
\r
10212 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10213 Float64 *in = (Float64 *)inBuffer;
\r
10214 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10215 for (j=0; j<info.channels; j++) {
\r
10216 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10218 in += info.inJump;
\r
10219 out += info.outJump;
\r
10225 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10226 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10227 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10229 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10235 if ( format == RTAUDIO_SINT16 ) {
\r
10236 for ( unsigned int i=0; i<samples; i++ ) {
\r
10237 // Swap 1st and 2nd bytes.
\r
10239 *(ptr) = *(ptr+1);
\r
10242 // Increment 2 bytes.
\r
10246 else if ( format == RTAUDIO_SINT32 ||
\r
10247 format == RTAUDIO_FLOAT32 ) {
\r
10248 for ( unsigned int i=0; i<samples; i++ ) {
\r
10249 // Swap 1st and 4th bytes.
\r
10251 *(ptr) = *(ptr+3);
\r
10254 // Swap 2nd and 3rd bytes.
\r
10257 *(ptr) = *(ptr+1);
\r
10260 // Increment 3 more bytes.
\r
10264 else if ( format == RTAUDIO_SINT24 ) {
\r
10265 for ( unsigned int i=0; i<samples; i++ ) {
\r
10266 // Swap 1st and 3rd bytes.
\r
10268 *(ptr) = *(ptr+2);
\r
10271 // Increment 2 more bytes.
\r
10275 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10276 for ( unsigned int i=0; i<samples; i++ ) {
\r
10277 // Swap 1st and 8th bytes
\r
10279 *(ptr) = *(ptr+7);
\r
10282 // Swap 2nd and 7th bytes
\r
10285 *(ptr) = *(ptr+5);
\r
10288 // Swap 3rd and 6th bytes
\r
10291 *(ptr) = *(ptr+3);
\r
10294 // Swap 4th and 5th bytes
\r
10297 *(ptr) = *(ptr+1);
\r
10300 // Increment 5 more bytes.
\r
10306 // Indentation settings for Vim and Emacs
\r
10308 // Local Variables:
\r
10309 // c-basic-offset: 2
\r
10310 // indent-tabs-mode: nil
\r
10313 // vim: et sts=2 sw=2
\r