{
/* Deinterleave and convert to float */
- float* samples[_fs->audio_channels()];
int const total_samples = size / bytes_per_audio_sample();
int const frames = total_samples / _fs->audio_channels();
- for (int i = 0; i < _fs->audio_channels(); ++i) {
- samples[i] = new float[frames];
- }
+ /* One ref-counted AudioBuffers replaces the hand-managed per-channel
+    arrays; its destructor frees the channel data, so the explicit
+    delete[] loop at the end of this function goes away. */
+ shared_ptr<AudioBuffers> audio (new AudioBuffers (_fs->audio_channels(), frames));
switch (audio_sample_format()) {
case AV_SAMPLE_FMT_S16:
/* signed sample */
int const os = ou >= 0x8000 ? (- 0x10000 + ou) : ou;
/* float sample: scale S16 into [-1, 1) */
- samples[channel][sample] = float(os) / 0x8000;
+ audio->data(channel)[sample] = float(os) / 0x8000;
++channel;
if (channel == _fs->audio_channels()) {
{
float* p = reinterpret_cast<float*> (data);
for (int i = 0; i < _fs->audio_channels(); ++i) {
- memcpy (samples[i], p, frames * sizeof(float));
+ memcpy (audio->data(i), p, frames * sizeof(float));
p += frames;
}
}
/* audio_gain() is in dB; convert to a linear multiplier */
float const linear_gain = pow (10, _fs->audio_gain() / 20);
for (int i = 0; i < _fs->audio_channels(); ++i) {
for (int j = 0; j < frames; ++j) {
- samples[i][j] *= linear_gain;
+ audio->data(i)[j] *= linear_gain;
}
}
}
/* Update the number of audio frames we've pushed to the encoder */
_audio_frames_processed += frames;
- Audio (samples, frames);
-
- for (int i = 0; i < _fs->audio_channels(); ++i) {
- delete[] samples[i];
- }
+ /* Emit the audio via the AudioBuffers-carrying signal; the frame
+    count now travels inside the AudioBuffers object itself. */
+ Audio (audio);
}
/** Called by subclasses to tell the world that some video data is ready.
*/
sigc::signal<void, boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle> > Video;
- /** Emitted when some audio data is ready.
- * First parameter is an array of pointers to deinterleaved, floating point sample data for each channel.
- * Second parameter is the size of the data in frames (ie samples on each channel).
- */
- sigc::signal<void, float**, int> Audio;
+ /** Emitted when some audio data is ready.
+ * Parameter is the deinterleaved floating point sample data;
+ * its frames() gives the number of samples on each channel.
+ */
+ sigc::signal<void, boost::shared_ptr<AudioBuffers> > Audio;
protected:
/** perform a single pass at our content */
class Image;
class Log;
class Subtitle;
+class AudioBuffers;
/** @class Encoder
* @brief Parent class for classes which can encode video and audio frames.
* Parameter to process_audio() is deinterleaved floating point sample
* data; the frame count (samples per channel) travels inside AudioBuffers.
*/
- virtual void process_audio (float** d, int s) = 0;
+ virtual void process_audio (boost::shared_ptr<const AudioBuffers>) = 0;
/** Called when a processing run has finished */
virtual void process_end () = 0;
void process_begin (int64_t audio_channel_layout, AVSampleFormat audio_sample_format) {}
void process_video (boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle>);
/* No-op audio stub for an encoder that discards audio */
- void process_audio (float**, int) {}
+ void process_audio (boost::shared_ptr<const AudioBuffers>) {}
void process_end () {}
};
/* NOTE(review): this second run of identical stubs appears to belong to a
   different subclass whose header falls outside this chunk — confirm. */
void process_begin (int64_t audio_channel_layout, AVSampleFormat audio_sample_format) {}
void process_video (boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle>);
- void process_audio (float**, int) {}
+ void process_audio (boost::shared_ptr<const AudioBuffers>) {}
void process_end () {}
};
#if HAVE_SWRESAMPLE
if (_swr_context) {
- float* out[_fs->audio_channels()];
- for (int i = 0; i < _fs->audio_channels(); ++i) {
- out[i] = new float[256];
- }
+ /* Flush the resampler through a reusable 256-frame AudioBuffers */
+ shared_ptr<AudioBuffers> out (new AudioBuffers (_fs->audio_channels(), 256));
while (1) {
- int const frames = swr_convert (_swr_context, (uint8_t **) out, 256, 0, 0);
+ int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
if (frames < 0) {
throw EncodeError ("could not run sample-rate converter");
break;
}
- write_audio (out, frames);
- }
-
- for (int i = 0; i < _fs->audio_channels(); ++i) {
- delete[] out[i];
+ /* BUG FIX: record how many frames swr_convert actually produced.
+    Without this, write_audio() sees out->frames() == 256 on every
+    pass and writes stale samples beyond the converted data; the old
+    code passed `frames' explicitly, and that information was lost
+    in the conversion to AudioBuffers. */
+ out->set_frames (frames);
+ write_audio (out);
}
swr_free (&_swr_context);
}
void
-J2KWAVEncoder::process_audio (float** data, int frames)
+J2KWAVEncoder::process_audio (shared_ptr<const AudioBuffers> audio)
{
- float* resampled[_fs->audio_channels()];
+ shared_ptr<AudioBuffers> resampled;
-#if HAVE_SWRESAMPLE
+#if HAVE_SWRESAMPLE
/* Maybe sample-rate convert */
if (_swr_context) {
/* Compute the resampled frames count and add 32 for luck */
+ /* NOTE(review): if frames() and both rates are integers, the division
+    truncates before ceil() is applied; presumably the "+ 32 for luck"
+    slack covers the shortfall — confirm. */
- int const max_resampled_frames = ceil (frames * _fs->target_sample_rate() / _fs->audio_sample_rate()) + 32;
+ int const max_resampled_frames = ceil (audio->frames() * _fs->target_sample_rate() / _fs->audio_sample_rate()) + 32;
- /* Make a buffer to put the result in */
- for (int i = 0; i < _fs->audio_channels(); ++i) {
- resampled[i] = new float[max_resampled_frames];
- }
+ resampled.reset (new AudioBuffers (_fs->audio_channels(), max_resampled_frames));
/* Resample audio */
- int const resampled_frames = swr_convert (_swr_context, (uint8_t **) resampled, max_resampled_frames, (uint8_t const **) data, frames);
+ int const resampled_frames = swr_convert (
+ _swr_context, (uint8_t **) resampled->data(), max_resampled_frames, (uint8_t const **) audio->data(), audio->frames()
+ );
+
if (resampled_frames < 0) {
throw EncodeError ("could not run sample-rate converter");
}
+ /* Record how many frames the converter actually produced */
+ resampled->set_frames (resampled_frames);
+
/* And point our variables at the resampled audio */
- data = resampled;
- frames = resampled_frames;
+ audio = resampled;
}
#endif
- write_audio (data, frames);
-
-#if HAVE_SWRESAMPLE
- if (_swr_context) {
- for (int i = 0; i < _fs->audio_channels(); ++i) {
- delete[] resampled[i];
- }
- }
-#endif
+ write_audio (audio);
}
void
-J2KWAVEncoder::write_audio (float** data, int frames)
+J2KWAVEncoder::write_audio (shared_ptr<const AudioBuffers> audio) const
{
+ /* Append each channel's samples to its own sound file via libsndfile;
+    audio->frames() must reflect the count of valid samples. */
for (int i = 0; i < _fs->audio_channels(); ++i) {
- sf_write_float (_sound_files[i], data[i], frames);
+ sf_write_float (_sound_files[i], audio->data(i), audio->frames());
}
}
class Image;
class Log;
class Subtitle;
+class AudioBuffers;
/** @class J2KWAVEncoder
 * @brief An encoder which writes JPEG2000 and WAV files.
void process_begin (int64_t audio_channel_layout, AVSampleFormat audio_sample_format);
void process_video (boost::shared_ptr<Image>, int, boost::shared_ptr<Subtitle>);
- void process_audio (float**, int);
+ void process_audio (boost::shared_ptr<const AudioBuffers>);
void process_end ();
private:
+ /* Write one block of (possibly resampled) audio out to the WAV files */
- void write_audio (float** data, int frames);
+ void write_audio (boost::shared_ptr<const AudioBuffers> audio) const;
void encoder_thread (ServerDescription *);
void close_sound_files ();
void terminate_worker_threads ();
return lexical_cast<int> (i->second);
}
+
+/** Construct a set of audio buffers, one float array per channel.
+ *  The sample data is left uninitialised; callers are expected to
+ *  fill it before use.
+ *  @param channels Number of channels to allocate.
+ *  @param frames Number of frames (samples per channel) to allocate.
+ */
+AudioBuffers::AudioBuffers (int channels, int frames)
+ : _channels (channels)
+ , _frames (frames)
+{
+ _data = new float*[_channels];
+ for (int i = 0; i < _channels; ++i) {
+ _data[i] = new float[frames];
+ }
+}
+
+/** Free each channel's sample array, then the channel pointer table. */
+AudioBuffers::~AudioBuffers ()
+{
+ for (int i = 0; i < _channels; ++i) {
+ delete[] _data[i];
+ }
+
+ delete[] _data;
+}
+
+/** @param c Channel index; must be in [0, channels).
+ *  @return Pointer to the sample data for channel c (still owned by
+ *  this object).
+ */
+float*
+AudioBuffers::data (int c) const
+{
+ assert (c >= 0 && c < _channels);
+ return _data[c];
+}
+
+/** Set the number of frames these buffers will be regarded as containing.
+ *  No reallocation is done, so f must not exceed the current frame count.
+ *  NOTE(review): the originally-allocated size is not remembered, so once
+ *  reduced, a later set_frames() back to a larger (but originally valid)
+ *  value will trip the assert — confirm callers never do that.
+ *  @param f New frame count.
+ */
+void
+AudioBuffers::set_frames (int f)
+{
+ assert (f <= _frames);
+ _frames = f;
+}
+
+
+
int _buffer_data;
};
+/** @class AudioBuffers
+ *  @brief A set of audio data buffers: an array of floating point
+ *  samples for each channel, plus a frame count.
+ */
+class AudioBuffers
+{
+public:
+ AudioBuffers (int channels, int frames);
+ ~AudioBuffers ();
+
+ /** @return Array of pointers to the data for each channel */
+ float** data () const {
+ return _data;
+ }
+
+ /** @return Pointer to the data for a given channel */
+ float* data (int) const;
+
+ /** @return Number of frames (samples on each channel) */
+ int frames () const {
+ return _frames;
+ }
+
+ void set_frames (int f);
+
+private:
+ /* BUG FIX: _data is a raw owning pointer, so the compiler-generated
+    copy constructor / assignment would give two objects the same
+    arrays and cause a double delete[].  Declare them private and
+    leave them undefined (pre-C++11 idiom, matching this project's
+    use of boost::shared_ptr rather than C++11).
+ */
+ AudioBuffers (AudioBuffers const &);
+ AudioBuffers& operator= (AudioBuffers const &);
+
+ int _channels;
+ int _frames;
+ float** _data;
+};
+
#endif