#include "player_video.h"
#include "log.h"
#include "image.h"
+#include "cross.h"
+#include "butler.h"
#include "compose.hpp"
#include <iostream>
using std::string;
using std::runtime_error;
using std::cout;
+using std::pair;
using boost::shared_ptr;
using boost::bind;
using boost::weak_ptr;
return out;
}
/* NOTE(review): diff hunk — code lines (including +/- markers) reproduced
   verbatim; only comment lines added.  The constructor gains a
   mixdown_to_stereo flag and now builds the AudioMapping/Butler itself. */
-FFmpegEncoder::FFmpegEncoder (shared_ptr<const Film> film, weak_ptr<Job> job, boost::filesystem::path output, Format format)
+FFmpegEncoder::FFmpegEncoder (shared_ptr<const Film> film, weak_ptr<Job> job, boost::filesystem::path output, Format format, bool mixdown_to_stereo)
: Encoder (film, job)
, _video_options (0)
, _history (1000)
, _output (output)
- , _pending_audio (new AudioBuffers (film->audio_channels(), 0))
{
switch (format) {
case FORMAT_PRORES:
_audio_codec_name = "aac";
break;
}
+
+ /* Burn subtitles into the image and play referenced content directly;
+    presumably because the exported flat file cannot carry them as
+    separate assets — confirm against Player API. */
+ _player->set_always_burn_subtitles (true);
+ _player->set_play_referenced ();
+
+ int const ch = film->audio_channels ();
+
+ AudioMapping map;
+ if (mixdown_to_stereo) {
+ _output_audio_channels = 2;
+ map = AudioMapping (ch, 2);
+ /* Overall gain is 2 / (4 + sqrt(2)), with centre a further -3dB into
+    each side; presumably chosen so a full-scale mix cannot clip — confirm. */
+ float const overall_gain = 2 / (4 + sqrt(2));
+ float const minus_3dB = 1 / sqrt(2);
+ map.set (dcp::LEFT, 0, overall_gain);
+ map.set (dcp::RIGHT, 1, overall_gain);
+ map.set (dcp::CENTRE, 0, overall_gain * minus_3dB);
+ map.set (dcp::CENTRE, 1, overall_gain * minus_3dB);
+ /* NOTE(review): surrounds go in at full overall_gain rather than -3dB
+    as in ITU-style downmixes — confirm this is intentional. */
+ map.set (dcp::LS, 0, overall_gain);
+ map.set (dcp::RS, 1, overall_gain);
+ _pending_audio.reset (new AudioBuffers (2, 0));
+ } else {
+ /* No mixdown: identity mapping, every channel passed straight through */
+ _output_audio_channels = ch;
+ map = AudioMapping (ch, ch);
+ _pending_audio.reset (new AudioBuffers (ch, 0));
+ for (int i = 0; i < ch; ++i) {
+ map.set (i, i, 1);
+ }
+ }
+
+ /* The Butler pulls video/audio from _player, remapping audio with `map` */
+ _butler.reset (new Butler (_player, film->log(), map, _output_audio_channels));
}
/* NOTE(review): hunk from the audio codec-context setup (function header
   elided by the diff).  Channel count/layout now come from
   _output_audio_channels so the stereo-mixdown case encodes 2 channels. */
void
_audio_codec_context->bit_rate = 256 * 1024;
_audio_codec_context->sample_fmt = _sample_format;
_audio_codec_context->sample_rate = _film->audio_frame_rate ();
- _audio_codec_context->channel_layout = av_get_default_channel_layout (_film->audio_channels ());
- _audio_codec_context->channels = _film->audio_channels ();
+ _audio_codec_context->channel_layout = av_get_default_channel_layout (_output_audio_channels);
+ _audio_codec_context->channels = _output_audio_channels;
}
/* NOTE(review): hunk from codec/output-file opening (header elided by the
   diff).  avio_open_boost is presumably a wrapper (from cross.h, added at
   the top of this patch) taking a boost::filesystem::path so that
   non-ASCII/wide paths open correctly on Windows — confirm. */
void
throw runtime_error (String::compose ("could not open FFmpeg audio codec (%1)", buffer));
}
- if (avio_open (&_format_context->pb, _output.c_str(), AVIO_FLAG_WRITE) < 0) {
+ if (avio_open_boost (&_format_context->pb, _output, AVIO_FLAG_WRITE) < 0) {
throw runtime_error ("could not open FFmpeg output file");
}
job->sub (_("Encoding"));
}
/* NOTE(review): hunk replacing the old player-driven pass loop with a
   pull loop over the Butler, one video frame at a time. */
- while (!_player->pass ()) {}
+ /* Duration of one video frame, and the equivalent number of audio samples */
+ DCPTime const video_frame = DCPTime::from_frames (1, _film->video_frame_rate ());
+ int const audio_frames = video_frame.frames_round(_film->audio_frame_rate());
+ /* NOTE(review): raw new[]/delete[] — leaks if video()/audio() throws;
+    a vector<float> would be safer.  Not changed here (diff fragment). */
+ float* interleaved = new float[_output_audio_channels * audio_frames];
+ shared_ptr<AudioBuffers> deinterleaved (new AudioBuffers (_output_audio_channels, audio_frames));
+ for (DCPTime i; i < _film->length(); i += video_frame) {
+ pair<shared_ptr<PlayerVideo>, DCPTime> v = _butler->get_video ();
+ video (v.first, v.second);
+ _butler->get_audio (interleaved, audio_frames);
+ /* XXX: inefficient; butler interleaves and we deinterleave again */
+ float* p = interleaved;
+ /* NOTE(review): inner `int i` shadows the outer `DCPTime i`; harmless
+    here (the DCPTime is only used again after the loops) but confusing —
+    consider renaming. */
+ for (int i = 0; i < audio_frames; ++i) {
+ for (int j = 0; j < _output_audio_channels; ++j) {
+ deinterleaved->data(j)[i] = *p++;
+ }
+ }
+ audio (deinterleaved, i);
+ }
+ delete[] interleaved;
/* NOTE(review): hunk from end-of-encode flushing.  Fixes a real bug: the
   audio drain (null-frame encode) was using the *video* codec context. */
if (_pending_audio->frames() > 0) {
audio_frame (_pending_audio->frames ());
packet.data = 0;
packet.size = 0;
- avcodec_encode_audio2 (_video_codec_context, &packet, 0, &got_packet);
+ avcodec_encode_audio2 (_audio_codec_context, &packet, 0, &got_packet);
if (got_packet) {
packet.stream_index = 0;
av_interleaved_write_frame (_format_context, &packet);
/* NOTE(review): hunk from video-frame plane copying.  Fixes a buffer-size
   bug: for chroma-subsampled formats the chroma planes have fewer rows
   than the full image, so per-plane sample_size(i) must be used instead
   of the overall image height. */
DCPOMATIC_ASSERT (frame);
for (int i = 0; i < 3; ++i) {
- size_t const size = image->stride()[i] * image->size().height;
+ size_t const size = image->stride()[i] * image->sample_size(i).height;
AVBufferRef* buffer = av_buffer_alloc (size);
DCPOMATIC_ASSERT (buffer);
/* XXX: inefficient */
}
}
+/** Called when the player gives us some audio */
void
/* The DCPTime parameter is now unnamed: audio is consumed strictly in
   order from _pending_audio, so the timestamp is unused. */
-FFmpegEncoder::audio (shared_ptr<AudioBuffers> audio, DCPTime time)
+FFmpegEncoder::audio (shared_ptr<AudioBuffers> audio, DCPTime)
{
_pending_audio->append (audio);
- while (_pending_audio->frames() >= _audio_codec_context->frame_size) {
- audio_frame (_audio_codec_context->frame_size);
+ int frame_size = _audio_codec_context->frame_size;
+ if (frame_size == 0) {
+ /* codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE */
+ /* NOTE(review): 2000 samples appears to be an arbitrary chunk size for
+    variable-frame-size codecs — confirm there is no better constant. */
+ frame_size = 2000;
+ }
+
+ /* Emit full codec frames; any remainder stays in _pending_audio and is
+    flushed at the end of the encode. */
+ while (_pending_audio->frames() >= frame_size) {
+ audio_frame (frame_size);
}
}
void
/* Encode `size` samples from the front of _pending_audio as one audio frame.
   (Body partially elided by the diff.) */
FFmpegEncoder::audio_frame (int size)
{
+ DCPOMATIC_ASSERT (size);
+
AVFrame* frame = av_frame_alloc ();
DCPOMATIC_ASSERT (frame);
/* Use the buffer's own channel count (== _output_audio_channels) rather
   than re-reading it from the codec context. */
- int const channels = _audio_codec_context->channels;
+ int const channels = _pending_audio->channels();
+ DCPOMATIC_ASSERT (channels);
int const buffer_size = av_samples_get_buffer_size (0, channels, size, _audio_codec_context->sample_fmt, 0);
DCPOMATIC_ASSERT (buffer_size >= 0);
{
/* Interleave float samples into signed 16-bit */
int16_t* q = reinterpret_cast<int16_t*> (samples);
for (int i = 0; i < size; ++i) {
/* Fixes a genuine bug: the inner loop incremented i instead of j */
- for (int j = 0; j < channels; ++i) {
+ for (int j = 0; j < channels; ++j) {
*q++ = p[j][i] * 32767;
}
}
}
void
/* Intentionally a no-op: subtitles are expected to be burned into the
   video upstream, so nothing needs doing here.  Parameters are unnamed
   to avoid unused-parameter warnings. */
-FFmpegEncoder::subtitle (PlayerSubtitles subs, DCPTimePeriod period)
+FFmpegEncoder::subtitle (PlayerSubtitles, DCPTimePeriod)
{
}