X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fffmpeg_encoder.cc;h=d4f0b4b472612bb7ee449bd7b51bad00d81fd9f6;hb=809bcfd85fad2ef7d4131c054be4cccd5bcc9d05;hp=734c9810d16c701bf28497e99f8dbfcaa3f906ff;hpb=d8ea1796f34ff894b148a0af78c0a547e0496ee1;p=dcpomatic.git

diff --git a/src/lib/ffmpeg_encoder.cc b/src/lib/ffmpeg_encoder.cc
index 734c9810d..d4f0b4b47 100644
--- a/src/lib/ffmpeg_encoder.cc
+++ b/src/lib/ffmpeg_encoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2017 Carl Hetherington
+    Copyright (C) 2017-2018 Carl Hetherington
 
     This file is part of DCP-o-matic.
 
@@ -18,214 +18,284 @@
 
 */
 
+
+#include "butler.h"
+#include "cross.h"
 #include "ffmpeg_encoder.h"
 #include "film.h"
+#include "image.h"
 #include "job.h"
+#include "log.h"
 #include "player.h"
 #include "player_video.h"
-#include "log.h"
-#include "image.h"
 #include "compose.hpp"
 #include <iostream>
 
 #include "i18n.h"
 
-using std::string;
-using std::runtime_error;
+
 using std::cout;
-using boost::shared_ptr;
+using std::list;
+using std::make_shared;
+using std::shared_ptr;
+using std::string;
+using std::weak_ptr;
 using boost::bind;
-using boost::weak_ptr;
-
-static AVPixelFormat
-force_pixel_format (AVPixelFormat, AVPixelFormat out)
-{
-	return out;
-}
-
-FFmpegEncoder::FFmpegEncoder (shared_ptr<const Film> film, weak_ptr<Job> job, boost::filesystem::path output, Format format)
+using boost::optional;
+using namespace dcpomatic;
+#if BOOST_VERSION >= 106100
+using namespace boost::placeholders;
+#endif
+
+
+/** @param key Key to use to encrypt MP4 outputs */
+FFmpegEncoder::FFmpegEncoder (
+	shared_ptr<const Film> film,
+	weak_ptr<Job> job,
+	boost::filesystem::path output,
+	ExportFormat format,
+	bool mixdown_to_stereo,
+	bool split_reels,
+	bool audio_stream_per_channel,
+	int x264_crf
+	)
 	: Encoder (film, job)
-	, _history (1000)
+	, _history (200)
 	, _output (output)
+	, _format (format)
+	, _split_reels (split_reels)
+	, _audio_stream_per_channel (audio_stream_per_channel)
+	, _x264_crf (x264_crf)
 {
-	switch (format) {
-	case FORMAT_PRORES:
-		_pixel_format = AV_PIX_FMT_YUV422P10;
-		_codec_name = "prores_ks";
-		break;
-	case FORMAT_H264:
-		_pixel_format = AV_PIX_FMT_YUV420P;
-		_codec_name = "libx264";
-		break;
+	_player->set_always_burn_open_subtitles ();
+	_player->set_play_referenced ();
+
+	int const ch = film->audio_channels ();
+
+	AudioMapping map;
+	if (mixdown_to_stereo) {
+		_output_audio_channels = 2;
+		map = AudioMapping (ch, 2);
+		float const overall_gain = 2 / (4 + sqrt(2));
+		float const minus_3dB = 1 / sqrt(2);
+		if (ch == 2) {
+			map.set (dcp::Channel::LEFT, 0, 1);
+			map.set (dcp::Channel::RIGHT, 1, 1);
+		} else if (ch == 4) {
+			map.set (dcp::Channel::LEFT, 0, overall_gain);
+			map.set (dcp::Channel::RIGHT, 1, overall_gain);
+			map.set (dcp::Channel::CENTRE, 0, overall_gain * minus_3dB);
+			map.set (dcp::Channel::CENTRE, 1, overall_gain * minus_3dB);
+			map.set (dcp::Channel::LS, 0, overall_gain);
+		} else if (ch >= 6) {
+			map.set (dcp::Channel::LEFT, 0, overall_gain);
+			map.set (dcp::Channel::RIGHT, 1, overall_gain);
+			map.set (dcp::Channel::CENTRE, 0, overall_gain * minus_3dB);
+			map.set (dcp::Channel::CENTRE, 1, overall_gain * minus_3dB);
+			map.set (dcp::Channel::LS, 0, overall_gain);
+			map.set (dcp::Channel::RS, 1, overall_gain);
+		}
+		/* XXX: maybe we should do something better for >6 channel DCPs */
+	} else {
+		/* Our encoders don't really want to encode any channel count between 9 and 15 inclusive,
+		 * so let's just use 16 channel exports for any project with more than 8 channels.
+		 */
+		_output_audio_channels = ch > 8 ? 16 : ch;
+		map = AudioMapping (ch, _output_audio_channels);
+		for (int i = 0; i < ch; ++i) {
+			map.set (i, i, 1);
+		}
 	}
+
+	_butler = std::make_shared<Butler>(
+		_film, _player, map, _output_audio_channels, bind(&PlayerVideo::force, FFmpegFileEncoder::pixel_format(format)), VideoRange::VIDEO, Image::Alignment::PADDED, false, false
+		);
 }
 
+
 void
 FFmpegEncoder::go ()
 {
-	AVCodec* codec = avcodec_find_encoder_by_name (_codec_name.c_str());
-	if (!codec) {
-		throw runtime_error (String::compose ("could not find FFmpeg encoder %1", _codec_name));
-	}
-
-	_codec_context = avcodec_alloc_context3 (codec);
-	if (!_codec_context) {
-		throw runtime_error ("could not allocate FFmpeg context");
-	}
-
-	avcodec_get_context_defaults3 (_codec_context, codec);
-
-	/* Variable quantisation */
-	_codec_context->global_quality = 0;
-	_codec_context->width = _film->frame_size().width;
-	_codec_context->height = _film->frame_size().height;
-	_codec_context->time_base = (AVRational) { 1, _film->video_frame_rate() };
-	_codec_context->pix_fmt = _pixel_format;
-	_codec_context->flags |= CODEC_FLAG_QSCALE | CODEC_FLAG_GLOBAL_HEADER;
-
-	avformat_alloc_output_context2 (&_format_context, 0, 0, _output.string().c_str());
-	if (!_format_context) {
-		throw runtime_error ("could not allocate FFmpeg format context");
-	}
-
-	_video_stream = avformat_new_stream (_format_context, codec);
-	if (!_video_stream) {
-		throw runtime_error ("could not create FFmpeg output video stream");
-	}
-
-	/* Note: needs to increment with each stream */
-	_video_stream->id = 0;
-	_video_stream->codec = _codec_context;
-
-	AVDictionary* options = 0;
-	av_dict_set (&options, "profile", "3", 0);
-	av_dict_set (&options, "threads", "auto", 0);
-
-	if (avcodec_open2 (_codec_context, codec, &options) < 0) {
-		throw runtime_error ("could not open FFmpeg codec");
-	}
-
-	if (avio_open (&_format_context->pb, _output.c_str(), AVIO_FLAG_WRITE) < 0) {
-		throw runtime_error ("could not open FFmpeg output file");
-	}
-
-	if (avformat_write_header (_format_context, &options) < 0) {
-		throw runtime_error ("could not write header to FFmpeg output file");
-	}
-
 	{
-		shared_ptr<Job> job = _job.lock ();
+		auto job = _job.lock ();
 		DCPOMATIC_ASSERT (job);
 		job->sub (_("Encoding"));
 	}
 
-	while (!_player->pass ()) {}
+	Waker waker;
 
-	while (true) {
-		AVPacket packet;
-		av_init_packet (&packet);
-		packet.data = 0;
-		packet.size = 0;
-
-		int got_packet;
-		avcodec_encode_video2 (_codec_context, &packet, 0, &got_packet);
-		if (!got_packet) {
-			break;
-		}
-
-		packet.stream_index = 0;
-		av_interleaved_write_frame (_format_context, &packet);
-		av_packet_unref (&packet);
-	}
+	list<FileEncoderSet> file_encoders;
 
-	av_write_trailer (_format_context);
+	int const files = _split_reels ? _film->reels().size() : 1;
+	for (int i = 0; i < files; ++i) {
 
-	avcodec_close (_codec_context);
-	avio_close (_format_context->pb);
-	avformat_free_context (_format_context);
-}
+		boost::filesystem::path filename = _output;
+		string extension = boost::filesystem::extension (filename);
+		filename = boost::filesystem::change_extension (filename, "");
 
-void
-FFmpegEncoder::video (shared_ptr<PlayerVideo> video, DCPTime time)
-{
-	shared_ptr<Image> image = video->image (
-		bind (&Log::dcp_log, _film->log().get(), _1, _2),
-		bind (&force_pixel_format, _1, _pixel_format),
-		true,
-		false
-		);
+		if (files > 1) {
+			/// TRANSLATORS: _reel%1 here is to be added to an export filename to indicate
+			/// which reel it is.  Preserve the %1; it will be replaced with the reel number.
+			filename = filename.string() + String::compose(_("_reel%1"), i + 1);
+		}
 
-	AVFrame* frame = av_frame_alloc ();
-
-	for (int i = 0; i < 3; ++i) {
-		size_t const size = image->stride()[i] * image->size().height;
-		AVBufferRef* buffer = av_buffer_alloc (size);
-		/* XXX: inefficient */
-		memcpy (buffer->data, image->data()[i], size);
-		frame->buf[i] = av_buffer_ref (buffer);
-		frame->data[i] = buffer->data;
-		frame->linesize[i] = image->stride()[i];
-		av_buffer_unref (&buffer);
-	}
-
-	frame->width = image->size().width;
-	frame->height = image->size().height;
-	frame->format = _pixel_format;
-	frame->pts = time.seconds() / av_q2d (_video_stream->time_base);
+		file_encoders.push_back (
+			FileEncoderSet (
+				_film->frame_size(),
+				_film->video_frame_rate(),
+				_film->audio_frame_rate(),
+				_output_audio_channels,
+				_format,
+				_audio_stream_per_channel,
+				_x264_crf,
+				_film->three_d(),
+				filename,
+				extension
+				)
+			);
+	}
 
-	AVPacket packet;
-	av_init_packet (&packet);
-	packet.data = 0;
-	packet.size = 0;
+	auto reel_periods = _film->reels ();
+	auto reel = reel_periods.begin ();
+	auto encoder = file_encoders.begin ();
+
+	auto const video_frame = DCPTime::from_frames (1, _film->video_frame_rate ());
+	int const audio_frames = video_frame.frames_round(_film->audio_frame_rate());
+	std::vector<float> interleaved(_output_audio_channels * audio_frames);
+	auto deinterleaved = make_shared<AudioBuffers>(_output_audio_channels, audio_frames);
+	int const gets_per_frame = _film->three_d() ? 2 : 1;
+	for (DCPTime i; i < _film->length(); i += video_frame) {
+
+		if (file_encoders.size() > 1 && !reel->contains(i)) {
+			/* Next reel and file */
+			++reel;
+			++encoder;
+			DCPOMATIC_ASSERT (reel != reel_periods.end());
+			DCPOMATIC_ASSERT (encoder != file_encoders.end());
+		}
 
-	int got_packet;
-	if (avcodec_encode_video2 (_codec_context, &packet, frame, &got_packet) < 0) {
-		throw EncodeError ("FFmpeg video encode failed");
-	}
+		for (int j = 0; j < gets_per_frame; ++j) {
+			Butler::Error e;
+			auto v = _butler->get_video (Butler::Behaviour::BLOCKING, &e);
+			_butler->rethrow ();
+			if (v.first) {
+				auto fe = encoder->get (v.first->eyes());
+				if (fe) {
+					fe->video(v.first, v.second - reel->from);
+				}
+			} else {
+				if (e.code != Butler::Error::Code::FINISHED) {
+					throw DecodeError(String::compose("Error during decoding: %1", e.summary()));
+				}
+			}
+		}
 
-	if (got_packet && packet.size) {
-		/* XXX: this should not be hard-wired */
-		packet.stream_index = 0;
-		av_interleaved_write_frame (_format_context, &packet);
-		av_packet_unref (&packet);
-	}
+		_history.event ();
 
-	av_frame_free (&frame);
+		{
+			boost::mutex::scoped_lock lm (_mutex);
+			_last_time = i;
+		}
 
-	_history.event ();
+		auto job = _job.lock ();
+		if (job) {
+			job->set_progress (float(i.get()) / _film->length().get());
+		}
 
-	{
-		boost::mutex::scoped_lock lm (_mutex);
-		_last_time = time;
+		waker.nudge ();
+
+		_butler->get_audio (Butler::Behaviour::BLOCKING, interleaved.data(), audio_frames);
+		/* XXX: inefficient; butler interleaves and we deinterleave again */
+		float* p = interleaved.data();
+		for (int j = 0; j < audio_frames; ++j) {
+			for (int k = 0; k < _output_audio_channels; ++k) {
+				deinterleaved->data(k)[j] = *p++;
+			}
+		}
+		encoder->audio (deinterleaved);
 	}
 
-	shared_ptr<Job> job = _job.lock ();
-	if (job) {
-		job->set_progress (float(time.get()) / _film->length().get());
+	for (auto i: file_encoders) {
+		i.flush ();
 	}
 }
 
-void
-FFmpegEncoder::audio (shared_ptr<AudioBuffers> audio, DCPTime time)
+optional<float>
+FFmpegEncoder::current_rate () const
 {
+	return _history.rate ();
+}
 
+
+Frame
+FFmpegEncoder::frames_done () const
+{
+	boost::mutex::scoped_lock lm (_mutex);
+	return _last_time.frames_round (_film->video_frame_rate ());
 }
 
-void
-FFmpegEncoder::subtitle (PlayerSubtitles subs, DCPTimePeriod period)
+
+FFmpegEncoder::FileEncoderSet::FileEncoderSet (
+	dcp::Size video_frame_size,
+	int video_frame_rate,
+	int audio_frame_rate,
+	int channels,
+	ExportFormat format,
+	bool audio_stream_per_channel,
+	int x264_crf,
+	bool three_d,
+	boost::filesystem::path output,
+	string extension
+	)
+{
+	if (three_d) {
+		/// TRANSLATORS: L here is an abbreviation for "left", to indicate the left-eye part of a 3D export
+		_encoders[Eyes::LEFT] = make_shared<FFmpegFileEncoder>(
+			video_frame_size, video_frame_rate, audio_frame_rate, channels, format,
+			audio_stream_per_channel, x264_crf, String::compose("%1_%2%3", output.string(), _("L"), extension)
+			);
+		/// TRANSLATORS: R here is an abbreviation for "right", to indicate the right-eye part of a 3D export
+		_encoders[Eyes::RIGHT] = make_shared<FFmpegFileEncoder>(
+			video_frame_size, video_frame_rate, audio_frame_rate, channels, format,
+			audio_stream_per_channel, x264_crf, String::compose("%1_%2%3", output.string(), _("R"), extension)
+			);
+	} else {
+		_encoders[Eyes::BOTH] = make_shared<FFmpegFileEncoder>(
+			video_frame_size, video_frame_rate, audio_frame_rate, channels, format,
+			audio_stream_per_channel, x264_crf, String::compose("%1%2", output.string(), extension)
+			);
+	}
+}
+
+shared_ptr<FFmpegFileEncoder>
+FFmpegEncoder::FileEncoderSet::get (Eyes eyes) const
 {
+	if (_encoders.size() == 1) {
+		/* We are doing a 2D export... */
+		if (eyes == Eyes::LEFT) {
+			/* ...but we got some 3D data; put the left eye into the output... */
+			eyes = Eyes::BOTH;
+		} else if (eyes == Eyes::RIGHT) {
+			/* ...and ignore the right eye.*/
+			return {};
+		}
+	}
 
+	auto i = _encoders.find (eyes);
+	DCPOMATIC_ASSERT (i != _encoders.end());
+	return i->second;
 }
 
-float
-FFmpegEncoder::current_rate () const
+void
+FFmpegEncoder::FileEncoderSet::flush ()
 {
-	return _history.rate ();
+	for (auto& i: _encoders) {
+		i.second->flush ();
+	}
 }
 
-Frame
-FFmpegEncoder::frames_done () const
+void
+FFmpegEncoder::FileEncoderSet::audio (shared_ptr<AudioBuffers> a)
 {
-	boost::mutex::scoped_lock lm (_mutex);
-	return _last_time.frames_round (_film->video_frame_rate ());
+	for (auto& i: _encoders) {
+		i.second->audio (a);
+	}
 }