X-Git-Url: https://git.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fffmpeg.cc;h=6c8509b6cd71de2eef9e5a75e3c58b2e9f1cd87f;hp=8dc525db970d4c6efb6a2ea0e1fade96e0c8ca23;hb=182b9d2e2feb6545592868606aaf0f0146095481;hpb=3e56ff52385513e78fbe3b03e9787a6606b762d9

diff --git a/src/lib/ffmpeg.cc b/src/lib/ffmpeg.cc
index 8dc525db9..6c8509b6c 100644
--- a/src/lib/ffmpeg.cc
+++ b/src/lib/ffmpeg.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013-2019 Carl Hetherington
+    Copyright (C) 2013-2021 Carl Hetherington
 
     This file is part of DCP-o-matic.
 
@@ -18,19 +18,20 @@
 
 */
 
+
+#include "compose.hpp"
+#include "config.h"
+#include "dcpomatic_log.h"
+#include "digester.h"
+#include "exceptions.h"
 #include "ffmpeg.h"
+#include "ffmpeg_audio_stream.h"
 #include "ffmpeg_content.h"
+#include "ffmpeg_subtitle_stream.h"
 #include "film.h"
-#include "exceptions.h"
-#include "util.h"
 #include "log.h"
-#include "dcpomatic_log.h"
-#include "ffmpeg_subtitle_stream.h"
-#include "ffmpeg_audio_stream.h"
-#include "decrypted_ecinema_kdm.h"
-#include "digester.h"
-#include "compose.hpp"
-#include "config.h"
+#include "memory_util.h"
+#include "util.h"
 #include 
 extern "C" {
 #include 
@@ -38,58 +39,63 @@ extern "C" {
 #include 
 }
 #include 
-#include 
 #include 
 
 #include "i18n.h"
 
+
 using std::string;
 using std::cout;
 using std::cerr;
 using std::vector;
-using boost::shared_ptr;
+using std::shared_ptr;
 using boost::optional;
 using dcp::raw_convert;
 using namespace dcpomatic;
 
+
 boost::mutex FFmpeg::_mutex;
 
-FFmpeg::FFmpeg (boost::shared_ptr<const FFmpegContent> c)
+
+FFmpeg::FFmpeg (std::shared_ptr<const FFmpegContent> c)
 	: _ffmpeg_content (c)
-	, _avio_buffer (0)
-	, _avio_buffer_size (4096)
-	, _avio_context (0)
-	, _format_context (0)
-	, _frame (0)
 {
 	setup_general ();
 	setup_decoders ();
 }
 
+
 FFmpeg::~FFmpeg ()
 {
 	boost::mutex::scoped_lock lm (_mutex);
 
-	for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-		avcodec_close (_format_context->streams[i]->codec);
+	for (auto& i: _codec_context) {
+		avcodec_free_context (&i);
+	}
+
+	av_frame_free (&_video_frame);
+	for (auto& audio_frame: _audio_frame) {
+		av_frame_free (&audio_frame.second);
 	}
 
-	av_frame_free (&_frame);
 	avformat_close_input (&_format_context);
 }
 
+
 static int
 avio_read_wrapper (void* data, uint8_t* buffer, int amount)
 {
 	return reinterpret_cast<FFmpeg*>(data)->avio_read (buffer, amount);
 }
 
+
 static int64_t
 avio_seek_wrapper (void* data, int64_t offset, int whence)
 {
 	return reinterpret_cast<FFmpeg*>(data)->avio_seek (offset, whence);
 }
 
+
 void
 FFmpeg::ffmpeg_log_callback (void* ptr, int level, const char* fmt, va_list vl)
 {
@@ -105,6 +111,7 @@ FFmpeg::ffmpeg_log_callback (void* ptr, int level, const char* fmt, va_list vl)
 	dcpomatic_log->log (String::compose ("FFmpeg: %1", str), LogEntry::TYPE_GENERAL);
 }
 
+
 void
 FFmpeg::setup_general ()
 {
@@ -114,19 +121,18 @@ FFmpeg::setup_general ()
 	av_log_set_callback (FFmpeg::ffmpeg_log_callback);
 
 	_file_group.set_paths (_ffmpeg_content->paths ());
-	_avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc (_avio_buffer_size));
+	_avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc(_avio_buffer_size));
 	_avio_context = avio_alloc_context (_avio_buffer, _avio_buffer_size, 0, this, avio_read_wrapper, 0, avio_seek_wrapper);
+	if (!_avio_context) {
+		throw std::bad_alloc ();
+	}
 	_format_context = avformat_alloc_context ();
-	_format_context->pb = _avio_context;
-
-	AVDictionary* options = 0;
-#ifdef DCPOMATIC_VARIANT_SWAROOP
-	if (_ffmpeg_content->kdm()) {
-		DecryptedECinemaKDM kdm (_ffmpeg_content->kdm().get(), Config::instance()->decryption_chain()->key().get());
-		av_dict_set (&options, "decryption_key", kdm.key().hex().c_str(), 0);
+	if (!_format_context) {
+		throw std::bad_alloc ();
 	}
-#endif
+	_format_context->pb = _avio_context;
 
+	AVDictionary* options = nullptr;
 	int e = avformat_open_input (&_format_context, 0, 0, &options);
 	if (e < 0) {
 		throw OpenFileError (_ffmpeg_content->path(0).string(), e, OpenFileError::READ);
@@ -141,8 +147,13 @@ FFmpeg::setup_general ()
 	optional<int> video_stream_undefined_frame_rate;
 
 	for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-		AVStream* s = _format_context->streams[i];
-		if (s->codec->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codec->codec_id)) {
+		auto s = _format_context->streams[i];
+		if (s->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codecpar->codec_id)) {
+			auto const frame_rate = av_q2d(s->avg_frame_rate);
+			if (frame_rate < 1 || frame_rate > 1000) {
+				/* Ignore video streams with crazy frame rates.  These are usually things like album art on MP3s. */
+				continue;
+			}
 			if (s->avg_frame_rate.num > 0 && s->avg_frame_rate.den > 0) {
 				/* This is definitely our video stream */
 				_video_stream = i;
@@ -160,11 +171,6 @@ FFmpeg::setup_general ()
 		_video_stream = video_stream_undefined_frame_rate.get();
 	}
 
-	/* Ignore video streams with crazy frame rates.  These are usually things like album art on MP3s. */
-	if (_video_stream && av_q2d(av_guess_frame_rate(_format_context, _format_context->streams[_video_stream.get()], 0)) > 1000) {
-		_video_stream = optional<int>();
-	}
-
 	/* Hack: if the AVStreams have duplicate IDs, replace them with our
	   own.  We use the IDs so that we can cope with VOBs, in which
	   streams move about in index but remain with the same ID in different
@@ -187,24 +193,37 @@
 		}
 	}
 
-	_frame = av_frame_alloc ();
-	if (_frame == 0) {
-		throw DecodeError (N_("could not allocate frame"));
+	_video_frame = av_frame_alloc ();
+	if (_video_frame == nullptr) {
+		throw std::bad_alloc ();
 	}
 }
 
+
 void
 FFmpeg::setup_decoders ()
 {
 	boost::mutex::scoped_lock lm (_mutex);
 
+	_codec_context.resize (_format_context->nb_streams);
 	for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-		AVCodecContext* context = _format_context->streams[i]->codec;
-
-		AVCodec* codec = avcodec_find_decoder (context->codec_id);
+		auto codec = avcodec_find_decoder (_format_context->streams[i]->codecpar->codec_id);
 		if (codec) {
+			auto context = avcodec_alloc_context3 (codec);
+			if (!context) {
+				throw std::bad_alloc ();
+			}
+			_codec_context[i] = context;
 
-			AVDictionary* options = 0;
+			int r = avcodec_parameters_to_context (context, _format_context->streams[i]->codecpar);
+			if (r < 0) {
+				throw DecodeError ("avcodec_parameters_to_context", "FFmpeg::setup_decoders", r);
+			}
+
+			context->thread_count = 8;
+			context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
+
+			AVDictionary* options = nullptr;
 			/* This option disables decoding of DCA frame footers in our patched version
			   of FFmpeg.  I believe these footers are of no use to us, and they can cause
			   problems when FFmpeg fails to decode them (mantis #352).
@@ -217,8 +236,9 @@ FFmpeg::setup_decoders ()
 			/* Enable following of links in files */
 			av_dict_set_int (&options, "enable_drefs", 1, 0);
 
-			if (avcodec_open2 (context, codec, &options) < 0) {
-				throw DecodeError (N_("could not open decoder"));
+			r = avcodec_open2 (context, codec, &options);
+			if (r < 0) {
+				throw DecodeError (N_("avcodec_open2"), N_("FFmpeg::setup_decoders"), r);
 			}
 		} else {
 			dcpomatic_log->log (String::compose ("No codec found for stream %1", i), LogEntry::TYPE_WARNING);
@@ -226,32 +246,37 @@ FFmpeg::setup_decoders ()
 	}
 }
 
+
 AVCodecContext *
 FFmpeg::video_codec_context () const
 {
 	if (!_video_stream) {
-		return 0;
+		return nullptr;
 	}
 
-	return _format_context->streams[_video_stream.get()]->codec;
+	return _codec_context[_video_stream.get()];
 }
 
+
 AVCodecContext *
 FFmpeg::subtitle_codec_context () const
 {
-	if (!_ffmpeg_content->subtitle_stream ()) {
-		return 0;
+	auto str = _ffmpeg_content->subtitle_stream();
+	if (!str) {
+		return nullptr;
 	}
 
-	return _ffmpeg_content->subtitle_stream()->stream(_format_context)->codec;
+	return _codec_context[str->index(_format_context)];
 }
 
+
 int
 FFmpeg::avio_read (uint8_t* buffer, int const amount)
 {
 	return _file_group.read (buffer, amount);
 }
 
+
 int64_t
 FFmpeg::avio_seek (int64_t const pos, int whence)
 {
@@ -262,29 +287,33 @@ FFmpeg::avio_seek (int64_t const pos, int whence)
 	return _file_group.seek (pos, whence);
 }
 
+
 FFmpegSubtitlePeriod
-FFmpeg::subtitle_period (AVSubtitle const & sub)
+FFmpeg::subtitle_period (AVPacket const* packet, AVStream const* stream, AVSubtitle const & sub)
 {
-	ContentTime const packet_time = ContentTime::from_seconds (static_cast<double> (sub.pts) / AV_TIME_BASE);
+	auto const packet_time = ContentTime::from_seconds (packet->pts * av_q2d(stream->time_base));
+	auto const start = packet_time + ContentTime::from_seconds(sub.start_display_time / 1e3);
 
-	if (sub.end_display_time == static_cast<uint32_t> (-1)) {
-		/* End time is not known */
-		return FFmpegSubtitlePeriod (packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3));
+	if (sub.end_display_time == 0 || sub.end_display_time == static_cast<uint32_t>(-1)) {
+		/* End time is not in the AVSubtitle; perhaps we can use the AVPacket's duration */
+		if (packet->duration) {
+			return FFmpegSubtitlePeriod(start, start + ContentTime::from_seconds(packet->duration * av_q2d(stream->time_base)));
+		} else {
+			return FFmpegSubtitlePeriod(start);
+		}
 	}
 
-	return FFmpegSubtitlePeriod (
-		packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3),
-		packet_time + ContentTime::from_seconds (sub.end_display_time / 1e3)
-		);
+	return FFmpegSubtitlePeriod (start, packet_time + ContentTime::from_seconds(sub.end_display_time / 1e3));
}
 
+
 /** Compute the pts offset to use given a set of audio streams and some video details.
  *  Sometimes these parameters will have just been determined by an Examiner, sometimes
  *  they will have been retrieved from a piece of Content, hence the need for this method
  *  in FFmpeg.
  */
 ContentTime
-FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream> > audio_streams, optional<ContentTime> first_video, double video_frame_rate) const
+FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream>> audio_streams, optional<ContentTime> first_video, double video_frame_rate) const
 {
 	/* Audio and video frame PTS values may not start with 0.
	   We want to fiddle them so that:
@@ -302,13 +331,13 @@ FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream> > audio_streams, option
 
 	/* First, make one of them start at 0 */
 
-	ContentTime po = ContentTime::min ();
+	auto po = ContentTime::min ();
 
 	if (first_video) {
 		po = - first_video.get ();
 	}
 
-	BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, audio_streams) {
+	for (auto i: audio_streams) {
 		if (i->first_audio) {
 			po = max (po, - i->first_audio.get ());
 		}
@@ -324,9 +353,29 @@ FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream> > audio_streams, option
 
 	/* Now adjust so that the video pts starts on a frame */
 	if (first_video) {
-		ContentTime const fvc = first_video.get() + po;
+		auto const fvc = first_video.get() + po;
 		po += fvc.ceil (video_frame_rate) - fvc;
 	}
 
 	return po;
 }
+
+
+AVFrame *
+FFmpeg::audio_frame (shared_ptr<const FFmpegAudioStream> stream)
+{
+	auto iter = _audio_frame.find(stream);
+	if (iter != _audio_frame.end()) {
+		return iter->second;
+	}
+
+	auto frame = av_frame_alloc ();
+	if (frame == nullptr) {
+		throw std::bad_alloc();
+	}
+
+	_audio_frame[stream] = frame;
+	return frame;
+
+}
+
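
Note on the decoder changes: the setup_decoders() hunks move off the removed AVStream::codec field.  A decoder context is now allocated per stream with avcodec_alloc_context3(), filled from the stream's AVCodecParameters, opened with avcodec_open2(), and freed in the destructor with avcodec_free_context().  Outside DCP-o-matic's classes the same pattern looks roughly like the sketch below; the function name is made up for illustration, errors simply return nullptr instead of throwing DecodeError, and the thread settings just mirror the values hard-coded in the diff.

/* Rough standalone sketch of the per-stream decoder setup used above
 * (illustration only, not DCP-o-matic code).
 */
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

AVCodecContext*
open_decoder_for_stream (AVFormatContext* format_context, unsigned int index)
{
	AVStream* stream = format_context->streams[index];
	AVCodec const* codec = avcodec_find_decoder (stream->codecpar->codec_id);
	if (!codec) {
		return nullptr;
	}

	AVCodecContext* context = avcodec_alloc_context3 (codec);
	if (!context) {
		return nullptr;
	}

	/* Copy the stream's parameters (dimensions, sample rate, extradata, ...) into the context */
	if (avcodec_parameters_to_context (context, stream->codecpar) < 0) {
		avcodec_free_context (&context);
		return nullptr;
	}

	/* The diff also enables frame/slice threading before opening the decoder */
	context->thread_count = 8;
	context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;

	if (avcodec_open2 (context, codec, nullptr) < 0) {
		avcodec_free_context (&context);
		return nullptr;
	}

	return context;
}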
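
Note on pts_offset(): the two hunks keep the earlier arithmetic — pick an offset that makes the earliest first PTS among the video and audio streams start at zero, then bump the offset so that the first video PTS lands exactly on a frame boundary.  A simplified sketch of just the logic visible in those hunks, using plain double seconds instead of ContentTime (ContentTime::min() is stood in by a large negative constant, and the lines between the two hunks are not reproduced here), could look like this:

#include <algorithm>
#include <cmath>
#include <optional>
#include <vector>

/* Hypothetical simplified version of the offset calculation shown above. */
double
pts_offset_sketch (std::vector<double> const& first_audio, std::optional<double> first_video, double video_frame_rate)
{
	/* First, make one of the streams start at 0 */
	double po = -1e18;  /* stand-in for ContentTime::min() */
	if (first_video) {
		po = -*first_video;
	}
	for (auto a: first_audio) {
		po = std::max (po, -a);
	}

	/* Now adjust so that the video PTS starts on a frame:
	   round (first_video + po) up to the next multiple of one frame period. */
	if (first_video) {
		double const fvc = *first_video + po;
		double const frame = 1.0 / video_frame_rate;
		po += std::ceil (fvc / frame) * frame - fvc;
	}

	return po;
}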
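
Note on subtitle_period(): the new version derives its times from the packet rather than from AVSubtitle::pts — packet->pts is scaled by the stream's time_base, the AVSubtitle display times are millisecond offsets from that point, and when no end time is present the packet's duration is tried as a fallback.  A rough equivalent in plain seconds (an illustrative helper, not the project's API; the real code returns an FFmpegSubtitlePeriod built from ContentTime) is:

#include <optional>
#include <utility>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

/* Returns the start time, plus an end time when one can be worked out. */
std::pair<double, std::optional<double>>
subtitle_period_seconds (AVPacket const* packet, AVStream const* stream, AVSubtitle const& sub)
{
	double const packet_time = packet->pts * av_q2d (stream->time_base);
	double const start = packet_time + sub.start_display_time / 1e3;

	if (sub.end_display_time == 0 || sub.end_display_time == static_cast<uint32_t>(-1)) {
		/* End time is not in the AVSubtitle; perhaps the packet's duration can be used */
		if (packet->duration) {
			return { start, start + packet->duration * av_q2d (stream->time_base) };
		}
		return { start, std::nullopt };
	}

	return { start, packet_time + sub.end_display_time / 1e3 };
}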