X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fffmpeg.cc;h=503f8e51cf13b146d25d81d443688428a87e5f97;hb=6e3e984162ca7a181bc7c98d90c295e88e4e7f6c;hp=0e70d9c6f89350f12e5e1ffba1573f2d3af5aed2;hpb=e7440b69bf0dc486314544b0e1fb5ac2d45a9a8d;p=dcpomatic.git

diff --git a/src/lib/ffmpeg.cc b/src/lib/ffmpeg.cc
index 0e70d9c6f..503f8e51c 100644
--- a/src/lib/ffmpeg.cc
+++ b/src/lib/ffmpeg.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013-2019 Carl Hetherington
+    Copyright (C) 2013-2021 Carl Hetherington
 
     This file is part of DCP-o-matic.
@@ -18,6 +18,7 @@
 */
+
 #include "ffmpeg.h"
 #include "ffmpeg_content.h"
 #include "film.h"
@@ -41,6 +42,7 @@ extern "C" {
 #include "i18n.h"
+
 using std::string;
 using std::cout;
 using std::cerr;
@@ -50,46 +52,45 @@ using boost::optional;
 using dcp::raw_convert;
 using namespace dcpomatic;
+
 boost::mutex FFmpeg::_mutex;
+
 FFmpeg::FFmpeg (std::shared_ptr<const FFmpegContent> c)
 	: _ffmpeg_content (c)
-	, _avio_buffer (0)
-	, _avio_buffer_size (4096)
-	, _avio_context (0)
-	, _format_context (0)
-	, _frame (0)
 {
 	setup_general ();
 	setup_decoders ();
 }
+
 FFmpeg::~FFmpeg ()
 {
 	boost::mutex::scoped_lock lm (_mutex);
-DCPOMATIC_DISABLE_WARNINGS
-	for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-		avcodec_close (_format_context->streams[i]->codec);
+	for (auto& i: _codec_context) {
+		avcodec_free_context (&i);
 	}
-DCPOMATIC_ENABLE_WARNINGS
 	av_frame_free (&_frame);
 	avformat_close_input (&_format_context);
 }
+
 static int
 avio_read_wrapper (void* data, uint8_t* buffer, int amount)
 {
 	return reinterpret_cast<FFmpeg*>(data)->avio_read (buffer, amount);
 }
+
 static int64_t
 avio_seek_wrapper (void* data, int64_t offset, int whence)
 {
 	return reinterpret_cast<FFmpeg*>(data)->avio_seek (offset, whence);
 }
+
 void
 FFmpeg::ffmpeg_log_callback (void* ptr, int level, const char* fmt, va_list vl)
 {
@@ -105,6 +106,7 @@ FFmpeg::ffmpeg_log_callback (void* ptr, int level, const char* fmt, va_list vl)
 	dcpomatic_log->log (String::compose ("FFmpeg: %1", str), LogEntry::TYPE_GENERAL);
 }
+
 void
 FFmpeg::setup_general ()
 {
@@ -114,7 +116,7 @@ FFmpeg::setup_general ()
 	av_log_set_callback (FFmpeg::ffmpeg_log_callback);
 	_file_group.set_paths (_ffmpeg_content->paths ());
-	_avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc (_avio_buffer_size));
+	_avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc(_avio_buffer_size));
 	_avio_context = avio_alloc_context (_avio_buffer, _avio_buffer_size, 0, this, avio_read_wrapper, 0, avio_seek_wrapper);
 	if (!_avio_context) {
 		throw std::bad_alloc ();
@@ -139,10 +141,9 @@ FFmpeg::setup_general ()
 	optional<int> video_stream_undefined_frame_rate;
-DCPOMATIC_DISABLE_WARNINGS
 	for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
 		auto s = _format_context->streams[i];
-		if (s->codec->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codec->codec_id)) {
+		if (s->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codecpar->codec_id)) {
 			if (s->avg_frame_rate.num > 0 && s->avg_frame_rate.den > 0) {
 				/* This is definitely our video stream */
 				_video_stream = i;
@@ -152,7 +153,6 @@
 			}
 		}
 	}
-DCPOMATIC_ENABLE_WARNINGS
 	/* Files from iTunes sometimes have two video streams, one with the avg_frame_rate.num and .den set to zero.
 	   Only use such a stream if there is no alternative.
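The change running through these hunks is the move from the long-deprecated AVStream::codec field to AVCodecParameters (codecpar) plus decoder contexts that the caller allocates, opens and frees itself. In isolation, that post-deprecation pattern looks roughly like the sketch below. It is an illustrative, self-contained example rather than DCP-o-matic code: the input file name "input.mov", the bare fprintf error handling and the absence of threading or codec options are placeholders, and it assumes FFmpeg 3.1 or later, where avcodec_parameters_to_context() is available.

/* Sketch of the codecpar-based decoder setup this commit adopts; placeholders noted above. */

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

#include <cstdio>
#include <vector>

int main ()
{
	AVFormatContext* format_context = nullptr;
	if (avformat_open_input (&format_context, "input.mov", nullptr, nullptr) < 0) {
		fprintf (stderr, "could not open input\n");
		return 1;
	}
	avformat_find_stream_info (format_context, nullptr);

	/* One decoder context per stream, owned by us rather than by the AVStream */
	std::vector<AVCodecContext*> codec_context (format_context->nb_streams, nullptr);

	for (unsigned int i = 0; i < format_context->nb_streams; ++i) {
		/* codecpar replaces the deprecated streams[i]->codec for probing streams */
		auto par = format_context->streams[i]->codecpar;
		auto codec = avcodec_find_decoder (par->codec_id);
		if (!codec) {
			continue;
		}

		auto context = avcodec_alloc_context3 (codec);
		if (!context) {
			continue;
		}

		/* Copy the demuxer's stream parameters into the new context, then open it */
		if (avcodec_parameters_to_context (context, par) < 0 || avcodec_open2 (context, codec, nullptr) < 0) {
			fprintf (stderr, "could not set up a decoder for stream %u\n", i);
			avcodec_free_context (&context);
			continue;
		}

		codec_context[i] = context;
	}

	/* ... decode ... */

	/* Each context is freed explicitly rather than closed via the stream */
	for (auto& context: codec_context) {
		avcodec_free_context (&context);
	}
	avformat_close_input (&format_context);
	return 0;
}

Freeing each context with avcodec_free_context() is what replaces the old avcodec_close() on the stream-owned context in FFmpeg::~FFmpeg() above, and the avcodec_parameters_to_context()/avcodec_open2() pair mirrors the new FFmpeg::setup_decoders() in the hunks that follow.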
@@ -190,21 +190,33 @@ DCPOMATIC_ENABLE_WARNINGS
 	_frame = av_frame_alloc ();
 	if (_frame == 0) {
-		throw DecodeError (N_("could not allocate frame"));
+		throw std::bad_alloc ();
 	}
 }
+
 void
 FFmpeg::setup_decoders ()
 {
 	boost::mutex::scoped_lock lm (_mutex);
-DCPOMATIC_DISABLE_WARNINGS
+	_codec_context.resize (_format_context->nb_streams);
 	for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-		auto context = _format_context->streams[i]->codec;
-
-		AVCodec* codec = avcodec_find_decoder (context->codec_id);
+		auto codec = avcodec_find_decoder (_format_context->streams[i]->codecpar->codec_id);
 		if (codec) {
+			auto context = avcodec_alloc_context3 (codec);
+			if (!context) {
+				throw std::bad_alloc ();
+			}
+			_codec_context[i] = context;
+
+			int r = avcodec_parameters_to_context (context, _format_context->streams[i]->codecpar);
+			if (r < 0) {
+				throw DecodeError ("avcodec_parameters_to_context", "FFmpeg::setup_decoders", r);
+			}
+
+			context->thread_count = 8;
+			context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
 			AVDictionary* options = nullptr;
 			/* This option disables decoding of DCA frame footers in our patched version
@@ -219,17 +231,17 @@ DCPOMATIC_DISABLE_WARNINGS
 			/* Enable following of links in files */
 			av_dict_set_int (&options, "enable_drefs", 1, 0);
-			if (avcodec_open2 (context, codec, &options) < 0) {
-				throw DecodeError (N_("could not open decoder"));
+			r = avcodec_open2 (context, codec, &options);
+			if (r < 0) {
+				throw DecodeError (N_("avcodec_open2"), N_("FFmpeg::setup_decoders"), r);
 			}
 		} else {
 			dcpomatic_log->log (String::compose ("No codec found for stream %1", i), LogEntry::TYPE_WARNING);
 		}
 	}
-DCPOMATIC_ENABLE_WARNINGS
 }
-DCPOMATIC_DISABLE_WARNINGS
+
 AVCodecContext *
 FFmpeg::video_codec_context () const
 {
@@ -237,19 +249,21 @@ FFmpeg::video_codec_context () const
 		return nullptr;
 	}
-	return _format_context->streams[_video_stream.get()]->codec;
+	return _codec_context[_video_stream.get()];
 }
+
 AVCodecContext *
 FFmpeg::subtitle_codec_context () const
 {
-	if (!_ffmpeg_content->subtitle_stream ()) {
+	auto str = _ffmpeg_content->subtitle_stream();
+	if (!str) {
 		return nullptr;
 	}
-	return _ffmpeg_content->subtitle_stream()->stream(_format_context)->codec;
+	return _codec_context[str->index(_format_context)];
 }
-DCPOMATIC_ENABLE_WARNINGS
+
 int
 FFmpeg::avio_read (uint8_t* buffer, int const amount)
@@ -257,6 +271,7 @@ FFmpeg::avio_read (uint8_t* buffer, int const amount)
 	return _file_group.read (buffer, amount);
 }
+
 int64_t
 FFmpeg::avio_seek (int64_t const pos, int whence)
 {
@@ -267,12 +282,13 @@ FFmpeg::avio_seek (int64_t const pos, int whence)
 	return _file_group.seek (pos, whence);
 }
+
 FFmpegSubtitlePeriod
-FFmpeg::subtitle_period (AVSubtitle const & sub)
+FFmpeg::subtitle_period (AVPacket const* packet, AVStream const* stream, AVSubtitle const & sub)
 {
-	auto const packet_time = ContentTime::from_seconds (static_cast<double> (sub.pts) / AV_TIME_BASE);
+	auto const packet_time = ContentTime::from_seconds (packet->pts * av_q2d(stream->time_base));
-	if (sub.end_display_time == static_cast<uint32_t> (-1)) {
+	if (sub.end_display_time == 0 || sub.end_display_time == static_cast<uint32_t>(-1)) {
 		/* End time is not known */
 		return FFmpegSubtitlePeriod (packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3));
 	}
@@ -283,6 +299,7 @@ FFmpeg::subtitle_period (AVSubtitle const & sub)
 	);
 }
+
 /** Compute the pts offset to use given a set of audio streams and some video details.
 *  Sometimes these parameters will have just been determined by an Examiner, sometimes
 *  they will have been retrieved from a piece of Content, hence the need for this method