/*
- Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
*/
+
+#include "compose.hpp"
+#include "config.h"
+#include "dcpomatic_log.h"
+#include "digester.h"
+#include "exceptions.h"
#include "ffmpeg.h"
+#include "ffmpeg_audio_stream.h"
#include "ffmpeg_content.h"
+#include "ffmpeg_subtitle_stream.h"
#include "film.h"
-#include "exceptions.h"
-#include "util.h"
#include "log.h"
-#include "dcpomatic_log.h"
-#include "ffmpeg_subtitle_stream.h"
-#include "ffmpeg_audio_stream.h"
-#include "digester.h"
-#include "compose.hpp"
-#include "config.h"
+#include "memory_util.h"
+#include "util.h"
#include <dcp/raw_convert.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include "i18n.h"
+
using std::string;
using std::cout;
using std::cerr;
using dcp::raw_convert;
using namespace dcpomatic;
+
boost::mutex FFmpeg::_mutex;
+
/** Construct an FFmpeg wrapper for a piece of content, opening the file(s)
 *  and setting up decoders ready for use.
 *  @param c Content to wrap.
 *
 *  NOTE(review): the old member initializers (_avio_buffer, _avio_context,
 *  _format_context, _frame) were removed here; presumably those members now
 *  have in-class initializers — confirm against the header.
 */
FFmpeg::FFmpeg (std::shared_ptr<const FFmpegContent> c)
	: _ffmpeg_content (c)
{
	setup_general ();
	setup_decoders ();
}
+
/** Destructor: free every libav resource we allocated, under the class mutex
 *  so we do not race other users of the FFmpeg library state.
 */
FFmpeg::~FFmpeg ()
{
	boost::mutex::scoped_lock lm (_mutex);

	/* Free the per-stream codec contexts allocated in setup_decoders();
	   avcodec_free_context() is a no-op on null entries (streams with no codec). */
	for (auto& i: _codec_context) {
		avcodec_free_context (&i);
	}

	/* Free the decode frames: one for video, one per audio stream (see audio_frame()) */
	av_frame_free (&_video_frame);
	for (auto& audio_frame: _audio_frame) {
		av_frame_free (&audio_frame.second);
	}

	avformat_close_input (&_format_context);
}
+
/** Trampoline passed to avio_alloc_context(): forward a read request to the
 *  FFmpeg object supplied as the context's opaque pointer.
 *  @param data Opaque pointer (actually an FFmpeg*).
 *  @param buffer Buffer to read into.
 *  @param amount Number of bytes requested.
 *  @return Number of bytes read, or an error code.
 */
static int
avio_read_wrapper (void* data, uint8_t* buffer, int amount)
{
	/* static_cast is the idiomatic (and sufficient) cast for void* -> T* */
	return static_cast<FFmpeg*>(data)->avio_read (buffer, amount);
}
+
/** Trampoline passed to avio_alloc_context(): forward a seek request to the
 *  FFmpeg object supplied as the context's opaque pointer.
 *  @param data Opaque pointer (actually an FFmpeg*).
 *  @param offset Seek offset.
 *  @param whence Seek origin (SEEK_SET/SEEK_CUR/SEEK_END/AVSEEK_SIZE semantics).
 *  @return New position, or an error code.
 */
static int64_t
avio_seek_wrapper (void* data, int64_t offset, int whence)
{
	/* static_cast is the idiomatic (and sufficient) cast for void* -> T* */
	return static_cast<FFmpeg*>(data)->avio_seek (offset, whence);
}
+
void
FFmpeg::ffmpeg_log_callback (void* ptr, int level, const char* fmt, va_list vl)
{
dcpomatic_log->log (String::compose ("FFmpeg: %1", str), LogEntry::TYPE_GENERAL);
}
+
void
FFmpeg::setup_general ()
{
av_log_set_callback (FFmpeg::ffmpeg_log_callback);
_file_group.set_paths (_ffmpeg_content->paths ());
- _avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc (_avio_buffer_size));
+ _avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc(_avio_buffer_size));
_avio_context = avio_alloc_context (_avio_buffer, _avio_buffer_size, 0, this, avio_read_wrapper, 0, avio_seek_wrapper);
if (!_avio_context) {
throw std::bad_alloc ();
optional<int> video_stream_undefined_frame_rate;
-DCPOMATIC_DISABLE_WARNINGS
for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
auto s = _format_context->streams[i];
- if (s->codec->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codec->codec_id)) {
+ if (s->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codecpar->codec_id)) {
+ auto const frame_rate = av_q2d(s->avg_frame_rate);
+ if (frame_rate < 1 || frame_rate > 1000) {
+ /* Ignore video streams with crazy frame rates. These are usually things like album art on MP3s. */
+ continue;
+ }
if (s->avg_frame_rate.num > 0 && s->avg_frame_rate.den > 0) {
/* This is definitely our video stream */
_video_stream = i;
}
}
}
-DCPOMATIC_ENABLE_WARNINGS
/* Files from iTunes sometimes have two video streams, one with the avg_frame_rate.num and .den set
to zero. Only use such a stream if there is no alternative.
_video_stream = video_stream_undefined_frame_rate.get();
}
- /* Ignore video streams with crazy frame rates. These are usually things like album art on MP3s. */
- if (_video_stream && av_q2d(av_guess_frame_rate(_format_context, _format_context->streams[_video_stream.get()], 0)) > 1000) {
- _video_stream = optional<int>();
- }
-
/* Hack: if the AVStreams have duplicate IDs, replace them with our
own. We use the IDs so that we can cope with VOBs, in which streams
move about in index but remain with the same ID in different
}
}
- _frame = av_frame_alloc ();
- if (_frame == 0) {
- throw DecodeError (N_("could not allocate frame"));
+ _video_frame = av_frame_alloc ();
+ if (_video_frame == nullptr) {
+ throw std::bad_alloc ();
}
}
+
void
FFmpeg::setup_decoders ()
{
boost::mutex::scoped_lock lm (_mutex);
-DCPOMATIC_DISABLE_WARNINGS
+ _codec_context.resize (_format_context->nb_streams);
for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
- auto context = _format_context->streams[i]->codec;
-
- AVCodec* codec = avcodec_find_decoder (context->codec_id);
+ auto codec = avcodec_find_decoder (_format_context->streams[i]->codecpar->codec_id);
if (codec) {
+ auto context = avcodec_alloc_context3 (codec);
+ if (!context) {
+ throw std::bad_alloc ();
+ }
+ _codec_context[i] = context;
+
+ int r = avcodec_parameters_to_context (context, _format_context->streams[i]->codecpar);
+ if (r < 0) {
+ throw DecodeError ("avcodec_parameters_to_context", "FFmpeg::setup_decoders", r);
+ }
+
+ context->thread_count = 8;
+ context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
AVDictionary* options = nullptr;
/* This option disables decoding of DCA frame footers in our patched version
/* Enable following of links in files */
av_dict_set_int (&options, "enable_drefs", 1, 0);
- if (avcodec_open2 (context, codec, &options) < 0) {
- throw DecodeError (N_("could not open decoder"));
+ r = avcodec_open2 (context, codec, &options);
+ if (r < 0) {
+ throw DecodeError (N_("avcodec_open2"), N_("FFmpeg::setup_decoders"), r);
}
} else {
dcpomatic_log->log (String::compose ("No codec found for stream %1", i), LogEntry::TYPE_WARNING);
}
}
-DCPOMATIC_ENABLE_WARNINGS
}
-DCPOMATIC_DISABLE_WARNINGS
+
AVCodecContext *
FFmpeg::video_codec_context () const
{
return nullptr;
}
- return _format_context->streams[_video_stream.get()]->codec;
+ return _codec_context[_video_stream.get()];
}
+
/** @return Codec context for the currently-selected subtitle stream, or
 *  nullptr if the content has no subtitle stream selected.
 */
AVCodecContext *
FFmpeg::subtitle_codec_context () const
{
	auto str = _ffmpeg_content->subtitle_stream();
	if (!str) {
		return nullptr;
	}

	/* _codec_context is indexed by stream index within _format_context */
	return _codec_context[str->index(_format_context)];
}
-DCPOMATIC_ENABLE_WARNINGS
+
int
FFmpeg::avio_read (uint8_t* buffer, int const amount)
return _file_group.read (buffer, amount);
}
+
/** Seek callback for our custom AVIO context: delegate to the FileGroup,
 *  which presents the content's possibly-multiple files as one stream.
 *  @param pos Offset to seek to.
 *  @param whence Seek origin, as for lseek().
 *  @return New position, as reported by the FileGroup.
 */
int64_t
FFmpeg::avio_seek (int64_t const pos, int whence)
{
	auto const new_position = _file_group.seek (pos, whence);
	return new_position;
}
+
/** Compute the period during which a decoded subtitle should be shown.
 *  @param packet Packet that the subtitle was decoded from.
 *  @param stream Stream that the packet belongs to (for its time base).
 *  @param sub Subtitle from avcodec_decode_subtitle2().
 *  @return Period; the `to' time is absent if no end time could be determined.
 */
FFmpegSubtitlePeriod
FFmpeg::subtitle_period (AVPacket const* packet, AVStream const* stream, AVSubtitle const & sub)
{
	/* The packet PTS is in the stream's time base; display times in the
	   AVSubtitle are offsets from it, in milliseconds. */
	auto const packet_time = ContentTime::from_seconds (packet->pts * av_q2d(stream->time_base));
	auto const start = packet_time + ContentTime::from_seconds(sub.start_display_time / 1e3);

	if (sub.end_display_time == 0 || sub.end_display_time == static_cast<uint32_t>(-1)) {
		/* End time is not in the AVSubtitle; perhaps we can use the AVPacket's duration */
		if (packet->duration) {
			return FFmpegSubtitlePeriod(start, start + ContentTime::from_seconds(packet->duration * av_q2d(stream->time_base)));
		} else {
			return FFmpegSubtitlePeriod(start);
		}
	}

	return FFmpegSubtitlePeriod (start, packet_time + ContentTime::from_seconds(sub.end_display_time / 1e3));
}
+
/** Compute the pts offset to use given a set of audio streams and some video details.
* Sometimes these parameters will have just been determined by an Examiner, sometimes
* they will have been retrieved from a piece of Content, hence the need for this method
return po;
}
+
+
+AVFrame *
+FFmpeg::audio_frame (shared_ptr<const FFmpegAudioStream> stream)
+{
+ auto iter = _audio_frame.find(stream);
+ if (iter != _audio_frame.end()) {
+ return iter->second;
+ }
+
+ auto frame = av_frame_alloc ();
+ if (frame == nullptr) {
+ throw std::bad_alloc();
+ }
+
+ _audio_frame[stream] = frame;
+ return frame;
+
+}
+