X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fffmpeg_examiner.cc;h=16a89faa75a580e1ef55aa1a595d0e46fbc43d14;hb=e60bb3e51bd1508b149e6b8f6608f09b5196ae26;hp=e439566a10367ee105f9bbc5311098191931f38a;hpb=cc3900735839ff4b0da0c046b5c606c440ba917a;p=dcpomatic.git

diff --git a/src/lib/ffmpeg_examiner.cc b/src/lib/ffmpeg_examiner.cc
index e439566a1..16a89faa7 100644
--- a/src/lib/ffmpeg_examiner.cc
+++ b/src/lib/ffmpeg_examiner.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>

     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -23,18 +23,25 @@ extern "C" {
 }
 #include "ffmpeg_examiner.h"
 #include "ffmpeg_content.h"
+#include "job.h"
+#include "ffmpeg_audio_stream.h"
+#include "ffmpeg_subtitle_stream.h"
+#include "util.h"
+#include "safe_stringstream.h"

 #include "i18n.h"

 using std::string;
 using std::cout;
 using std::max;
-using std::stringstream;
 using boost::shared_ptr;
 using boost::optional;

-FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c)
+/** @param job job that the examiner is operating in, or 0 */
+FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c, shared_ptr<Job> job)
 	: FFmpeg (c)
+	, _video_length (0)
+	, _need_video_length (false)
 {
 	/* Find audio and subtitle streams */

@@ -49,7 +56,7 @@ FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c)
 			if (s->codec->channel_layout == 0) {
 				s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels);
 			}
-
+
 			_audio_streams.push_back (
 				shared_ptr<FFmpegAudioStream> (
 					new FFmpegAudioStream (audio_stream_name (s), s->id, s->codec->sample_rate, s->codec->channels)
@@ -61,70 +68,135 @@ FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c)
 		}
 	}

-	/* Run through until we find the first audio (for each stream) and video */
+	/* See if the header has duration information in it */
+	_need_video_length = _format_context->duration == AV_NOPTS_VALUE;
+	if (!_need_video_length) {
+		_video_length = (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get ();
+	} else if (job) {
+		job->sub (_("Finding length"));
+		job->set_progress_unknown ();
+	}

-	while (1) {
+	if (job) {
+		job->sub (_("Finding subtitles"));
+	}
+
+	/* Run through until we find:
+	 *   - the first video.
+	 *   - the first audio for each stream.
+	 *   - the subtitle periods for each stream.
+	 *
+	 * We have to note subtitle periods as otherwise we have no way of knowing
+	 * where we should look for subtitles (video and audio are always present,
+	 * so they are ok).
+	 */
+	while (true) {
 		int r = av_read_frame (_format_context, &_packet);
 		if (r < 0) {
 			break;
 		}

-		int frame_finished;
+		if (job) {
+			job->set_progress_unknown ();
+		}

 		AVCodecContext* context = _format_context->streams[_packet.stream_index]->codec;

-		if (_packet.stream_index == _video_stream && !_first_video) {
-			if (avcodec_decode_video2 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
-				_first_video = frame_time (_format_context->streams[_video_stream]);
-			}
-		} else {
-			for (size_t i = 0; i < _audio_streams.size(); ++i) {
-				if (_audio_streams[i]->uses_index (_format_context, _packet.stream_index) && !_audio_streams[i]->first_audio) {
-					if (avcodec_decode_audio4 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
-						_audio_streams[i]->first_audio = frame_time (_audio_streams[i]->stream (_format_context));
-					}
-				}
+		if (_packet.stream_index == _video_stream) {
+			video_packet (context);
+		}
+
+		for (size_t i = 0; i < _audio_streams.size(); ++i) {
+			if (_audio_streams[i]->uses_index (_format_context, _packet.stream_index)) {
+				audio_packet (context, _audio_streams[i]);
 			}
 		}

-		bool have_all_audio = true;
-		size_t i = 0;
-		while (i < _audio_streams.size() && have_all_audio) {
-			have_all_audio = _audio_streams[i]->first_audio;
-			++i;
+		for (size_t i = 0; i < _subtitle_streams.size(); ++i) {
+			if (_subtitle_streams[i]->uses_index (_format_context, _packet.stream_index)) {
+				subtitle_packet (context, _subtitle_streams[i]);
+			}
 		}

 		av_free_packet (&_packet);
-
-		if (_first_video && have_all_audio) {
-			break;
+	}
+}
+
+void
+FFmpegExaminer::video_packet (AVCodecContext* context)
+{
+	if (_first_video && !_need_video_length) {
+		return;
+	}
+
+	int frame_finished;
+	if (avcodec_decode_video2 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
+		if (!_first_video) {
+			_first_video = frame_time (_format_context->streams[_video_stream]);
+		}
+		if (_need_video_length) {
+			_video_length = frame_time (
+				_format_context->streams[_video_stream]
+				).get_value_or (ContentTime ()).frames (video_frame_rate().get ());
+		}
+	}
+}
+
+void
+FFmpegExaminer::audio_packet (AVCodecContext* context, shared_ptr<FFmpegAudioStream> stream)
+{
+	if (stream->first_audio) {
+		return;
+	}
+
+	int frame_finished;
+	if (avcodec_decode_audio4 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
+		stream->first_audio = frame_time (stream->stream (_format_context));
+	}
+}
+
+void
+FFmpegExaminer::subtitle_packet (AVCodecContext* context, shared_ptr<FFmpegSubtitleStream> stream)
+{
+	int frame_finished;
+	AVSubtitle sub;
+	if (avcodec_decode_subtitle2 (context, &sub, &frame_finished, &_packet) >= 0 && frame_finished) {
+		FFmpegSubtitlePeriod const period = subtitle_period (sub);
+		if (sub.num_rects <= 0 && _last_subtitle_start) {
+			stream->add_subtitle (ContentTimePeriod (_last_subtitle_start.get (), period.from));
+			_last_subtitle_start = optional<ContentTime> ();
+		} else if (sub.num_rects == 1) {
+			if (period.to) {
+				stream->add_subtitle (ContentTimePeriod (period.from, period.to.get ()));
+			} else {
+				_last_subtitle_start = period.from;
+			}
 		}
+		avsubtitle_free (&sub);
 	}
 }

-optional<double>
+optional<ContentTime>
FFmpegExaminer::frame_time (AVStream* s) const
 {
-	optional<double> t;
-
+	optional<ContentTime> t;
+
 	int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
 	if (bet != AV_NOPTS_VALUE) {
-		t = bet * av_q2d (s->time_base);
+		t = ContentTime::from_seconds (bet * av_q2d (s->time_base));
 	}

 	return t;
 }

-float
+optional<double>
 FFmpegExaminer::video_frame_rate () const
 {
-	AVStream* s = _format_context->streams[_video_stream];
-
-	if (s->avg_frame_rate.num && s->avg_frame_rate.den) {
-		return av_q2d (s->avg_frame_rate);
-	}
-
-	return av_q2d (s->r_frame_rate);
+	/* This use of r_frame_rate is debatable; there are a few different
+	 * frame rates in the format context, but this one seems to be the most
+	 * reliable.
+	 */
+	return av_q2d (av_stream_get_r_frame_rate (_format_context->streams[_video_stream]));
 }

 dcp::Size
@@ -133,18 +205,28 @@ FFmpegExaminer::video_size () const
 	return dcp::Size (video_codec_context()->width, video_codec_context()->height);
 }

-/** @return Length (in video frames) according to our content's header */
-VideoFrame
+/** @return Length according to our content's header */
+Frame
 FFmpegExaminer::video_length () const
 {
-	VideoFrame const length = (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate();
-	return max (1, length);
+	return max (Frame (1), _video_length);
+}
+
+optional<float>
+FFmpegExaminer::sample_aspect_ratio () const
+{
+	AVRational sar = av_guess_sample_aspect_ratio (_format_context, _format_context->streams[_video_stream], 0);
+	if (sar.num == 0) {
+		/* I assume this means that we don't know */
+		return optional<float> ();
+	}
+	return float (sar.num) / sar.den;
 }

 string
 FFmpegExaminer::audio_stream_name (AVStream* s) const
 {
-	stringstream n;
+	SafeStringStream n;

 	n << stream_name (s);

@@ -160,7 +242,7 @@ FFmpegExaminer::audio_stream_name (AVStream* s) const
 string
 FFmpegExaminer::subtitle_stream_name (AVStream* s) const
 {
-	stringstream n;
+	SafeStringStream n;

 	n << stream_name (s);

@@ -174,14 +256,14 @@ FFmpegExaminer::subtitle_stream_name (AVStream* s) const
 string
 FFmpegExaminer::stream_name (AVStream* s) const
 {
-	stringstream n;
+	SafeStringStream n;

 	if (s->metadata) {
 		AVDictionaryEntry const * lang = av_dict_get (s->metadata, "language", 0, 0);
 		if (lang) {
 			n << lang->value;
 		}
-
+
 		AVDictionaryEntry const * title = av_dict_get (s->metadata, "title", 0, 0);
 		if (title) {
 			if (!n.str().empty()) {
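
The most substantial new behaviour in this patch is the subtitle-period bookkeeping in FFmpegExaminer::subtitle_packet(): the examiner remembers the start time of an open-ended subtitle in _last_subtitle_start and closes the period when a packet with no rects (or with its own end time) arrives. The standalone sketch below models just that bookkeeping, outside of FFmpeg. It is not part of the patch: it uses std::optional and plain double timestamps where the real code uses boost::optional and ContentTime, it collapses the num_rects checks into a single has_rects flag, and all names (PeriodTracker, SubtitleEvent, and so on) are invented for illustration.

// Illustrative sketch only; not part of src/lib/ffmpeg_examiner.cc.
#include <iostream>
#include <optional>
#include <vector>

struct Period {
	double from;
	double to;
};

// A decoded subtitle "event": a start time, maybe an end time, and a flag
// saying whether the packet actually carried any subtitle rects.
struct SubtitleEvent {
	double from;
	std::optional<double> to;
	bool has_rects;
};

class PeriodTracker {
public:
	// Mirrors the shape of the logic in subtitle_packet(): an event with no
	// rects closes the currently open period; an event with rects either
	// yields a complete period (if it carries its own end time) or opens one.
	void event (SubtitleEvent const & e) {
		if (!e.has_rects && _open) {
			_periods.push_back (Period { *_open, e.from });
			_open.reset ();
		} else if (e.has_rects) {
			if (e.to) {
				_periods.push_back (Period { e.from, *e.to });
			} else {
				_open = e.from;
			}
		}
	}

	std::vector<Period> const & periods () const {
		return _periods;
	}

private:
	std::optional<double> _open;   // plays the role of _last_subtitle_start
	std::vector<Period> _periods;  // plays the role of FFmpegSubtitleStream's stored periods
};

int main ()
{
	PeriodTracker tracker;
	// A subtitle starting at 1s with no duration in its packet, an empty
	// "clear" packet at 3s, then a subtitle that carries its own end time.
	tracker.event (SubtitleEvent { 1.0, std::nullopt, true });
	tracker.event (SubtitleEvent { 3.0, std::nullopt, false });
	tracker.event (SubtitleEvent { 5.0, 6.5, true });

	for (auto const & p : tracker.periods ()) {
		std::cout << p.from << " -> " << p.to << "\n";
	}
	// Prints:
	// 1 -> 3
	// 5 -> 6.5
	return 0;
}

Keeping only the open start time, rather than buffering whole packets, is what lets the examiner collect subtitle periods in the same single pass over the file that it already uses to find the first video and audio frames.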