/*
    Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
22 #include <libavcodec/avcodec.h>
23 #include <libavformat/avformat.h>
24 #include <libavutil/pixfmt.h>
25 #include <libavutil/pixdesc.h>
27 #include "ffmpeg_examiner.h"
28 #include "ffmpeg_content.h"
30 #include "ffmpeg_audio_stream.h"
31 #include "ffmpeg_subtitle_stream.h"
33 #include "safe_stringstream.h"
34 #include <boost/foreach.hpp>
42 using boost::shared_ptr;
43 using boost::optional;
45 /** @param job job that the examiner is operating in, or 0 */
// NOTE(review): this extract is missing lines (the embedded original line
// numbers jump); the comments below describe only what is visible here.
46 FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c, shared_ptr<Job> job)
49 , _need_video_length (false)
51 /* Find audio and subtitle streams */
53 for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
54 AVStream* s = _format_context->streams[i];
55 if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
57 /* This is a hack; sometimes it seems that _audio_codec_context->channel_layout isn't set up,
58 so bodge it here. No idea why we should have to do this.
// If FFmpeg reported no channel layout, synthesise a default one from the channel count.
61 if (s->codec->channel_layout == 0) {
62 s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels);
// The audio stream length below is derived from the container duration, so it must be known.
65 DCPOMATIC_ASSERT (_format_context->duration != AV_NOPTS_VALUE);
// Record the audio stream; its length is the container duration (in AV_TIME_BASE
// units) scaled to a sample count at the stream's sample rate.
67 _audio_streams.push_back (
68 shared_ptr<FFmpegAudioStream> (
69 new FFmpegAudioStream (
72 s->codec->sample_rate,
73 (double (_format_context->duration) / AV_TIME_BASE) * s->codec->sample_rate,
79 } else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
80 _subtitle_streams.push_back (shared_ptr<FFmpegSubtitleStream> (new FFmpegSubtitleStream (subtitle_stream_name (s), s->id)));
85 /* See if the header has duration information in it */
// If the header gives a duration the video length can be computed directly;
// otherwise it must be measured by decoding (see the scan loop below).
86 _need_video_length = _format_context->duration == AV_NOPTS_VALUE;
87 if (!_need_video_length) {
88 _video_length = (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get ();
// Tell the job what the upcoming scan will be doing.
93 if (_need_video_length) {
94 job->sub (_("Finding length and subtitles"));
95 } else if (!_subtitle_streams.empty()) {
96 job->sub (_("Finding subtitles"));
98 job->sub (_("Finding length"));
102 /* Run through until we find:
104 * - the first audio for each stream.
105 * - the subtitle periods for each stream.
107 * We have to note subtitle periods as otherwise we have no way of knowing
108 * where we should look for subtitles (video and audio are always present,
// Demux loop: read packets and hand each one to the matching *_packet() helper.
112 int64_t const len = _file_group.length ();
114 int r = av_read_frame (_format_context, &_packet);
// Report progress against the file length when the I/O position is available.
121 job->set_progress (float (_format_context->pb->pos) / len);
123 job->set_progress_unknown ();
127 AVCodecContext* context = _format_context->streams[_packet.stream_index]->codec;
129 if (_video_stream && _packet.stream_index == _video_stream.get()) {
130 video_packet (context);
// Decode audio until every audio stream has yielded its first timestamp.
133 bool got_all_audio = true;
135 for (size_t i = 0; i < _audio_streams.size(); ++i) {
136 if (_audio_streams[i]->uses_index (_format_context, _packet.stream_index)) {
137 audio_packet (context, _audio_streams[i]);
139 if (!_audio_streams[i]->first_audio) {
140 got_all_audio = false;
144 for (size_t i = 0; i < _subtitle_streams.size(); ++i) {
145 if (_subtitle_streams[i]->uses_index (_format_context, _packet.stream_index)) {
146 subtitle_packet (context, _subtitle_streams[i]);
// Release the packet's buffers before reading the next one.
150 av_packet_unref (&_packet);
// Early exit: with no subtitle streams there is nothing more to learn once
// the first video and all first-audio timestamps have been seen.
152 if (_first_video && got_all_audio && _subtitle_streams.empty ()) {
158 /* Finish off any hanging subtitles at the end */
// Any subtitle still "open" at EOF is closed at the end of the video
// (frame rate defaulting to 24 when unknown).
159 for (LastSubtitleMap::const_iterator i = _last_subtitle_start.begin(); i != _last_subtitle_start.end(); ++i) {
161 if (i->second->image) {
162 i->first->add_image_subtitle (
166 ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
170 i->first->add_text_subtitle (
174 ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
181 /* We just added subtitles to our streams without taking the PTS offset into account;
182 this is because we might not know the PTS offset when the first subtitle is seen.
183 Now we know the PTS offset so we can apply it to those subtitles.
185 if (has_video() && video_frame_rate()) {
186 BOOST_FOREACH (shared_ptr<FFmpegSubtitleStream> i, _subtitle_streams) {
187 i->add_offset (pts_offset (_audio_streams, _first_video, video_frame_rate().get()));
// Decode a video packet (the current _packet) to find the first video frame's
// timestamp and, when the header lacked a duration, to extend the measured
// video length.  NOTE(review): this extract is missing lines; only part of
// the body is visible.
193 FFmpegExaminer::video_packet (AVCodecContext* context)
195 DCPOMATIC_ASSERT (_video_stream);
// Nothing more to learn once we have the first frame time and do not need a length.
197 if (_first_video && !_need_video_length) {
202 if (avcodec_decode_video2 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
204 _first_video = frame_time (_format_context->streams[_video_stream.get()]);
206 if (_need_video_length) {
// Length tracks the time of the latest decoded frame, rounded to whole frames.
207 _video_length = frame_time (
208 _format_context->streams[_video_stream.get()]
209 ).get_value_or (ContentTime ()).frames_round (video_frame_rate().get ());
// Decode an audio packet (the current _packet) to record the given stream's
// first audio timestamp.  NOTE(review): this extract is missing lines.
215 FFmpegExaminer::audio_packet (AVCodecContext* context, shared_ptr<FFmpegAudioStream> stream)
// Already have a first-audio time for this stream; nothing to do.
217 if (stream->first_audio) {
222 if (avcodec_decode_audio4 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
223 stream->first_audio = frame_time (stream->stream (_format_context));
// Decode a subtitle packet (the current _packet), recording subtitle periods
// on the stream and, for bitmap subtitles, the palette colours used.
// NOTE(review): this extract is missing lines; comments describe only what is visible.
228 FFmpegExaminer::subtitle_packet (AVCodecContext* context, shared_ptr<FFmpegSubtitleStream> stream)
232 if (avcodec_decode_subtitle2 (context, &sub, &frame_finished, &_packet) >= 0 && frame_finished) {
233 string id = subtitle_id (sub);
234 FFmpegSubtitlePeriod const period = subtitle_period (sub);
235 bool const starts_image = subtitle_starts_image (sub);
// Is there a subtitle on this stream whose start we have seen but not its end?
237 LastSubtitleMap::iterator last = _last_subtitle_start.find (stream);
238 if (last != _last_subtitle_start.end() && last->second) {
239 /* We have seen the start of a subtitle but not yet the end. Whatever this is
240 finishes the previous subtitle, so add it */
241 if (last->second->image) {
242 stream->add_image_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
244 stream->add_text_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
246 if (sub.num_rects == 0) {
247 /* This is a `proper' end-of-subtitle */
// Mark the stream as having no open subtitle.
248 _last_subtitle_start[stream] = optional<SubtitleStart> ();
250 /* This is just another subtitle, so we start again */
251 _last_subtitle_start[stream] = SubtitleStart (id, starts_image, period.from);
// Single rect with no open subtitle: either record a complete period...
253 } else if (sub.num_rects == 1) {
256 stream->add_image_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
258 stream->add_text_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
// ...or (presumably when period.to is not known) note the start and wait for the end.
261 _last_subtitle_start[stream] = SubtitleStart (id, starts_image, period.from);
// Collect palette colours from bitmap rects; the two branches differ only in
// where this FFmpeg version stores the palette (old pict.data vs newer data).
265 for (unsigned int i = 0; i < sub.num_rects; ++i) {
266 if (sub.rects[i]->type == SUBTITLE_BITMAP) {
267 #ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
// Palette entries are packed 32-bit words: A in the top byte, then R, G, B.
268 uint32_t* palette = (uint32_t *) sub.rects[i]->pict.data[1];
269 for (int j = 0; j < sub.rects[i]->nb_colors; ++j) {
271 (palette[j] & 0x00ff0000) >> 16,
272 (palette[j] & 0x0000ff00) >> 8,
273 (palette[j] & 0x000000ff) >> 0,
274 (palette[j] & 0xff000000) >> 24
277 stream->set_colour (rgba, rgba);
280 uint32_t* palette = (uint32_t *) sub.rects[i]->data[1];
281 for (int j = 0; j < sub.rects[i]->nb_colors; ++j) {
283 (palette[j] & 0x00ff0000) >> 16,
284 (palette[j] & 0x0000ff00) >> 8,
285 (palette[j] & 0x000000ff) >> 0,
286 (palette[j] & 0xff000000) >> 24
289 stream->set_colour (rgba, rgba);
// Free the decoded subtitle's resources.
295 avsubtitle_free (&sub);
299 optional<ContentTime>
300 FFmpegExaminer::frame_time (AVStream* s) const
302 optional<ContentTime> t;
304 int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
305 if (bet != AV_NOPTS_VALUE) {
306 t = ContentTime::from_seconds (bet * av_q2d (s->time_base));
313 FFmpegExaminer::video_frame_rate () const
315 DCPOMATIC_ASSERT (_video_stream);
316 /* This use of r_frame_rate is debateable; there's a few different
317 * frame rates in the format context, but this one seems to be the most
320 return av_q2d (av_stream_get_r_frame_rate (_format_context->streams[_video_stream.get()]));
324 FFmpegExaminer::video_size () const
326 return dcp::Size (video_codec_context()->width, video_codec_context()->height);
329 /** @return Length according to our content's header */
331 FFmpegExaminer::video_length () const
333 return max (Frame (1), _video_length);
337 FFmpegExaminer::sample_aspect_ratio () const
339 DCPOMATIC_ASSERT (_video_stream);
340 AVRational sar = av_guess_sample_aspect_ratio (_format_context, _format_context->streams[_video_stream.get()], 0);
342 /* I assume this means that we don't know */
343 return optional<double> ();
345 return double (sar.num) / sar.den;
// Build a human-readable name for a subtitle stream from its metadata.
// NOTE(review): this extract is missing lines; only part of the body is
// visible (the fallback used when the name is empty is not shown here).
349 FFmpegExaminer::subtitle_stream_name (AVStream* s) const
353 n << stream_name (s);
355 if (n.str().empty()) {
// Build a name for a stream from its metadata "language" and "title" entries.
// NOTE(review): this extract is missing lines; only part of the body is visible.
363 FFmpegExaminer::stream_name (AVStream* s) const
368 AVDictionaryEntry const * lang = av_dict_get (s->metadata, "language", 0, 0);
373 AVDictionaryEntry const * title = av_dict_get (s->metadata, "title", 0, 0);
375 if (!n.str().empty()) {
386 FFmpegExaminer::bits_per_pixel () const
388 if (video_codec_context()->pix_fmt == -1) {
389 throw DecodeError (_("Could not find pixel format for video."));
392 AVPixFmtDescriptor const * d = av_pix_fmt_desc_get (video_codec_context()->pix_fmt);
393 DCPOMATIC_ASSERT (d);
394 return av_get_bits_per_pixel (d);
398 FFmpegExaminer::yuv () const
400 switch (video_codec_context()->pix_fmt) {
401 case AV_PIX_FMT_YUV420P:
402 case AV_PIX_FMT_YUYV422:
403 case AV_PIX_FMT_YUV422P:
404 case AV_PIX_FMT_YUV444P:
405 case AV_PIX_FMT_YUV410P:
406 case AV_PIX_FMT_YUV411P:
407 case AV_PIX_FMT_YUVJ420P:
408 case AV_PIX_FMT_YUVJ422P:
409 case AV_PIX_FMT_YUVJ444P:
410 case AV_PIX_FMT_UYVY422:
411 case AV_PIX_FMT_UYYVYY411:
412 case AV_PIX_FMT_NV12:
413 case AV_PIX_FMT_NV21:
414 case AV_PIX_FMT_YUV440P:
415 case AV_PIX_FMT_YUVJ440P:
416 case AV_PIX_FMT_YUVA420P:
417 case AV_PIX_FMT_YUV420P16LE:
418 case AV_PIX_FMT_YUV420P16BE:
419 case AV_PIX_FMT_YUV422P16LE:
420 case AV_PIX_FMT_YUV422P16BE:
421 case AV_PIX_FMT_YUV444P16LE:
422 case AV_PIX_FMT_YUV444P16BE:
423 case AV_PIX_FMT_YUV420P9BE:
424 case AV_PIX_FMT_YUV420P9LE:
425 case AV_PIX_FMT_YUV420P10BE:
426 case AV_PIX_FMT_YUV420P10LE:
427 case AV_PIX_FMT_YUV422P10BE:
428 case AV_PIX_FMT_YUV422P10LE:
429 case AV_PIX_FMT_YUV444P9BE:
430 case AV_PIX_FMT_YUV444P9LE:
431 case AV_PIX_FMT_YUV444P10BE:
432 case AV_PIX_FMT_YUV444P10LE:
433 case AV_PIX_FMT_YUV422P9BE:
434 case AV_PIX_FMT_YUV422P9LE:
435 case AV_PIX_FMT_YUVA420P9BE:
436 case AV_PIX_FMT_YUVA420P9LE:
437 case AV_PIX_FMT_YUVA422P9BE:
438 case AV_PIX_FMT_YUVA422P9LE:
439 case AV_PIX_FMT_YUVA444P9BE:
440 case AV_PIX_FMT_YUVA444P9LE:
441 case AV_PIX_FMT_YUVA420P10BE:
442 case AV_PIX_FMT_YUVA420P10LE:
443 case AV_PIX_FMT_YUVA422P10BE:
444 case AV_PIX_FMT_YUVA422P10LE:
445 case AV_PIX_FMT_YUVA444P10BE:
446 case AV_PIX_FMT_YUVA444P10LE:
447 case AV_PIX_FMT_YUVA420P16BE:
448 case AV_PIX_FMT_YUVA420P16LE:
449 case AV_PIX_FMT_YUVA422P16BE:
450 case AV_PIX_FMT_YUVA422P16LE:
451 case AV_PIX_FMT_YUVA444P16BE:
452 case AV_PIX_FMT_YUVA444P16LE:
453 case AV_PIX_FMT_NV16:
454 case AV_PIX_FMT_NV20LE:
455 case AV_PIX_FMT_NV20BE:
456 case AV_PIX_FMT_YVYU422:
457 case AV_PIX_FMT_YUVA444P:
458 case AV_PIX_FMT_YUVA422P:
459 case AV_PIX_FMT_YUV420P12BE:
460 case AV_PIX_FMT_YUV420P12LE:
461 case AV_PIX_FMT_YUV420P14BE:
462 case AV_PIX_FMT_YUV420P14LE:
463 case AV_PIX_FMT_YUV422P12BE:
464 case AV_PIX_FMT_YUV422P12LE:
465 case AV_PIX_FMT_YUV422P14BE:
466 case AV_PIX_FMT_YUV422P14LE:
467 case AV_PIX_FMT_YUV444P12BE:
468 case AV_PIX_FMT_YUV444P12LE:
469 case AV_PIX_FMT_YUV444P14BE:
470 case AV_PIX_FMT_YUV444P14LE:
471 case AV_PIX_FMT_YUVJ411P:
479 FFmpegExaminer::has_video () const
481 return static_cast<bool> (_video_stream);