/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/

/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "exceptions.h"
#include "image.h"
#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "film.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/algorithm/string.hpp>
#include <iostream>
#include <vector>

#include "i18n.h"


using std::cout;
using std::dynamic_pointer_cast;
using std::list;
using std::map;
using std::min;
using std::pair;
using std::shared_ptr;
using std::string;
using std::vector;
using boost::is_any_of;
using boost::optional;
using namespace dcpomatic;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
	: FFmpeg (c)
	, Decoder (film)
	, _have_current_subtitle (false)
{
	if (c->video && c->video->use()) {
		video.reset (new VideoDecoder (this, c));
		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
		/* It doesn't matter what size or pixel format this is, it just needs to be black */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
		_black_image->make_black ();
	} else {
		_pts_offset = ContentTime ();
	}

	if (c->audio) {
		audio.reset (new AudioDecoder (this, c->audio, fast));
	}

	if (c->only_text()) {
		/* XXX: this time here should be the time of the first subtitle, not 0 */
		text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), ContentTime())));
	}

	_next_time.resize (_format_context->nb_streams);
}

void
FFmpegDecoder::flush ()
{
	/* Get any remaining frames */

	/* XXX: should we reset _packet.data and size after each *_decode_* call? */

	while (video && decode_video_packet()) {}

	if (audio) {
		decode_audio_packet ();
	}

	/* Make sure all streams are the same length and round up to the next video frame */

	FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
	ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
	full_length = full_length.ceil (frc.source);
	if (video) {
		double const vfr = _ffmpeg_content->video_frame_rate().get();
		Frame const f = full_length.frames_round (vfr);
		Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
		while (v < f) {
			video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
			++v;
		}
	}

	for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
		ContentTime a = audio->stream_position(film(), i);
		/* Unfortunately if a is 0 that really means that we don't know the stream position since
		   there has been no data on it since the last seek.  In this case we'll just do nothing
		   here.  I'm not sure if that's the right idea.
		*/
		if (a > ContentTime()) {
			while (a < full_length) {
				ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
				shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
				silence->make_silent ();
				audio->emit (film(), i, silence, a, true);
				a += to_do;
			}
		}
	}
}

bool
FFmpegDecoder::pass ()
{
	int r = av_read_frame (_format_context, &_packet);

	/* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
	   has pretty-much succeeded (and hence generated data which should be processed).
	   Hence it makes sense to continue here in that case.
	*/
	if (r < 0 && r != AVERROR_INVALIDDATA) {
		if (r != AVERROR_EOF) {
			/* Maybe we should fail here, but for now we'll just finish off instead */
			char buf[256];
			av_strerror (r, buf, sizeof(buf));
			LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
		}

		flush ();
		return true;
	}

	int const si = _packet.stream_index;
	shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

	if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
		decode_video_packet ();
	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
		decode_subtitle_packet ();
	} else {
		decode_audio_packet ();
	}

	av_packet_unref (&_packet);
	return false;
}

/** Deinterleave audio from _frame and convert it to floats.
 *  For non-planar sample formats only the first buffer in _frame->data is used;
 *  for planar formats there is one buffer per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
{
	DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

DCPOMATIC_DISABLE_WARNINGS
	int const size = av_samples_get_buffer_size (
		0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
		);
DCPOMATIC_ENABLE_WARNINGS

	/* XXX: can't we just use _frame->nb_samples directly here? */
	/* XXX: can't we use swr_convert() to do the format conversion? */
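
	/* A sketch of an answer to the second XXX (not what this method actually does): libswresample
	   could deinterleave and convert to float in one call.  This assumes the pre-FFmpeg-5.1
	   channel-layout API and <libswresample/swresample.h>; `out' stands in for an array of
	   per-channel float buffers:

	       SwrContext* swr = swr_alloc_set_opts (
	               0,
	               av_get_default_channel_layout (stream->channels()), AV_SAMPLE_FMT_FLTP, stream->frame_rate(),
	               av_get_default_channel_layout (stream->channels()), audio_sample_format (stream), stream->frame_rate(),
	               0, 0
	               );
	       swr_init (swr);
	       swr_convert (swr, reinterpret_cast<uint8_t**> (out), _frame->nb_samples, const_cast<uint8_t const**> (_frame->data), _frame->nb_samples);
	       swr_free (&swr);
	*/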

	/* Deinterleave and convert to float */

	/* total_samples and frames will be rounded down here, so if there are stray samples at the end
	   of the block that do not form a complete sample or frame they will be dropped.
	*/
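	/* For example (illustrative numbers only): with interleaved S16 stereo and size = 1002 bytes,
	   bytes_per_audio_sample() is 2, so total_samples = 501 and frames = 250; the odd left-over
	   sample, which has no partner to complete its frame, is dropped.
	*/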
	int const total_samples = size / bytes_per_audio_sample (stream);
	int const channels = stream->channels();
	int const frames = total_samples / channels;
	shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
	float** data = audio->data();

	switch (audio_sample_format (stream)) {
	case AV_SAMPLE_FMT_U8:
		uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
		for (int i = 0; i < total_samples; ++i) {
			/* U8 samples are unsigned with a mid-point of 0x80, so re-centre before scaling to [-1, 1) */
			data[channel][sample] = float(*p++ - 0x80) / (1 << 7);
			if (channel == channels) {

	case AV_SAMPLE_FMT_S16:
		int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = float(*p++) / (1 << 15);
			if (channel == channels) {

	case AV_SAMPLE_FMT_S16P:
		int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
		for (int i = 0; i < channels; ++i) {
			for (int j = 0; j < frames; ++j) {
				data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);

	case AV_SAMPLE_FMT_S32:
		int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = static_cast<float>(*p++) / 2147483648;
			if (channel == channels) {

	case AV_SAMPLE_FMT_S32P:
		int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
		for (int i = 0; i < channels; ++i) {
			for (int j = 0; j < frames; ++j) {
				data[i][j] = static_cast<float>(p[i][j]) / 2147483648;

	case AV_SAMPLE_FMT_FLT:
		float* p = reinterpret_cast<float*> (_frame->data[0]);
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = *p++;
			if (channel == channels) {

	case AV_SAMPLE_FMT_FLTP:
		float** p = reinterpret_cast<float**> (_frame->data);
		DCPOMATIC_ASSERT (_frame->channels <= channels);
		/* Sometimes there aren't as many channels in the _frame as in the stream */
		for (int i = 0; i < _frame->channels; ++i) {
			memcpy (data[i], p[i], frames * sizeof(float));
		for (int i = _frame->channels; i < channels; ++i) {
			audio->make_silent (i);

	default:
		throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
	}

	return audio;
}

AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
DCPOMATIC_DISABLE_WARNINGS
	return stream->stream (_format_context)->codec->sample_fmt;
DCPOMATIC_ENABLE_WARNINGS
}

int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
	return av_get_bytes_per_sample (audio_sample_format (stream));
}

void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
	Decoder::seek (time, accurate);

	/* If we are doing an `accurate' seek, we need to use pre-roll, as
	   we don't really know what the seek will give us.
	*/
	ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
	time -= pre_roll;

	/* XXX: it seems debatable whether PTS should be used here...
	   http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
	*/
	optional<int> stream;

	if (_video_stream) {
		stream = _video_stream;
	} else {
		DCPOMATIC_ASSERT (_ffmpeg_content->audio);
		shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
		if (s) {
			stream = s->index (_format_context);
		}
	}

	DCPOMATIC_ASSERT (stream);

	ContentTime u = time - _pts_offset;
	if (u < ContentTime ()) {
		u = ContentTime ();
	}
	av_seek_frame (
		_format_context,
		stream.get(),
		u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
		AVSEEK_FLAG_BACKWARD
		);

	{
		/* Force re-creation of filter graphs to reset them and hence to make sure
		   they don't have any pre-seek frames knocking about.
		*/
		boost::mutex::scoped_lock lm (_filter_graphs_mutex);
		_filter_graphs.clear ();
	}

	if (video_codec_context ()) {
		avcodec_flush_buffers (video_codec_context());
	}

DCPOMATIC_DISABLE_WARNINGS
	for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
		avcodec_flush_buffers (i->stream(_format_context)->codec);
	}
DCPOMATIC_ENABLE_WARNINGS

	if (subtitle_codec_context ()) {
		avcodec_flush_buffers (subtitle_codec_context ());
	}

	_have_current_subtitle = false;

	for (auto& i: _next_time) {
		i = optional<ContentTime>();
	}
}

void
FFmpegDecoder::decode_audio_packet ()
{
	/* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4 more than once. */
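
	/* avcodec_decode_audio4() is deprecated in more recent FFmpeg.  As a rough, illustrative sketch
	   (not what the loop below does), the newer send/receive API would look something like this,
	   where `context' stands for this stream's codec context:

	       int r = avcodec_send_packet (context, &_packet);
	       while (r >= 0) {
	               r = avcodec_receive_frame (context, _frame);
	               if (r == AVERROR(EAGAIN) || r == AVERROR_EOF) {
	                       break;
	               }
	               ... deinterleave and emit _frame, as below ...
	       }
	*/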

	AVPacket copy_packet = _packet;
	int const stream_index = copy_packet.stream_index;

	/* XXX: inefficient */
	vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
	vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
	while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
		++stream;
	}

	if (stream == streams.end ()) {
		/* The packet's stream may not be an audio one; just ignore it in this method if so */
		return;
	}

DCPOMATIC_DISABLE_WARNINGS
	while (copy_packet.size > 0) {
		int frame_finished;
		int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
		if (decode_result < 0) {
			/* avcodec_decode_audio4 can sometimes return an error even though it has decoded
			   some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
			   if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
			   even in the face of such an error, so I think we should too.

			   Returning from the method here caused mantis #352.
			*/
			LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

			/* Fudge decode_result so that we come out of the while loop when
			   we've processed this data.
			*/
			decode_result = copy_packet.size;
		}

		if (frame_finished) {
			shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);

			ContentTime ct;
			if (_frame->pts == AV_NOPTS_VALUE) {
				/* In some streams we see not every frame coming through with a timestamp; for those
				   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
				   particularly noticeable with TrueHD streams (see #1111).
				*/
				if (_next_time[stream_index]) {
					ct = *_next_time[stream_index];
				}
			} else {
				ct = ContentTime::from_seconds (
					av_frame_get_best_effort_timestamp (_frame) *
					av_q2d ((*stream)->stream (_format_context)->time_base))
					+ _pts_offset;
			}

			_next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());

			if (ct < ContentTime ()) {
				/* Discard audio data that comes before time 0 */
				Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
				data->move (data->frames() - remove, remove, 0);
				data->set_frames (data->frames() - remove);
				ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
			}

			if (ct < ContentTime()) {
				LOG_WARNING (
					"Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
					to_string(ct),
					data->frames(),
					copy_packet.stream_index,
					copy_packet.pts,
					av_frame_get_best_effort_timestamp(_frame),
					av_q2d((*stream)->stream(_format_context)->time_base),
					to_string(_pts_offset)
					);
			}
DCPOMATIC_ENABLE_WARNINGS

			/* Give this data provided there is some, and its time is sane */
			if (ct >= ContentTime() && data->frames() > 0) {
				audio->emit (film(), *stream, data, ct);
			}
		}

		copy_packet.data += decode_result;
		copy_packet.size -= decode_result;
	}
}

bool
FFmpegDecoder::decode_video_packet ()
{
	DCPOMATIC_ASSERT (_video_stream);

	int frame_finished;
DCPOMATIC_DISABLE_WARNINGS
	if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
		return false;
	}
DCPOMATIC_ENABLE_WARNINGS

	boost::mutex::scoped_lock lm (_filter_graphs_mutex);

	shared_ptr<VideoFilterGraph> graph;
	list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
	while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
		++i;
	}

	if (i == _filter_graphs.end ()) {
		dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
		graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
		graph->setup (_ffmpeg_content->filters ());
		_filter_graphs.push_back (graph);
		LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
	} else {
		graph = *i;
	}

	list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

	for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {
		shared_ptr<Image> image = i->first;
		if (i->second != AV_NOPTS_VALUE) {
			double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
			video->emit (
				film(),
				shared_ptr<ImageProxy> (new RawImageProxy (image)),
				llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
				);
		} else {
			LOG_WARNING_NC ("Dropping frame without PTS");
		}
	}

	return true;
}

void
FFmpegDecoder::decode_subtitle_packet ()
{
	int got_subtitle;
	AVSubtitle sub;
	if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
		return;
	}

	/* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
	if (_have_current_subtitle) {
		if (_current_subtitle_to) {
			only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
		} else {
			only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
		}
		_have_current_subtitle = false;
	}

	if (sub.num_rects <= 0) {
		/* Nothing new in this subtitle */
		return;
	}

	/* Subtitle PTS (within the source, not taking into account any of the
	   source that we may have chopped off for the DCP).
	*/
	FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
	ContentTime from;
	from = sub_period.from + _pts_offset;
	if (sub_period.to) {
		_current_subtitle_to = *sub_period.to + _pts_offset;
	} else {
		_current_subtitle_to = optional<ContentTime>();
	}
	_have_current_subtitle = true;

	for (unsigned int i = 0; i < sub.num_rects; ++i) {
		AVSubtitleRect const * rect = sub.rects[i];
		switch (rect->type) {
		case SUBTITLE_NONE:
			break;
		case SUBTITLE_BITMAP:
			decode_bitmap_subtitle (rect, from);
			break;
		case SUBTITLE_TEXT:
			cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
			break;
		case SUBTITLE_ASS:
			decode_ass_subtitle (rect->ass, from);
			break;
		}
	}

	if (_current_subtitle_to) {
		only_text()->emit_stop (*_current_subtitle_to);
	}

	avsubtitle_free (&sub);
}

void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
	/* Note BGRA is expressed little-endian, so the first byte in the word is B, second
	   G, third R, fourth A.
	*/

	shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
	/* Start of the first line in the subtitle */
	uint8_t* sub_p = rect->pict.data[0];
	/* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
	   (i.e. first byte B, second G, third R, fourth A)
	*/
	uint8_t const * palette = rect->pict.data[1];
#else
	/* Start of the first line in the subtitle */
	uint8_t* sub_p = rect->data[0];
	/* sub_p looks up into a BGRA palette which is at rect->data[1].
	   (first byte B, second G, third R, fourth A)
	*/
	uint8_t const * palette = rect->data[1];
#endif
	/* And the stream has a map of those palette colours to colours
	   chosen by the user; create a `mapped' palette from those settings.
	*/
	map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
	vector<RGBA> mapped_palette (rect->nb_colors);
	for (int i = 0; i < rect->nb_colors; ++i) {
		RGBA c (palette[2], palette[1], palette[0], palette[3]);
		map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
		if (j != colour_map.end ()) {
			mapped_palette[i] = j->second;
		} else {
			/* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
			   it is from a project that was created before this stuff was added.  Just use the
			   colour straight from the original palette.
			*/
			mapped_palette[i] = c;
		}
		palette += 4;
	}

	/* Start of the output data */
	uint8_t* out_p = image->data()[0];

	for (int y = 0; y < rect->h; ++y) {
		uint8_t* sub_line_p = sub_p;
		uint8_t* out_line_p = out_p;
		for (int x = 0; x < rect->w; ++x) {
			RGBA const p = mapped_palette[*sub_line_p++];
			*out_line_p++ = p.b;
			*out_line_p++ = p.g;
			*out_line_p++ = p.r;
			*out_line_p++ = p.a;
		}
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
		sub_p += rect->pict.linesize[0];
#else
		sub_p += rect->linesize[0];
#endif
		out_p += image->stride()[0];
	}

	int target_width = subtitle_codec_context()->width;
	if (target_width == 0 && video_codec_context()) {
		/* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
		   know if it's supposed to mean something from FFmpeg's point of view.
		*/
		target_width = video_codec_context()->width;
	}
	int target_height = subtitle_codec_context()->height;
	if (target_height == 0 && video_codec_context()) {
		target_height = video_codec_context()->height;
	}
	DCPOMATIC_ASSERT (target_width);
	DCPOMATIC_ASSERT (target_height);
	dcpomatic::Rect<double> const scaled_rect (
		static_cast<double> (rect->x) / target_width,
		static_cast<double> (rect->y) / target_height,
		static_cast<double> (rect->w) / target_width,
		static_cast<double> (rect->h) / target_height
		);

	only_text()->emit_bitmap_start (from, image, scaled_rect);
}

void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
{
	/* We have no styles and no Format: line, so I'm assuming that FFmpeg
	   produces a single format of Dialogue: lines...
	*/
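
	/* An illustrative Dialogue: line (made up, not taken from any particular file) would be
	   "Dialogue: 0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello world"
	   where the text we want ("Hello world") starts after the ninth comma.
	*/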

	int commas = 0;
	string text;
	for (size_t i = 0; i < ass.length(); ++i) {
		if (commas < 9 && ass[i] == ',') {
			++commas;
		} else if (commas == 9) {
			text += ass[i];
		}
	}

	sub::RawSubtitle base;
	auto raw = sub::SSAReader::parse_line (
		base,
		text,
		_ffmpeg_content->video->size().width,
		_ffmpeg_content->video->size().height
		);

	for (auto const& i: sub::collect<vector<sub::Subtitle>> (raw)) {
		only_text()->emit_plain_start (from, i);
	}
}