/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "exceptions.h"
#include "image.h"
#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "film.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
#include <boost/algorithm/string.hpp>
#include <iostream>
#include <list>
#include <map>
#include <vector>
#include <stdint.h>

#include "i18n.h"

using std::cout;
using std::string;
using std::vector;
using std::list;
using std::pair;
using std::map;
using std::min;
using boost::shared_ptr;
using boost::is_any_of;
using boost::optional;
using boost::dynamic_pointer_cast;
using namespace dcpomatic;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
    : FFmpeg (c)
    , Decoder (film)
    , _have_current_subtitle (false)
{
    if (c->video && c->video->use()) {
        video.reset (new VideoDecoder (this, c));
        _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
        /* It doesn't matter what size or pixel format this is, it just needs to be black */
        _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
        _black_image->make_black ();
    } else {
        _pts_offset = ContentTime ();
    }

    if (c->audio) {
        audio.reset (new AudioDecoder (this, c->audio, fast));
    }

    if (c->only_text()) {
        /* XXX: this time here should be the time of the first subtitle, not 0 */
        text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), ContentTime())));
    }

    _next_time.resize (_format_context->nb_streams);
}
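
/** Flush out any remaining data in the decoders, then pad video with black frames
 *  and audio with silence so that all streams finish at the content's full length.
 */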
void
FFmpegDecoder::flush ()
{
    /* Get any remaining frames */

    _packet.data = 0;
    _packet.size = 0;

    /* XXX: should we reset _packet.data and size after each *_decode_* call? */

    while (video && decode_video_packet()) {}

    if (audio) {
        decode_audio_packet ();
    }

    /* Make sure all streams are the same length and round up to the next video frame */

    FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
    ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
    full_length = full_length.ceil (frc.source);
    if (video) {
        double const vfr = _ffmpeg_content->video_frame_rate().get();
        Frame const f = full_length.frames_round (vfr);
        Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
        while (v < f) {
            video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
            ++v;
        }
    }

    BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
        ContentTime a = audio->stream_position(film(), i);
        /* Unfortunately if a is 0 that really means that we don't know the stream position since
           there has been no data on it since the last seek.  In this case we'll just do nothing
           here.  I'm not sure if that's the right idea.
        */
        if (a > ContentTime()) {
            while (a < full_length) {
                ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
                shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
                silence->make_silent ();
                audio->emit (film(), i, silence, a);
                a += to_do;
            }
        }
    }
}
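
/** @return true if the content has been exhausted (or cannot be decoded), otherwise false */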
bool
FFmpegDecoder::pass ()
{
#ifdef DCPOMATIC_VARIANT_SWAROOP
    if (_ffmpeg_content->encrypted() && !_ffmpeg_content->kdm()) {
        return true;
    }
#endif

    int r = av_read_frame (_format_context, &_packet);

    /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
       has pretty-much succeeded (and hence generated data which should be processed).
       Hence it makes sense to continue here in that case.
    */
    if (r < 0 && r != AVERROR_INVALIDDATA) {
        if (r != AVERROR_EOF) {
            /* Maybe we should fail here, but for now we'll just finish off instead */
            char buf[256];
            av_strerror (r, buf, sizeof(buf));
            LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
        }

        flush ();
        return true;
    }

    int const si = _packet.stream_index;
    shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

    if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
        decode_video_packet ();
    } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
        decode_subtitle_packet ();
    } else {
        decode_audio_packet ();
    }

    av_packet_unref (&_packet);
    return false;
}

/** Deinterleave the audio data in _frame and convert it to float.
 *  _frame->data is an array of pointers to buffers; only the first buffer is used
 *  for non-planar (interleaved) data, otherwise there is one buffer per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
{
    DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

    int const size = av_samples_get_buffer_size (
        0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
        );

    /* XXX: can't we just use _frame->nb_samples directly here? */
    /* XXX: can't we use swr_convert() to do the format conversion? */

    /* Deinterleave and convert to float */

    /* total_samples and frames will be rounded down here, so if there are stray samples at the end
       of the block that do not form a complete sample or frame they will be dropped.
    */
    int const total_samples = size / bytes_per_audio_sample (stream);
    int const channels = stream->channels();
    int const frames = total_samples / channels;
    shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
    float** data = audio->data();
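
    /* Each integer case below maps the sample's full range onto [-1, 1) by dividing
       by 2^(bits - 1): for example, an int16_t sample of -32768 becomes -1.0 and
       32767 becomes just under 1.0.  Unsigned 8-bit samples are biased around 128
       and so must be re-centred before scaling.
    */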
    switch (audio_sample_format (stream)) {
    case AV_SAMPLE_FMT_U8:
    {
        uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
        int sample = 0;
        int channel = 0;
        for (int i = 0; i < total_samples; ++i) {
            /* U8 samples are unsigned with a mid-point of 128, so re-centre then scale */
            data[channel][sample] = (float(*p++) - 128) / (1 << 7);

            ++channel;
            if (channel == channels) {
                channel = 0;
                ++sample;
            }
        }
    }
    break;

    case AV_SAMPLE_FMT_S16:
    {
        int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
        int sample = 0;
        int channel = 0;
        for (int i = 0; i < total_samples; ++i) {
            data[channel][sample] = float(*p++) / (1 << 15);

            ++channel;
            if (channel == channels) {
                channel = 0;
                ++sample;
            }
        }
    }
    break;

    case AV_SAMPLE_FMT_S16P:
    {
        int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
        for (int i = 0; i < channels; ++i) {
            for (int j = 0; j < frames; ++j) {
                data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
            }
        }
    }
    break;

    case AV_SAMPLE_FMT_S32:
    {
        int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
        int sample = 0;
        int channel = 0;
        for (int i = 0; i < total_samples; ++i) {
            data[channel][sample] = static_cast<float>(*p++) / 2147483648;

            ++channel;
            if (channel == channels) {
                channel = 0;
                ++sample;
            }
        }
    }
    break;

    case AV_SAMPLE_FMT_S32P:
    {
        int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
        for (int i = 0; i < channels; ++i) {
            for (int j = 0; j < frames; ++j) {
                data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
            }
        }
    }
    break;

    case AV_SAMPLE_FMT_FLT:
    {
        float* p = reinterpret_cast<float*> (_frame->data[0]);
        int sample = 0;
        int channel = 0;
        for (int i = 0; i < total_samples; ++i) {
            data[channel][sample] = *p++;

            ++channel;
            if (channel == channels) {
                channel = 0;
                ++sample;
            }
        }
    }
    break;

    case AV_SAMPLE_FMT_FLTP:
    {
        float** p = reinterpret_cast<float**> (_frame->data);
        DCPOMATIC_ASSERT (_frame->channels <= channels);
        /* Sometimes there aren't as many channels in the _frame as in the stream */
        for (int i = 0; i < _frame->channels; ++i) {
            memcpy (data[i], p[i], frames * sizeof(float));
        }
        for (int i = _frame->channels; i < channels; ++i) {
            audio->make_silent (i);
        }
    }
    break;

    default:
        throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
    }

    return audio;
}
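
/** @return the AVSampleFormat of the data in a given audio stream */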
AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
    return stream->stream (_format_context)->codec->sample_fmt;
}

int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
    return av_get_bytes_per_sample (audio_sample_format (stream));
}

void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
    Decoder::seek (time, accurate);

    /* If we are doing an `accurate' seek, we need to use pre-roll, as
       we don't really know what the seek will give us.
    */

    ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
    time -= pre_roll;

    /* XXX: it seems debatable whether PTS should be used here...
       http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
    */
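
    /* av_seek_frame takes a timestamp in the chosen stream's time base, so below we
       convert seconds with ts = seconds / av_q2d (time_base): for example, a 3s seek
       in a stream with a 1/90000 time base becomes a timestamp of 270000.
    */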
    optional<int> stream;

    if (_video_stream) {
        stream = _video_stream;
    } else {
        DCPOMATIC_ASSERT (_ffmpeg_content->audio);
        shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
        if (s) {
            stream = s->index (_format_context);
        }
    }

    DCPOMATIC_ASSERT (stream);

    ContentTime u = time - _pts_offset;
    if (u < ContentTime ()) {
        u = ContentTime ();
    }

    av_seek_frame (
        _format_context,
        stream.get(),
        u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
        AVSEEK_FLAG_BACKWARD
        );

    /* Force re-creation of filter graphs to reset them and hence to make sure
       they don't have any pre-seek frames knocking about.
    */
    boost::mutex::scoped_lock lm (_filter_graphs_mutex);
    _filter_graphs.clear ();

    if (video_codec_context ()) {
        avcodec_flush_buffers (video_codec_context());
    }

    BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
        avcodec_flush_buffers (i->stream(_format_context)->codec);
    }

    if (subtitle_codec_context ()) {
        avcodec_flush_buffers (subtitle_codec_context ());
    }

    _have_current_subtitle = false;
}

void
FFmpegDecoder::decode_audio_packet ()
{
    /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
       several times.
    */

    AVPacket copy_packet = _packet;
    int const stream_index = copy_packet.stream_index;

    /* XXX: inefficient */
    vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
    vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
    while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
        ++stream;
    }

    if (stream == streams.end ()) {
        /* The packet's stream may not be an audio one; just ignore it in this method if so */
        return;
    }

    while (copy_packet.size > 0) {

        int frame_finished;
        int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
        if (decode_result < 0) {
            /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
               some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
               if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
               even in the face of such an error, so I think we should too.

               Returning from the method here caused mantis #352.
            */
            LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

            /* Fudge decode_result so that we come out of the while loop when
               we've processed this data.
            */
            decode_result = copy_packet.size;
        }

        if (frame_finished) {
            shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);

            ContentTime ct;
            if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
                /* In some streams we see not every frame coming through with a timestamp; for those
                   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
                   particularly noticeable with TrueHD streams (see #1111).
                */
                ct = *_next_time[stream_index];
            } else {
                ct = ContentTime::from_seconds (
                    av_frame_get_best_effort_timestamp (_frame) *
                    av_q2d ((*stream)->stream (_format_context)->time_base))
                    + _pts_offset;
            }

            _next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());
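            /* i.e. the next frame should arrive data->frames() samples later; for example,
               1536 samples at 48kHz advances the expected timestamp by 32ms.
            */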

            if (ct < ContentTime ()) {
                /* Discard audio data that comes before time 0 */
                Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
                data->move (data->frames() - remove, remove, 0);
                data->set_frames (data->frames() - remove);
                ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
            }

            if (ct < ContentTime()) {
                LOG_WARNING (
                    "Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
                    to_string(ct),
                    data->frames(),
                    copy_packet.stream_index,
                    copy_packet.pts,
                    av_frame_get_best_effort_timestamp(_frame),
                    av_q2d((*stream)->stream(_format_context)->time_base),
                    to_string(_pts_offset)
                    );
            }

            /* Give this data provided there is some, and its time is sane */
            if (ct >= ContentTime() && data->frames() > 0) {
                audio->emit (film(), *stream, data, ct);
            }
        }

        copy_packet.data += decode_result;
        copy_packet.size -= decode_result;
    }
}
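
/** @return true if a complete video frame was decoded and passed on, otherwise false */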
bool
FFmpegDecoder::decode_video_packet ()
{
    DCPOMATIC_ASSERT (_video_stream);

    int frame_finished;
    if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
        return false;
    }

    boost::mutex::scoped_lock lm (_filter_graphs_mutex);

    shared_ptr<VideoFilterGraph> graph;

    list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
    while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
        ++i;
    }

    if (i == _filter_graphs.end ()) {
        dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
        graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
        graph->setup (_ffmpeg_content->filters ());
        _filter_graphs.push_back (graph);
        LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
    } else {
        graph = *i;
    }

    list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

    for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

        shared_ptr<Image> image = i->first;

        if (i->second != AV_NOPTS_VALUE) {
            double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();

            video->emit (
                film(),
                shared_ptr<ImageProxy> (new RawImageProxy (image)),
                llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
                );
        } else {
            LOG_WARNING_NC ("Dropping frame without PTS");
        }
    }

    return true;
}
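
/* Subtitles are emitted as a `start' event carrying the subtitle content; the
   corresponding `stop' is emitted either at the subtitle's known end time or
   when the next subtitle begins, whichever comes first.
*/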
void
FFmpegDecoder::decode_subtitle_packet ()
{
    int got_subtitle;
    AVSubtitle sub;
    if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
        return;
    }

    /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
    if (_have_current_subtitle) {
        if (_current_subtitle_to) {
            only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
        } else {
            only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
        }
        _have_current_subtitle = false;
    }

    if (sub.num_rects <= 0) {
        /* Nothing new in this subtitle */
        avsubtitle_free (&sub);
        return;
    }

    /* Subtitle PTS (within the source, not taking into account any of the
       source that we may have chopped off for the DCP).
    */
    FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
    ContentTime from;
    from = sub_period.from + _pts_offset;
    if (sub_period.to) {
        /* The end time is known, so a stop can be emitted at the end of this method */
        _current_subtitle_to = *sub_period.to + _pts_offset;
    } else {
        /* The end time is not yet known, so this subtitle must be stopped when the next one arrives */
        _current_subtitle_to = optional<ContentTime>();
        _have_current_subtitle = true;
    }

    for (unsigned int i = 0; i < sub.num_rects; ++i) {
        AVSubtitleRect const * rect = sub.rects[i];

        switch (rect->type) {
        case SUBTITLE_NONE:
            break;
        case SUBTITLE_BITMAP:
            decode_bitmap_subtitle (rect, from);
            break;
        case SUBTITLE_TEXT:
            cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
            break;
        case SUBTITLE_ASS:
            decode_ass_subtitle (rect->ass, from);
            break;
        }
    }

    if (_current_subtitle_to) {
        only_text()->emit_stop (*_current_subtitle_to);
    }

    avsubtitle_free (&sub);
}
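
/* Decode a bitmap subtitle rectangle: rect->data[0] (or rect->pict.data[0] with
   older FFmpeg) holds one byte per pixel, each byte an index into a BGRA palette.
*/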
void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
    /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
       G, third R, fourth A.
    */
    shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
    /* Start of the first line in the subtitle */
    uint8_t* sub_p = rect->pict.data[0];
    /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
       (i.e. first byte B, second G, third R, fourth A)
    */
    uint8_t const * palette = rect->pict.data[1];
#else
    /* Start of the first line in the subtitle */
    uint8_t* sub_p = rect->data[0];
    /* sub_p looks up into a BGRA palette which is at rect->data[1].
       (first byte B, second G, third R, fourth A)
    */
    uint8_t const * palette = rect->data[1];
#endif
    /* And the stream has a map of those palette colours to colours
       chosen by the user; create a `mapped' palette from those settings.
    */
    map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
    vector<RGBA> mapped_palette (rect->nb_colors);
    for (int i = 0; i < rect->nb_colors; ++i) {
        RGBA c (palette[2], palette[1], palette[0], palette[3]);
        map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
        if (j != colour_map.end ()) {
            mapped_palette[i] = j->second;
        } else {
            /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
               it is from a project that was created before this stuff was added.  Just use the
               colour straight from the original palette.
            */
            mapped_palette[i] = c;
        }
        palette += 4;
    }

    /* Start of the output data */
    uint8_t* out_p = image->data()[0];

    for (int y = 0; y < rect->h; ++y) {
        uint8_t* sub_line_p = sub_p;
        uint8_t* out_line_p = out_p;
        for (int x = 0; x < rect->w; ++x) {
            RGBA const p = mapped_palette[*sub_line_p++];
            *out_line_p++ = p.b;
            *out_line_p++ = p.g;
            *out_line_p++ = p.r;
            *out_line_p++ = p.a;
        }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        sub_p += rect->pict.linesize[0];
#else
        sub_p += rect->linesize[0];
#endif
        out_p += image->stride()[0];
    }

    int target_width = subtitle_codec_context()->width;
    if (target_width == 0 && video_codec_context()) {
        /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
           know if it's supposed to mean something from FFmpeg's point of view.
        */
        target_width = video_codec_context()->width;
    }
    int target_height = subtitle_codec_context()->height;
    if (target_height == 0 && video_codec_context()) {
        target_height = video_codec_context()->height;
    }
    DCPOMATIC_ASSERT (target_width);
    DCPOMATIC_ASSERT (target_height);

    /* Express the subtitle's position and size as proportions of the video size */
    dcpomatic::Rect<double> const scaled_rect (
        static_cast<double> (rect->x) / target_width,
        static_cast<double> (rect->y) / target_height,
        static_cast<double> (rect->w) / target_width,
        static_cast<double> (rect->h) / target_height
        );

    only_text()->emit_bitmap_start (from, image, scaled_rect);
}

void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
{
    /* We have no styles and no Format: line, so I'm assuming that FFmpeg
       produces a single format of Dialogue: lines...
    */
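
    /* For example (assuming FFmpeg passes a whole Dialogue: line through), we
       might be given

           Dialogue: 0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello world

       There are nine commas before the subtitle text itself, so below we take
       the text to be everything after the ninth comma.
    */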

    int commas = 0;
    string text;
    for (size_t i = 0; i < ass.length(); ++i) {
        if (commas < 9 && ass[i] == ',') {
            ++commas;
        } else if (commas == 9) {
            text += ass[i];
        }
    }

    if (text.empty ()) {
        return;
    }

    sub::RawSubtitle base;
    list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
        base,
        text,
        _ffmpeg_content->video->size().width,
        _ffmpeg_content->video->size().height
        );

    BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
        only_text()->emit_plain_start (from, i);
    }
}