/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */
#include "exceptions.h"
#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/algorithm/string.hpp>
#include <iostream>

#include "i18n.h"
using std::cout;
using std::make_shared;
using std::min;
using std::shared_ptr;
using std::string;
using std::vector;
using std::dynamic_pointer_cast;
using boost::is_any_of;
using boost::optional;

using namespace dcpomatic;
FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c)
	: FFmpeg (c)
	, Decoder (film)
{
	if (c->video && c->video->use()) {
		video = make_shared<VideoDecoder>(this, c);
		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
		/* It doesn't matter what size or pixel format this is, it just needs to be black */
		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (128, 128), true);
		_black_image->make_black ();
	}

	if (c->audio) {
		audio = make_shared<AudioDecoder>(this, c->audio);
	}

	if (c->only_text()) {
		/* XXX: this time here should be the time of the first subtitle, not 0 */
		text.push_back (make_shared<TextDecoder>(this, c->only_text(), ContentTime()));
	}

	for (auto i: c->ffmpeg_audio_streams()) {
		_next_time[i] = boost::optional<dcpomatic::ContentTime>();
	}
}
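/* A note on the decode paths below: with FFmpeg's send/receive API, passing a
 * null packet to avcodec_send_packet() puts a codec into "draining" mode;
 * after that, avcodec_receive_frame() hands back any frames still buffered
 * inside the codec until it returns AVERROR_EOF.  flush() relies on this to
 * get the last few frames out before the streams are padded to length.
 */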
bool
FFmpegDecoder::flush ()
{
	/* Flush video and audio once */

	bool did_something = false;
	if (video) {
		if (decode_and_process_video_packet(nullptr)) {
			did_something = true;
		}
	}

	for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
		auto context = _codec_context[i->index(_format_context)];
		int r = avcodec_send_packet (context, nullptr);
		if (r < 0 && r != AVERROR_EOF) {
			/* EOF can happen if we've already sent a flush packet */
			throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::flush"), r);
		}
		r = avcodec_receive_frame (context, _frame);
		if (r >= 0) {
			process_audio_frame (i);
			did_something = true;
		}
	}

	if (did_something) {
		/* We want to be called again */
		return false;
	}

	/* Make sure all streams are the same length and round up to the next video frame */

	auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
	ContentTime full_length (_ffmpeg_content->full_length_dcp(film()), frc);
	full_length = full_length.ceil (frc.source);
	if (video) {
		double const vfr = _ffmpeg_content->video_frame_rate().get();
		auto const f = full_length.frames_round (vfr);
		auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
		while (v < f) {
			video->emit (film(), make_shared<const RawImageProxy>(_black_image), v);
			++v;
		}
	}

	for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
		auto a = audio->stream_position(film(), i);
		/* Unfortunately if a is 0 that really means that we don't know the stream position since
		   there has been no data on it since the last seek.  In this case we'll just do nothing
		   here.  I'm not sure if that's the right idea.
		*/
		if (a > ContentTime()) {
			while (a < full_length) {
				auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
				auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
				silence->make_silent ();
				audio->emit (i, silence, a, true);
				a += to_do;
			}
		}
	}

	return true;
}
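/* One pass of the decoder: read a single packet from the container and hand
 * it to the video, audio or subtitle path as appropriate.  At end-of-file we
 * fall through to flush(), whose return value tells the caller whether to
 * keep calling us.
 */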
bool
FFmpegDecoder::do_pass ()
{
	auto packet = av_packet_alloc();
	DCPOMATIC_ASSERT (packet);

	int r = av_read_frame (_format_context, packet);

	/* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
	   has pretty-much succeeded (and hence generated data which should be processed).
	   Hence it makes sense to continue here in that case.
	*/
	if (r < 0 && r != AVERROR_INVALIDDATA) {
		if (r != AVERROR_EOF) {
			/* Maybe we should fail here, but for now we'll just finish off instead */
			char buf[256];
			av_strerror (r, buf, sizeof(buf));
			LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
		}

		av_packet_free (&packet);
		return flush ();
	}

	int const si = packet->stream_index;
	auto fc = _ffmpeg_content;

	if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
		decode_and_process_video_packet (packet);
	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
		decode_and_process_subtitle_packet (packet);
	} else {
		decode_and_process_audio_packet (packet);
	}

	av_packet_free (&packet);
	return false;
}
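/* A layout reminder for deinterleave_audio() below: interleaved formats such
 * as AV_SAMPLE_FMT_S16 carry samples as L R L R ... in frame->data[0], while
 * planar formats such as AV_SAMPLE_FMT_S16P carry one channel per plane
 * (frame->data[0] = L L L ..., frame->data[1] = R R R ...).  Integer samples
 * are scaled by their full-scale value to give floats in [-1, 1).
 */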
/** @param frame AVFrame whose data[] is an array of pointers to sample buffers;
 *  only the first buffer is used for non-planar (interleaved) data, otherwise
 *  there is one buffer per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (AVFrame* frame)
{
	auto format = static_cast<AVSampleFormat>(frame->format);

	/* XXX: can't we use swr_convert() to do the format conversion? */

	int const channels = frame->channels;
	int const frames = frame->nb_samples;
	int const total_samples = frames * channels;
	auto audio = make_shared<AudioBuffers>(channels, frames);
	auto data = audio->data();

	switch (format) {
	case AV_SAMPLE_FMT_U8:
	{
		auto p = reinterpret_cast<uint8_t *> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			/* U8 samples are unsigned with a bias of 128; recentre and scale to [-1, 1) */
			data[channel][sample] = (static_cast<float>(*p++) - 128) / (1 << 7);
			if (++channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S16:
	{
		auto p = reinterpret_cast<int16_t *> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = float(*p++) / (1 << 15);
			if (++channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S16P:
	{
		auto p = reinterpret_cast<int16_t **> (frame->data);
		for (int i = 0; i < channels; ++i) {
			for (int j = 0; j < frames; ++j) {
				data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S32:
	{
		auto p = reinterpret_cast<int32_t *> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = static_cast<float>(*p++) / 2147483648;
			if (++channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S32P:
	{
		auto p = reinterpret_cast<int32_t **> (frame->data);
		for (int i = 0; i < channels; ++i) {
			for (int j = 0; j < frames; ++j) {
				data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_FLT:
	{
		auto p = reinterpret_cast<float*> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = *p++;
			if (++channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_FLTP:
	{
		auto p = reinterpret_cast<float**> (frame->data);
		DCPOMATIC_ASSERT (frame->channels <= channels);
		/* Sometimes there aren't as many channels in the frame as in the stream */
		for (int i = 0; i < frame->channels; ++i) {
			memcpy (data[i], p[i], frames * sizeof(float));
		}
		for (int i = frame->channels; i < channels; ++i) {
			audio->make_silent (i);
		}
	}
	break;

	default:
		throw DecodeError (String::compose(_("Unrecognised audio sample format (%1)"), static_cast<int>(format)));
	}

	return audio;
}
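/* Re the XXX above: libswresample could indeed do these conversions in one
 * call.  A rough, untested sketch of the idea (not what this code does;
 * it assumes AudioBuffers::data() can be cast to the plane array that
 * swr_convert() expects):
 *
 *	SwrContext* swr = swr_alloc_set_opts (
 *		nullptr,
 *		av_get_default_channel_layout(channels), AV_SAMPLE_FMT_FLTP, frame->sample_rate,
 *		av_get_default_channel_layout(channels), format, frame->sample_rate,
 *		0, nullptr
 *		);
 *	swr_init (swr);
 *	swr_convert (
 *		swr,
 *		reinterpret_cast<uint8_t**>(audio->data()), frames,
 *		const_cast<const uint8_t**>(frame->data), frames
 *		);
 *	swr_free (&swr);
 *
 * It would need <libswresample/swresample.h> and explicit handling of the
 * frame-has-fewer-channels-than-stream case that the FLTP branch deals with.
 */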
AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
	return static_cast<AVSampleFormat>(stream->stream(_format_context)->codecpar->format);
}
int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
	return av_get_bytes_per_sample (audio_sample_format (stream));
}
void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
	Decoder::seek (time, accurate);

	/* If we are doing an `accurate' seek, we need to use pre-roll, as
	   we don't really know what the seek will give us.
	*/

	auto pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
	time -= pre_roll;

	/* XXX: it seems debatable whether PTS should be used here...
	   http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
	*/

	optional<int> stream;

	if (_video_stream) {
		stream = _video_stream;
	} else {
		DCPOMATIC_ASSERT (_ffmpeg_content->audio);
		auto s = dynamic_pointer_cast<FFmpegAudioStream>(_ffmpeg_content->audio->stream());
		if (s) {
			stream = s->index (_format_context);
		}
	}

	DCPOMATIC_ASSERT (stream);

	auto u = time - _pts_offset;
	if (u < ContentTime ()) {
		u = ContentTime ();
	}
	av_seek_frame (
		_format_context,
		stream.get(),
		u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
		AVSEEK_FLAG_BACKWARD
		);

	{
		/* Force re-creation of filter graphs to reset them and hence to make sure
		   they don't have any pre-seek frames knocking about.
		*/
		boost::mutex::scoped_lock lm (_filter_graphs_mutex);
		_filter_graphs.clear ();
	}

	if (video_codec_context ()) {
		avcodec_flush_buffers (video_codec_context());
	}

	for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
		avcodec_flush_buffers (_codec_context[i->index(_format_context)]);
	}

	if (subtitle_codec_context ()) {
		avcodec_flush_buffers (subtitle_codec_context ());
	}

	_have_current_subtitle = false;

	for (auto& i: _next_time) {
		i.second = boost::optional<dcpomatic::ContentTime>();
	}
}
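/* Note on the av_seek_frame() call above: AVSEEK_FLAG_BACKWARD asks FFmpeg
 * for the nearest seek point at or before the requested timestamp, which is
 * why accurate seeks apply the 2-second pre-roll: we land on an earlier
 * keyframe and decode forward, and content before the requested time is
 * discarded downstream.
 */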
shared_ptr<FFmpegAudioStream>
FFmpegDecoder::audio_stream_from_index (int index) const
{
	/* XXX: inefficient */
	auto streams = ffmpeg_content()->ffmpeg_audio_streams();
	auto stream = streams.begin();
	while (stream != streams.end() && !(*stream)->uses_index(_format_context, index)) {
		++stream;
	}

	if (stream == streams.end ()) {
		return {};
	}

	return *stream;
}
void
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
	auto data = deinterleave_audio (_frame);

	ContentTime ct;
	if (_frame->pts == AV_NOPTS_VALUE) {
		/* In some streams we see not every frame coming through with a timestamp; for those
		   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
		   particularly noticeable with TrueHD streams (see #1111).
		*/
		if (_next_time[stream]) {
			ct = *_next_time[stream];
		}
	} else {
		ct = ContentTime::from_seconds (
			_frame->best_effort_timestamp *
			av_q2d (stream->stream(_format_context)->time_base))
			+ _pts_offset;
	}

	_next_time[stream] = ct + ContentTime::from_frames(data->frames(), stream->frame_rate());

	if (ct < ContentTime()) {
		/* Discard audio data that comes before time 0 */
		auto const remove = min (int64_t(data->frames()), (-ct).frames_ceil(double(stream->frame_rate())));
		data->move (data->frames() - remove, remove, 0);
		data->set_frames (data->frames() - remove);
		ct += ContentTime::from_frames (remove, stream->frame_rate());
	}

	if (ct < ContentTime()) {
		LOG_WARNING (
			"Crazy timestamp %1 for %2 samples in stream %3 (ts=%4 tb=%5, off=%6)",
			to_string(ct),
			data->frames(),
			stream->name,
			_frame->best_effort_timestamp,
			av_q2d(stream->stream(_format_context)->time_base),
			to_string(_pts_offset)
			);
	}

	/* Give this data provided there is some, and its time is sane */
	if (ct >= ContentTime() && data->frames() > 0) {
		audio->emit (stream, data, ct);
	}
}
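/* _next_time is the bookkeeping that makes the AV_NOPTS_VALUE fallback above
 * work: every time we emit audio we record where the next frame should start,
 * so a timestamp-less frame can be placed contiguously after its predecessor.
 */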
void
FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
{
	auto stream = audio_stream_from_index (packet->stream_index);
	if (!stream) {
		return;
	}

	auto context = _codec_context[stream->index(_format_context)];

	int r = avcodec_send_packet (context, packet);
	if (r < 0) {
		/* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
		 * Likewise I think AVERROR_EOF should not happen.
		 */
		throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::decode_and_process_audio_packet"), r);
	}

	while (true) {
		r = avcodec_receive_frame (context, _frame);
		if (r == AVERROR(EAGAIN)) {
			/* More input is required */
			return;
		}

		/* We choose to be relaxed here about other errors; it seems that there may be valid
		 * data to decode even if an error occurred.  #352 may be related (though this was
		 * when we were using an old version of the FFmpeg API).
		 */
		process_audio_frame (stream);
	}
}
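/* Note the asymmetry between audio and video: one audio packet may decode to
 * several frames, hence the receive loop above, whereas
 * decode_and_process_video_packet() below assumes at most one frame per
 * packet.
 */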
bool
FFmpegDecoder::decode_and_process_video_packet (AVPacket* packet)
{
	DCPOMATIC_ASSERT (_video_stream);

	auto context = video_codec_context();

	int r = avcodec_send_packet (context, packet);
	if (r < 0 && !(r == AVERROR_EOF && !packet)) {
		/* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
		 * AVERROR_EOF can happen during flush if we've already sent a flush packet.
		 */
		throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::decode_and_process_video_packet"), r);
	}

	r = avcodec_receive_frame (context, _frame);
	if (r == AVERROR(EAGAIN) || r == AVERROR_EOF) {
		/* More input is required, or no more frames are coming */
		return false;
	}

	/* We assume we'll only get one frame here, which I think is safe */

	boost::mutex::scoped_lock lm (_filter_graphs_mutex);

	shared_ptr<VideoFilterGraph> graph;

	auto i = _filter_graphs.begin();
	while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
		++i;
	}

	if (i == _filter_graphs.end ()) {
		dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
		graph = make_shared<VideoFilterGraph>(dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr);
		graph->setup (_ffmpeg_content->filters ());
		_filter_graphs.push_back (graph);
		LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
	} else {
		graph = *i;
	}

	auto images = graph->process (_frame);

	for (auto const& i: images) {
		auto image = i.first;

		if (i.second != AV_NOPTS_VALUE) {
			double const pts = i.second * av_q2d(_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds();

			video->emit (
				film(),
				make_shared<RawImageProxy>(image),
				llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
				);
		} else {
			LOG_WARNING_NC ("Dropping frame without PTS");
		}
	}

	return true;
}
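/* The filter graph cache above exists because a stream can change size or
 * pixel format mid-file; each (size, pixel format) combination gets its own
 * graph, and seek() clears the cache so that no pre-seek frames linger.
 */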
void
FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet)
{
	int got_subtitle;
	AVSubtitle sub;
	if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
		return;
	}

	/* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
	if (_have_current_subtitle) {
		if (_current_subtitle_to) {
			only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
		} else {
			only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
		}
		_have_current_subtitle = false;
	}

	if (sub.num_rects <= 0) {
		/* Nothing new in this subtitle */
		avsubtitle_free (&sub);
		return;
	}

	/* Subtitle PTS (within the source, not taking into account any of the
	   source that we may have chopped off for the DCP).
	*/
	auto sub_period = subtitle_period (sub);
	ContentTime from;
	from = sub_period.from + _pts_offset;
	if (sub_period.to) {
		/* We already know when this subtitle stops */
		_current_subtitle_to = *sub_period.to + _pts_offset;
	} else {
		/* We must look at the next subtitle to work out when this one stops */
		_current_subtitle_to = optional<ContentTime>();
		_have_current_subtitle = true;
	}

	for (unsigned int i = 0; i < sub.num_rects; ++i) {
		auto const rect = sub.rects[i];

		switch (rect->type) {
		case SUBTITLE_NONE:
			break;
		case SUBTITLE_BITMAP:
			process_bitmap_subtitle (rect, from);
			break;
		case SUBTITLE_TEXT:
			cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
			break;
		case SUBTITLE_ASS:
			process_ass_subtitle (rect->ass, from);
			break;
		}
	}

	if (_current_subtitle_to) {
		only_text()->emit_stop (*_current_subtitle_to);
	}

	avsubtitle_free (&sub);
}
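/* The AVSubtitle and everything hanging off its rects is allocated by
 * avcodec_decode_subtitle2(), so once a subtitle has been returned it must be
 * released with avsubtitle_free(), as both exit paths above do.
 */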
void
FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
	/* Note BGRA is expressed little-endian, so the first byte in the word is B, second
	   G, third R, fourth A.
	*/
	auto image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true);

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
	/* Start of the first line in the subtitle */
	auto sub_p = rect->pict.data[0];
	/* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
	   (i.e. first byte B, second G, third R, fourth A)
	*/
	auto const palette = rect->pict.data[1];
#else
	/* Start of the first line in the subtitle */
	auto sub_p = rect->data[0];
	/* sub_p looks up into a BGRA palette which is at rect->data[1].
	   (first byte B, second G, third R, fourth A)
	*/
	auto const* palette = rect->data[1];
#endif
	/* And the stream has a map of those palette colours to colours
	   chosen by the user; create a `mapped' palette from those settings.
	*/
	auto colour_map = ffmpeg_content()->subtitle_stream()->colours();
	vector<RGBA> mapped_palette (rect->nb_colors);
	for (int i = 0; i < rect->nb_colors; ++i) {
		RGBA c (palette[2], palette[1], palette[0], palette[3]);
		auto j = colour_map.find (c);
		if (j != colour_map.end ()) {
			mapped_palette[i] = j->second;
		} else {
			/* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
			   it is from a project that was created before this stuff was added.  Just use the
			   colour straight from the original palette.
			*/
			mapped_palette[i] = c;
		}
		palette += 4;
	}

	/* Start of the output data */
	auto out_p = image->data()[0];

	for (int y = 0; y < rect->h; ++y) {
		auto sub_line_p = sub_p;
		auto out_line_p = out_p;
		for (int x = 0; x < rect->w; ++x) {
			auto const p = mapped_palette[*sub_line_p++];
			*out_line_p++ = p.b;
			*out_line_p++ = p.g;
			*out_line_p++ = p.r;
			*out_line_p++ = p.a;
		}
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
		sub_p += rect->pict.linesize[0];
#else
		sub_p += rect->linesize[0];
#endif
		out_p += image->stride()[0];
	}

	int target_width = subtitle_codec_context()->width;
	if (target_width == 0 && video_codec_context()) {
		/* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
		   know if it's supposed to mean something from FFmpeg's point of view.
		*/
		target_width = video_codec_context()->width;
	}
	int target_height = subtitle_codec_context()->height;
	if (target_height == 0 && video_codec_context()) {
		target_height = video_codec_context()->height;
	}
	DCPOMATIC_ASSERT (target_width);
	DCPOMATIC_ASSERT (target_height);
	dcpomatic::Rect<double> const scaled_rect (
		static_cast<double>(rect->x) / target_width,
		static_cast<double>(rect->y) / target_height,
		static_cast<double>(rect->w) / target_width,
		static_cast<double>(rect->h) / target_height
		);

	only_text()->emit_bitmap_start (from, image, scaled_rect);
}
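/* scaled_rect above expresses the bitmap's position and size as proportions
 * of the target frame rather than in pixels, so the subtitle can be rendered
 * correctly whatever resolution the final DCP uses.
 */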
void
FFmpegDecoder::process_ass_subtitle (string ass, ContentTime from)
{
	/* We have no styles and no Format: line, so I'm assuming that FFmpeg
	   produces a single format of Dialogue: lines...
	*/

	/* The subtitle text is everything after the ninth comma of the Dialogue:
	   line, so skip nine commas and keep the rest.
	*/
	int commas = 0;
	string text;
	for (size_t i = 0; i < ass.length(); ++i) {
		if (commas < 9 && ass[i] == ',') {
			++commas;
		} else if (commas == 9) {
			text += ass[i];
		}
	}

	if (text.empty ()) {
		return;
	}

	sub::RawSubtitle base;
	auto raw = sub::SSAReader::parse_line (
		base,
		text,
		_ffmpeg_content->video->size().width,
		_ffmpeg_content->video->size().height
		);

	for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {
		only_text()->emit_plain_start (from, i);
	}
}