/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/


/** @file  src/lib/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */


#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "dcpomatic_log.h"
#include "exceptions.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_content.h"
#include "ffmpeg_decoder.h"
#include "ffmpeg_subtitle_stream.h"
#include "film.h"
#include "filter.h"
#include "frame_interval_checker.h"
#include "image.h"
#include "log.h"
#include "raw_image_proxy.h"
#include "text_content.h"
#include "text_decoder.h"
#include "util.h"
#include "video_decoder.h"
#include "video_filter_graph.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/algorithm/string.hpp>
#include <iomanip>
#include <iostream>
#include <vector>
#include <stdint.h>

#include "i18n.h"


using std::cout;
using std::dynamic_pointer_cast;
using std::make_shared;
using std::min;
using std::shared_ptr;
using std::string;
using std::vector;
using boost::optional;
using dcp::Size;
using namespace dcpomatic;


FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
        : FFmpeg (c)
        , Decoder (film)
        , _filter_graphs(c->filters(), dcp::Fraction(lrint(_ffmpeg_content->video_frame_rate().get_value_or(24) * 1000), 1000))
{
        if (c->video && c->video->use()) {
                video = make_shared<VideoDecoder>(this, c);
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
                /* It doesn't matter what size or pixel format this is, it just needs to be black */
                _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (128, 128), Image::Alignment::PADDED);
                _black_image->make_black ();
        } else {
                _pts_offset = {};
        }

        if (c->audio) {
                audio = make_shared<AudioDecoder>(this, c->audio, fast);
        }

        if (c->only_text()) {
                text.push_back (make_shared<TextDecoder>(this, c->only_text()));
                /* XXX: we should be calling maybe_set_position() on this TextDecoder, but we can't easily find
                 * the time of the first subtitle at this point.
                 */
        }

        for (auto i: c->ffmpeg_audio_streams()) {
                _next_time[i] = boost::optional<dcpomatic::ContentTime>();
        }
}


FFmpegDecoder::FlushResult
FFmpegDecoder::flush ()
{
        LOG_DEBUG_PLAYER("Flush FFmpeg decoder: current state %1", static_cast<int>(_flush_state));

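        /* The flush is a little state machine: we move through CODECS, then
           AUDIO_DECODER, then FILL, doing a bit of work on each call and
           returning AGAIN until the FILL stage reports DONE.
        */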
        switch (_flush_state) {
        case FlushState::CODECS:
                if (flush_codecs() == FlushResult::DONE) {
                        LOG_DEBUG_PLAYER_NC("Finished flushing codecs");
                        _flush_state = FlushState::AUDIO_DECODER;
                }
                break;
        case FlushState::AUDIO_DECODER:
                if (audio) {
                        audio->flush();
                }
                LOG_DEBUG_PLAYER_NC("Finished flushing audio decoder");
                _flush_state = FlushState::FILL;
                break;
        case FlushState::FILL:
                if (flush_fill() == FlushResult::DONE) {
                        LOG_DEBUG_PLAYER_NC("Finished flushing fills");
                        return FlushResult::DONE;
                }
                break;
        }

        return FlushResult::AGAIN;
}


/** @return DONE if the codecs have been fully flushed, otherwise AGAIN */
FFmpegDecoder::FlushResult
FFmpegDecoder::flush_codecs()
{
        bool did_something = false;
        if (video) {
                if (decode_and_process_video_packet(nullptr)) {
                        did_something = true;
                }
        }

        for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
                auto context = _codec_context[i->index(_format_context)];
                int r = avcodec_send_packet (context, nullptr);
                if (r < 0 && r != AVERROR_EOF) {
                        /* EOF can happen if we've already sent a flush packet */
                        throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::flush"), r);
                }
                r = avcodec_receive_frame (context, audio_frame(i));
                if (r >= 0) {
                        process_audio_frame (i);
                        did_something = true;
                }
        }

        return did_something ? FlushResult::AGAIN : FlushResult::DONE;
}


FFmpegDecoder::FlushResult
FFmpegDecoder::flush_fill()
{
        /* Make sure all streams are the same length and round up to the next video frame */

        bool did_something = false;

        auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
        ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
        full_length = full_length.ceil (frc.source);
        if (video && !video->ignore()) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
                auto const v = video->position(film()).get_value_or(ContentTime()) + ContentTime::from_frames(1, vfr);
                if (v < full_length) {
                        video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
                        did_something = true;
                }
        }

        if (audio && !audio->ignore()) {
                for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
                        auto const a = audio->stream_position(film(), i);
                        /* Unfortunately if a is 0 that really means that we don't know the stream position since
                           there has been no data on it since the last seek.  In this case we'll just do nothing
                           here.  I'm not sure if that's the right idea.
                        */
                        if (a > ContentTime() && a < full_length) {
                                LOG_DEBUG_PLAYER("Flush inserts silence at %1", to_string(a));
                                auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
                                auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
                                silence->make_silent ();
                                audio->emit (film(), i, silence, a, true);
                                did_something = true;
                        }
                }
        }

        return did_something ? FlushResult::AGAIN : FlushResult::DONE;
}


bool
FFmpegDecoder::pass ()
{
        auto packet = av_packet_alloc();
        DCPOMATIC_ASSERT (packet);

        int r = av_read_frame (_format_context, packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty-much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                LOG_DEBUG_PLAYER("FFmpegDecoder::pass flushes because av_read_frame returned %1", r);
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
                }

                av_packet_free (&packet);
                return flush() == FlushResult::DONE;
        }

        int const si = packet->stream_index;
        auto fc = _ffmpeg_content;

        if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
                decode_and_process_video_packet (packet);
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
                decode_and_process_subtitle_packet (packet);
        } else {
                decode_and_process_audio_packet (packet);
        }

        av_packet_free (&packet);
        return false;
}


/** @param frame Frame containing the audio data.
 *  Only the first data buffer is used for non-planar (interleaved) formats; planar formats have one buffer per channel.
 */
static
shared_ptr<AudioBuffers>
deinterleave_audio(AVFrame* frame)
{
        auto format = static_cast<AVSampleFormat>(frame->format);

        /* XXX: can't we use swr_convert() to do the format conversion? */
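        /* A rough, untested sketch of how that might look with the FFmpeg 5.1+
           API (it would need a long-lived SwrContext rather than one per call,
           error checking, and #include <libswresample/swresample.h>):

               SwrContext* swr = nullptr;
               swr_alloc_set_opts2(
                       &swr,
                       &frame->ch_layout, AV_SAMPLE_FMT_FLTP, frame->sample_rate,
                       &frame->ch_layout, static_cast<AVSampleFormat>(frame->format), frame->sample_rate,
                       0, nullptr
                       );
               swr_init(swr);
               swr_convert(swr, reinterpret_cast<uint8_t**>(audio->data()), frame->nb_samples, frame->data, frame->nb_samples);
               swr_free(&swr);

           i.e. convert whatever we were given to planar float, which is what
           the AudioBuffers created below hold.
        */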

        int const channels = frame->ch_layout.nb_channels;
        int const frames = frame->nb_samples;
        int const total_samples = frames * channels;
        auto audio = make_shared<AudioBuffers>(channels, frames);
        auto data = audio->data();

        if (frames == 0) {
                return audio;
        }

        switch (format) {
        case AV_SAMPLE_FMT_U8:
        {
                auto p = reinterpret_cast<uint8_t *> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* U8 samples are unsigned, centred on 0x80 */
                        data[channel][sample] = (float(*p++) - 0x80) / 0x80;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                auto p = reinterpret_cast<int16_t *> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                auto p = reinterpret_cast<int16_t **> (frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                auto p = reinterpret_cast<int32_t *> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = static_cast<float>(*p++) / 2147483648;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                auto p = reinterpret_cast<int32_t **> (frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                auto p = reinterpret_cast<float*> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = *p++;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                auto p = reinterpret_cast<float**> (frame->data);
                for (int i = 0; i < channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
        }
        break;

        default:
                throw DecodeError (String::compose(_("Unrecognised audio sample format (%1)"), static_cast<int>(format)));
        }

        return audio;
}


AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
        return static_cast<AVSampleFormat>(stream->stream(_format_context)->codecpar->format);
}


int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}


void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        Decoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        auto pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        optional<int> stream;

        if (_video_stream) {
                stream = _video_stream;
        } else {
                DCPOMATIC_ASSERT (_ffmpeg_content->audio);
                auto s = dynamic_pointer_cast<FFmpegAudioStream>(_ffmpeg_content->audio->stream());
                if (s) {
                        stream = s->index (_format_context);
                }
        }

        DCPOMATIC_ASSERT (stream);

        auto u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
        av_seek_frame (
                _format_context,
                stream.get(),
                u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        /* Force re-creation of filter graphs to reset them and hence to make sure
           they don't have any pre-seek frames knocking about.
        */
        _filter_graphs.clear();

        if (video_codec_context ()) {
                avcodec_flush_buffers (video_codec_context());
        }

        for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
                avcodec_flush_buffers (_codec_context[i->index(_format_context)]);
        }

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }

        _have_current_subtitle = false;

        for (auto& i: _next_time) {
                i.second = boost::optional<dcpomatic::ContentTime>();
        }
}


shared_ptr<FFmpegAudioStream>
FFmpegDecoder::audio_stream_from_index (int index) const
{
        /* XXX: inefficient */
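        /* A std::find_if over the streams below would say the same thing more
           directly, e.g. (sketch; needs <algorithm>):

               auto stream = std::find_if(
                       streams.begin(), streams.end(),
                       [this, index](shared_ptr<FFmpegAudioStream> const& s) {
                               return s->uses_index(_format_context, index);
                       });
        */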
        auto streams = ffmpeg_content()->ffmpeg_audio_streams();
        auto stream = streams.begin();
        while (stream != streams.end() && !(*stream)->uses_index(_format_context, index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                return {};
        }

        return *stream;
}


void
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
        auto frame = audio_frame (stream);
        auto data = deinterleave_audio(frame);

        auto const time_base = stream->stream(_format_context)->time_base;

        ContentTime ct;
        if (frame->pts == AV_NOPTS_VALUE) {
                /* In some streams not every frame comes through with a timestamp; for those
                   with AV_NOPTS_VALUE we work the timestamp out ourselves from the end of the
                   previous frame.  This is particularly noticeable with TrueHD streams (see #1111).
                */
                if (_next_time[stream]) {
                        ct = *_next_time[stream];
                }
        } else {
                ct = ContentTime::from_seconds (
                        frame->best_effort_timestamp *
                        av_q2d(time_base))
                        + _pts_offset;
                LOG_DEBUG_PLAYER(
                        "Process audio with timestamp %1 (BET %2, timebase %3/%4, PTS offset %5)",
                        to_string(ct),
                        frame->best_effort_timestamp,
                        time_base.num,
                        time_base.den,
                        to_string(_pts_offset)
                        );
        }

        _next_time[stream] = ct + ContentTime::from_frames(data->frames(), stream->frame_rate());

        if (ct < ContentTime()) {
                /* Discard audio data that comes before time 0 */
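                /* e.g. if ct is -0.1s in a 48kHz stream, remove is
                   min(data->frames(), 4800) and ct moves forward by that
                   many frames. */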
                auto const remove = min (int64_t(data->frames()), (-ct).frames_ceil(double(stream->frame_rate())));
                data->move (data->frames() - remove, remove, 0);
                data->set_frames (data->frames() - remove);
                ct += ContentTime::from_frames (remove, stream->frame_rate());
        }

        if (ct < ContentTime()) {
                LOG_WARNING (
                        "Crazy timestamp %1 for %2 samples in stream %3 (ts=%4 tb=%5, off=%6)",
                        to_string(ct),
                        data->frames(),
                        stream->id(),
                        frame->best_effort_timestamp,
                        av_q2d(time_base),
                        to_string(_pts_offset)
                        );
        }

        /* Give this data provided there is some, and its time is sane */
        if (ct >= ContentTime() && data->frames() > 0) {
                audio->emit (film(), stream, data, ct);
        }
}


void
FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
{
        auto stream = audio_stream_from_index (packet->stream_index);
        if (!stream) {
                return;
        }

        auto context = _codec_context[stream->index(_format_context)];
        auto frame = audio_frame (stream);

        LOG_DEBUG_PLAYER("Send audio packet on stream %1", stream->index(_format_context));
        int r = avcodec_send_packet (context, packet);
        if (r < 0) {
                LOG_WARNING("avcodec_send_packet returned %1 for an audio packet", r);
        }
        while (r >= 0) {
                r = avcodec_receive_frame (context, frame);
                if (r == AVERROR(EAGAIN)) {
                        /* More input is required */
                        LOG_DEBUG_PLAYER_NC("EAGAIN after trying to receive audio frame");
                        return;
                }

                /* We choose to be relaxed here about other errors; it seems that there may be valid
                 * data to decode even if an error occurred.  #352 may be related (though this was
                 * when we were using an old version of the FFmpeg API).
                 */
                process_audio_frame (stream);
        }
}


bool
FFmpegDecoder::decode_and_process_video_packet (AVPacket* packet)
{
        DCPOMATIC_ASSERT (_video_stream);

        auto context = video_codec_context();

        bool pending = false;
        do {
                int r = avcodec_send_packet (context, packet);
                if (r < 0) {
                        LOG_WARNING("avcodec_send_packet returned %1 for a video packet", r);
                }

                /* EAGAIN means we should call avcodec_receive_frame and then re-send the same packet */
                pending = r == AVERROR(EAGAIN);

                while (true) {
                        r = avcodec_receive_frame (context, _video_frame);
                        if (r == AVERROR(EAGAIN) || r == AVERROR_EOF || (r < 0 && !packet)) {
                                /* More input is required, no more frames are coming, or we are flushing and there was
                                 * some error which we just want to ignore.
                                 */
                                return false;
                        } else if (r < 0) {
                                throw DecodeError (N_("avcodec_receive_frame"), N_("FFmpegDecoder::decode_and_process_video_packet"), r);
                        }

                        process_video_frame ();
                }
        } while (pending);

        return true;
}


void
FFmpegDecoder::process_video_frame ()
{
        auto graph = _filter_graphs.get(dcp::Size(_video_frame->width, _video_frame->height), static_cast<AVPixelFormat>(_video_frame->format));
        auto images = graph->process (_video_frame);

        for (auto const& i: images) {

                auto image = i.first;

                if (i.second != AV_NOPTS_VALUE) {
                        double const pts = i.second * av_q2d(_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds();

                        video->emit (
                                film(),
                                make_shared<RawImageProxy>(image),
                                ContentTime::from_seconds(pts)
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }
}


void
FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet)
{
        auto context = subtitle_codec_context();
        if (!context) {
                return;
        }

        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2(context, &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
                return;
        }

        auto sub_period = subtitle_period (packet, ffmpeg_content()->subtitle_stream()->stream(_format_context), sub);

        /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
        if (_have_current_subtitle) {
                if (_current_subtitle_to) {
                        only_text()->emit_stop (min(*_current_subtitle_to, sub_period.from + _pts_offset));
                } else {
                        only_text()->emit_stop (sub_period.from + _pts_offset);
                }
                _have_current_subtitle = false;
        }

        if (sub.num_rects == 0) {
                /* Nothing new in this subtitle */
                avsubtitle_free (&sub);
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        ContentTime const from = sub_period.from + _pts_offset;
        if (sub_period.to) {
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                _current_subtitle_to = optional<ContentTime>();
                _have_current_subtitle = true;
        }

        ContentBitmapText bitmap_text(from);
        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                auto const rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        bitmap_text.subs.push_back(process_bitmap_subtitle(rect));
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        process_ass_subtitle (rect->ass, from);
                        break;
                }
        }

        if (!bitmap_text.subs.empty()) {
                only_text()->emit_bitmap_start(bitmap_text);
        }

        if (_current_subtitle_to) {
                only_text()->emit_stop (*_current_subtitle_to);
        }

        avsubtitle_free (&sub);
}


BitmapText
FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect)
{
        /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
           G, third R, fourth A.
        */
        auto image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), Image::Alignment::PADDED);

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        auto sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
           (i.e. first byte B, second G, third R, fourth A)
        */
        auto const* palette = rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        auto sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is at rect->data[1].
           (first byte B, second G, third R, fourth A)
        */
        auto const* palette = rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        auto colour_map = ffmpeg_content()->subtitle_stream()->colours();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c (palette[2], palette[1], palette[0], palette[3]);
                auto j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
                palette += 4;
        }

        /* Start of the output data */
        auto out_p = image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                auto sub_line_p = sub_p;
                auto out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        auto const p = mapped_palette[*sub_line_p++];
                        *out_line_p++ = p.b;
                        *out_line_p++ = p.g;
                        *out_line_p++ = p.r;
                        *out_line_p++ = p.a;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0];
        }

        int target_width = subtitle_codec_context()->width;
        if (target_width == 0 && video_codec_context()) {
                /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
                   know if it's supposed to mean something from FFmpeg's point of view.
                */
                target_width = video_codec_context()->width;
        }
        int target_height = subtitle_codec_context()->height;
        if (target_height == 0 && video_codec_context()) {
                target_height = video_codec_context()->height;
        }

        int x_offset = 0;
        int y_offset = 0;
        if (_ffmpeg_content->video && _ffmpeg_content->video->use()) {
                auto const crop = _ffmpeg_content->video->actual_crop();
                target_width -= crop.left + crop.right;
                target_height -= crop.top + crop.bottom;
                x_offset = -crop.left;
                y_offset = -crop.top;
        }

        DCPOMATIC_ASSERT(target_width > 0);
        DCPOMATIC_ASSERT(target_height > 0);

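        /* Express the rectangle as proportions of the target: for example, a
           (hypothetical) 360x60 rect at (72, 500) in a 720x576 target becomes
           roughly { 0.1, 0.868, 0.5, 0.104 }.
        */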
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double>(rect->x + x_offset) / target_width,
                static_cast<double>(rect->y + y_offset) / target_height,
                static_cast<double>(rect->w) / target_width,
                static_cast<double>(rect->h) / target_height
                );

        return { image, scaled_rect };
}


void
FFmpegDecoder::process_ass_subtitle (string ass, ContentTime from)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */
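        /* For example, a (hypothetical) line like

               Dialogue: 0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello world

           has nine commas before the text payload, so below we skip everything
           up to and including the ninth comma and keep the rest.
        */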

        int commas = 0;
        string text;
        for (size_t i = 0; i < ass.length(); ++i) {
                if (commas < 9 && ass[i] == ',') {
                        ++commas;
                } else if (commas == 9) {
                        text += ass[i];
                }
        }

        if (text.empty ()) {
                return;
        }

        sub::RawSubtitle base;
        auto video_size = _ffmpeg_content->video->size();
        DCPOMATIC_ASSERT(video_size);

        auto raw = sub::SSAReader::parse_line (
                base,
                text,
                video_size->width,
                video_size->height,
                sub::Colour(1, 1, 1)
                );

        for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {
                only_text()->emit_plain_start (from, i);
        }
}