/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/

/** @file  src/lib/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "dcpomatic_log.h"
#include "exceptions.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_content.h"
#include "ffmpeg_decoder.h"
#include "ffmpeg_subtitle_stream.h"
#include "film.h"
#include "filter.h"
#include "frame_interval_checker.h"
#include "image.h"
#include "log.h"
#include "raw_image_proxy.h"
#include "text_content.h"
#include "text_decoder.h"
#include "util.h"
#include "video_decoder.h"
#include "video_filter_graph.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/algorithm/string.hpp>
#include <iomanip>
#include <iostream>
#include <vector>
#include <stdint.h>

#include "i18n.h"


using std::cout;
using std::dynamic_pointer_cast;
using std::make_shared;
using std::min;
using std::shared_ptr;
using std::string;
using std::vector;
using boost::optional;
using dcp::Size;
using namespace dcpomatic;


FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
        : FFmpeg (c)
        , Decoder (film)
        , _filter_graphs(c->filters(), dcp::Fraction(lrint(_ffmpeg_content->video_frame_rate().get_value_or(24) * 1000), 1000))
{
        if (c->video && c->video->use()) {
                video = make_shared<VideoDecoder>(this, c);
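                /* _pts_offset is the amount we must add to the container's own timestamps to
                   get times relative to the start of this content; see pts_offset() for how
                   it is calculated.
                */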
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
                /* It doesn't matter what size or pixel format this is; it just needs to be black */
                _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (128, 128), Image::Alignment::PADDED);
                _black_image->make_black ();
        } else {
                _pts_offset = {};
        }

        if (c->audio) {
                audio = make_shared<AudioDecoder>(this, c->audio, fast);
        }

        if (c->only_text()) {
                text.push_back (make_shared<TextDecoder>(this, c->only_text()));
                /* XXX: we should be calling maybe_set_position() on this TextDecoder, but we can't easily find
                 * the time of the first subtitle at this point.
                 */
        }

        for (auto i: c->ffmpeg_audio_streams()) {
                _next_time[i] = boost::optional<dcpomatic::ContentTime>();
        }
}

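/** Do one step of flushing.  Flushing proceeds in stages: first the codecs are
 *  drained of any frames they have buffered, then the audio decoder is flushed,
 *  then all streams are padded out to a common length with black video and silence.
 *  @return DONE when flushing is complete, otherwise AGAIN.
 */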
FFmpegDecoder::FlushResult
FFmpegDecoder::flush ()
{
        LOG_DEBUG_PLAYER("Flush FFmpeg decoder: current state %1", static_cast<int>(_flush_state));

        switch (_flush_state) {
        case FlushState::CODECS:
                if (flush_codecs() == FlushResult::DONE) {
                        LOG_DEBUG_PLAYER_NC("Finished flushing codecs");
                        _flush_state = FlushState::AUDIO_DECODER;
                }
                break;
        case FlushState::AUDIO_DECODER:
                if (audio) {
                        audio->flush();
                }
                LOG_DEBUG_PLAYER_NC("Finished flushing audio decoder");
                _flush_state = FlushState::FILL;
                break;
        case FlushState::FILL:
                if (flush_fill() == FlushResult::DONE) {
                        LOG_DEBUG_PLAYER_NC("Finished flushing fills");
                        return FlushResult::DONE;
                }
                break;
        }

        return FlushResult::AGAIN;
}

/** @return DONE if the codecs are fully flushed, otherwise AGAIN */
FFmpegDecoder::FlushResult
FFmpegDecoder::flush_codecs()
{
        bool did_something = false;
        if (video) {
                if (decode_and_process_video_packet(nullptr)) {
                        did_something = true;
                }
        }

        for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
                auto context = _codec_context[i->index(_format_context)];
                int r = avcodec_send_packet (context, nullptr);
                if (r < 0 && r != AVERROR_EOF) {
                        /* EOF can happen if we've already sent a flush packet */
                        throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::flush_codecs"), r);
                }
                r = avcodec_receive_frame (context, audio_frame(i));
                if (r >= 0) {
                        process_audio_frame (i);
                        did_something = true;
                }
        }

        return did_something ? FlushResult::AGAIN : FlushResult::DONE;
}


FFmpegDecoder::FlushResult
FFmpegDecoder::flush_fill()
{
        /* Make sure all streams are the same length and round up to the next video frame */

        bool did_something = false;

        auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
        ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
        full_length = full_length.ceil (frc.source);
        if (video && !video->ignore()) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
                auto const f = full_length.frames_round (vfr);
                auto const v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
                if (v < f) {
                        video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
                        did_something = true;
                }
        }

        if (audio && !audio->ignore()) {
                for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
                        auto const a = audio->stream_position(film(), i);
                        /* Unfortunately if a is 0 that really means that we don't know the stream position since
                           there has been no data on it since the last seek.  In this case we'll just do nothing
                           here.  I'm not sure if that's the right idea.
                        */
                        if (a > ContentTime() && a < full_length) {
                                LOG_DEBUG_PLAYER("Flush inserts silence at %1", to_string(a));
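                                /* Emit no more than 0.1s of silence per call, so that each flush() step does a bounded amount of work */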
                                auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
                                auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
                                silence->make_silent ();
                                audio->emit (film(), i, silence, a, true);
                                did_something = true;
                        }
                }
        }

        return did_something ? FlushResult::AGAIN : FlushResult::DONE;
}


bool
FFmpegDecoder::pass ()
{
        auto packet = av_packet_alloc();
        DCPOMATIC_ASSERT (packet);

        int r = av_read_frame (_format_context, packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty-much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                LOG_DEBUG_PLAYER("FFmpegDecoder::pass flushes because av_read_frame returned %1", r);
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
                }

                av_packet_free (&packet);
                return flush() == FlushResult::DONE;
        }

        int const si = packet->stream_index;
        auto fc = _ffmpeg_content;

        if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
                decode_and_process_video_packet (packet);
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
                decode_and_process_subtitle_packet (packet);
        } else {
                decode_and_process_audio_packet (packet);
        }

        av_packet_free (&packet);
        return false;
}

/** Deinterleave audio from @p frame into a new set of AudioBuffers.
 *  For non-planar sample formats all channels are interleaved in frame->data[0];
 *  for planar formats there is one plane per channel.
 */
static
shared_ptr<AudioBuffers>
deinterleave_audio(shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
{
        auto format = static_cast<AVSampleFormat>(frame->format);

        /* XXX: can't we use swr_convert() to do the format conversion? */

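        /* For reference: interleaved formats (e.g. S16) carry samples as
           L R L R ... in frame->data[0], while planar formats (e.g. S16P) have one
           channel per plane: L L L ... in data[0], R R R ... in data[1] and so on.
        */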
        int const channels = frame->channels;
        int const frames = frame->nb_samples;
        int const total_samples = frames * channels;
        auto audio = make_shared<AudioBuffers>(channels, frames);
        auto data = audio->data();

        switch (format) {
        case AV_SAMPLE_FMT_U8:
        {
                auto p = reinterpret_cast<uint8_t *> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* U8 is unsigned with silence at 128, so recentre then scale to [-1, 1) */
                        data[channel][sample] = (float(*p++) - 128) / 128;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                auto p = reinterpret_cast<int16_t *> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                auto p = reinterpret_cast<int16_t **> (frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                auto p = reinterpret_cast<int32_t *> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = static_cast<float>(*p++) / 2147483648;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                auto p = reinterpret_cast<int32_t **> (frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                auto p = reinterpret_cast<float*> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = *p++;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                auto p = reinterpret_cast<float**> (frame->data);
                DCPOMATIC_ASSERT(channels <= stream->channels());
                /* Sometimes there aren't as many channels in the frame as in the stream */
                for (int i = 0; i < channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
                for (int i = channels; i < stream->channels(); ++i) {
                        audio->make_silent (i);
                }
        }
        break;

        default:
                throw DecodeError (String::compose(_("Unrecognised audio sample format (%1)"), static_cast<int>(format)));
        }

        return audio;
}


AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
        return static_cast<AVSampleFormat>(stream->stream(_format_context)->codecpar->format);
}


int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}


void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        Decoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        auto pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        optional<int> stream;

        if (_video_stream) {
                stream = _video_stream;
        } else {
                DCPOMATIC_ASSERT (_ffmpeg_content->audio);
                auto s = dynamic_pointer_cast<FFmpegAudioStream>(_ffmpeg_content->audio->stream());
                if (s) {
                        stream = s->index (_format_context);
                }
        }

        DCPOMATIC_ASSERT (stream);

        auto u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
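        /* AVSEEK_FLAG_BACKWARD asks FFmpeg for the nearest seek point at or before the
           given timestamp, so for accurate seeks we can decode forwards from there and
           discard anything up to the frame we actually want.
        */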
        av_seek_frame (
                _format_context,
                stream.get(),
                u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        /* Force re-creation of filter graphs to reset them and hence to make sure
           they don't have any pre-seek frames knocking about.
        */
        _filter_graphs.clear();

        if (video_codec_context ()) {
                avcodec_flush_buffers (video_codec_context());
        }

        for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
                avcodec_flush_buffers (_codec_context[i->index(_format_context)]);
        }

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }

        _have_current_subtitle = false;

        for (auto& i: _next_time) {
                i.second = boost::optional<dcpomatic::ContentTime>();
        }
}


shared_ptr<FFmpegAudioStream>
FFmpegDecoder::audio_stream_from_index (int index) const
{
        /* XXX: inefficient */
        auto streams = ffmpeg_content()->ffmpeg_audio_streams();
        auto stream = streams.begin();
        while (stream != streams.end() && !(*stream)->uses_index(_format_context, index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                return {};
        }

        return *stream;
}


void
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
        auto frame = audio_frame (stream);
        auto data = deinterleave_audio(stream, frame);

        auto const time_base = stream->stream(_format_context)->time_base;

        ContentTime ct;
        if (frame->pts == AV_NOPTS_VALUE) {
                /* In some streams not every frame comes through with a timestamp; for those
                   with AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
                   particularly noticeable with TrueHD streams (see #1111).
                */
                if (_next_time[stream]) {
                        ct = *_next_time[stream];
                }
        } else {
                ct = ContentTime::from_seconds (
                        frame->best_effort_timestamp *
                        av_q2d(time_base))
                        + _pts_offset;
                LOG_DEBUG_PLAYER(
                        "Process audio with timestamp %1 (BET %2, timebase %3/%4, PTS offset %5)",
                        to_string(ct),
                        frame->best_effort_timestamp,
                        time_base.num,
                        time_base.den,
                        to_string(_pts_offset)
                        );
        }

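        /* Note where the next frame in this stream should start, so that we can use it as
           a timestamp for a later frame which arrives without a PTS of its own.
        */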
        _next_time[stream] = ct + ContentTime::from_frames(data->frames(), stream->frame_rate());

        if (ct < ContentTime()) {
                /* Discard audio data that comes before time 0 */
                auto const remove = min (int64_t(data->frames()), (-ct).frames_ceil(double(stream->frame_rate())));
                data->move (data->frames() - remove, remove, 0);
                data->set_frames (data->frames() - remove);
                ct += ContentTime::from_frames (remove, stream->frame_rate());
        }

        if (ct < ContentTime()) {
                LOG_WARNING (
                        "Crazy timestamp %1 for %2 samples in stream %3 (ts=%4 tb=%5, off=%6)",
                        to_string(ct),
                        data->frames(),
                        stream->id(),
                        frame->best_effort_timestamp,
                        av_q2d(time_base),
                        to_string(_pts_offset)
                        );
        }

        /* Emit this data, provided there is some and its timestamp is sane */
        if (ct >= ContentTime() && data->frames() > 0) {
                audio->emit (film(), stream, data, ct);
        }
}


void
FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
{
        auto stream = audio_stream_from_index (packet->stream_index);
        if (!stream) {
                return;
        }

        auto context = _codec_context[stream->index(_format_context)];
        auto frame = audio_frame (stream);

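        /* With the send/receive decoding API one packet can yield zero, one or several
           frames, so we send the packet once and then receive frames until the codec
           asks for more input.
        */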
        LOG_DEBUG_PLAYER("Send audio packet on stream %1", stream->index(_format_context));
        int r = avcodec_send_packet (context, packet);
        if (r < 0) {
                LOG_WARNING("avcodec_send_packet returned %1 for an audio packet", r);
        }
        while (r >= 0) {
                r = avcodec_receive_frame (context, frame);
                if (r == AVERROR(EAGAIN)) {
                        /* More input is required */
                        LOG_DEBUG_PLAYER_NC("EAGAIN after trying to receive audio frame");
                        return;
                }

                /* We choose to be relaxed here about other errors; it seems that there may be valid
                 * data to decode even if an error occurred.  #352 may be related (though this was
                 * when we were using an old version of the FFmpeg API).
                 */
                process_audio_frame (stream);
        }
}


bool
FFmpegDecoder::decode_and_process_video_packet (AVPacket* packet)
{
        DCPOMATIC_ASSERT (_video_stream);

        auto context = video_codec_context();

        bool pending = false;
        do {
                int r = avcodec_send_packet (context, packet);
                if (r < 0) {
                        LOG_WARNING("avcodec_send_packet returned %1 for a video packet", r);
                }

                /* EAGAIN means we should call avcodec_receive_frame and then re-send the same packet */
                pending = r == AVERROR(EAGAIN);

                while (true) {
                        r = avcodec_receive_frame (context, _video_frame);
                        if (r == AVERROR(EAGAIN) || r == AVERROR_EOF || (r < 0 && !packet)) {
                                /* More input is required, no more frames are coming, or we are flushing and there was
                                 * some error which we just want to ignore.
                                 */
                                return false;
                        } else if (r < 0) {
                                throw DecodeError (N_("avcodec_receive_frame"), N_("FFmpeg::decode_and_process_video_packet"), r);
                        }

                        process_video_frame ();
                }
        } while (pending);

        return true;
}


void
FFmpegDecoder::process_video_frame ()
{
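        /* Fetch a filter graph matching this frame's size and pixel format; these can
           change mid-stream, so the graphs are looked up (and, if necessary, created)
           per frame.
        */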
        auto graph = _filter_graphs.get(dcp::Size(_video_frame->width, _video_frame->height), static_cast<AVPixelFormat>(_video_frame->format));
        auto images = graph->process (_video_frame);

        for (auto const& i: images) {

                auto image = i.first;

                if (i.second != AV_NOPTS_VALUE) {
                        double const pts = i.second * av_q2d(_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds();

                        video->emit (
                                film(),
                                make_shared<RawImageProxy>(image),
                                llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }
}


void
FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet)
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
                return;
        }

        auto sub_period = subtitle_period (packet, ffmpeg_content()->subtitle_stream()->stream(_format_context), sub);

        /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
        if (_have_current_subtitle) {
                if (_current_subtitle_to) {
                        only_text()->emit_stop (min(*_current_subtitle_to, sub_period.from + _pts_offset));
                } else {
                        only_text()->emit_stop (sub_period.from + _pts_offset);
                }
                _have_current_subtitle = false;
        }

        if (sub.num_rects <= 0) {
                /* Nothing new in this subtitle */
                avsubtitle_free (&sub);
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        auto const from = sub_period.from + _pts_offset;
        if (sub_period.to) {
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                /* We'll have to find the stop time from a later subtitle packet */
                _current_subtitle_to = optional<ContentTime>();
        }
        _have_current_subtitle = true;

        ContentBitmapText bitmap_text(from);
        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                auto const rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        bitmap_text.subs.push_back(process_bitmap_subtitle(rect));
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        process_ass_subtitle (rect->ass, from);
                        break;
                }
        }

        if (!bitmap_text.subs.empty()) {
                only_text()->emit_bitmap_start(bitmap_text);
        }

        if (_current_subtitle_to) {
                only_text()->emit_stop (*_current_subtitle_to);
        }

        avsubtitle_free (&sub);
}


BitmapText
FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect)
{
        /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
           G, third R, fourth A.
        */
        auto image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), Image::Alignment::PADDED);

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        auto sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
           (i.e. first byte B, second G, third R, fourth A)
        */
        auto const palette = rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        auto sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is at rect->data[1].
           (first byte B, second G, third R, fourth A)
        */
        auto const* palette = rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        auto colour_map = ffmpeg_content()->subtitle_stream()->colours();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c (palette[2], palette[1], palette[0], palette[3]);
                auto j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
                palette += 4;
        }

        /* Start of the output data */
        auto out_p = image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                auto sub_line_p = sub_p;
                auto out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        auto const p = mapped_palette[*sub_line_p++];
                        *out_line_p++ = p.b;
                        *out_line_p++ = p.g;
                        *out_line_p++ = p.r;
                        *out_line_p++ = p.a;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0];
        }

        int target_width = subtitle_codec_context()->width;
        if (target_width == 0 && video_codec_context()) {
                /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
                   know if it's supposed to mean something from FFmpeg's point of view.
                */
                target_width = video_codec_context()->width;
        }
        int target_height = subtitle_codec_context()->height;
        if (target_height == 0 && video_codec_context()) {
                target_height = video_codec_context()->height;
        }
        DCPOMATIC_ASSERT (target_width);
        DCPOMATIC_ASSERT (target_height);
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double>(rect->x) / target_width,
                static_cast<double>(rect->y) / target_height,
                static_cast<double>(rect->w) / target_width,
                static_cast<double>(rect->h) / target_height
                );

        return { image, scaled_rect };
}


void
FFmpegDecoder::process_ass_subtitle (string ass, ContentTime from)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */

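        /* On that assumption, a line looks something like
           "Dialogue: 0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello world"
           so the text we want is everything after the ninth comma.
        */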
        int commas = 0;
        string text;
        for (size_t i = 0; i < ass.length(); ++i) {
                if (commas < 9 && ass[i] == ',') {
                        ++commas;
                } else if (commas == 9) {
                        text += ass[i];
                }
        }

        if (text.empty ()) {
                return;
        }

        sub::RawSubtitle base;
        auto raw = sub::SSAReader::parse_line (
                base,
                text,
                _ffmpeg_content->video->size().width,
                _ffmpeg_content->video->size().height,
                sub::Colour(1, 1, 1)
                );

        for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {
                only_text()->emit_plain_start (from, i);
        }
}