Remove now-unnecessary assertion.
/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/


/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */


#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "dcpomatic_log.h"
#include "exceptions.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_content.h"
#include "ffmpeg_decoder.h"
#include "ffmpeg_subtitle_stream.h"
#include "film.h"
#include "filter.h"
#include "frame_interval_checker.h"
#include "image.h"
#include "log.h"
#include "raw_image_proxy.h"
#include "text_content.h"
#include "text_decoder.h"
#include "util.h"
#include "video_decoder.h"
#include "video_filter_graph.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/algorithm/string.hpp>
#include <cmath>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <vector>
#include <stdint.h>

#include "i18n.h"


using std::cout;
using std::dynamic_pointer_cast;
using std::make_shared;
using std::min;
using std::shared_ptr;
using std::string;
using std::vector;
using boost::optional;
using dcp::Size;
using namespace dcpomatic;


FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
        : FFmpeg (c)
        , Decoder (film)
        , _filter_graphs(c->filters(), dcp::Fraction(lrint(_ffmpeg_content->video_frame_rate().get_value_or(24) * 1000), 1000))
{
        if (c->video && c->video->use()) {
                video = make_shared<VideoDecoder>(this, c);
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
                /* It doesn't matter what size or pixel format this is; it just needs to be black */
                _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (128, 128), Image::Alignment::PADDED);
                _black_image->make_black ();
        } else {
                _pts_offset = {};
        }

        if (c->audio) {
                audio = make_shared<AudioDecoder>(this, c->audio, fast);
        }

        if (c->only_text()) {
                text.push_back (make_shared<TextDecoder>(this, c->only_text()));
                /* XXX: we should be calling maybe_set_position() on this TextDecoder, but we can't easily find
                 * the time of the first subtitle at this point.
                 */
        }

        for (auto i: c->ffmpeg_audio_streams()) {
                _next_time[i] = boost::optional<dcpomatic::ContentTime>();
        }
}


FFmpegDecoder::FlushResult
FFmpegDecoder::flush ()
{
        LOG_DEBUG_PLAYER("Flush FFmpeg decoder: current state %1", static_cast<int>(_flush_state));

        switch (_flush_state) {
        case FlushState::CODECS:
                if (flush_codecs() == FlushResult::DONE) {
                        LOG_DEBUG_PLAYER_NC("Finished flushing codecs");
                        _flush_state = FlushState::AUDIO_DECODER;
                }
                break;
        case FlushState::AUDIO_DECODER:
                if (audio) {
                        audio->flush();
                }
                LOG_DEBUG_PLAYER_NC("Finished flushing audio decoder");
                _flush_state = FlushState::FILL;
                break;
        case FlushState::FILL:
                if (flush_fill() == FlushResult::DONE) {
                        LOG_DEBUG_PLAYER_NC("Finished flushing fills");
                        return FlushResult::DONE;
                }
                break;
        }

        return FlushResult::AGAIN;
}
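
/* A rough sketch of how the flush state machine above advances (my reading of
   this code, not an authoritative spec): each call does one step and the caller
   retries while it gets AGAIN.

       CODECS --(flush_codecs() == DONE)--> AUDIO_DECODER --(always, one call)--> FILL --(flush_fill() == DONE)--> DONE

   pass() calls flush() once per invocation after av_read_frame() reports EOF,
   so AGAIN effectively means "call pass() again".
*/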


/** @return FlushResult::DONE if the codecs have been fully flushed, otherwise FlushResult::AGAIN */
FFmpegDecoder::FlushResult
FFmpegDecoder::flush_codecs()
{
        bool did_something = false;
        if (video) {
                if (decode_and_process_video_packet(nullptr)) {
                        did_something = true;
                }
        }

        for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
                auto context = _codec_context[i->index(_format_context)];
                int r = avcodec_send_packet (context, nullptr);
                if (r < 0 && r != AVERROR_EOF) {
                        /* EOF can happen if we've already sent a flush packet */
                        throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::flush"), r);
                }
                r = avcodec_receive_frame (context, audio_frame(i));
                if (r >= 0) {
                        process_audio_frame (i);
                        did_something = true;
                }
        }

        return did_something ? FlushResult::AGAIN : FlushResult::DONE;
}


FFmpegDecoder::FlushResult
FFmpegDecoder::flush_fill()
{
        /* Make sure all streams are the same length and round up to the next video frame */

        bool did_something = false;

        auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
        ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
        full_length = full_length.ceil (frc.source);
        if (video && !video->ignore()) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
                auto const f = full_length.frames_round (vfr);
                auto const v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
                if (v < f) {
                        video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
                        did_something = true;
                }
        }

        if (audio && !audio->ignore()) {
                for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
                        auto const a = audio->stream_position(film(), i);
                        /* Unfortunately if a is 0 that really means that we don't know the stream position since
                           there has been no data on it since the last seek.  In this case we'll just do nothing
                           here.  I'm not sure if that's the right idea.
                        */
                        if (a > ContentTime() && a < full_length) {
                                LOG_DEBUG_PLAYER("Flush inserts silence at %1", to_string(a));
                                auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
                                auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
                                silence->make_silent ();
                                audio->emit (film(), i, silence, a, true);
                                did_something = true;
                        }
                }
        }

        return did_something ? FlushResult::AGAIN : FlushResult::DONE;
}
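
/* Worked example of the silence-filling arithmetic above (illustrative only):
   with a 48kHz stream at position a = full_length - 0.3s, each call emits
   min(0.3s, 0.1s) = 0.1s, i.e. ContentTime::from_seconds(0.1).frames_ceil(48000)
   = 4800 frames of silence, so three AGAIN rounds are needed for this stream.
*/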


bool
FFmpegDecoder::pass ()
{
        auto packet = av_packet_alloc();
        DCPOMATIC_ASSERT (packet);

        int r = av_read_frame (_format_context, packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty-much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                LOG_DEBUG_PLAYER("FFmpegDecoder::pass flushes because av_read_frame returned %1", r);
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
                }

                av_packet_free (&packet);
                return flush() == FlushResult::DONE;
        }

        int const si = packet->stream_index;
        auto fc = _ffmpeg_content;

        if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
                decode_and_process_video_packet (packet);
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
                decode_and_process_subtitle_packet (packet);
        } else {
                decode_and_process_audio_packet (packet);
        }

        av_packet_free (&packet);
        return false;
}
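
/* Sketch of how a caller might drive the decoder via pass() (a hypothetical
   driver loop for illustration, not a copy of the real calling code):

       while (!decoder->pass()) {
               // each pass() reads one packet (or performs one flush step) and
               // emits any resulting video/audio/text through the usual signals
       }
*/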


/** De-interleave audio from @p frame, converting it to float.
 *  For non-planar formats only the first of frame's data buffers is used;
 *  for planar formats there is one buffer per channel.
 */
static
shared_ptr<AudioBuffers>
deinterleave_audio(shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
{
        auto format = static_cast<AVSampleFormat>(frame->format);

        /* XXX: can't we use swr_convert() to do the format conversion? */

        int const channels = frame->channels;
        int const frames = frame->nb_samples;
        int const total_samples = frames * channels;
        auto audio = make_shared<AudioBuffers>(channels, frames);
        auto data = audio->data();

        switch (format) {
        case AV_SAMPLE_FMT_U8:
        {
                auto p = reinterpret_cast<uint8_t *> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* U8 is unsigned with silence at 128, so recentre before scaling to [-1, 1) */
                        data[channel][sample] = (float(*p++) - 128) / 128;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                auto p = reinterpret_cast<int16_t *> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                auto p = reinterpret_cast<int16_t **> (frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                auto p = reinterpret_cast<int32_t *> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = static_cast<float>(*p++) / 2147483648;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                auto p = reinterpret_cast<int32_t **> (frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                auto p = reinterpret_cast<float*> (frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = *p++;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                auto p = reinterpret_cast<float**> (frame->data);
                for (int i = 0; i < channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
        }
        break;

        default:
                throw DecodeError (String::compose(_("Unrecognised audio sample format (%1)"), static_cast<int>(format)));
        }

        return audio;
}
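
/* Illustration of the two sample layouts handled above, for 2 channels and 4
   frames (illustrative only):

       interleaved (e.g. AV_SAMPLE_FMT_S16):  frame->data[0] = L0 R0 L1 R1 L2 R2 L3 R3
       planar      (e.g. AV_SAMPLE_FMT_S16P): frame->data[0] = L0 L1 L2 L3
                                              frame->data[1] = R0 R1 R2 R3

   The divisors normalise integer samples to float in [-1, 1): e.g. for S16 the
   most negative sample is -32768 / (1 << 15) = -1.0f and the most positive is
   32767 / 32768, which is just under 1.
*/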


AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
        return static_cast<AVSampleFormat>(stream->stream(_format_context)->codecpar->format);
}


int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}


void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        Decoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        auto pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        optional<int> stream;

        if (_video_stream) {
                stream = _video_stream;
        } else {
                DCPOMATIC_ASSERT (_ffmpeg_content->audio);
                auto s = dynamic_pointer_cast<FFmpegAudioStream>(_ffmpeg_content->audio->stream());
                if (s) {
                        stream = s->index (_format_context);
                }
        }

        DCPOMATIC_ASSERT (stream);

        auto u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
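        /* av_seek_frame() wants a timestamp in the stream's time base, so convert
           from seconds by dividing by av_q2d(time_base) = num/den.  Worked example
           (illustrative): with a typical 1/90000 time base, u = 2s gives
           2.0 / (1/90000) = 180000.  AVSEEK_FLAG_BACKWARD requests the nearest
           seekable point at or before that timestamp.
        */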
        av_seek_frame (
                _format_context,
                stream.get(),
                u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        /* Force re-creation of filter graphs to reset them and hence to make sure
           they don't have any pre-seek frames knocking about.
        */
        _filter_graphs.clear();

        if (video_codec_context ()) {
                avcodec_flush_buffers (video_codec_context());
        }

        for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
                avcodec_flush_buffers (_codec_context[i->index(_format_context)]);
        }

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }

        _have_current_subtitle = false;

        for (auto& i: _next_time) {
                i.second = boost::optional<dcpomatic::ContentTime>();
        }
}


shared_ptr<FFmpegAudioStream>
FFmpegDecoder::audio_stream_from_index (int index) const
{
        /* XXX: inefficient */
        auto streams = ffmpeg_content()->ffmpeg_audio_streams();
        auto stream = streams.begin();
        while (stream != streams.end() && !(*stream)->uses_index(_format_context, index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                return {};
        }

        return *stream;
}


void
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
        auto frame = audio_frame (stream);
        auto data = deinterleave_audio(stream, frame);

        auto const time_base = stream->stream(_format_context)->time_base;

        ContentTime ct;
        if (frame->pts == AV_NOPTS_VALUE) {
                /* In some streams we see not every frame coming through with a timestamp; for those
                   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
                   particularly noticeable with TrueHD streams (see #1111).
                */
                if (_next_time[stream]) {
                        ct = *_next_time[stream];
                }
        } else {
                ct = ContentTime::from_seconds (
                        frame->best_effort_timestamp *
                        av_q2d(time_base))
                        + _pts_offset;
                LOG_DEBUG_PLAYER(
                        "Process audio with timestamp %1 (BET %2, timebase %3/%4, PTS offset %5)",
                        to_string(ct),
                        frame->best_effort_timestamp,
                        time_base.num,
                        time_base.den,
                        to_string(_pts_offset)
                        );
        }

        _next_time[stream] = ct + ContentTime::from_frames(data->frames(), stream->frame_rate());

        if (ct < ContentTime()) {
                /* Discard audio data that comes before time 0 */
                auto const remove = min (int64_t(data->frames()), (-ct).frames_ceil(double(stream->frame_rate())));
                data->move (data->frames() - remove, remove, 0);
                data->set_frames (data->frames() - remove);
                ct += ContentTime::from_frames (remove, stream->frame_rate());
        }
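
        /* Worked example of the trim above (illustrative only): at 48kHz with
           ct = -0.5s and 48000 frames of data, remove = 24000; the frames from
           offset 24000 onwards move to the start, the buffer is shortened to
           24000 frames and ct becomes 0.
        */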

        if (ct < ContentTime()) {
                LOG_WARNING (
                        "Crazy timestamp %1 for %2 samples in stream %3 (ts=%4 tb=%5, off=%6)",
                        to_string(ct),
                        data->frames(),
                        stream->id(),
                        frame->best_effort_timestamp,
                        av_q2d(time_base),
                        to_string(_pts_offset)
                        );
        }

        /* Emit this data, provided there is some and its timestamp is sane */
        if (ct >= ContentTime() && data->frames() > 0) {
                audio->emit (film(), stream, data, ct);
        }
}


void
FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
{
        auto stream = audio_stream_from_index (packet->stream_index);
        if (!stream) {
                return;
        }

        auto context = _codec_context[stream->index(_format_context)];
        auto frame = audio_frame (stream);

        LOG_DEBUG_PLAYER("Send audio packet on stream %1", stream->index(_format_context));
        int r = avcodec_send_packet (context, packet);
        if (r < 0) {
                LOG_WARNING("avcodec_send_packet returned %1 for an audio packet", r);
        }
        while (r >= 0) {
                r = avcodec_receive_frame (context, frame);
                if (r == AVERROR(EAGAIN)) {
                        /* More input is required */
                        LOG_DEBUG_PLAYER_NC("EAGAIN after trying to receive audio frame");
                        return;
                }

                /* We choose to be relaxed here about other errors; it seems that there may be valid
                 * data to decode even if an error occurred.  #352 may be related (though this was
                 * when we were using an old version of the FFmpeg API).
                 */
                process_audio_frame (stream);
        }
}


bool
FFmpegDecoder::decode_and_process_video_packet (AVPacket* packet)
{
        DCPOMATIC_ASSERT (_video_stream);

        auto context = video_codec_context();

        bool pending = false;
        do {
                int r = avcodec_send_packet (context, packet);
                if (r < 0) {
                        LOG_WARNING("avcodec_send_packet returned %1 for a video packet", r);
                }

                /* EAGAIN means we should call avcodec_receive_frame and then re-send the same packet */
                pending = r == AVERROR(EAGAIN);

                while (true) {
                        r = avcodec_receive_frame (context, _video_frame);
                        if (r == AVERROR(EAGAIN) || r == AVERROR_EOF || (r < 0 && !packet)) {
                                /* More input is required, no more frames are coming, or we are flushing and there was
                                 * some error which we just want to ignore.
                                 */
                                return false;
                        } else if (r < 0) {
                                throw DecodeError (N_("avcodec_receive_frame"), N_("FFmpeg::decode_and_process_video_packet"), r);
                        }

                        process_video_frame ();
                }
        } while (pending);

        return true;
}
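
/* A note on the send/receive dance above (my summary of the FFmpeg API as used
   here): avcodec_send_packet() feeds one packet in, then avcodec_receive_frame()
   is called until it reports AVERROR(EAGAIN), meaning more input is needed.  If
   the send itself returned EAGAIN the decoder's output was full, so we drain
   frames and then re-send the same packet; hence the `pending' loop.
*/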


void
FFmpegDecoder::process_video_frame ()
{
        auto graph = _filter_graphs.get(dcp::Size(_video_frame->width, _video_frame->height), static_cast<AVPixelFormat>(_video_frame->format));
        auto images = graph->process (_video_frame);

        for (auto const& i: images) {

                auto image = i.first;

                if (i.second != AV_NOPTS_VALUE) {
                        double const pts = i.second * av_q2d(_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds();

                        video->emit (
                                film(),
                                make_shared<RawImageProxy>(image),
                                llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }
}
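
/* Worked example of the emitted frame index above (illustrative only): with a
   1/90000 time base, a filtered-image PTS of 90000 and a zero _pts_offset we
   get pts = 1.0s; at an active video frame rate of 24 this emits
   llrint(1.0 * 24) = frame 24.
*/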


void
FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet)
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
                return;
        }

        auto sub_period = subtitle_period (packet, ffmpeg_content()->subtitle_stream()->stream(_format_context), sub);

        /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
        if (_have_current_subtitle) {
                if (_current_subtitle_to) {
                        only_text()->emit_stop (min(*_current_subtitle_to, sub_period.from + _pts_offset));
                } else {
                        only_text()->emit_stop (sub_period.from + _pts_offset);
                }
                _have_current_subtitle = false;
        }

        if (sub.num_rects <= 0) {
                /* Nothing new in this subtitle */
                avsubtitle_free (&sub);
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        auto const from = sub_period.from + _pts_offset;
        if (sub_period.to) {
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                _current_subtitle_to = optional<ContentTime>();
                _have_current_subtitle = true;
        }

        ContentBitmapText bitmap_text(from);
        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                auto const rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        bitmap_text.subs.push_back(process_bitmap_subtitle(rect));
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        process_ass_subtitle (rect->ass, from);
                        break;
                }
        }

        if (!bitmap_text.subs.empty()) {
                only_text()->emit_bitmap_start(bitmap_text);
        }

        if (_current_subtitle_to) {
                only_text()->emit_stop (*_current_subtitle_to);
        }

        avsubtitle_free (&sub);
}


BitmapText
FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect)
{
        /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
           G, third R, fourth A.
        */
        auto image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), Image::Alignment::PADDED);

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        auto sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
           (i.e. first byte B, second G, third R, fourth A)
        */
        auto const palette = rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        auto sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is at rect->data[1].
           (first byte B, second G, third R, fourth A)
        */
        auto const* palette = rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        auto colour_map = ffmpeg_content()->subtitle_stream()->colours();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c (palette[2], palette[1], palette[0], palette[3]);
                auto j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
                palette += 4;
        }

        /* Start of the output data */
        auto out_p = image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                auto sub_line_p = sub_p;
                auto out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        auto const p = mapped_palette[*sub_line_p++];
                        *out_line_p++ = p.b;
                        *out_line_p++ = p.g;
                        *out_line_p++ = p.r;
                        *out_line_p++ = p.a;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0];
        }

        int target_width = subtitle_codec_context()->width;
        if (target_width == 0 && video_codec_context()) {
                /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
                   know if it's supposed to mean something from FFmpeg's point of view.
                */
                target_width = video_codec_context()->width;
        }
        int target_height = subtitle_codec_context()->height;
        if (target_height == 0 && video_codec_context()) {
                target_height = video_codec_context()->height;
        }
        DCPOMATIC_ASSERT (target_width);
        DCPOMATIC_ASSERT (target_height);
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double>(rect->x) / target_width,
                static_cast<double>(rect->y) / target_height,
                static_cast<double>(rect->w) / target_width,
                static_cast<double>(rect->h) / target_height
                );

        return { image, scaled_rect };
}
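
/* Worked example of the normalisation above (illustrative only): a subtitle
   rectangle at x=192, y=864 with w=1536, h=108 on a 1920x1080 target becomes
   (0.1, 0.8, 0.8, 0.1), i.e. position and size as proportions of the frame,
   independent of the source resolution.
*/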


void
FFmpegDecoder::process_ass_subtitle (string ass, ContentTime from)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */
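        /* That assumption corresponds to the standard ASS Events format

               Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text

           i.e. 9 commas before the Text field (which may itself contain commas).
           For example (illustrative only) the line

               Dialogue: 0,0:00:01.00,0:00:02.00,Default,,0,0,0,,Hello, world

           would yield text = "Hello, world" below.
        */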

        int commas = 0;
        string text;
        for (size_t i = 0; i < ass.length(); ++i) {
                if (commas < 9 && ass[i] == ',') {
                        ++commas;
                } else if (commas == 9) {
                        text += ass[i];
                }
        }

        if (text.empty ()) {
                return;
        }

        sub::RawSubtitle base;
        auto raw = sub::SSAReader::parse_line (
                base,
                text,
                _ffmpeg_content->video->size().width,
                _ffmpeg_content->video->size().height,
                sub::Colour(1, 1, 1)
                );

        for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {
                only_text()->emit_plain_start (from, i);
        }
}