Add Piece::pass().
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "player.h"
24 #include "film.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
29 #include "job.h"
30 #include "image.h"
31 #include "raw_image_proxy.h"
32 #include "ratio.h"
33 #include "log.h"
34 #include "render_text.h"
35 #include "config.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
40 #include "playlist.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
43 #include "decoder.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
53 #include "shuffler.h"
54 #include "timer.h"
55 #include <dcp/reel.h>
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66
67 using std::copy;
68 using std::cout;
69 using std::dynamic_pointer_cast;
70 using std::list;
71 using std::make_pair;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Identifiers for the player-level properties reported through the Change signal.
   The values are arbitrary but must be distinct; presumably 700+ was chosen to
   avoid clashing with other property identifier ranges — confirm against the
   Film/content property ids before adding new ones. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
/** Construct a Player which plays the whole of a film's own playlist.
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
106
107
/** Construct a Player which plays an explicit playlist rather than the film's own.
 *  @param film Film whose settings (frame rate, container etc.) are used.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
117
118
/** Shared second-stage construction: connect to film/playlist change signals,
 *  build the initial set of pieces and seek to the start of playback.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Set up _audio_processor if the film has one configured */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
135
136
Player::~Player ()
{
	/* _shuffler is a raw owning pointer which is re-created each time
	   setup_pieces_unlocked() runs; free the final instance here. */
	delete _shuffler;
}
141
142
143 void
144 Player::setup_pieces ()
145 {
146         boost::mutex::scoped_lock lm (_mutex);
147         setup_pieces_unlocked ();
148 }
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video) && content->video->use();
155 }
156
157
158 bool
159 have_audio (shared_ptr<const Content> content)
160 {
161         return static_cast<bool>(content->audio);
162 }
163
164
/** Rebuild the list of Pieces (content + decoder pairs) from the playlist,
 *  re-wire all decoder signals and reset playback state.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use an existing decoder for this content, if one exists, so that
		   decode state is not thrown away on a rebuild. */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			auto decoder = j->decoder_for(i);
			if (decoder) {
				old_decoder = decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Connect text start/stop signals for every text decoder in this piece */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto period = DCPTimePeriod((*i)->position(), (*i)->end(_film));
			auto j = i;
			++j;
			for (; j != _pieces.end(); ++j) {
				if ((*j)->use_video()) {
					/* NOTE(review): this assignment overwrites any earlier overlap found,
					   so only the overlap with the last in-use-video piece survives —
					   confirm that is the intended behaviour. */
					(*i)->ignore_video = DCPTimePeriod((*j)->position(), (*j)->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Trackers for the regions of the timeline with no video / no audio,
	   which pass() fills with black / silence respectively. */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission state */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
282
283
/** React to a change in some content in the playlist.
 *  @param type Stage of the change (PENDING, DONE or CANCELLED).
 *  @param property Identifier of the property which is changing.
 *  @param frequent true if this is one of a rapid sequence of changes.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			/* Crop changes don't need new pieces; just refresh the metadata of
			   video frames already queued in the delay buffer. */
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			/* Change abandoned; just lift the matching suspension */
			--_suspended;
		}
	}

	/* Proxy the change on to our own observers (e.g. the butler) */
	Change (type, property, frequent);
}
313
314
/** Set the size of the "container" into which output video is placed.
 *  Emits PENDING then DONE, or CANCELLED if the size is unchanged.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* Unlock before signalling so observers may call back into the Player */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Pre-render a black frame at the new size, used to fill gaps in video */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
337
338
339 void
340 Player::playlist_change (ChangeType type)
341 {
342         if (type == ChangeType::DONE) {
343                 setup_pieces ();
344         }
345         Change (type, PlayerProperty::PLAYLIST, false);
346 }
347
348
/** React to a change in a Film property.
 *  @param type Stage of the change.
 *  @param p Property which is changing.
 */
void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == ChangeType::DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && _film->audio_processor ()) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Clone the processor so we have our own instance at the film's rate */
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Channel count changed: any merged-but-unemitted audio is now invalid */
			_audio_merger.clear ();
		}
	}
}
379
380
/** @param eyes Eyes that the frame is for.
 *  @return A black frame at the container size, suitable for filling gaps in video.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),      /* no fade */
		_video_container_size,   /* inter size */
		_video_container_size,   /* out size */
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),  /* no originating content */
		boost::optional<Frame>(),  /* no video frame */
		false                      /* not an error frame */
	);
}
399
400
401 vector<FontData>
402 Player::get_subtitle_fonts ()
403 {
404         boost::mutex::scoped_lock lm (_mutex);
405
406         vector<FontData> fonts;
407         for (auto i: _pieces) {
408                 /* XXX: things may go wrong if there are duplicate font IDs
409                    with different font files.
410                 */
411                 auto f = i->decoder->fonts ();
412                 copy (f.begin(), f.end(), back_inserter(fonts));
413         }
414
415         return fonts;
416 }
417
418
419 /** Set this player never to produce any video data */
420 void
421 Player::set_ignore_video ()
422 {
423         boost::mutex::scoped_lock lm (_mutex);
424         _ignore_video = true;
425         setup_pieces_unlocked ();
426 }
427
428
429 void
430 Player::set_ignore_audio ()
431 {
432         boost::mutex::scoped_lock lm (_mutex);
433         _ignore_audio = true;
434         setup_pieces_unlocked ();
435 }
436
437
438 void
439 Player::set_ignore_text ()
440 {
441         boost::mutex::scoped_lock lm (_mutex);
442         _ignore_text = true;
443         setup_pieces_unlocked ();
444 }
445
446
447 /** Set the player to always burn open texts into the image regardless of the content settings */
448 void
449 Player::set_always_burn_open_subtitles ()
450 {
451         boost::mutex::scoped_lock lm (_mutex);
452         _always_burn_open_subtitles = true;
453 }
454
455
456 /** Sets up the player to be faster, possibly at the expense of quality */
457 void
458 Player::set_fast ()
459 {
460         boost::mutex::scoped_lock lm (_mutex);
461         _fast = true;
462         setup_pieces_unlocked ();
463 }
464
465
466 void
467 Player::set_play_referenced ()
468 {
469         boost::mutex::scoped_lock lm (_mutex);
470         _play_referenced = true;
471         setup_pieces_unlocked ();
472 }
473
474
/** Trim a reel asset and, if any of it remains, add it to a list of referenced assets.
 *  Note that this mutates the asset's entry point and duration in place.
 *  @param a List to add to.
 *  @param r Asset to consider; must not be null.
 *  @param reel_trim_start Frames to trim from the start of the asset.
 *  @param reel_trim_end Frames to trim from the end of the asset.
 *  @param from Position of the asset in the output DCP.
 *  @param ffr Output DCP video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
487
488
/** @return Details of the reel assets in any DCP content which is marked to be
 *  referenced (rather than re-wrapped) in the output DCP, with their trims and
 *  positions resolved.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): failure to examine this DCP silently stops the whole
			   scan, returning whatever was collected so far — confirm intended. */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* Work out how much of the content's trim falls within this reel.
			   See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
559
560
/** Ask the decoder (or black/silence filler) which is furthest behind to emit
 *  some more data, then push out any audio which is now guaranteed complete.
 *  @return true if playback is finished and there is no more data to come.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state; a pending content change may still
		   complete or be cancelled, so this is not a terminal condition. */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = i->decoder_position ();
		if (t > i->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A gap-filler that is further behind than any content takes priority */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->pass();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit silence in chunks of at most one video frame's duration */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback finished: flush anything still held by the 3D shuffler
		   and the video delay queue. */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
716
717
/** @return Open subtitles for the frame at the given time, converted to images;
 *  empty if there are none to burn in at this time.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Consider every active open subtitle that overlaps this one-frame period
	   and should be burnt in. */
	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* Convert the proportional rectangle position to pixels */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	/* Combine all the images into one */
	return merge (captions);
}
763
764
765 void
766 Player::video (weak_ptr<Piece> wp, ContentVideo video)
767 {
768         auto piece = wp.lock ();
769         if (!piece) {
770                 return;
771         }
772
773         if (!piece->use_video()) {
774                 return;
775         }
776
777         auto frc = piece->frame_rate_change();
778         if (frc.skip && (video.frame % 2) == 1) {
779                 return;
780         }
781
782         /* Time of the first frame we will emit */
783         DCPTime const time = piece->content_video_to_dcp (video.frame);
784         LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
785
786         /* Discard if it's before the content's period or the last accurate seek.  We can't discard
787            if it's after the content's period here as in that case we still need to fill any gap between
788            `now' and the end of the content's period.
789         */
790         if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
791                 return;
792         }
793
794         if (piece->ignore_video && piece->ignore_video->contains(time)) {
795                 return;
796         }
797
798         /* Fill gaps that we discover now that we have some video which needs to be emitted.
799            This is where we need to fill to.
800         */
801         DCPTime fill_to = min (time, piece->end(_film));
802
803         if (_last_video_time) {
804                 DCPTime fill_from = max (*_last_video_time, piece->position());
805
806                 /* Fill if we have more than half a frame to do */
807                 if ((fill_to - fill_from) > one_video_frame() / 2) {
808                         auto last = _last_video.find (wp);
809                         if (_film->three_d()) {
810                                 auto fill_to_eyes = video.eyes;
811                                 if (fill_to_eyes == Eyes::BOTH) {
812                                         fill_to_eyes = Eyes::LEFT;
813                                 }
814                                 if (fill_to == piece->end(_film)) {
815                                         /* Don't fill after the end of the content */
816                                         fill_to_eyes = Eyes::LEFT;
817                                 }
818                                 auto j = fill_from;
819                                 auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
820                                 if (eyes == Eyes::BOTH) {
821                                         eyes = Eyes::LEFT;
822                                 }
823                                 while (j < fill_to || eyes != fill_to_eyes) {
824                                         if (last != _last_video.end()) {
825                                                 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
826                                                 auto copy = last->second->shallow_copy();
827                                                 copy->set_eyes (eyes);
828                                                 emit_video (copy, j);
829                                         } else {
830                                                 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
831                                                 emit_video (black_player_video_frame(eyes), j);
832                                         }
833                                         if (eyes == Eyes::RIGHT) {
834                                                 j += one_video_frame();
835                                         }
836                                         eyes = increment_eyes (eyes);
837                                 }
838                         } else {
839                                 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
840                                         if (last != _last_video.end()) {
841                                                 emit_video (last->second, j);
842                                         } else {
843                                                 emit_video (black_player_video_frame(Eyes::BOTH), j);
844                                         }
845                                 }
846                         }
847                 }
848         }
849
850         _last_video[wp] = piece->player_video (video, _film, _video_container_size);
851
852         DCPTime t = time;
853         for (int i = 0; i < frc.repeat; ++i) {
854                 if (t < piece->end(_film)) {
855                         emit_video (_last_video[wp], t);
856                 }
857                 t += one_video_frame ();
858         }
859 }
860
861
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle a block of audio arriving from a decoder: convert its time to DCP time,
	   trim it to the piece's period, apply gain, remap it to the DCP channel layout,
	   run any audio processor and push the result into _audio_merger.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away (e.g. pieces are being rebuilt); drop this audio */
		return;
	}

	/* Rate of this piece's audio after resampling for the DCP */
	int const rfr = piece->resampled_audio_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = piece->resampled_audio_to_dcp (content_audio.frame, _film);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		/* Block starts before the piece; cut off the part before piece->position() */
		auto cut = discard_audio (content_audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->end(_film)) {
		/* Block straddles the end of the piece; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (piece->audio_gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		auto gain = make_shared<AudioBuffers>(content_audio.audio);
		gain->apply_gain (piece->audio_gain());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record how far we have pushed for this stream so that gaps can be detected later */
	piece->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
}
926
927
928 void
929 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
930 {
931         auto piece = wp.lock ();
932         auto text = wc.lock ();
933         if (!piece || !text) {
934                 return;
935         }
936
937         /* Apply content's subtitle offsets */
938         subtitle.sub.rectangle.x += text->x_offset ();
939         subtitle.sub.rectangle.y += text->y_offset ();
940
941         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
942         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
943         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
944
945         /* Apply content's subtitle scale */
946         subtitle.sub.rectangle.width *= text->x_scale ();
947         subtitle.sub.rectangle.height *= text->y_scale ();
948
949         PlayerText ps;
950         auto image = subtitle.sub.image;
951
952         /* We will scale the subtitle up to fit _video_container_size */
953         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
954         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
955         if (width == 0 || height == 0) {
956                 return;
957         }
958
959         dcp::Size scaled_size (width, height);
960         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
961         DCPTime from (piece->content_time_to_dcp(subtitle.from()));
962
963         _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
964 }
965
966
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	/* Handle the start of a plain (string) subtitle: apply the content's position
	   and scale settings to each string and add the result to the active texts.
	*/
	auto piece = wp.lock ();
	auto text = wc.lock ();
	if (!piece || !text) {
		/* Content has gone away; nothing to do */
		return;
	}

	PlayerText ps;
	DCPTime const from (piece->content_time_to_dcp(subtitle.from()));

	if (from > piece->end(_film)) {
		/* Subtitle starts after the piece has finished; discard it */
		return;
	}

	for (auto s: subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling; the expression
		   below is equivalent to size *= max(xs, ys), e.g. if xs = ys = 0.5 the
		   size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Give the subtitle its absolute start time (with millisecond resolution) */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}
1010
1011
1012 void
1013 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1014 {
1015         auto text = wc.lock ();
1016         if (!text) {
1017                 return;
1018         }
1019
1020         if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
1021                 return;
1022         }
1023
1024         shared_ptr<Piece> piece = wp.lock ();
1025         if (!piece) {
1026                 return;
1027         }
1028
1029         auto const dcp_to = piece->content_time_to_dcp(to);
1030
1031         if (dcp_to > piece->end(_film)) {
1032                 return;
1033         }
1034
1035         auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
1036
1037         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1038         if (text->use() && !always && !text->burn()) {
1039                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1040         }
1041 }
1042
1043
1044 void
1045 Player::seek (DCPTime time, bool accurate)
1046 {
1047         boost::mutex::scoped_lock lm (_mutex);
1048         LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1049
1050         if (_suspended) {
1051                 /* We can't seek in this state */
1052                 return;
1053         }
1054
1055         if (_shuffler) {
1056                 _shuffler->clear ();
1057         }
1058
1059         _delay.clear ();
1060
1061         if (_audio_processor) {
1062                 _audio_processor->flush ();
1063         }
1064
1065         _audio_merger.clear ();
1066         for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1067                 _active_texts[i].clear ();
1068         }
1069
1070         for (auto i: _pieces) {
1071                 if (time < i->position()) {
1072                         /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
1073                            we must seek this (following) content accurately, otherwise when we come to the end of the current
1074                            content we may not start right at the beginning of the next, causing a gap (if the next content has
1075                            been trimmed to a point between keyframes, or something).
1076                         */
1077                         i->decoder->seek (i->dcp_to_content_time(i->position(), _film), true);
1078                         i->done = false;
1079                 } else if (i->position() <= time && time < i->end(_film)) {
1080                         /* During; seek to position */
1081                         i->decoder->seek (i->dcp_to_content_time(time, _film), accurate);
1082                         i->done = false;
1083                 } else {
1084                         /* After; this piece is done */
1085                         i->done = true;
1086                 }
1087         }
1088
1089         if (accurate) {
1090                 _last_video_time = time;
1091                 _last_video_eyes = Eyes::LEFT;
1092                 _last_audio_time = time;
1093         } else {
1094                 _last_video_time = optional<DCPTime>();
1095                 _last_video_eyes = optional<Eyes>();
1096                 _last_audio_time = optional<DCPTime>();
1097         }
1098
1099         _black.set_position (time);
1100         _silent.set_position (time);
1101
1102         _last_video.clear ();
1103 }
1104
1105
1106 void
1107 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1108 {
1109         if (!_film->three_d()) {
1110                 if (pv->eyes() == Eyes::LEFT) {
1111                         /* Use left-eye images for both eyes... */
1112                         pv->set_eyes (Eyes::BOTH);
1113                 } else if (pv->eyes() == Eyes::RIGHT) {
1114                         /* ...and discard the right */
1115                         return;
1116                 }
1117         }
1118
1119         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1120            player before the video that requires them.
1121         */
1122         _delay.push_back (make_pair (pv, time));
1123
1124         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1125                 _last_video_time = time + one_video_frame();
1126         }
1127         _last_video_eyes = increment_eyes (pv->eyes());
1128
1129         if (_delay.size() < 3) {
1130                 return;
1131         }
1132
1133         auto to_do = _delay.front();
1134         _delay.pop_front();
1135         do_emit_video (to_do.first, to_do.second);
1136 }
1137
1138
1139 void
1140 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1141 {
1142         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1143                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1144                         _active_texts[i].clear_before (time);
1145                 }
1146         }
1147
1148         auto subtitles = open_subtitles_for_frame (time);
1149         if (subtitles) {
1150                 pv->set_text (subtitles.get ());
1151         }
1152
1153         Video (pv, time);
1154 }
1155
1156
1157 void
1158 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1159 {
1160         /* Log if the assert below is about to fail */
1161         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1162                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1163         }
1164
1165         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1166         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1167         Audio (data, time, _film->audio_frame_rate());
1168         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1169 }
1170
1171
1172 void
1173 Player::fill_audio (DCPTimePeriod period)
1174 {
1175         if (period.from == period.to) {
1176                 return;
1177         }
1178
1179         DCPOMATIC_ASSERT (period.from < period.to);
1180
1181         DCPTime t = period.from;
1182         while (t < period.to) {
1183                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1184                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1185                 if (samples) {
1186                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1187                         silence->make_silent ();
1188                         emit_audio (silence, t);
1189                 }
1190                 t += block;
1191         }
1192 }
1193
1194
1195 DCPTime
1196 Player::one_video_frame () const
1197 {
1198         return DCPTime::from_frames (1, _film->video_frame_rate ());
1199 }
1200
1201
1202 pair<shared_ptr<AudioBuffers>, DCPTime>
1203 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1204 {
1205         auto const discard_time = discard_to - time;
1206         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1207         auto remaining_frames = audio->frames() - discard_frames;
1208         if (remaining_frames <= 0) {
1209                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1210         }
1211         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1212         return make_pair(cut, time + discard_time);
1213 }
1214
1215
1216 void
1217 Player::set_dcp_decode_reduction (optional<int> reduction)
1218 {
1219         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1220
1221         {
1222                 boost::mutex::scoped_lock lm (_mutex);
1223
1224                 if (reduction == _dcp_decode_reduction) {
1225                         lm.unlock ();
1226                         Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1227                         return;
1228                 }
1229
1230                 _dcp_decode_reduction = reduction;
1231                 setup_pieces_unlocked ();
1232         }
1233
1234         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1235 }
1236
1237
1238 optional<DCPTime>
1239 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1240 {
1241         boost::mutex::scoped_lock lm (_mutex);
1242
1243         for (auto i: _pieces) {
1244                 if (i->content == content) {
1245                         return i->content_time_to_dcp(t);
1246                 }
1247         }
1248
1249         /* We couldn't find this content; perhaps things are being changed over */
1250         return {};
1251 }
1252
1253
1254 shared_ptr<const Playlist>
1255 Player::playlist () const
1256 {
1257         return _playlist ? _playlist : _film->playlist();
1258 }
1259
1260
1261 void
1262 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1263 {
1264         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1265 }
1266