Add Piece::reference_dcp_audio().
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "player.h"
24 #include "film.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
29 #include "job.h"
30 #include "image.h"
31 #include "raw_image_proxy.h"
32 #include "ratio.h"
33 #include "log.h"
34 #include "render_text.h"
35 #include "config.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
40 #include "playlist.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
43 #include "decoder.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
53 #include "shuffler.h"
54 #include "timer.h"
55 #include <dcp/reel.h>
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66
67 using std::copy;
68 using std::cout;
69 using std::dynamic_pointer_cast;
70 using std::list;
71 using std::make_pair;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Property identifiers passed out with the Player's Change signal so that
   listeners can tell which aspect of the player's output has changed.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
/** Construct a Player for a film, using the film's own content
 *  (no explicit playlist is supplied; see setup_pieces_unlocked()).
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
106
107
/** Construct a Player for a film with an explicit playlist, which is used
 *  in preference to the film's own content.
 *  @param film Film to play.
 *  @param playlist_ Playlist giving the content to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
117
118
/** Shared set-up for both constructors: connect to film/playlist change
 *  signals, size our output, build the pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Set up _audio_processor now if the film already requires one */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Seek to time zero (second argument presumably means `accurate' — confirm against seek()) */
	seek (DCPTime (), true);
}
135
136
Player::~Player ()
{
	/* _shuffler is a raw owning pointer, allocated in setup_pieces_unlocked() */
	delete _shuffler;
}
141
142
/** Thread-safe wrapper: take _mutex and rebuild our pieces from the playlist */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video) && content->video->use();
155 }
156
157
158 bool
159 have_audio (shared_ptr<const Content> content)
160 {
161         return static_cast<bool>(content->audio);
162 }
163
164
/** Rebuild _pieces (one per usable piece of content) from the current playlist,
 *  re-using existing decoders where possible.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Look for a decoder for this content among the old pieces */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			auto decoder = j->decoder_for(i);
			if (decoder) {
				old_decoder = decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Forward text events from every text decoder to our own handlers */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto period = DCPTimePeriod((*i)->position(), (*i)->end(_film));
			auto j = i;
			++j;
			for (; j != _pieces.end(); ++j) {
				if ((*j)->use_video()) {
					/* NOTE(review): this assignment overwrites any previous value, so only the
					   overlap with the *last* in-use-video piece is kept — confirm intended. */
					(*i)->ignore_video = DCPTimePeriod((*j)->position(), (*j)->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Trackers for the gaps we must fill with black video / silent audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Forget where we were, so the next pass()/seek() starts cleanly */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
282
283
/** Respond to a change in a piece of our playlist's content.
 *  @param type Stage of the change (PENDING, DONE or CANCELLED).
 *  @param property Property of the content which has changed.
 *  @param frequent true if this change is occurring rapidly (e.g. during a drag).
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			/* Crop changes only require the metadata of queued (delayed) frames to be refreshed */
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Pass the change on to our own listeners (e.g. the butler) */
	Change (type, property, frequent);
}
313
314
/** Set the size of the video frames that this player will emit, re-making the
 *  black filler frame to match.  Emits Change PENDING then DONE, or CANCELLED
 *  if the size is unchanged.
 *  @param s New video container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; release the lock before emitting the signal */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Pre-make a black image at the new size for gap filling */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
337
338
339 void
340 Player::playlist_change (ChangeType type)
341 {
342         if (type == ChangeType::DONE) {
343                 setup_pieces ();
344         }
345         Change (type, PlayerProperty::PLAYLIST, false);
346 }
347
348
/** Respond to a change in a property of our film.
 *  @param type Stage of the change.
 *  @param p Film property which has changed.
 */
void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == ChangeType::DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && _film->audio_processor ()) {
			/* Clone the film's audio processor for our own use at the film's sample rate */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			/* Channel count changed; throw away any audio we have queued up */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_merger.clear ();
		}
	}
}
379
380
/** @param eyes Eye(s) that the frame is for (LEFT, RIGHT or BOTH).
 *  @return A completely black frame filling the whole video container, used
 *  to fill gaps where there is no video content.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
399
400
401 vector<FontData>
402 Player::get_subtitle_fonts ()
403 {
404         boost::mutex::scoped_lock lm (_mutex);
405
406         vector<FontData> fonts;
407         for (auto i: _pieces) {
408                 /* XXX: things may go wrong if there are duplicate font IDs
409                    with different font files.
410                 */
411                 auto f = i->decoder->fonts ();
412                 copy (f.begin(), f.end(), back_inserter(fonts));
413         }
414
415         return fonts;
416 }
417
418
419 /** Set this player never to produce any video data */
420 void
421 Player::set_ignore_video ()
422 {
423         boost::mutex::scoped_lock lm (_mutex);
424         _ignore_video = true;
425         setup_pieces_unlocked ();
426 }
427
428
429 void
430 Player::set_ignore_audio ()
431 {
432         boost::mutex::scoped_lock lm (_mutex);
433         _ignore_audio = true;
434         setup_pieces_unlocked ();
435 }
436
437
438 void
439 Player::set_ignore_text ()
440 {
441         boost::mutex::scoped_lock lm (_mutex);
442         _ignore_text = true;
443         setup_pieces_unlocked ();
444 }
445
446
447 /** Set the player to always burn open texts into the image regardless of the content settings */
448 void
449 Player::set_always_burn_open_subtitles ()
450 {
451         boost::mutex::scoped_lock lm (_mutex);
452         _always_burn_open_subtitles = true;
453 }
454
455
456 /** Sets up the player to be faster, possibly at the expense of quality */
457 void
458 Player::set_fast ()
459 {
460         boost::mutex::scoped_lock lm (_mutex);
461         _fast = true;
462         setup_pieces_unlocked ();
463 }
464
465
466 void
467 Player::set_play_referenced ()
468 {
469         boost::mutex::scoped_lock lm (_mutex);
470         _play_referenced = true;
471         setup_pieces_unlocked ();
472 }
473
474
/** Apply a trim to a reel asset and, if any of it remains, add it to a list.
 *  @param a List to add to.
 *  @param r Asset to consider; its entry point and duration are modified in place.
 *  @param reel_trim_start Frames to trim from the start of the asset.
 *  @param reel_trim_end Frames to trim from the end of the asset.
 *  @param from Time at which the asset starts in the output DCP.
 *  @param ffr Video frame rate of the output DCP.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
487
488
/** @return Details of the reel assets in any DCP content which is marked to be
 *  referenced (rather than re-encoded) in the DCP we are making, with each
 *  asset trimmed according to the content's trim settings.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): failure for one piece of content abandons the whole
			   scan and returns what was gathered so far — confirm this should
			   not be `continue' instead. */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
559
560
/** Run one step of playback: make whichever source (content, black filler or
 *  silence filler) is farthest behind emit some data, then push out any audio
 *  that is now complete.
 *  @return true if playback is finished (or if we emitted the single black
 *  frame for a zero-length playback), false otherwise.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = i->decoder_position ();
		if (t > i->end(_film)) {
			/* This piece's decoder has passed the end of the piece */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black/silent filler wins if its position is behind the earliest content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->pass();
		if (!_play_referenced && earliest_content->reference_dcp_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = earliest_content->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence in one go */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished; flush anything still buffered in the shuffler and the delay queue */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
715
716
/** @param time Time of the video frame in question.
 *  @return Open subtitles that should be burnt onto the frame at the given
 *  time, merged into a single positioned image, or an empty optional if there
 *  are none.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			/* NOTE(review): scaled_size appears to be unused below — confirm whether it can go */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	/* Combine all the caption images into one */
	return merge (captions);
}
762
763
764 void
765 Player::video (weak_ptr<Piece> wp, ContentVideo video)
766 {
767         auto piece = wp.lock ();
768         if (!piece) {
769                 return;
770         }
771
772         if (!piece->use_video()) {
773                 return;
774         }
775
776         auto frc = piece->frame_rate_change();
777         if (frc.skip && (video.frame % 2) == 1) {
778                 return;
779         }
780
781         /* Time of the first frame we will emit */
782         DCPTime const time = piece->content_video_to_dcp (video.frame);
783         LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
784
785         /* Discard if it's before the content's period or the last accurate seek.  We can't discard
786            if it's after the content's period here as in that case we still need to fill any gap between
787            `now' and the end of the content's period.
788         */
789         if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
790                 return;
791         }
792
793         if (piece->ignore_video && piece->ignore_video->contains(time)) {
794                 return;
795         }
796
797         /* Fill gaps that we discover now that we have some video which needs to be emitted.
798            This is where we need to fill to.
799         */
800         DCPTime fill_to = min (time, piece->end(_film));
801
802         if (_last_video_time) {
803                 DCPTime fill_from = max (*_last_video_time, piece->position());
804
805                 /* Fill if we have more than half a frame to do */
806                 if ((fill_to - fill_from) > one_video_frame() / 2) {
807                         auto last = _last_video.find (wp);
808                         if (_film->three_d()) {
809                                 auto fill_to_eyes = video.eyes;
810                                 if (fill_to_eyes == Eyes::BOTH) {
811                                         fill_to_eyes = Eyes::LEFT;
812                                 }
813                                 if (fill_to == piece->end(_film)) {
814                                         /* Don't fill after the end of the content */
815                                         fill_to_eyes = Eyes::LEFT;
816                                 }
817                                 auto j = fill_from;
818                                 auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
819                                 if (eyes == Eyes::BOTH) {
820                                         eyes = Eyes::LEFT;
821                                 }
822                                 while (j < fill_to || eyes != fill_to_eyes) {
823                                         if (last != _last_video.end()) {
824                                                 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
825                                                 auto copy = last->second->shallow_copy();
826                                                 copy->set_eyes (eyes);
827                                                 emit_video (copy, j);
828                                         } else {
829                                                 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
830                                                 emit_video (black_player_video_frame(eyes), j);
831                                         }
832                                         if (eyes == Eyes::RIGHT) {
833                                                 j += one_video_frame();
834                                         }
835                                         eyes = increment_eyes (eyes);
836                                 }
837                         } else {
838                                 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
839                                         if (last != _last_video.end()) {
840                                                 emit_video (last->second, j);
841                                         } else {
842                                                 emit_video (black_player_video_frame(Eyes::BOTH), j);
843                                         }
844                                 }
845                         }
846                 }
847         }
848
849         _last_video[wp] = piece->player_video (video, _film, _video_container_size);
850
851         DCPTime t = time;
852         for (int i = 0; i < frc.repeat; ++i) {
853                 if (t < piece->end(_film)) {
854                         emit_video (_last_video[wp], t);
855                 }
856                 t += one_video_frame ();
857         }
858 }
859
860
/** Handle a block of audio arriving from a decoder: trim it to the content's
 *  period, apply the content's gain, remap it to the film's channel layout,
 *  run any audio processor and push the result into _audio_merger.
 *  @param wp Piece that the audio came from (may have expired).
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio Audio data and its frame position within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		/* The content was removed while this event was in flight */
		return;
	}

	/* Frame rate of this audio after resampling to suit the film */
	int const rfr = piece->resampled_audio_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = piece->resampled_audio_to_dcp (content_audio.frame, _film);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		/* Trim the front; discard_audio returns the surviving buffer and its new start time */
		auto cut = discard_audio (content_audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->end(_film)) {
		/* Trim the back so that the block ends exactly at the content's end */
		Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (piece->audio_gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffer */
		auto gain = make_shared<AudioBuffers>(content_audio.audio);
		gain->apply_gain (piece->audio_gain());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record how far this stream has got, so we know when it is safe to pull from the merger */
	piece->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
}
925
926
927 void
928 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
929 {
930         auto piece = wp.lock ();
931         auto text = wc.lock ();
932         if (!piece || !text) {
933                 return;
934         }
935
936         /* Apply content's subtitle offsets */
937         subtitle.sub.rectangle.x += text->x_offset ();
938         subtitle.sub.rectangle.y += text->y_offset ();
939
940         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
941         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
942         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
943
944         /* Apply content's subtitle scale */
945         subtitle.sub.rectangle.width *= text->x_scale ();
946         subtitle.sub.rectangle.height *= text->y_scale ();
947
948         PlayerText ps;
949         auto image = subtitle.sub.image;
950
951         /* We will scale the subtitle up to fit _video_container_size */
952         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
953         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
954         if (width == 0 || height == 0) {
955                 return;
956         }
957
958         dcp::Size scaled_size (width, height);
959         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
960         DCPTime from (piece->content_time_to_dcp(subtitle.from()));
961
962         _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
963 }
964
965
/** Handle a plain-text (string-based) subtitle becoming active: apply the
 *  content's position and scale settings to each string and add the result
 *  to the list of active texts.
 *  @param wp Piece that the subtitle came from (may have expired).
 *  @param wc TextContent that the subtitle belongs to (may have expired).
 *  @param subtitle The subtitle strings and their time within the content.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	auto piece = wp.lock ();
	auto text = wc.lock ();
	if (!piece || !text) {
		/* The content was removed while this event was in flight */
		return;
	}

	PlayerText ps;
	DCPTime const from (piece->content_time_to_dcp(subtitle.from()));

	/* Ignore subtitles which start after the end of the content's period */
	if (from > piece->end(_film)) {
		return;
	}

	/* `s' is a copy, so the adjustments below do not modify the decoder's data */
	for (auto s: subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Timecode within the DCP, at millisecond resolution */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}
1009
1010
1011 void
1012 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1013 {
1014         auto text = wc.lock ();
1015         if (!text) {
1016                 return;
1017         }
1018
1019         if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
1020                 return;
1021         }
1022
1023         shared_ptr<Piece> piece = wp.lock ();
1024         if (!piece) {
1025                 return;
1026         }
1027
1028         auto const dcp_to = piece->content_time_to_dcp(to);
1029
1030         if (dcp_to > piece->end(_film)) {
1031                 return;
1032         }
1033
1034         auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
1035
1036         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1037         if (text->use() && !always && !text->burn()) {
1038                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1039         }
1040 }
1041
1042
1043 void
1044 Player::seek (DCPTime time, bool accurate)
1045 {
1046         boost::mutex::scoped_lock lm (_mutex);
1047         LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1048
1049         if (_suspended) {
1050                 /* We can't seek in this state */
1051                 return;
1052         }
1053
1054         if (_shuffler) {
1055                 _shuffler->clear ();
1056         }
1057
1058         _delay.clear ();
1059
1060         if (_audio_processor) {
1061                 _audio_processor->flush ();
1062         }
1063
1064         _audio_merger.clear ();
1065         for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1066                 _active_texts[i].clear ();
1067         }
1068
1069         for (auto i: _pieces) {
1070                 if (time < i->position()) {
1071                         /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
1072                            we must seek this (following) content accurately, otherwise when we come to the end of the current
1073                            content we may not start right at the beginning of the next, causing a gap (if the next content has
1074                            been trimmed to a point between keyframes, or something).
1075                         */
1076                         i->decoder->seek (i->dcp_to_content_time(i->position(), _film), true);
1077                         i->done = false;
1078                 } else if (i->position() <= time && time < i->end(_film)) {
1079                         /* During; seek to position */
1080                         i->decoder->seek (i->dcp_to_content_time(time, _film), accurate);
1081                         i->done = false;
1082                 } else {
1083                         /* After; this piece is done */
1084                         i->done = true;
1085                 }
1086         }
1087
1088         if (accurate) {
1089                 _last_video_time = time;
1090                 _last_video_eyes = Eyes::LEFT;
1091                 _last_audio_time = time;
1092         } else {
1093                 _last_video_time = optional<DCPTime>();
1094                 _last_video_eyes = optional<Eyes>();
1095                 _last_audio_time = optional<DCPTime>();
1096         }
1097
1098         _black.set_position (time);
1099         _silent.set_position (time);
1100
1101         _last_video.clear ();
1102 }
1103
1104
1105 void
1106 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1107 {
1108         if (!_film->three_d()) {
1109                 if (pv->eyes() == Eyes::LEFT) {
1110                         /* Use left-eye images for both eyes... */
1111                         pv->set_eyes (Eyes::BOTH);
1112                 } else if (pv->eyes() == Eyes::RIGHT) {
1113                         /* ...and discard the right */
1114                         return;
1115                 }
1116         }
1117
1118         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1119            player before the video that requires them.
1120         */
1121         _delay.push_back (make_pair (pv, time));
1122
1123         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1124                 _last_video_time = time + one_video_frame();
1125         }
1126         _last_video_eyes = increment_eyes (pv->eyes());
1127
1128         if (_delay.size() < 3) {
1129                 return;
1130         }
1131
1132         auto to_do = _delay.front();
1133         _delay.pop_front();
1134         do_emit_video (to_do.first, to_do.second);
1135 }
1136
1137
1138 void
1139 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1140 {
1141         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1142                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1143                         _active_texts[i].clear_before (time);
1144                 }
1145         }
1146
1147         auto subtitles = open_subtitles_for_frame (time);
1148         if (subtitles) {
1149                 pv->set_text (subtitles.get ());
1150         }
1151
1152         Video (pv, time);
1153 }
1154
1155
1156 void
1157 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1158 {
1159         /* Log if the assert below is about to fail */
1160         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1161                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1162         }
1163
1164         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1165         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1166         Audio (data, time, _film->audio_frame_rate());
1167         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1168 }
1169
1170
1171 void
1172 Player::fill_audio (DCPTimePeriod period)
1173 {
1174         if (period.from == period.to) {
1175                 return;
1176         }
1177
1178         DCPOMATIC_ASSERT (period.from < period.to);
1179
1180         DCPTime t = period.from;
1181         while (t < period.to) {
1182                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1183                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1184                 if (samples) {
1185                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1186                         silence->make_silent ();
1187                         emit_audio (silence, t);
1188                 }
1189                 t += block;
1190         }
1191 }
1192
1193
1194 DCPTime
1195 Player::one_video_frame () const
1196 {
1197         return DCPTime::from_frames (1, _film->video_frame_rate ());
1198 }
1199
1200
1201 pair<shared_ptr<AudioBuffers>, DCPTime>
1202 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1203 {
1204         auto const discard_time = discard_to - time;
1205         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1206         auto remaining_frames = audio->frames() - discard_frames;
1207         if (remaining_frames <= 0) {
1208                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1209         }
1210         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1211         return make_pair(cut, time + discard_time);
1212 }
1213
1214
1215 void
1216 Player::set_dcp_decode_reduction (optional<int> reduction)
1217 {
1218         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1219
1220         {
1221                 boost::mutex::scoped_lock lm (_mutex);
1222
1223                 if (reduction == _dcp_decode_reduction) {
1224                         lm.unlock ();
1225                         Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1226                         return;
1227                 }
1228
1229                 _dcp_decode_reduction = reduction;
1230                 setup_pieces_unlocked ();
1231         }
1232
1233         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1234 }
1235
1236
1237 optional<DCPTime>
1238 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1239 {
1240         boost::mutex::scoped_lock lm (_mutex);
1241
1242         for (auto i: _pieces) {
1243                 if (i->content == content) {
1244                         return i->content_time_to_dcp(t);
1245                 }
1246         }
1247
1248         /* We couldn't find this content; perhaps things are being changed over */
1249         return {};
1250 }
1251
1252
1253 shared_ptr<const Playlist>
1254 Player::playlist () const
1255 {
1256         return _playlist ? _playlist : _film->playlist();
1257 }
1258
1259
1260 void
1261 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1262 {
1263         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1264 }
1265