f14f4d0679bc2c7e818f85683cf382c7d20e3335
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "dcp_content.h"
30 #include "dcp_decoder.h"
31 #include "dcpomatic_log.h"
32 #include "decoder.h"
33 #include "decoder_factory.h"
34 #include "ffmpeg_content.h"
35 #include "film.h"
36 #include "frame_rate_change.h"
37 #include "image.h"
38 #include "image_decoder.h"
39 #include "job.h"
40 #include "log.h"
41 #include "piece_video.h"
42 #include "player.h"
43 #include "player_video.h"
44 #include "playlist.h"
45 #include "ratio.h"
46 #include "raw_image_proxy.h"
47 #include "referenced_reel_asset.h"
48 #include "render_text.h"
49 #include "shuffler.h"
50 #include "text_content.h"
51 #include "text_decoder.h"
52 #include "timer.h"
53 #include "video_decoder.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <stdint.h>
60 #include <algorithm>
61 #include <iostream>
62
63 #include "i18n.h"
64
65
66 using std::copy;
67 using std::cout;
68 using std::dynamic_pointer_cast;
69 using std::list;
70 using std::make_pair;
71 using std::make_shared;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::unique_ptr;
82 using boost::optional;
83 #if BOOST_VERSION >= 106100
84 using namespace boost::placeholders;
85 #endif
86 using namespace dcpomatic;
87
88
/* Property codes emitted via the Change signal so that observers can tell
   what aspect of the player's output has (or is about to be) altered.
   Numbered from 700 to keep them distinct from other property namespaces. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
95
96
/** Construct a Player for the whole of a film's own playlist.
 *  @param film Film whose content is to be played.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
105
106
/** Construct a Player for an explicit playlist (which may differ from the film's own).
 *  @param film Film supplying global parameters (frame rate, container etc.).
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
116
117
/** Shared constructor body: wire up change signals, size the output,
 *  build the initial pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Force creation of the audio processor, if the film is using one */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Accurate seek to the very start so playback begins in a known state */
	seek (DCPTime (), true);
}
134
135
/** Thread-safe wrapper: take the state mutex and rebuild the pieces. */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
142
143
144 bool
145 have_video (shared_ptr<const Content> content)
146 {
147         return static_cast<bool>(content->video) && content->video->use();
148 }
149
150
151 bool
152 have_audio (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->audio);
155 }
156
157
/** Rebuild our list of Pieces from the playlist.  Caller must hold _mutex.
 *  Decoders are re-used from the old pieces where possible (so that, for
 *  example, a seek position is not lost when content is re-configured).
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around while we build the new list so that we can
	   recycle their decoders below. */
	auto old_pieces = _pieces;
	_pieces.clear ();

	_shuffler.reset (new Shuffler());
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			/* Content whose files are missing is silently skipped */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use an existing decoder for this content, if one of the old
		   pieces had one. */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			auto decoder = j->decoder_for(i);
			if (decoder) {
				old_decoder = decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(_film, i, decoder, frc, _fast);
		_pieces.push_back (piece);

		if (i->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				piece->Video.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				piece->Video.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (i->audio) {
			piece->Audio.connect (bind(&Player::audio, this, weak_ptr<Piece>(piece), _1));
		}

		/* Wire each of the decoder's text streams through to our subtitle
		   handlers; the weak_ptrs avoid keeping pieces alive from the
		   signal connections. */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Where two pieces of in-use 2D video overlap in time, ignore the video
	   of the earlier one during the overlap (later content "wins"). */
	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
			/* Look for content later in the content list with in-use video that overlaps this */
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->use_video()) {
					(*i)->set_ignore_video ((*j)->period().overlap((*i)->period()));
				}
			}
		}
	}

	/* Trackers for the gaps where we must emit black video / silent audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset our record of what we last emitted */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
271
272
273 optional<DCPTime>
274 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
275 {
276         boost::mutex::scoped_lock lm (_mutex);
277
278         for (auto i: _pieces) {
279                 auto dcp = i->content_time_to_dcp(content, t);
280                 if (dcp) {
281                         return *dcp;
282                 }
283         }
284
285         /* We couldn't find this content; perhaps things are being changed over */
286         return {};
287 }
288
289
/** Handle a change to some piece of content in the playlist.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param property Property that changed.
 *  @param frequent true if this change is one of a rapid series (e.g. from a slider drag).
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		/* Crop changes don't require a rebuild of the pieces; just update
		   the metadata of any video frames we are holding in the delay queue. */
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			/* Balance the increment made for the corresponding PENDING */
			--_suspended;
		}
	}

	/* Proxy the change through to our own observers */
	Change (type, property, frequent);
}
319
320
/** Set the size of the "container" into which video will be scaled,
 *  re-creating the black frame to match.  Emits the usual
 *  PENDING / DONE (or CANCELLED, if the size is unchanged) sequence.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; unlock before signalling to avoid calling out with the mutex held */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* The black filler frame must match the new container size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
343
344
345 void
346 Player::playlist_change (ChangeType type)
347 {
348         if (type == ChangeType::DONE) {
349                 setup_pieces ();
350         }
351         Change (type, PlayerProperty::PLAYLIST, false);
352 }
353
354
355 void
356 Player::film_change (ChangeType type, Film::Property p)
357 {
358         /* Here we should notice Film properties that affect our output, and
359            alert listeners that our output now would be different to how it was
360            last time we were run.
361         */
362
363         if (p == Film::Property::CONTAINER) {
364                 Change (type, PlayerProperty::FILM_CONTAINER, false);
365         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
366                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
367                    so we need new pieces here.
368                 */
369                 if (type == ChangeType::DONE) {
370                         setup_pieces ();
371                 }
372                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
373         } else if (p == Film::Property::AUDIO_PROCESSOR) {
374                 if (type == ChangeType::DONE && _film->audio_processor ()) {
375                         boost::mutex::scoped_lock lm (_mutex);
376                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
377                 }
378         } else if (p == Film::Property::AUDIO_CHANNELS) {
379                 if (type == ChangeType::DONE) {
380                         boost::mutex::scoped_lock lm (_mutex);
381                         _audio_merger.clear ();
382                 }
383         }
384 }
385
386
/** @return a PlayerVideo wrapping our pre-made black image, used to fill
 *  gaps in the video timeline.
 *  @param eyes Which eye(s) this black frame is for (BOTH in 2D).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
405
406
407 vector<FontData>
408 Player::get_subtitle_fonts ()
409 {
410         boost::mutex::scoped_lock lm (_mutex);
411
412         vector<FontData> fonts;
413         for (auto i: _pieces) {
414                 /* XXX: things may go wrong if there are duplicate font IDs
415                    with different font files.
416                 */
417                 auto f = i->fonts ();
418                 copy (f.begin(), f.end(), back_inserter(fonts));
419         }
420
421         return fonts;
422 }
423
424
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* The decoders must be told to ignore video, so rebuild the pieces */
	setup_pieces_unlocked ();
}
433
434
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* The decoders must be told to ignore audio, so rebuild the pieces */
	setup_pieces_unlocked ();
}
442
443
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* The decoders must be told to ignore text, so rebuild the pieces */
	setup_pieces_unlocked ();
}
451
452
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* No piece rebuild needed: this only affects how frames are composited later */
	_always_burn_open_subtitles = true;
}
460
461
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Pieces are constructed with the fast flag, so rebuild them */
	setup_pieces_unlocked ();
}
470
471
/** Set the player to decode and play content from referenced DCPs
 *  (rather than skipping it as it would for a VF-style export).
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* DCP decoders need their decode_referenced flag set, so rebuild */
	setup_pieces_unlocked ();
}
479
480
481 static void
482 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
483 {
484         DCPOMATIC_ASSERT (r);
485         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
486         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
487         if (r->actual_duration() > 0) {
488                 a.push_back (
489                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
490                         );
491         }
492 }
493
494
/** @return a list of the reel assets from any DCP content which is set to be
 *  referenced (rather than re-encoded), each trimmed to account for the
 *  content's trim settings.  See doc/design/trim_reels.svg for the trim maths.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		unique_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* If the DCP can't be read, give up and return what we have so far */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			/* Step the offsets forward to the next reel */
			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
565
566
/** Run one step of playback: ask the most-behind decoder (or black/silence
 *  filler) to emit some data, then push out any audio which is known complete.
 *  @return true if playback is finished (or if we emitted the single black
 *  frame for a zero-length playlist).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		auto time = i->decoder_before(earliest_time);
		if (time) {
			earliest_time = *time;
			earliest_content = i;
		}
	}

	bool done = false;

	/* What kind of thing should emit data on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Gaps (black / silence) take precedence over content if they are earlier */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->pass();
		if (!_play_referenced && earliest_content->reference_dcp_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = earliest_content->end ();
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		/* Nothing left to emit; playback is complete */
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush anything still held back in the shuffler and the delay queue */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
709
710
/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Consider every open subtitle active during this one-frame period which
	   should be burnt in (or all of them, if _always_burn_open_subtitles). */
	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* The rectangle is expressed as proportions of the container, so
			   convert to pixel coordinates */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		/* No subtitles to burn for this frame */
		return {};
	}

	/* Composite all the caption images into one */
	return merge (captions);
}
756
757
758 void
759 Player::video (weak_ptr<Piece> wp, PieceVideo video)
760 {
761         auto piece = wp.lock ();
762         if (!piece) {
763                 return;
764         }
765
766         if (!piece->use_video()) {
767                 return;
768         }
769
770         auto frc = piece->frame_rate_change();
771         if (frc.skip && (video.frame % 2) == 1) {
772                 return;
773         }
774
775         /* Time of the first frame we will emit */
776         DCPTime const time = piece->content_video_to_dcp (video.frame);
777         LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
778
779         /* Discard if it's before the content's period or the last accurate seek.  We can't discard
780            if it's after the content's period here as in that case we still need to fill any gap between
781            `now' and the end of the content's period.
782         */
783         if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
784                 return;
785         }
786
787         if (piece->ignore_video_at(time)) {
788                 return;
789         }
790
791         /* Fill gaps that we discover now that we have some video which needs to be emitted.
792            This is where we need to fill to.
793         */
794         DCPTime fill_to = min (time, piece->end());
795
796         if (_last_video_time) {
797                 DCPTime fill_from = max (*_last_video_time, piece->position());
798
799                 /* Fill if we have more than half a frame to do */
800                 if ((fill_to - fill_from) > one_video_frame() / 2) {
801                         auto last = _last_video.find (wp);
802                         if (_film->three_d()) {
803                                 auto fill_to_eyes = video.eyes;
804                                 if (fill_to_eyes == Eyes::BOTH) {
805                                         fill_to_eyes = Eyes::LEFT;
806                                 }
807                                 if (fill_to == piece->end()) {
808                                         /* Don't fill after the end of the content */
809                                         fill_to_eyes = Eyes::LEFT;
810                                 }
811                                 auto j = fill_from;
812                                 auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
813                                 if (eyes == Eyes::BOTH) {
814                                         eyes = Eyes::LEFT;
815                                 }
816                                 while (j < fill_to || eyes != fill_to_eyes) {
817                                         if (last != _last_video.end()) {
818                                                 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
819                                                 auto copy = last->second->shallow_copy();
820                                                 copy->set_eyes (eyes);
821                                                 emit_video (copy, j);
822                                         } else {
823                                                 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
824                                                 emit_video (black_player_video_frame(eyes), j);
825                                         }
826                                         if (eyes == Eyes::RIGHT) {
827                                                 j += one_video_frame();
828                                         }
829                                         eyes = increment_eyes (eyes);
830                                 }
831                         } else {
832                                 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
833                                         if (last != _last_video.end()) {
834                                                 emit_video (last->second, j);
835                                         } else {
836                                                 emit_video (black_player_video_frame(Eyes::BOTH), j);
837                                         }
838                                 }
839                         }
840                 }
841         }
842
843         _last_video[wp] = piece->player_video (video, _video_container_size);
844
845         DCPTime t = time;
846         for (int i = 0; i < frc.repeat; ++i) {
847                 if (t < piece->end()) {
848                         emit_video (_last_video[wp], t);
849                 }
850                 t += one_video_frame ();
851         }
852 }
853
854
/** Handle some audio arriving from a decoder: trim it to the content's period,
 *  apply the content's gain, remap its channels, run any audio processor and
 *  push the result into the merger.
 *  @param wp Piece which the audio came from.
 *  @param piece_audio Audio data and the resampled frame index at which it starts.
 */
void
Player::audio (weak_ptr<Piece> wp, PieceAudio piece_audio)
{
	DCPOMATIC_ASSERT (piece_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	int const rfr = piece->resampled_audio_frame_rate ();

	/* Compute time in the DCP */
	auto time = piece->resampled_audio_to_dcp (piece_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", piece_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(piece_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		auto cut = discard_audio (piece_audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		piece_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->end()) {
		/* Discard the part which falls after the end of the content */
		Frame const remaining_frames = DCPTime(piece->end() - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		piece_audio.audio = make_shared<AudioBuffers>(piece_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (piece_audio.audio->frames() > 0);

	/* Gain */

	if (piece->audio_gain() != 0) {
		/* Copy the buffers before applying gain so that the decoder's data is not modified */
		auto gain = make_shared<AudioBuffers>(piece_audio.audio);
		gain->apply_gain (piece->audio_gain());
		piece_audio.audio = gain;
	}

	/* Remap */

	piece_audio.audio = remap (piece_audio.audio, _film->audio_channels(), piece_audio.stream->mapping());

	/* Process */

	if (_audio_processor) {
		piece_audio.audio = _audio_processor->run (piece_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (piece_audio.audio, time);
	/* Record the end of what we just pushed for this stream */
	piece->set_last_push_end (piece_audio.stream, time + DCPTime::from_frames(piece_audio.audio->frames(), _film->audio_frame_rate()));
}
919
920
/** Handle a bitmap subtitle starting: apply the content's offset and scale
 *  settings, scale the image up to fit the video container and add the result
 *  to the active texts for this text type.
 */
void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentBitmapText subtitle)
{
	auto piece = wp.lock ();
	auto content = wc.lock ();
	auto text = wt.lock ();
	if (!piece || !content || !text) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	PlayerText ps;
	auto image = subtitle.sub.image;

	/* We will scale the subtitle up to fit _video_container_size */
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	if (width == 0 || height == 0) {
		/* Degenerate rectangle; nothing to draw */
		return;
	}

	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	auto from = piece->content_time_to_dcp(content, subtitle.from());
	DCPOMATIC_ASSERT (from);

	_active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
}
960
961
/** Handle a string (plain-text) subtitle starting: apply the content's
 *  position, scale and aspect-ratio settings and add the result to the
 *  active texts for this text type.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentStringText subtitle)
{
	auto piece = wp.lock ();
	auto content = wc.lock ();
	auto text = wt.lock ();
	if (!piece || !content || !text) {
		return;
	}

	PlayerText ps;
	auto const from = piece->content_time_to_dcp(content, subtitle.from());
	DCPOMATIC_ASSERT (from);

	if (from > piece->end()) {
		/* Subtitle starts after the end of the content's period; ignore it */
		return;
	}

	for (auto s: subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling, i.e. multiply
		   by max(xs, ys); e.g. if xs = ys = 0.5 the size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Set the subtitle's start time relative to the DCP */
		s.set_in (dcp::Time(from->seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
}
1007
1008
1009 void
1010 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentTime to)
1011 {
1012         auto content = wc.lock ();
1013         auto text = wt.lock ();
1014         if (!text) {
1015                 return;
1016         }
1017
1018         if (!_active_texts[static_cast<int>(text->type())].have(wt)) {
1019                 return;
1020         }
1021
1022         shared_ptr<Piece> piece = wp.lock ();
1023         if (!piece) {
1024                 return;
1025         }
1026
1027         auto const dcp_to = piece->content_time_to_dcp(content, to);
1028         DCPOMATIC_ASSERT (dcp_to);
1029
1030         if (*dcp_to > piece->end()) {
1031                 return;
1032         }
1033
1034         auto from = _active_texts[static_cast<int>(text->type())].add_to(wt, *dcp_to);
1035
1036         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1037         if (text->use() && !always && !text->burn()) {
1038                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
1039         }
1040 }
1041
1042
1043 void
1044 Player::seek (DCPTime time, bool accurate)
1045 {
1046         boost::mutex::scoped_lock lm (_mutex);
1047         LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1048
1049         if (_suspended) {
1050                 /* We can't seek in this state */
1051                 return;
1052         }
1053
1054         if (_shuffler) {
1055                 _shuffler->clear ();
1056         }
1057
1058         _delay.clear ();
1059
1060         if (_audio_processor) {
1061                 _audio_processor->flush ();
1062         }
1063
1064         _audio_merger.clear ();
1065         for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1066                 _active_texts[i].clear ();
1067         }
1068
1069         for (auto i: _pieces) {
1070                 i->seek (time, accurate);
1071         }
1072
1073         if (accurate) {
1074                 _last_video_time = time;
1075                 _last_video_eyes = Eyes::LEFT;
1076                 _last_audio_time = time;
1077         } else {
1078                 _last_video_time = optional<DCPTime>();
1079                 _last_video_eyes = optional<Eyes>();
1080                 _last_audio_time = optional<DCPTime>();
1081         }
1082
1083         _black.set_position (time);
1084         _silent.set_position (time);
1085
1086         _last_video.clear ();
1087 }
1088
1089
1090 void
1091 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1092 {
1093         if (!_film->three_d()) {
1094                 if (pv->eyes() == Eyes::LEFT) {
1095                         /* Use left-eye images for both eyes... */
1096                         pv->set_eyes (Eyes::BOTH);
1097                 } else if (pv->eyes() == Eyes::RIGHT) {
1098                         /* ...and discard the right */
1099                         return;
1100                 }
1101         }
1102
1103         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1104            player before the video that requires them.
1105         */
1106         _delay.push_back (make_pair (pv, time));
1107
1108         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1109                 _last_video_time = time + one_video_frame();
1110         }
1111         _last_video_eyes = increment_eyes (pv->eyes());
1112
1113         if (_delay.size() < 3) {
1114                 return;
1115         }
1116
1117         auto to_do = _delay.front();
1118         _delay.pop_front();
1119         do_emit_video (to_do.first, to_do.second);
1120 }
1121
1122
1123 void
1124 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1125 {
1126         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1127                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1128                         _active_texts[i].clear_before (time);
1129                 }
1130         }
1131
1132         auto subtitles = open_subtitles_for_frame (time);
1133         if (subtitles) {
1134                 pv->set_text (subtitles.get ());
1135         }
1136
1137         Video (pv, time);
1138 }
1139
1140
/** Emit some audio to the outside world via the Audio signal, checking that it
 *  follows on directly from the last audio emitted.
 *  @param data Audio data.
 *  @param time DCP time at which the audio starts.
 */
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	/* Log if the assert below is about to fail */
	if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
	}

	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
	DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
	Audio (data, time, _film->audio_frame_rate());
	/* Remember where the next block of audio should start */
	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
1154
1155
1156 void
1157 Player::fill_audio (DCPTimePeriod period)
1158 {
1159         if (period.from == period.to) {
1160                 return;
1161         }
1162
1163         DCPOMATIC_ASSERT (period.from < period.to);
1164
1165         DCPTime t = period.from;
1166         while (t < period.to) {
1167                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1168                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1169                 if (samples) {
1170                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1171                         silence->make_silent ();
1172                         emit_audio (silence, t);
1173                 }
1174                 t += block;
1175         }
1176 }
1177
1178
1179 DCPTime
1180 Player::one_video_frame () const
1181 {
1182         return DCPTime::from_frames (1, _film->video_frame_rate ());
1183 }
1184
1185
1186 pair<shared_ptr<AudioBuffers>, DCPTime>
1187 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1188 {
1189         auto const discard_time = discard_to - time;
1190         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1191         auto remaining_frames = audio->frames() - discard_frames;
1192         if (remaining_frames <= 0) {
1193                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1194         }
1195         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1196         return make_pair(cut, time + discard_time);
1197 }
1198
1199
/** Set the decode resolution reduction used for DCP content, rebuilding the
 *  pieces if it changes and emitting Change signals around the update.
 *  @param reduction New decode reduction, or none for no reduction.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change: cancel the pending signal.  Unlock first so the
			   signal is not emitted with the mutex held.
			*/
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		/* Rebuild the pieces so that the new reduction takes effect */
		setup_pieces_unlocked ();
	}

	/* Mutex released before emitting DONE, matching the CANCELLED path above */
	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1220
1221
1222 shared_ptr<const Playlist>
1223 Player::playlist () const
1224 {
1225         return _playlist ? _playlist : _film->playlist();
1226 }
1227
1228
1229 void
1230 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1231 {
1232         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1233 }
1234