Move some checks from Player to Piece.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "dcp_content.h"
30 #include "dcp_decoder.h"
31 #include "dcpomatic_log.h"
32 #include "decoder.h"
33 #include "decoder_factory.h"
34 #include "ffmpeg_content.h"
35 #include "film.h"
36 #include "frame_rate_change.h"
37 #include "image.h"
38 #include "image_decoder.h"
39 #include "job.h"
40 #include "log.h"
41 #include "piece_video.h"
42 #include "player.h"
43 #include "player_video.h"
44 #include "playlist.h"
45 #include "ratio.h"
46 #include "raw_image_proxy.h"
47 #include "referenced_reel_asset.h"
48 #include "render_text.h"
49 #include "shuffler.h"
50 #include "text_content.h"
51 #include "text_decoder.h"
52 #include "timer.h"
53 #include "video_decoder.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <stdint.h>
60 #include <algorithm>
61 #include <iostream>
62
63 #include "i18n.h"
64
65
66 using std::copy;
67 using std::cout;
68 using std::dynamic_pointer_cast;
69 using std::list;
70 using std::make_pair;
71 using std::make_shared;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::unique_ptr;
82 using boost::optional;
83 #if BOOST_VERSION >= 106100
84 using namespace boost::placeholders;
85 #endif
86 using namespace dcpomatic;
87
88
89 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
90 int const PlayerProperty::PLAYLIST = 701;
91 int const PlayerProperty::FILM_CONTAINER = 702;
92 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
93 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
94 int const PlayerProperty::PLAYBACK_LENGTH = 705;
95
96
97 /** About 0.01dB */
98 #define AUDIO_GAIN_EPSILON 0.001
99
100
/** Construct a Player which will play the whole of @p film's own playlist */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
109
110
/** Construct a Player which will play a specific @p playlist_ rather than the
 *  film's own playlist.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
120
121
/** Shared constructor body: connect to film/playlist change signals, build our
 *  pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor, if any, as if it had just changed */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Start with an accurate seek to time zero */
	seek (DCPTime (), true);
}
138
139
/** Re-build our pieces, taking the lock first */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
146
147
148 bool
149 have_video (shared_ptr<const Content> content)
150 {
151         return static_cast<bool>(content->video) && content->video->use();
152 }
153
154
155 bool
156 have_audio (shared_ptr<const Content> content)
157 {
158         return static_cast<bool>(content->audio);
159 }
160
161
162 vector<vector<shared_ptr<Content>>>
163 collect (shared_ptr<const Film> film, ContentList content)
164 {
165         vector<shared_ptr<Content>> ungrouped;
166         vector<vector<shared_ptr<Content>>> grouped;
167
168         auto same_settings = [](shared_ptr<const Film> film, shared_ptr<const AudioContent> a, shared_ptr<const AudioContent> b) {
169
170                 auto a_streams = a->streams();
171                 auto b_streams = b->streams();
172
173                 if (a_streams.size() != b_streams.size()) {
174                         return false;
175                 }
176
177                 for (size_t i = 0; i < a_streams.size(); ++i) {
178                         auto a_stream = a_streams[i];
179                         auto b_stream = b_streams[i];
180                         if (
181                                 !a_stream->mapping().equals(b_stream->mapping(), AUDIO_GAIN_EPSILON) ||
182                                 a_stream->frame_rate() != b_stream->frame_rate() ||
183                                 a_stream->channels() != b_stream->channels()) {
184                                 return false;
185                         }
186                 }
187
188                 return (
189                         fabs(a->gain() - b->gain()) < AUDIO_GAIN_EPSILON &&
190                         a->delay() == b->delay() &&
191                         a->language() == b->language() &&
192                         a->resampled_frame_rate(film) == b->resampled_frame_rate(film) &&
193                         a->channel_names() == b->channel_names()
194                        );
195         };
196
197         for (auto i: content) {
198                 if (i->video || !i->audio || !i->text.empty()) {
199                         ungrouped.push_back (i);
200                 } else {
201                         bool done = false;
202                         for (auto& g: grouped) {
203                                 if (same_settings(film, g.front()->audio, i->audio) && i->position() == g.back()->end(film)) {
204                                         g.push_back (i);
205                                         done = true;
206                                 }
207                         }
208                         if (!done) {
209                                 grouped.push_back ({i});
210                         }
211                 }
212         }
213
214         for (auto i: ungrouped) {
215                 grouped.push_back({i});
216         }
217
218         return grouped;
219 }
220
221
/** Re-build the list of Pieces from the playlist's content.  Caller must hold _mutex.
 *  Decoders are re-used from the old pieces where possible, signal connections are
 *  made from each piece back to this Player, and the black/silent "empty" structures
 *  and last-emitted times are reset.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	_shuffler.reset (new Shuffler());
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use a decoder from the old pieces for this content, if one exists */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			auto decoder = j->decoder_for(i);
			if (decoder) {
				old_decoder = decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(_film, i, decoder, frc, _fast);
		_pieces.push_back (piece);

		if (i->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				piece->Video.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				piece->Video.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (i->audio) {
			piece->Audio.connect (bind(&Player::audio, this, weak_ptr<Piece>(piece), _1));
		}

		/* Text and Atmos data are forwarded to this Player regardless of content type */
		piece->BitmapTextStart.connect (bind(&Player::bitmap_text_start, this, piece, _1));
		piece->StringTextStart.connect (bind(&Player::string_text_start, this, piece, _1));
		piece->TextStop.connect (bind(&Player::subtitle_stop, this, piece, _1));
		piece->Atmos.connect (bind(&Player::atmos, this, piece, _1));
	}

	/* Where two pieces of in-use 2D video overlap, ignore the video of the earlier
	   one for the overlapping period (later content in the list wins).
	*/
	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
			/* Look for content later in the content list with in-use video that overlaps this */
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->use_video()) {
					(*i)->set_ignore_video ((*j)->period().overlap((*i)->period()));
				}
			}
		}
	}

	/* Periods with no video / no audio, which must be filled with black / silence */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Forget what we last emitted, since the pieces have changed */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
320
321
322 optional<DCPTime>
323 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
324 {
325         boost::mutex::scoped_lock lm (_mutex);
326
327         for (auto i: _pieces) {
328                 auto dcp = i->content_time_to_dcp(content, t);
329                 if (dcp) {
330                         return *dcp;
331                 }
332         }
333
334         /* We couldn't find this content; perhaps things are being changed over */
335         return {};
336 }
337
338
/** Handler for changes to any content in the playlist; forwarded on via our own
 *  Change signal.  While a non-crop change is PENDING we bump _suspended so that
 *  pass() and seek() refuse to run against stale pieces.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			/* Crop changed, so the metadata of any delayed frames is now stale */
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	Change (type, property, frequent);
}
368
369
/** Set the size of the container that video will be scaled into, re-making the
 *  black frame to match.  Emits PENDING then DONE Change signals, or CANCELLED
 *  if the size is unchanged.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; unlock before emitting so observers don't run under _mutex */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black image at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
392
393
394 void
395 Player::playlist_change (ChangeType type)
396 {
397         if (type == ChangeType::DONE) {
398                 setup_pieces ();
399         }
400         Change (type, PlayerProperty::PLAYLIST, false);
401 }
402
403
404 void
405 Player::film_change (ChangeType type, Film::Property p)
406 {
407         /* Here we should notice Film properties that affect our output, and
408            alert listeners that our output now would be different to how it was
409            last time we were run.
410         */
411
412         if (p == Film::Property::CONTAINER) {
413                 Change (type, PlayerProperty::FILM_CONTAINER, false);
414         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
415                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
416                    so we need new pieces here.
417                 */
418                 if (type == ChangeType::DONE) {
419                         setup_pieces ();
420                 }
421                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
422         } else if (p == Film::Property::AUDIO_PROCESSOR) {
423                 if (type == ChangeType::DONE && _film->audio_processor ()) {
424                         boost::mutex::scoped_lock lm (_mutex);
425                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
426                 }
427         } else if (p == Film::Property::AUDIO_CHANNELS) {
428                 if (type == ChangeType::DONE) {
429                         boost::mutex::scoped_lock lm (_mutex);
430                         _audio_merger.clear ();
431                 }
432         }
433 }
434
435
/** @return a new PlayerVideo wrapping our black image, sized to the video
 *  container, with the given @p eyes value (for 3D).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
454
455
456 vector<FontData>
457 Player::get_subtitle_fonts ()
458 {
459         boost::mutex::scoped_lock lm (_mutex);
460
461         vector<FontData> fonts;
462         for (auto i: _pieces) {
463                 /* XXX: things may go wrong if there are duplicate font IDs
464                    with different font files.
465                 */
466                 auto f = i->fonts ();
467                 copy (f.begin(), f.end(), back_inserter(fonts));
468         }
469
470         return fonts;
471 }
472
473
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Re-build pieces so the new setting takes effect on the decoders */
	setup_pieces_unlocked ();
}
482
483
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Re-build pieces so the new setting takes effect on the decoders */
	setup_pieces_unlocked ();
}
491
492
/** Set this player never to produce any text data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Re-build pieces so the new setting takes effect on the decoders */
	setup_pieces_unlocked ();
}
500
501
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  No piece re-build is needed; the flag is read when subtitles are rendered.
 */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
}
509
510
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Re-build pieces, which are constructed with the fast flag */
	setup_pieces_unlocked ();
}
519
520
/** Set the player to decode content from referenced DCPs rather than skipping it */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Re-build pieces so DCP decoders pick up set_decode_referenced() */
	setup_pieces_unlocked ();
}
528
529
530 static void
531 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
532 {
533         DCPOMATIC_ASSERT (r);
534         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
535         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
536         if (r->actual_duration() > 0) {
537                 a.push_back (
538                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
539                         );
540         }
541 }
542
543
/** @return the reel assets from any DCP content which is marked to be referenced
 *  (rather than re-encoded), each paired with the DCP period it will occupy,
 *  with the content's trims applied across reel boundaries.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		unique_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* Could not make a decoder for this DCP; give up with what we have so far */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
614
615
/** Run one step of playback: make whichever of our pieces (or the black/silent
 *  fillers) is farthest behind emit some data, then push out any audio which is
 *  now complete.
 *  @return true when playback has finished (nothing more to emit).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		auto time = i->decoder_before(earliest_time);
		if (time) {
			earliest_time = *time;
			earliest_content = i;
		}
	}

	bool done = false;

	/* What kind of thing should emit next */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black filler wins if it is earlier than any content (and video is wanted) */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	/* Likewise silence, if audio is wanted */
	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->pass();
		if (!_play_referenced && earliest_content->reference_dcp_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = earliest_content->end ();
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback is complete: flush out anything buffered in the shuffler and delay queue */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
758
759
/** @return Open subtitles for the frame at the given time, converted to images
 *  and merged into a single positioned image, or no value if there are none.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Consider all open subtitles which are active (and to be burnt in) during
	   the one-frame period starting at `time'.
	*/
	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	return merge (captions);
}
805
806
/** Handle a frame of video arriving from a Piece.  The frame is discarded if it is
 *  out of range; otherwise any gap since the last video we emitted is filled (with
 *  repeats of the previous frame, or with black) and then the new frame is emitted,
 *  repeated as required by the content's frame rate change.
 */
void
Player::video (weak_ptr<Piece> wp, PieceVideo video)
{
	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(video.time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (video.time < piece->position() || (_last_video_time && video.time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (video.time, piece->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* The last video we emitted for this piece, if any; used as the fill source */
			auto last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we fill eye-by-eye: work out which eye we must fill up to... */
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->end()) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				/* ...and which eye we start from */
				auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* The frame time only advances once both eyes have been emitted */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill frame by frame, repeating the last frame or using black */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be used for any future fills for this piece */
	_last_video[wp] = piece->player_video (video, _video_container_size);

	/* Emit the frame, repeated as many times as the frame rate change requires,
	   but never past the end of the content's period.
	*/
	DCPTime t = video.time;
	auto const frc = piece->frame_rate_change();
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
888
889
/** Handle a block of audio arriving from a Piece.  The block is trimmed to the
 *  content's period, then gain, channel remapping and any audio processor are
 *  applied before the result is pushed to the merger.
 */
void
Player::audio (weak_ptr<Piece> wp, PieceAudio audio)
{
	DCPOMATIC_ASSERT (audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	LOG_DEBUG_PLAYER("Received audio at %1", to_string(audio.time));

	/* The end of this block in the DCP */
	int const rfr = piece->resampled_audio_frame_rate ();
	auto end = audio.time + DCPTime::from_frames(audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (audio.time < piece->position()) {
		auto cut = discard_audio (audio.audio, audio.time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		audio.audio = cut.first;
		audio.time = cut.second;
	} else if (audio.time > piece->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->end()) {
		/* The block straddles the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->end() - audio.time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		audio.audio = make_shared<AudioBuffers>(audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (audio.audio->frames() > 0);

	/* Gain */

	if (piece->audio_gain() != 0) {
		/* Apply gain to a copy so that the original buffers are left unmodified */
		auto gain = make_shared<AudioBuffers>(audio.audio);
		gain->apply_gain (piece->audio_gain());
		audio.audio = gain;
	}

	/* Remap */

	audio.audio = remap (audio.audio, _film->audio_channels(), audio.mapping);

	/* Process */

	if (_audio_processor) {
		audio.audio = _audio_processor->run (audio.audio, _film->audio_channels());
	}

	/* Push */

	_audio_merger.push (audio.audio, audio.time);
	/* Tell the piece how far this stream has now been pushed, so it can track progress */
	piece->set_last_push_end (audio.stream, audio.time + DCPTime::from_frames(audio.audio->frames(), _film->audio_frame_rate()));
}
951
952
953 void
954 Player::bitmap_text_start (weak_ptr<Piece> wp, PieceBitmapTextStart subtitle)
955 {
956         auto piece = wp.lock ();
957         auto content = subtitle.content().lock();
958         auto text = subtitle.text().lock();
959         if (!piece || !content || !text) {
960                 return;
961         }
962
963         /* Apply content's subtitle offsets */
964         subtitle.sub.rectangle.x += text->x_offset ();
965         subtitle.sub.rectangle.y += text->y_offset ();
966
967         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
968         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
969         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
970
971         /* Apply content's subtitle scale */
972         subtitle.sub.rectangle.width *= text->x_scale ();
973         subtitle.sub.rectangle.height *= text->y_scale ();
974
975         PlayerText ps;
976         auto image = subtitle.sub.image;
977
978         /* We will scale the subtitle up to fit _video_container_size */
979         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
980         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
981         if (width == 0 || height == 0) {
982                 return;
983         }
984
985         dcp::Size scaled_size (width, height);
986         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
987         auto from = piece->content_time_to_dcp(content, subtitle.time());
988         DCPOMATIC_ASSERT (from);
989
990         _active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
991 }
992
993
/** Handle the start of a string (text) subtitle arriving from a Piece.  The
 *  content's offsets, scales and outline width are applied to each subtitle
 *  and the result is added to the appropriate list of active texts.
 */
void
Player::string_text_start (weak_ptr<Piece> wp, PieceStringTextStart subtitle)
{
	auto piece = wp.lock ();
	auto content = subtitle.content().lock();
	auto text = subtitle.text().lock();
	if (!piece || !content || !text) {
		return;
	}

	PlayerText ps;
	auto const from = piece->content_time_to_dcp(content, subtitle.time());
	DCPOMATIC_ASSERT (from);

	/* Discard subtitles which start after the end of the content's period */
	if (from > piece->end()) {
		return;
	}

	for (auto s: subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset());
		s.set_v_position (s.v_position() + text->y_offset());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Set the subtitle's in time to its DCP start time */
		s.set_in (dcp::Time(from->seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
}
1039
1040
1041 void
1042 Player::subtitle_stop (weak_ptr<Piece> wp, PieceTextStop stop)
1043 {
1044         auto content = stop.content().lock();
1045         auto text = stop.text().lock();
1046         if (!text) {
1047                 return;
1048         }
1049
1050         if (!_active_texts[static_cast<int>(text->type())].have(stop.text())) {
1051                 return;
1052         }
1053
1054         shared_ptr<Piece> piece = wp.lock ();
1055         if (!piece) {
1056                 return;
1057         }
1058
1059         auto const dcp_to = piece->content_time_to_dcp(content, stop.time());
1060         DCPOMATIC_ASSERT (dcp_to);
1061
1062         if (*dcp_to > piece->end()) {
1063                 return;
1064         }
1065
1066         auto from = _active_texts[static_cast<int>(text->type())].add_to(stop.text(), *dcp_to);
1067
1068         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1069         if (text->use() && !always && !text->burn()) {
1070                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
1071         }
1072 }
1073
1074
1075 void
1076 Player::seek (DCPTime time, bool accurate)
1077 {
1078         boost::mutex::scoped_lock lm (_mutex);
1079         LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1080
1081         if (_suspended) {
1082                 /* We can't seek in this state */
1083                 return;
1084         }
1085
1086         if (_shuffler) {
1087                 _shuffler->clear ();
1088         }
1089
1090         _delay.clear ();
1091
1092         if (_audio_processor) {
1093                 _audio_processor->flush ();
1094         }
1095
1096         _audio_merger.clear ();
1097         for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1098                 _active_texts[i].clear ();
1099         }
1100
1101         for (auto i: _pieces) {
1102                 i->seek (time, accurate);
1103         }
1104
1105         if (accurate) {
1106                 _last_video_time = time;
1107                 _last_video_eyes = Eyes::LEFT;
1108                 _last_audio_time = time;
1109         } else {
1110                 _last_video_time = optional<DCPTime>();
1111                 _last_video_eyes = optional<Eyes>();
1112                 _last_audio_time = optional<DCPTime>();
1113         }
1114
1115         _black.set_position (time);
1116         _silent.set_position (time);
1117
1118         _last_video.clear ();
1119 }
1120
1121
1122 void
1123 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1124 {
1125         if (!_film->three_d()) {
1126                 if (pv->eyes() == Eyes::LEFT) {
1127                         /* Use left-eye images for both eyes... */
1128                         pv->set_eyes (Eyes::BOTH);
1129                 } else if (pv->eyes() == Eyes::RIGHT) {
1130                         /* ...and discard the right */
1131                         return;
1132                 }
1133         }
1134
1135         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1136            player before the video that requires them.
1137         */
1138         _delay.push_back (make_pair (pv, time));
1139
1140         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1141                 _last_video_time = time + one_video_frame();
1142         }
1143         _last_video_eyes = increment_eyes (pv->eyes());
1144
1145         if (_delay.size() < 3) {
1146                 return;
1147         }
1148
1149         auto to_do = _delay.front();
1150         _delay.pop_front();
1151         do_emit_video (to_do.first, to_do.second);
1152 }
1153
1154
1155 void
1156 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1157 {
1158         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1159                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1160                         _active_texts[i].clear_before (time);
1161                 }
1162         }
1163
1164         auto subtitles = open_subtitles_for_frame (time);
1165         if (subtitles) {
1166                 pv->set_text (subtitles.get ());
1167         }
1168
1169         Video (pv, time);
1170 }
1171
1172
1173 void
1174 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1175 {
1176         /* Log if the assert below is about to fail */
1177         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1178                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1179         }
1180
1181         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1182         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1183         Audio (data, time, _film->audio_frame_rate());
1184         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1185 }
1186
1187
1188 void
1189 Player::fill_audio (DCPTimePeriod period)
1190 {
1191         if (period.from == period.to) {
1192                 return;
1193         }
1194
1195         DCPOMATIC_ASSERT (period.from < period.to);
1196
1197         DCPTime t = period.from;
1198         while (t < period.to) {
1199                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1200                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1201                 if (samples) {
1202                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1203                         silence->make_silent ();
1204                         emit_audio (silence, t);
1205                 }
1206                 t += block;
1207         }
1208 }
1209
1210
/** @return the duration of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1216
1217
1218 pair<shared_ptr<AudioBuffers>, DCPTime>
1219 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1220 {
1221         auto const discard_time = discard_to - time;
1222         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1223         auto remaining_frames = audio->frames() - discard_frames;
1224         if (remaining_frames <= 0) {
1225                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1226         }
1227         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1228         return make_pair(cut, time + discard_time);
1229 }
1230
1231
1232 void
1233 Player::set_dcp_decode_reduction (optional<int> reduction)
1234 {
1235         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1236
1237         {
1238                 boost::mutex::scoped_lock lm (_mutex);
1239
1240                 if (reduction == _dcp_decode_reduction) {
1241                         lm.unlock ();
1242                         Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1243                         return;
1244                 }
1245
1246                 _dcp_decode_reduction = reduction;
1247                 setup_pieces_unlocked ();
1248         }
1249
1250         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1251 }
1252
1253
1254 shared_ptr<const Playlist>
1255 Player::playlist () const
1256 {
1257         return _playlist ? _playlist : _film->playlist();
1258 }
1259
1260
1261 void
1262 Player::atmos (weak_ptr<Piece>, PieceAtmos data)
1263 {
1264         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1265 }
1266