Add add_fonts() to Piece.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "atmos_decoder.h"
22 #include "player.h"
23 #include "film.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
28 #include "job.h"
29 #include "image.h"
30 #include "raw_image_proxy.h"
31 #include "ratio.h"
32 #include "log.h"
33 #include "render_text.h"
34 #include "config.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
39 #include "playlist.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
42 #include "decoder.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
52 #include "shuffler.h"
53 #include "timer.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66 using std::list;
67 using std::cout;
68 using std::min;
69 using std::max;
70 using std::min;
71 using std::vector;
72 using std::pair;
73 using std::map;
74 using std::make_pair;
75 using std::copy;
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
83 #endif
84 using namespace dcpomatic;
85
86 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
87 int const PlayerProperty::PLAYLIST = 701;
88 int const PlayerProperty::FILM_CONTAINER = 702;
89 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
90 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
91 int const PlayerProperty::PLAYBACK_LENGTH = 705;
92
/** Construct a Player which plays the whole of the given Film's own playlist.
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
108
/** Construct a Player which plays an explicitly-given playlist rather than the
 *  Film's own one.
 *  @param film Film that the playlist's content comes from.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
125
/** Shared set-up called by both constructors: connect to the Film's and the
 *  playlist's change signals, set up the audio processor, build the pieces
 *  and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Run the AUDIO_PROCESSOR handler once so that _audio_processor reflects the film's current setting */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Accurate seek to the very start */
	seek (DCPTime (), true);
}
142
Player::~Player ()
{
	/* _shuffler is a raw owning pointer, (re)allocated in setup_pieces_unlocked() */
	delete _shuffler;
}
147
/** Rebuild the list of Pieces, taking the lock on _mutex first */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
154
155
156 bool
157 have_video (shared_ptr<const Content> content)
158 {
159         return static_cast<bool>(content->video) && content->video->use();
160 }
161
162 bool
163 have_audio (shared_ptr<const Content> content)
164 {
165         return static_cast<bool>(content->audio);
166 }
167
/** Rebuild _pieces (one Piece per usable Content item), wiring each decoder's
 *  output signals through to our handlers.  Caller must hold a lock on _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	list<shared_ptr<Piece> > old_pieces = _pieces;
	_pieces.clear ();

	/* Replace the Shuffler (which re-orders 3D L/R frames) along with the pieces */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> old_decoder;
		/* XXX: needs to check vector of Content and use the old decoders, but
		 * this will all be different as we have to coalesce content before
		 * this happens.
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}
		*/

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			/* DCP content may be "referenced" (re-used in the output rather than decoded) */
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		/* Connect decoder emissions to our handlers; weak_ptr<Piece> so the
		   connections do not keep the Piece alive.
		*/
		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Work out where black (no video) and silence (no audio) need to be generated */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission state back to the start */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
}
274
/** Handler for a change to some Content in the playlist.
 *  @param type Stage of the change (PENDING / DONE / CANCELLED).
 *  @param property Property that changed.
 *  @param frequent true if this change is likely to happen often.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == CHANGE_TYPE_DONE) {
			dcp::Size const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			/* A crop change only requires the metadata of any queued (delayed)
			   frames to be refreshed; no need to rebuild the pieces.
			*/
			for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
				i->first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == CHANGE_TYPE_PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == CHANGE_TYPE_DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == CHANGE_TYPE_CANCELLED) {
			/* Balance the ++ that the PENDING for this change did */
			--_suspended;
		}
	}

	/* Proxy the change through to our own listeners (e.g. the Butler) */
	Change (type, property, frequent);
}
304
/** Set the size of the container that output video will be placed in.
 *  Emits PENDING, then either CANCELLED (if the size is unchanged) or DONE.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No real change; unlock before emitting so the signal is not raised with _mutex held */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame to match the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* DONE is emitted outside the scope of the lock, like CANCELLED above */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
327
/** Handler for a change to the playlist itself (e.g. content added or removed) */
void
Player::playlist_change (ChangeType type)
{
	if (type == CHANGE_TYPE_DONE) {
		/* The playlist has actually changed, so our pieces must be rebuilt */
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
336
337 void
338 Player::film_change (ChangeType type, Film::Property p)
339 {
340         /* Here we should notice Film properties that affect our output, and
341            alert listeners that our output now would be different to how it was
342            last time we were run.
343         */
344
345         if (p == Film::CONTAINER) {
346                 Change (type, PlayerProperty::FILM_CONTAINER, false);
347         } else if (p == Film::VIDEO_FRAME_RATE) {
348                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
349                    so we need new pieces here.
350                 */
351                 if (type == CHANGE_TYPE_DONE) {
352                         setup_pieces ();
353                 }
354                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
355         } else if (p == Film::AUDIO_PROCESSOR) {
356                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
357                         boost::mutex::scoped_lock lm (_mutex);
358                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
359                 }
360         } else if (p == Film::AUDIO_CHANNELS) {
361                 if (type == CHANGE_TYPE_DONE) {
362                         boost::mutex::scoped_lock lm (_mutex);
363                         _audio_merger.clear ();
364                 }
365         }
366 }
367
/** @return A completely black frame, sized to the video container, for the given eyes */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),       /* no fade */
			_video_container_size,     /* inter size */
			_video_container_size,     /* out size */
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			VIDEO_RANGE_FULL,
			boost::weak_ptr<Content>(),  /* no associated content */
			boost::optional<Frame>(),    /* no associated frame */
			false
		)
	);
}
388
389
390 list<shared_ptr<Font> >
391 Player::get_subtitle_fonts ()
392 {
393         boost::mutex::scoped_lock lm (_mutex);
394
395         list<shared_ptr<Font> > fonts;
396         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
397                 i->add_fonts (fonts);
398         }
399
400         return fonts;
401 }
402
403 /** Set this player never to produce any video data */
404 void
405 Player::set_ignore_video ()
406 {
407         boost::mutex::scoped_lock lm (_mutex);
408         _ignore_video = true;
409         setup_pieces_unlocked ();
410 }
411
412 void
413 Player::set_ignore_audio ()
414 {
415         boost::mutex::scoped_lock lm (_mutex);
416         _ignore_audio = true;
417         setup_pieces_unlocked ();
418 }
419
420 void
421 Player::set_ignore_text ()
422 {
423         boost::mutex::scoped_lock lm (_mutex);
424         _ignore_text = true;
425         setup_pieces_unlocked ();
426 }
427
428 /** Set the player to always burn open texts into the image regardless of the content settings */
429 void
430 Player::set_always_burn_open_subtitles ()
431 {
432         boost::mutex::scoped_lock lm (_mutex);
433         _always_burn_open_subtitles = true;
434 }
435
436 /** Sets up the player to be faster, possibly at the expense of quality */
437 void
438 Player::set_fast ()
439 {
440         boost::mutex::scoped_lock lm (_mutex);
441         _fast = true;
442         setup_pieces_unlocked ();
443 }
444
445 void
446 Player::set_play_referenced ()
447 {
448         boost::mutex::scoped_lock lm (_mutex);
449         _play_referenced = true;
450         setup_pieces_unlocked ();
451 }
452
453 static void
454 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
455 {
456         DCPOMATIC_ASSERT (r);
457         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
458         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
459         if (r->actual_duration() > 0) {
460                 a.push_back (
461                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
462                         );
463         }
464 }
465
/** @return Details of the reel assets in any DCP content which is marked to be
 *  referenced (rather than re-encoded), with the content's position and trims
 *  applied.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): any failure to open a DCP abandons the scan and returns
			   what we have so far, rather than skipping just this content — confirm
			   this is intended.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims in frames at the content's own video frame rate */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
536
/** Run the player for one "pass": make whichever source is farthest behind
 *  (a piece's decoder, or the black/silence generators) emit some data, then
 *  push out any audio which is now complete.
 *  @return true if playback has finished, otherwise false.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black and silence gaps take over if they fall earlier than any content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _playback_length;
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush any out-of-order 3D frames held by the
		   Shuffler and emit any video still sitting in the delay queue.
		*/
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
693
694 /** @return Open subtitles for the frame at the given time, converted to images */
695 optional<PositionImage>
696 Player::open_subtitles_for_frame (DCPTime time) const
697 {
698         list<PositionImage> captions;
699         int const vfr = _film->video_frame_rate();
700
701         BOOST_FOREACH (
702                 PlayerText j,
703                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
704                 ) {
705
706                 /* Bitmap subtitles */
707                 BOOST_FOREACH (BitmapText i, j.bitmap) {
708                         if (!i.image) {
709                                 continue;
710                         }
711
712                         /* i.image will already have been scaled to fit _video_container_size */
713                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
714
715                         captions.push_back (
716                                 PositionImage (
717                                         i.image,
718                                         Position<int> (
719                                                 lrint (_video_container_size.width * i.rectangle.x),
720                                                 lrint (_video_container_size.height * i.rectangle.y)
721                                                 )
722                                         )
723                                 );
724                 }
725
726                 /* String subtitles (rendered to an image) */
727                 if (!j.string.empty ()) {
728                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
729                         copy (s.begin(), s.end(), back_inserter (captions));
730                 }
731         }
732
733         if (captions.empty ()) {
734                 return optional<PositionImage> ();
735         }
736
737         return merge (captions);
738 }
739
/** Handler for video data emitted by a Piece's decoder.
 *  Discards out-of-period frames, fills any gap since the last emitted video
 *  (repeating the last frame or using black), then emits this frame.
 *  @param wp Piece that the video came from.
 *  @param video Video image and its frame index within the content.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	if (!piece->content->video->use()) {
		/* Video from this content is disabled */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame rate change drops every other frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* The last frame we emitted from this piece, if any; used as filler in preference to black */
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill whole left/right pairs, stopping at the eye of the incoming frame */
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				/* Start from the eye after the last one emitted, defaulting to LEFT */
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Only advance the clock once both eyes of a pair have been emitted */
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so it can be used to fill future gaps from this piece */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame,
			false
			)
		);

	/* Emit the frame, repeated as required by the frame rate change, but never past the piece's end */
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
846
/** Handler for audio data emitted by a Piece's decoder.
 *  Trims the block to the content's period, applies gain, remaps channels,
 *  runs any audio processor and pushes the result into the merger.
 *  @param wp Piece which the audio came from.
 *  @param stream Stream within the piece's content that the audio belongs to.
 *  @param content_audio Audio data and its frame number within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; drop the audio */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this content after resampling to suit the film */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->end(_film)) {
		/* Trim the part of the block which overhangs the end of the content */
		Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* XXX: this almost certainly needs to be more efficient; perhaps pieces fill a map to find
	 * the piece from the stream, then we can call the right piece with no loop.
	 */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		i->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
	}
}
919
920 void
921 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
922 {
923         shared_ptr<Piece> piece = wp.lock ();
924         shared_ptr<const TextContent> text = wc.lock ();
925         if (!piece || !text) {
926                 return;
927         }
928
929         /* Apply content's subtitle offsets */
930         subtitle.sub.rectangle.x += text->x_offset ();
931         subtitle.sub.rectangle.y += text->y_offset ();
932
933         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
934         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
935         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
936
937         /* Apply content's subtitle scale */
938         subtitle.sub.rectangle.width *= text->x_scale ();
939         subtitle.sub.rectangle.height *= text->y_scale ();
940
941         PlayerText ps;
942         shared_ptr<Image> image = subtitle.sub.image;
943
944         /* We will scale the subtitle up to fit _video_container_size */
945         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
946         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
947         if (width == 0 || height == 0) {
948                 return;
949         }
950
951         dcp::Size scaled_size (width, height);
952         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
953         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
954
955         _active_texts[text->type()].add_from (wc, ps, from);
956 }
957
/** Handler for string (plain-text) subtitles from a Piece's decoder.
 *  Applies the content's position and scale settings to each subtitle and
 *  stores the result as an active text, to be emitted or burned in later.
 *  @param wp Piece which the subtitle came from.
 *  @param wc TextContent within the piece which the subtitle belongs to.
 *  @param subtitle Subtitle strings and their timing within the content.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->end(_film)) {
		/* The subtitle starts after the end of this piece; discard it */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* The DCP subtitle's `in' time is in milliseconds */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		/* Keep track of the fonts needed to render this text */
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
1001
1002 void
1003 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1004 {
1005         shared_ptr<const TextContent> text = wc.lock ();
1006         if (!text) {
1007                 return;
1008         }
1009
1010         if (!_active_texts[text->type()].have(wc)) {
1011                 return;
1012         }
1013
1014         shared_ptr<Piece> piece = wp.lock ();
1015         if (!piece) {
1016                 return;
1017         }
1018
1019         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1020
1021         if (dcp_to > piece->end(_film)) {
1022                 return;
1023         }
1024
1025         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1026
1027         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1028         if (text->use() && !always && !text->burn()) {
1029                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1030         }
1031 }
1032
/** Seek the player to a given time.
 *  @param time Time to seek to.
 *  @param accurate true to seek exactly to time; false to seek to some convenient
 *  nearby point (e.g. a keyframe) at or before time.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Throw away any queued-but-not-yet-emitted video */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Discard pending audio and any active subtitles/captions */
	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->position()), true);
			i->done = false;
		} else if (i->position() <= time && time < i->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where we are, so behave as if we had just emitted up to `time' */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* Inaccurate seek: we can't say exactly where the decoders will resume */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the frames we were keeping for gap-filling */
	_last_video.clear ();
}
1093
1094 void
1095 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1096 {
1097         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1098            player before the video that requires them.
1099         */
1100         _delay.push_back (make_pair (pv, time));
1101
1102         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1103                 _last_video_time = time + one_video_frame();
1104         }
1105         _last_video_eyes = increment_eyes (pv->eyes());
1106
1107         if (_delay.size() < 3) {
1108                 return;
1109         }
1110
1111         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1112         _delay.pop_front();
1113         do_emit_video (to_do.first, to_do.second);
1114 }
1115
1116 void
1117 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1118 {
1119         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1120                 for (int i = 0; i < TEXT_COUNT; ++i) {
1121                         _active_texts[i].clear_before (time);
1122                 }
1123         }
1124
1125         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1126         if (subtitles) {
1127                 pv->set_text (subtitles.get ());
1128         }
1129
1130         Video (pv, time);
1131 }
1132
/** Emit a block of audio, checking that it follows on from the previous one.
 *  @param data Audio data.
 *  @param time Time at which the audio should be played.
 */
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	/* Log if the assert below is about to fail */
	if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
	}

	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
	/* NOTE(review): labs() takes long; if DCPTime::get() is 64-bit this difference could be
	   truncated on platforms where long is 32 bits — consider llabs().  TODO confirm. */
	DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
	Audio (data, time, _film->audio_frame_rate());
	/* Remember where the next block should start */
	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
1146
1147 void
1148 Player::fill_audio (DCPTimePeriod period)
1149 {
1150         if (period.from == period.to) {
1151                 return;
1152         }
1153
1154         DCPOMATIC_ASSERT (period.from < period.to);
1155
1156         DCPTime t = period.from;
1157         while (t < period.to) {
1158                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1159                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1160                 if (samples) {
1161                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1162                         silence->make_silent ();
1163                         emit_audio (silence, t);
1164                 }
1165                 t += block;
1166         }
1167 }
1168
/** @return The duration of one video frame, at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1174
1175 pair<shared_ptr<AudioBuffers>, DCPTime>
1176 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1177 {
1178         DCPTime const discard_time = discard_to - time;
1179         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1180         Frame remaining_frames = audio->frames() - discard_frames;
1181         if (remaining_frames <= 0) {
1182                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1183         }
1184         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1185         return make_pair(cut, time + discard_time);
1186 }
1187
1188 void
1189 Player::set_dcp_decode_reduction (optional<int> reduction)
1190 {
1191         Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1192
1193         {
1194                 boost::mutex::scoped_lock lm (_mutex);
1195
1196                 if (reduction == _dcp_decode_reduction) {
1197                         lm.unlock ();
1198                         Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1199                         return;
1200                 }
1201
1202                 _dcp_decode_reduction = reduction;
1203                 setup_pieces_unlocked ();
1204         }
1205
1206         Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1207 }
1208
1209 optional<DCPTime>
1210 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1211 {
1212         boost::mutex::scoped_lock lm (_mutex);
1213
1214         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1215                 if (i->content == content) {
1216                         return content_time_to_dcp (i, t);
1217                 }
1218         }
1219
1220         /* We couldn't find this content; perhaps things are being changed over */
1221         return optional<DCPTime>();
1222 }
1223
1224
1225 shared_ptr<const Playlist>
1226 Player::playlist () const
1227 {
1228         return _playlist ? _playlist : _film->playlist();
1229 }
1230
1231
1232 void
1233 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1234 {
1235         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1236 }
1237