Fix Empty/Player behaviour when using a playlist that is not the same as the Film's.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include "timer.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
58 #include <stdint.h>
59 #include <algorithm>
60 #include <iostream>
61
62 #include "i18n.h"
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
80
/* Property identifiers passed to observers via the Player's Change signal
   (see film_change() / playlist_change() below).  The 700+ values presumably
   keep them distinct from Film/content property values — TODO confirm.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player.
 *  @param film Film to take settings (audio frame rate, frame size etc.) from.
 *  @param playlist Playlist to play; may be different from the Film's own playlist.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's current audio processor (film_change sets up _audio_processor) */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	/* Build our pieces and start from the beginning with an accurate seek */
	setup_pieces ();
	seek (DCPTime (), true);
}
114
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by this Player; it is (re)allocated in
	   setup_pieces_unlocked().
	*/
	delete _shuffler;
}
119
120 void
121 Player::setup_pieces ()
122 {
123         boost::mutex::scoped_lock lm (_mutex);
124         setup_pieces_unlocked ();
125 }
126
127 bool
128 have_video (shared_ptr<const Content> content)
129 {
130         return static_cast<bool>(content->video);
131 }
132
133 bool
134 have_audio (shared_ptr<const Content> content)
135 {
136         return static_cast<bool>(content->audio);
137 }
138
/** Rebuild _pieces from the playlist's current content, wiring each piece's
 *  decoder signals up to this Player.  Caller must hold _mutex.  Resets playback
 *  state (_black, _silent, _last_*_time) so a seek is needed afterwards to
 *  resume from a particular point.
 */
void
Player::setup_pieces_unlocked ()
{
	/* Keep the old pieces around so that decoders can be re-used for content
	   that has not changed.
	*/
	list<shared_ptr<Piece> > old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content whose files are missing can't be played */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use the old decoder for this content, if there was one */
		shared_ptr<Decoder> old_decoder;
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		FrameRateChange frc (_film, i);

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			/* NOTE: this inner `i' shadows the content `i' above */
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Hook up each text decoder's start/stop signals */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}
	}

	/* One StreamState per audio stream, starting at its content's position */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods with no video content are filled with black; those with no audio
	   content are filled with silence (see Player::pass).
	*/
	_black = Empty (_film, _playlist, bind(&have_video, _1));
	_silent = Empty (_film, _playlist, bind(&have_audio, _1));

	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();

	/* Cached value to save recalculating it on every ::pass */
	_film_length = _film->length ();
}
250
251 void
252 Player::playlist_content_change (ChangeType type, int property, bool frequent)
253 {
254         if (type == CHANGE_TYPE_PENDING) {
255                 /* The player content is probably about to change, so we can't carry on
256                    until that has happened and we've rebuilt our pieces.  Stop pass()
257                    and seek() from working until then.
258                 */
259                 ++_suspended;
260         } else if (type == CHANGE_TYPE_DONE) {
261                 /* A change in our content has gone through.  Re-build our pieces. */
262                 setup_pieces ();
263                 --_suspended;
264         } else if (type == CHANGE_TYPE_CANCELLED) {
265                 --_suspended;
266         }
267
268         Change (type, property, frequent);
269 }
270
/** Set the size of the container that video will be played into.
 *  Emits Change PENDING then DONE, or CANCELLED if the size is unchanged.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change: release _mutex before emitting the signal so that
			   observers do not run with our lock held.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame used by black_player_video_frame() at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
293
/** Handle a Change signal from our playlist: rebuild our pieces once the
 *  change has completed, then pass the notification on via our own Change.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == CHANGE_TYPE_DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
302
303 void
304 Player::film_change (ChangeType type, Film::Property p)
305 {
306         /* Here we should notice Film properties that affect our output, and
307            alert listeners that our output now would be different to how it was
308            last time we were run.
309         */
310
311         if (p == Film::CONTAINER) {
312                 Change (type, PlayerProperty::FILM_CONTAINER, false);
313         } else if (p == Film::VIDEO_FRAME_RATE) {
314                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
315                    so we need new pieces here.
316                 */
317                 if (type == CHANGE_TYPE_DONE) {
318                         setup_pieces ();
319                 }
320                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
321         } else if (p == Film::AUDIO_PROCESSOR) {
322                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
323                         boost::mutex::scoped_lock lm (_mutex);
324                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
325                 }
326         } else if (p == Film::AUDIO_CHANNELS) {
327                 if (type == CHANGE_TYPE_DONE) {
328                         boost::mutex::scoped_lock lm (_mutex);
329                         _audio_merger.clear ();
330                 }
331         }
332 }
333
/** @param eyes Eyes that the frame is for (e.g. for 3D).
 *  @return A completely black frame at the current video container size;
 *  _black_image is kept up to date by set_video_container_size().
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			VIDEO_RANGE_FULL,
			boost::weak_ptr<Content>(),
			boost::optional<Frame>(),
			false
		)
	);
}
354
/** Convert a DCP time to a video frame index within a piece's content,
 *  allowing for the content's position, trim and frame rate change.
 *  @param piece Piece in question.
 *  @param t DCP time.
 *  @return Video frame index within the content.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
371
/** Inverse of dcp_to_content_video: convert a content video frame index to DCP time.
 *  @param piece Piece in question.
 *  @param f Video frame index within the content.
 *  @return Corresponding DCP time.
 */
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
	return d + piece->content->position();
}
379
/** Convert a DCP time to an audio frame index within a piece's content, at the
 *  film's (i.e. resampled) audio frame rate.
 *  @param piece Piece in question.
 *  @param t DCP time.
 *  @return Audio frame index at the film's audio rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
388
/** Inverse of dcp_to_resampled_audio: convert a resampled audio frame index to DCP time.
 *  @param piece Piece in question.
 *  @param f Audio frame index at the film's audio rate.
 *  @return Corresponding DCP time.
 */
DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
		+ piece->content->position();
}
397
/** Convert a DCP time to a ContentTime within a piece, clamped so that it is
 *  never before the (trimmed) start of the content.
 *  @param piece Piece in question.
 *  @param t DCP time.
 *  @return Corresponding ContentTime.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
405
/** Convert a ContentTime within a piece to DCP time, clamped at zero.
 *  @param piece Piece in question.
 *  @param t ContentTime.
 *  @return Corresponding DCP time.
 */
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
411
412 list<shared_ptr<Font> >
413 Player::get_subtitle_fonts ()
414 {
415         boost::mutex::scoped_lock lm (_mutex);
416
417         list<shared_ptr<Font> > fonts;
418         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
419                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
420                         /* XXX: things may go wrong if there are duplicate font IDs
421                            with different font files.
422                         */
423                         list<shared_ptr<Font> > f = j->fonts ();
424                         copy (f.begin(), f.end(), back_inserter (fonts));
425                 }
426         }
427
428         return fonts;
429 }
430
431 /** Set this player never to produce any video data */
432 void
433 Player::set_ignore_video ()
434 {
435         boost::mutex::scoped_lock lm (_mutex);
436         _ignore_video = true;
437         setup_pieces_unlocked ();
438 }
439
440 void
441 Player::set_ignore_audio ()
442 {
443         boost::mutex::scoped_lock lm (_mutex);
444         _ignore_audio = true;
445         setup_pieces_unlocked ();
446 }
447
448 void
449 Player::set_ignore_text ()
450 {
451         boost::mutex::scoped_lock lm (_mutex);
452         _ignore_text = true;
453         setup_pieces_unlocked ();
454 }
455
456 /** Set the player to always burn open texts into the image regardless of the content settings */
457 void
458 Player::set_always_burn_open_subtitles ()
459 {
460         boost::mutex::scoped_lock lm (_mutex);
461         _always_burn_open_subtitles = true;
462 }
463
464 /** Sets up the player to be faster, possibly at the expense of quality */
465 void
466 Player::set_fast ()
467 {
468         boost::mutex::scoped_lock lm (_mutex);
469         _fast = true;
470         setup_pieces_unlocked ();
471 }
472
473 void
474 Player::set_play_referenced ()
475 {
476         boost::mutex::scoped_lock lm (_mutex);
477         _play_referenced = true;
478         setup_pieces_unlocked ();
479 }
480
/** Apply reel trims to an asset and, if anything of it remains, add it to a list.
 *  Note that the asset's entry point and duration are modified in place.
 *  @param a List to add to.
 *  @param r Asset to trim; must not be null.
 *  @param reel_trim_start Amount to trim from the start of this reel, in frames.
 *  @param reel_trim_end Amount to trim from the end of this reel, in frames.
 *  @param from Time at which this asset starts in the DCP.
 *  @param ffr Film video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	/* Only add the asset if the trims have left it with some duration */
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
493
/** @return Details of the reel assets from any DCP content in our playlist
 *  which is marked to be referenced (rather than re-encoded), with per-reel
 *  trims applied.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): this returns whatever has been collected so far,
			   rather than skipping just this piece of content — confirm that
			   is intended.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims expressed in content video frames */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* Where this reel starts in the DCP */
			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
564
/** Run one pass of the player: make whichever of our decoders, black-filler or
 *  silence-filler is farthest behind emit some data, then push out any audio
 *  that is known to be complete.
 *  @return true if there is nothing more to do (playback has finished), else false.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);
	DCPOMATIC_ASSERT (_film_length);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (*_film_length == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			/* This piece's decoder has gone past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What to do on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black/silent filling takes precedence if it is further behind than the content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		/* Emit one black frame and advance */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Fill at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = *_film_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush out any video held back by the
		   shuffler or the delay list.
		*/
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
719
/** @return Open subtitles for the frame at the given time, converted to images,
 *  merged into a single PositionImage; boost::none if there is nothing to show.
 *  @param time Time of the frame in the DCP.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Consider everything active during the one-video-frame period starting at `time' */
	BOOST_FOREACH (
		PlayerText j,
		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		BOOST_FOREACH (BitmapText i, j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* Rectangle coordinates are proportions of the container size */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint (_video_container_size.width * i.rectangle.x),
						lrint (_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty ()) {
			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty ()) {
		return optional<PositionImage> ();
	}

	return merge (captions);
}
765
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Handle a frame of video arriving from a piece's decoder: work out its DCP time,
	   fill any gap between the last video we emitted and this frame (repeating the
	   previous frame if we have one, otherwise emitting black), then emit the frame
	   itself, repeated as required by the frame rate change.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone (e.g. pieces are being rebuilt); drop the frame */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* This content is being played at a lower rate than it was shot at, so
		   every other frame is skipped.
		*/
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* The last frame we emitted for this piece, if any; used to repeat
			   rather than filling with black where possible.
			*/
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill whole left/right pairs, tracking which eye
				   we are up to so that the stream stays L/R alternating.
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				/* Keep going until we have both reached fill_to and finished the
				   current eye pair.
				*/
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the last frame, but with the eye we need now */
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == EYES_RIGHT) {
						/* Time only advances once we have done both eyes */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: simply repeat the last frame (or black) up to fill_to */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated if we need to fill a later gap */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame,
			false
			)
		);

	/* Emit the frame, repeated as many times as the frame rate change requires,
	   but never beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
867
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle a block of audio arriving from a piece's decoder: trim it to the
	   content's period, apply gain, remap to the film's channel layout, run any
	   audio processor and push the result into the merger.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone (e.g. pieces are being rebuilt); drop the audio */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this content after resampling to suit the film */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block overlaps the end of the content; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Note how far this stream has got so that pass() knows when it can pull from the merger */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
934
935 void
936 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
937 {
938         shared_ptr<Piece> piece = wp.lock ();
939         shared_ptr<const TextContent> text = wc.lock ();
940         if (!piece || !text) {
941                 return;
942         }
943
944         /* Apply content's subtitle offsets */
945         subtitle.sub.rectangle.x += text->x_offset ();
946         subtitle.sub.rectangle.y += text->y_offset ();
947
948         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
949         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
950         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
951
952         /* Apply content's subtitle scale */
953         subtitle.sub.rectangle.width *= text->x_scale ();
954         subtitle.sub.rectangle.height *= text->y_scale ();
955
956         PlayerText ps;
957         shared_ptr<Image> image = subtitle.sub.image;
958
959         /* We will scale the subtitle up to fit _video_container_size */
960         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
961         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
962         if (width == 0 || height == 0) {
963                 return;
964         }
965
966         dcp::Size scaled_size (width, height);
967         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
968         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
969
970         _active_texts[text->type()].add_from (wc, ps, from);
971 }
972
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	/* A string (text-based) subtitle has started: apply the content's offsets and
	   scales to each SubtitleString and add the result to the active texts.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Starts after the content has finished; ignore */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's position offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Express the subtitle's `in' time as an editable-event time for the DCP */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
1016
1017 void
1018 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1019 {
1020         shared_ptr<const TextContent> text = wc.lock ();
1021         if (!text) {
1022                 return;
1023         }
1024
1025         if (!_active_texts[text->type()].have(wc)) {
1026                 return;
1027         }
1028
1029         shared_ptr<Piece> piece = wp.lock ();
1030         if (!piece) {
1031                 return;
1032         }
1033
1034         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1035
1036         if (dcp_to > piece->content->end(_film)) {
1037                 return;
1038         }
1039
1040         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1041
1042         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1043         if (text->use() && !always && !text->burn()) {
1044                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1045         }
1046 }
1047
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the whole player to `time'.  If `accurate' is true the next emitted
	   video/audio will be exactly at `time'; otherwise it may be a little before.
	*/
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away any partially-assembled 3D frames */
	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Throw away any video which was delayed waiting for subtitles */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Discard pending audio and any subtitles that are currently "open" */
	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where the next emission will be */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know exactly, so clear these and let the next emission set them */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	/* Restart the black/silence generators from the seek point */
	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer adjacent to what we will emit */
	_last_video.clear ();
}
1107
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	/* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
	   player before the video that requires them.
	*/
	_delay.push_back (make_pair (pv, time));

	/* Advance our video clock; in 3D the time only moves on after the right eye */
	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
		_last_video_time = time + one_video_frame();
	}
	_last_video_eyes = increment_eyes (pv->eyes());

	if (_delay.size() < 3) {
		/* Not enough buffered yet; wait for more before emitting anything */
		return;
	}

	/* Emit the oldest delayed frame */
	pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
	_delay.pop_front();
	do_emit_video (to_do.first, to_do.second);
}
1129
1130 void
1131 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1132 {
1133         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1134                 for (int i = 0; i < TEXT_COUNT; ++i) {
1135                         _active_texts[i].clear_before (time);
1136                 }
1137         }
1138
1139         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1140         if (subtitles) {
1141                 pv->set_text (subtitles.get ());
1142         }
1143
1144         Video (pv, time);
1145 }
1146
1147 void
1148 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1149 {
1150         /* Log if the assert below is about to fail */
1151         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1152                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1153         }
1154
1155         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1156         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1157         Audio (data, time, _film->audio_frame_rate());
1158         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1159 }
1160
1161 void
1162 Player::fill_audio (DCPTimePeriod period)
1163 {
1164         if (period.from == period.to) {
1165                 return;
1166         }
1167
1168         DCPOMATIC_ASSERT (period.from < period.to);
1169
1170         DCPTime t = period.from;
1171         while (t < period.to) {
1172                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1173                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1174                 if (samples) {
1175                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1176                         silence->make_silent ();
1177                         emit_audio (silence, t);
1178                 }
1179                 t += block;
1180         }
1181 }
1182
1183 DCPTime
1184 Player::one_video_frame () const
1185 {
1186         return DCPTime::from_frames (1, _film->video_frame_rate ());
1187 }
1188
1189 pair<shared_ptr<AudioBuffers>, DCPTime>
1190 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1191 {
1192         DCPTime const discard_time = discard_to - time;
1193         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1194         Frame remaining_frames = audio->frames() - discard_frames;
1195         if (remaining_frames <= 0) {
1196                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1197         }
1198         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1199         return make_pair(cut, time + discard_time);
1200 }
1201
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Change the JPEG2000 decode resolution reduction, rebuilding the pieces.
	   Follows the usual Change protocol: PENDING, then CANCELLED (if nothing
	   changed) or DONE.
	*/
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before emitting so observers can call back into us */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* Emitted outside the lock, as above */
	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1222
1223 optional<DCPTime>
1224 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1225 {
1226         boost::mutex::scoped_lock lm (_mutex);
1227
1228         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1229                 if (i->content == content) {
1230                         return content_time_to_dcp (i, t);
1231                 }
1232         }
1233
1234         /* We couldn't find this content; perhaps things are being changed over */
1235         return optional<DCPTime>();
1236 }