Some more piece layer stuff.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "atmos_decoder.h"
22 #include "player.h"
23 #include "film.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
28 #include "job.h"
29 #include "image.h"
30 #include "raw_image_proxy.h"
31 #include "ratio.h"
32 #include "log.h"
33 #include "render_text.h"
34 #include "config.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
39 #include "playlist.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
42 #include "decoder.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
52 #include "shuffler.h"
53 #include "timer.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66 using std::list;
67 using std::cout;
68 using std::min;
69 using std::max;
70 using std::min;
71 using std::vector;
72 using std::pair;
73 using std::map;
74 using std::make_pair;
75 using std::copy;
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 using namespace dcpomatic;
82
/* Property codes passed to Player::Change listeners to say which aspect of
   the player's output has changed (or is about to change).
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
89
/** Construct a Player which plays the whole of the given film's own playlist.
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
105
/** Construct a Player which plays an explicitly-given playlist rather than
 *  the film's own.
 *  @param film Film whose settings (frame rates, sizes etc.) to use.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
122
/** Shared set-up for both constructors: connect change signals, size the video
 *  container, build pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Make sure any audio processor is set up before the first pieces are built */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
139
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by this class; see setup_pieces_unlocked() */
	delete _shuffler;
}
144
/** Take the lock and rebuild our pieces; the real work happens in
 *  setup_pieces_unlocked().
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
151
152
153 bool
154 have_video (shared_ptr<const Content> content)
155 {
156         return static_cast<bool>(content->video) && content->video->use();
157 }
158
159 bool
160 have_audio (shared_ptr<const Content> content)
161 {
162         return static_cast<bool>(content->audio);
163 }
164
/** Rebuild the list of Pieces (content/decoder pairs) from the playlist.
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so we can re-use their decoders where possible */
	list<shared_ptr<Piece> > old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

		if (!i->paths_valid ()) {
			/* Content's files are missing; skip it */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Look for an old decoder for this content which can be re-used */
		shared_ptr<Decoder> old_decoder;
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Connect the start/stop signals of each of this decoder's text streams */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record, for each audio stream, which piece it belongs to and where that piece starts */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Work out the gaps in which we must emit black video / silent audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission state to the start of playback */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
}
276
277 void
278 Player::playlist_content_change (ChangeType type, int property, bool frequent)
279 {
280         if (type == CHANGE_TYPE_PENDING) {
281                 /* The player content is probably about to change, so we can't carry on
282                    until that has happened and we've rebuilt our pieces.  Stop pass()
283                    and seek() from working until then.
284                 */
285                 ++_suspended;
286         } else if (type == CHANGE_TYPE_DONE) {
287                 /* A change in our content has gone through.  Re-build our pieces. */
288                 setup_pieces ();
289                 --_suspended;
290         } else if (type == CHANGE_TYPE_CANCELLED) {
291                 --_suspended;
292         }
293
294         Change (type, property, frequent);
295 }
296
297 void
298 Player::set_video_container_size (dcp::Size s)
299 {
300         Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
301
302         {
303                 boost::mutex::scoped_lock lm (_mutex);
304
305                 if (s == _video_container_size) {
306                         lm.unlock ();
307                         Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
308                         return;
309                 }
310
311                 _video_container_size = s;
312
313                 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
314                 _black_image->make_black ();
315         }
316
317         Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
318 }
319
320 void
321 Player::playlist_change (ChangeType type)
322 {
323         if (type == CHANGE_TYPE_DONE) {
324                 setup_pieces ();
325         }
326         Change (type, PlayerProperty::PLAYLIST, false);
327 }
328
329 void
330 Player::film_change (ChangeType type, Film::Property p)
331 {
332         /* Here we should notice Film properties that affect our output, and
333            alert listeners that our output now would be different to how it was
334            last time we were run.
335         */
336
337         if (p == Film::CONTAINER) {
338                 Change (type, PlayerProperty::FILM_CONTAINER, false);
339         } else if (p == Film::VIDEO_FRAME_RATE) {
340                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
341                    so we need new pieces here.
342                 */
343                 if (type == CHANGE_TYPE_DONE) {
344                         setup_pieces ();
345                 }
346                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
347         } else if (p == Film::AUDIO_PROCESSOR) {
348                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
349                         boost::mutex::scoped_lock lm (_mutex);
350                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
351                 }
352         } else if (p == Film::AUDIO_CHANNELS) {
353                 if (type == CHANGE_TYPE_DONE) {
354                         boost::mutex::scoped_lock lm (_mutex);
355                         _audio_merger.clear ();
356                 }
357         }
358 }
359
360 shared_ptr<PlayerVideo>
361 Player::black_player_video_frame (Eyes eyes) const
362 {
363         return shared_ptr<PlayerVideo> (
364                 new PlayerVideo (
365                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
366                         Crop (),
367                         optional<double> (),
368                         _video_container_size,
369                         _video_container_size,
370                         eyes,
371                         PART_WHOLE,
372                         PresetColourConversion::all().front().conversion,
373                         VIDEO_RANGE_FULL,
374                         boost::weak_ptr<Content>(),
375                         boost::optional<Frame>(),
376                         false
377                 )
378         );
379 }
380
381
382 list<shared_ptr<Font> >
383 Player::get_subtitle_fonts ()
384 {
385         boost::mutex::scoped_lock lm (_mutex);
386
387         list<shared_ptr<Font> > fonts;
388         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
389                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
390                         /* XXX: things may go wrong if there are duplicate font IDs
391                            with different font files.
392                         */
393                         list<shared_ptr<Font> > f = j->fonts ();
394                         copy (f.begin(), f.end(), back_inserter (fonts));
395                 }
396         }
397
398         return fonts;
399 }
400
401 /** Set this player never to produce any video data */
402 void
403 Player::set_ignore_video ()
404 {
405         boost::mutex::scoped_lock lm (_mutex);
406         _ignore_video = true;
407         setup_pieces_unlocked ();
408 }
409
410 void
411 Player::set_ignore_audio ()
412 {
413         boost::mutex::scoped_lock lm (_mutex);
414         _ignore_audio = true;
415         setup_pieces_unlocked ();
416 }
417
418 void
419 Player::set_ignore_text ()
420 {
421         boost::mutex::scoped_lock lm (_mutex);
422         _ignore_text = true;
423         setup_pieces_unlocked ();
424 }
425
426 /** Set the player to always burn open texts into the image regardless of the content settings */
427 void
428 Player::set_always_burn_open_subtitles ()
429 {
430         boost::mutex::scoped_lock lm (_mutex);
431         _always_burn_open_subtitles = true;
432 }
433
434 /** Sets up the player to be faster, possibly at the expense of quality */
435 void
436 Player::set_fast ()
437 {
438         boost::mutex::scoped_lock lm (_mutex);
439         _fast = true;
440         setup_pieces_unlocked ();
441 }
442
443 void
444 Player::set_play_referenced ()
445 {
446         boost::mutex::scoped_lock lm (_mutex);
447         _play_referenced = true;
448         setup_pieces_unlocked ();
449 }
450
451 static void
452 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
453 {
454         DCPOMATIC_ASSERT (r);
455         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
456         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
457         if (r->actual_duration() > 0) {
458                 a.push_back (
459                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
460                         );
461         }
462 }
463
/** @return details of the assets in any referenced DCP content, with their
 *  positions and trims mapped into this film's timeline.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): a failure here abandons the whole scan and returns
			   the assets collected so far, rather than skipping just this piece
			   of content -- confirm that this is intended.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		/* Frame rate of the DCP content itself */
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		/* Frame rate of the film being made */
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* Where this reel starts in the film's timeline */
			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
534
/** Try to make some progress: find the decoder or gap-filler which is farthest
 *  behind, make it emit some data, then push out any audio which is ready.
 *  @return true if playback has finished, otherwise false.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder will next emit, in DCP time */
		DCPTime const t = i->content_time_to_dcp (max(i->decoder->position(), i->content->trim_start()));
		if (t > i->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black/silent gap which starts before any content is what we should service next */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _playback_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Everything is finished: flush the 3D shuffler and any delayed video */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
689
690 /** @return Open subtitles for the frame at the given time, converted to images */
691 optional<PositionImage>
692 Player::open_subtitles_for_frame (DCPTime time) const
693 {
694         list<PositionImage> captions;
695         int const vfr = _film->video_frame_rate();
696
697         BOOST_FOREACH (
698                 PlayerText j,
699                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
700                 ) {
701
702                 /* Bitmap subtitles */
703                 BOOST_FOREACH (BitmapText i, j.bitmap) {
704                         if (!i.image) {
705                                 continue;
706                         }
707
708                         /* i.image will already have been scaled to fit _video_container_size */
709                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
710
711                         captions.push_back (
712                                 PositionImage (
713                                         i.image,
714                                         Position<int> (
715                                                 lrint (_video_container_size.width * i.rectangle.x),
716                                                 lrint (_video_container_size.height * i.rectangle.y)
717                                                 )
718                                         )
719                                 );
720                 }
721
722                 /* String subtitles (rendered to an image) */
723                 if (!j.string.empty ()) {
724                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
725                         copy (s.begin(), s.end(), back_inserter (captions));
726                 }
727         }
728
729         if (captions.empty ()) {
730                 return optional<PositionImage> ();
731         }
732
733         return merge (captions);
734 }
735
736 void
737 Player::video (weak_ptr<Piece> wp, ContentVideo video)
738 {
739         shared_ptr<Piece> piece = wp.lock ();
740         if (!piece) {
741                 return;
742         }
743
744         if (!piece->video_use()) {
745                 return;
746         }
747
748         if (piece->frc.skip && (video.frame % 2) == 1) {
749                 return;
750         }
751
752         /* Time of the first frame we will emit */
753         DCPTime const time = piece->content_video_to_dcp (video.frame);
754
755         /* Discard if it's before the content's period or the last accurate seek.  We can't discard
756            if it's after the content's period here as in that case we still need to fill any gap between
757            `now' and the end of the content's period.
758         */
759         if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
760                 return;
761         }
762
763         /* Fill gaps that we discover now that we have some video which needs to be emitted.
764            This is where we need to fill to.
765         */
766         DCPTime fill_to = min (time, piece->end(_film));
767
768         if (_last_video_time) {
769                 DCPTime fill_from = max (*_last_video_time, piece->position());
770
771                 /* Fill if we have more than half a frame to do */
772                 if ((fill_to - fill_from) > one_video_frame() / 2) {
773                         LastVideoMap::const_iterator last = _last_video.find (wp);
774                         if (_film->three_d()) {
775                                 Eyes fill_to_eyes = video.eyes;
776                                 if (fill_to_eyes == EYES_BOTH) {
777                                         fill_to_eyes = EYES_LEFT;
778                                 }
779                                 if (fill_to == piece->end(_film)) {
780                                         /* Don't fill after the end of the content */
781                                         fill_to_eyes = EYES_LEFT;
782                                 }
783                                 DCPTime j = fill_from;
784                                 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
785                                 if (eyes == EYES_BOTH) {
786                                         eyes = EYES_LEFT;
787                                 }
788                                 while (j < fill_to || eyes != fill_to_eyes) {
789                                         if (last != _last_video.end()) {
790                                                 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
791                                                 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
792                                                 copy->set_eyes (eyes);
793                                                 emit_video (copy, j);
794                                         } else {
795                                                 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
796                                                 emit_video (black_player_video_frame(eyes), j);
797                                         }
798                                         if (eyes == EYES_RIGHT) {
799                                                 j += one_video_frame();
800                                         }
801                                         eyes = increment_eyes (eyes);
802                                 }
803                         } else {
804                                 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
805                                         if (last != _last_video.end()) {
806                                                 emit_video (last->second, j);
807                                         } else {
808                                                 emit_video (black_player_video_frame(EYES_BOTH), j);
809                                         }
810                                 }
811                         }
812                 }
813         }
814
815         _last_video[wp].reset (
816                 new PlayerVideo (
817                         video.image,
818                         piece->video_crop(),
819                         piece->video_fade(_film, video.frame),
820                         scale_for_display(piece->video_scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
821                         _video_container_size,
822                         video.eyes,
823                         video.part,
824                         piece->video_colour_conversion(),
825                         piece->video_range(),
826                         piece->content,
827                         video.frame,
828                         false
829                         )
830                 );
831
832         DCPTime t = time;
833         for (int i = 0; i < piece->frc.repeat; ++i) {
834                 if (t < piece->end(_film)) {
835                         emit_video (_last_video[wp], t);
836                 }
837                 t += one_video_frame ();
838         }
839 }
840
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle some audio emitted by a piece's decoder: trim it to the piece's
	   period, apply gain, remap it to the DCP's channels, run any audio
	   processor and push the result into the merger.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	/* Frame rate that this piece's audio is resampled to */
	int const rfr = piece->audio_resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = piece->resampled_audio_to_dcp (_film, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->end(_film)) {
		/* The block overlaps the end of the content; keep only the part that fits */
		Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (piece->audio_gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (piece->audio_gain());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record where this stream's pushed audio now ends; presumably used elsewhere
	   to decide how much merged audio can safely be emitted — confirm against callers.
	*/
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
904
905 void
906 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
907 {
908         shared_ptr<Piece> piece = wp.lock ();
909         shared_ptr<const TextContent> text = wc.lock ();
910         if (!piece || !text) {
911                 return;
912         }
913
914         /* Apply content's subtitle offsets */
915         subtitle.sub.rectangle.x += text->x_offset ();
916         subtitle.sub.rectangle.y += text->y_offset ();
917
918         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
919         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
920         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
921
922         /* Apply content's subtitle scale */
923         subtitle.sub.rectangle.width *= text->x_scale ();
924         subtitle.sub.rectangle.height *= text->y_scale ();
925
926         PlayerText ps;
927         shared_ptr<Image> image = subtitle.sub.image;
928
929         /* We will scale the subtitle up to fit _video_container_size */
930         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
931         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
932         if (width == 0 || height == 0) {
933                 return;
934         }
935
936         dcp::Size scaled_size (width, height);
937         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
938         DCPTime from (piece->content_time_to_dcp(subtitle.from()));
939
940         _active_texts[text->type()].add_from (wc, ps, from);
941 }
942
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	/* Some string (plain-text) subtitles have started: apply the content's
	   position/scale settings to each one and record the result in the
	   appropriate active-texts list.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		/* Piece or content has gone away */
		return;
	}

	PlayerText ps;
	DCPTime const from (piece->content_time_to_dcp( subtitle.from()));

	if (from > piece->end(_film)) {
		/* Starts after the end of the content; ignore it */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Set the subtitle's in time; the 1000 is presumably the time-code rate for dcp::Time — confirm */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
986
987 void
988 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
989 {
990         shared_ptr<const TextContent> text = wc.lock ();
991         if (!text) {
992                 return;
993         }
994
995         if (!_active_texts[text->type()].have(wc)) {
996                 return;
997         }
998
999         shared_ptr<Piece> piece = wp.lock ();
1000         if (!piece) {
1001                 return;
1002         }
1003
1004         DCPTime const dcp_to = piece->content_time_to_dcp(to);
1005
1006         if (dcp_to > piece->end(_film)) {
1007                 return;
1008         }
1009
1010         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1011
1012         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1013         if (text->use() && !always && !text->burn()) {
1014                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1015         }
1016 }
1017
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the player to `time'.  If accurate is true, the next output will
	   come exactly from `time'; otherwise it may come from slightly before
	   (e.g. from the nearest preceding keyframe).
	*/
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away everything that was queued up before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder as appropriate for where `time' falls relative to the piece */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (i->dcp_to_content_time(_film, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (i->dcp_to_content_time(_film, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* After an accurate seek the next video/audio must start exactly at `time' */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* After an inaccurate seek we don't know exactly where the next output will come from */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the frames we were keeping around for gap-filling */
	_last_video.clear ();
}
1077
1078 void
1079 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1080 {
1081         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1082            player before the video that requires them.
1083         */
1084         _delay.push_back (make_pair (pv, time));
1085
1086         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1087                 _last_video_time = time + one_video_frame();
1088         }
1089         _last_video_eyes = increment_eyes (pv->eyes());
1090
1091         if (_delay.size() < 3) {
1092                 return;
1093         }
1094
1095         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1096         _delay.pop_front();
1097         do_emit_video (to_do.first, to_do.second);
1098 }
1099
1100 void
1101 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1102 {
1103         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1104                 for (int i = 0; i < TEXT_COUNT; ++i) {
1105                         _active_texts[i].clear_before (time);
1106                 }
1107         }
1108
1109         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1110         if (subtitles) {
1111                 pv->set_text (subtitles.get ());
1112         }
1113
1114         Video (pv, time);
1115 }
1116
1117 void
1118 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1119 {
1120         /* Log if the assert below is about to fail */
1121         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1122                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1123         }
1124
1125         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1126         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1127         Audio (data, time, _film->audio_frame_rate());
1128         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1129 }
1130
1131 void
1132 Player::fill_audio (DCPTimePeriod period)
1133 {
1134         if (period.from == period.to) {
1135                 return;
1136         }
1137
1138         DCPOMATIC_ASSERT (period.from < period.to);
1139
1140         DCPTime t = period.from;
1141         while (t < period.to) {
1142                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1143                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1144                 if (samples) {
1145                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1146                         silence->make_silent ();
1147                         emit_audio (silence, t);
1148                 }
1149                 t += block;
1150         }
1151 }
1152
1153 DCPTime
1154 Player::one_video_frame () const
1155 {
1156         return DCPTime::from_frames (1, _film->video_frame_rate ());
1157 }
1158
1159 pair<shared_ptr<AudioBuffers>, DCPTime>
1160 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1161 {
1162         DCPTime const discard_time = discard_to - time;
1163         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1164         Frame remaining_frames = audio->frames() - discard_frames;
1165         if (remaining_frames <= 0) {
1166                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1167         }
1168         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1169         return make_pair(cut, time + discard_time);
1170 }
1171
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Set the decode resolution reduction to use for DCP content, rebuilding
	   our pieces if it has changed.  Emits Change signals around the update.
	*/
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before emitting the signal so that handlers may
			   call back into Player without deadlocking on _mutex.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* Lock has been released by the end of the scope above */
	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1192
1193 optional<DCPTime>
1194 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1195 {
1196         boost::mutex::scoped_lock lm (_mutex);
1197
1198         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1199                 if (i->content == content) {
1200                         return i->content_time_to_dcp (t);
1201                 }
1202         }
1203
1204         /* We couldn't find this content; perhaps things are being changed over */
1205         return optional<DCPTime>();
1206 }
1207
1208
1209 shared_ptr<const Playlist>
1210 Player::playlist () const
1211 {
1212         return _playlist ? _playlist : _film->playlist();
1213 }
1214
1215
1216 void
1217 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1218 {
1219         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1220 }
1221