Fix multiple video overlaps.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "render_text.h"
51 #include "shuffler.h"
52 #include "text_content.h"
53 #include "text_decoder.h"
54 #include "timer.h"
55 #include "video_decoder.h"
56 #include <dcp/reel.h>
57 #include <dcp/reel_closed_caption_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_sound_asset.h>
60 #include <dcp/reel_subtitle_asset.h>
61 #include <algorithm>
62 #include <iostream>
63 #include <stdint.h>
64
65 #include "i18n.h"
66
67
68 using std::copy;
69 using std::cout;
70 using std::dynamic_pointer_cast;
71 using std::list;
72 using std::make_pair;
73 using std::make_shared;
74 using std::make_shared;
75 using std::max;
76 using std::min;
77 using std::min;
78 using std::pair;
79 using std::shared_ptr;
80 using std::vector;
81 using std::weak_ptr;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Identifiers passed with the Player's Change signal so that observers can
   tell which aspect of the Player's state has changed. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
int const PlayerProperty::IGNORE_VIDEO = 706;
int const PlayerProperty::IGNORE_AUDIO = 707;
int const PlayerProperty::IGNORE_TEXT = 708;
int const PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES = 709;
int const PlayerProperty::PLAY_REFERENCED = 710;
101
102
103 Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
104         : _film (film)
105         , _suspended (0)
106         , _ignore_video(false)
107         , _ignore_audio(false)
108         , _ignore_text(false)
109         , _always_burn_open_subtitles(false)
110         , _fast(false)
111         , _tolerant (film->tolerant())
112         , _play_referenced(false)
113         , _audio_merger(film->audio_frame_rate())
114         , _subtitle_alignment (subtitle_alignment)
115 {
116         construct ();
117 }
118
119
120 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
121         : _film (film)
122         , _playlist (playlist_)
123         , _suspended (0)
124         , _ignore_video(false)
125         , _ignore_audio(false)
126         , _ignore_text(false)
127         , _always_burn_open_subtitles(false)
128         , _fast(false)
129         , _tolerant (film->tolerant())
130         , _play_referenced(false)
131         , _audio_merger(film->audio_frame_rate())
132 {
133         construct ();
134 }
135
136
137 void
138 Player::construct ()
139 {
140         auto film = _film.lock();
141         DCPOMATIC_ASSERT(film);
142
143         connect();
144         set_video_container_size(film->frame_size());
145
146         film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
147
148         setup_pieces ();
149         seek (DCPTime (), true);
150 }
151
152
153 void
154 Player::connect()
155 {
156         auto film = _film.lock();
157         DCPOMATIC_ASSERT(film);
158
159         _film_changed_connection = film->Change.connect(bind(&Player::film_change, this, _1, _2));
160         /* The butler must hear about this first, so since we are proxying this through to the butler we must
161            be first.
162         */
163         _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
164         _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
165 }
166
167
168 Player::Player(Player&& other)
169         : _film(other._film)
170         , _playlist(std::move(other._playlist))
171         , _suspended(other._suspended.load())
172         , _pieces(std::move(other._pieces))
173         , _video_container_size(other._video_container_size.load())
174         , _black_image(std::move(other._black_image))
175         , _ignore_video(other._ignore_video.load())
176         , _ignore_audio(other._ignore_audio.load())
177         , _ignore_text(other._ignore_text.load())
178         , _always_burn_open_subtitles(other._always_burn_open_subtitles.load())
179         , _fast(other._fast.load())
180         , _tolerant(other._tolerant)
181         , _play_referenced(other._play_referenced.load())
182         , _next_video_time(other._next_video_time)
183         , _next_audio_time(other._next_audio_time)
184         , _dcp_decode_reduction(other._dcp_decode_reduction.load())
185         , _last_video(std::move(other._last_video))
186         , _audio_merger(std::move(other._audio_merger))
187         , _shuffler(std::move(other._shuffler))
188         , _delay(std::move(other._delay))
189         , _stream_states(std::move(other._stream_states))
190         , _black(std::move(other._black))
191         , _silent(std::move(other._silent))
192         , _active_texts(std::move(other._active_texts))
193         , _audio_processor(std::move(other._audio_processor))
194         , _playback_length(other._playback_length.load())
195         , _subtitle_alignment(other._subtitle_alignment)
196 {
197         connect();
198 }
199
200
201 Player&
202 Player::operator=(Player&& other)
203 {
204         if (this == &other) {
205                 return *this;
206         }
207
208         _film = std::move(other._film);
209         _playlist = std::move(other._playlist);
210         _suspended = other._suspended.load();
211         _pieces = std::move(other._pieces);
212         _video_container_size = other._video_container_size.load();
213         _black_image = std::move(other._black_image);
214         _ignore_video = other._ignore_video.load();
215         _ignore_audio = other._ignore_audio.load();
216         _ignore_text = other._ignore_text.load();
217         _always_burn_open_subtitles = other._always_burn_open_subtitles.load();
218         _fast = other._fast.load();
219         _tolerant = other._tolerant;
220         _play_referenced = other._play_referenced.load();
221         _next_video_time = other._next_video_time;
222         _next_audio_time = other._next_audio_time;
223         _dcp_decode_reduction = other._dcp_decode_reduction.load();
224         _last_video = std::move(other._last_video);
225         _audio_merger = std::move(other._audio_merger);
226         _shuffler = std::move(other._shuffler);
227         _delay = std::move(other._delay);
228         _stream_states = std::move(other._stream_states);
229         _black = std::move(other._black);
230         _silent = std::move(other._silent);
231         _active_texts = std::move(other._active_texts);
232         _audio_processor = std::move(other._audio_processor);
233         _playback_length = other._playback_length.load();
234         _subtitle_alignment = other._subtitle_alignment;
235
236         connect();
237
238         return *this;
239 }
240
241
242 bool
243 have_video (shared_ptr<const Content> content)
244 {
245         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
246 }
247
248
249 bool
250 have_audio (shared_ptr<const Content> content)
251 {
252         return static_cast<bool>(content->audio) && content->can_be_played();
253 }
254
255
/** Rebuild the list of Pieces (content + decoder pairs) from the playlist,
 *  re-using existing decoders where the content is unchanged, and re-make
 *  all the derived state (stream states, overlap lists, black/silence fill).
 *  Called whenever the playlist or relevant film/player settings change.
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);

	/* Keep the old pieces around so that their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	auto film = _film.lock();
	if (!film) {
		return;
	}

	_playback_length = _playlist ? _playlist->length(film) : film->length();

	auto playlist_content = playlist()->content();
	/* 3D left/right-only content can arrive out of eye order, so we need a
	   Shuffler if any such content is present. */
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto content: playlist()->content()) {

		if (!content->paths_valid()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && content->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use the decoder from the old piece for this content, if there was one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == content) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory(film, content, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc(film, content);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(content, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Hook up start/stop signals for every text (subtitle/caption) decoder */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Map every audio stream to the piece that owns it */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState(i);
			}
		}
	}

	/* Video from 3D L/R-only content is not treated as overlapping, since the
	   two eyes legitimately occupy the same period. */
	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto piece = _pieces.begin(); piece != _pieces.end(); ++piece) {
		if (ignore_overlap((*piece)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = (*piece)->content->period(film);
			for (auto later_piece = std::next(piece); later_piece != _pieces.end(); ++later_piece) {
				if (ignore_overlap((*later_piece)->content->video)) {
					if (auto overlap = (*later_piece)->content->period(film).overlap(period)) {
						/* Later content wins: this piece's video is ignored during the overlap */
						(*piece)->ignore_video.push_back(*overlap);
					}
				}
			}
		}
	}

	/* Periods with no video get black frames; periods with no audio get silence */
	_black = Empty(film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset the emission bookkeeping; the next emitted data re-establishes it */
	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}
402
403
404 void
405 Player::playlist_content_change (ChangeType type, int property, bool frequent)
406 {
407         auto film = _film.lock();
408         if (!film) {
409                 return;
410         }
411
412         if (property == VideoContentProperty::CROP) {
413                 if (type == ChangeType::DONE) {
414                         boost::mutex::scoped_lock lm (_mutex);
415                         for (auto const& i: _delay) {
416                                 i.first->reset_metadata(film, _video_container_size);
417                         }
418                 }
419         } else {
420                 if (type == ChangeType::PENDING) {
421                         /* The player content is probably about to change, so we can't carry on
422                            until that has happened and we've rebuilt our pieces.  Stop pass()
423                            and seek() from working until then.
424                         */
425                         ++_suspended;
426                 } else if (type == ChangeType::DONE) {
427                         /* A change in our content has gone through.  Re-build our pieces. */
428                         setup_pieces ();
429                         --_suspended;
430                 } else if (type == ChangeType::CANCELLED) {
431                         --_suspended;
432                 }
433         }
434
435         Change (type, property, frequent);
436 }
437
438
439 void
440 Player::set_video_container_size (dcp::Size s)
441 {
442         ChangeSignaller<Player, int> cc(this, PlayerProperty::VIDEO_CONTAINER_SIZE);
443
444         if (s == _video_container_size) {
445                 cc.abort();
446                 return;
447         }
448
449         _video_container_size = s;
450
451         {
452                 boost::mutex::scoped_lock lm(_black_image_mutex);
453                 _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
454                 _black_image->make_black ();
455         }
456 }
457
458
459 void
460 Player::playlist_change (ChangeType type)
461 {
462         if (type == ChangeType::DONE) {
463                 setup_pieces ();
464         }
465         Change (type, PlayerProperty::PLAYLIST, false);
466 }
467
468
469 void
470 Player::film_change (ChangeType type, Film::Property p)
471 {
472         /* Here we should notice Film properties that affect our output, and
473            alert listeners that our output now would be different to how it was
474            last time we were run.
475         */
476
477         auto film = _film.lock();
478         if (!film) {
479                 return;
480         }
481
482         if (p == Film::Property::CONTAINER) {
483                 Change (type, PlayerProperty::FILM_CONTAINER, false);
484         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
485                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
486                    so we need new pieces here.
487                 */
488                 if (type == ChangeType::DONE) {
489                         setup_pieces ();
490                 }
491                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
492         } else if (p == Film::Property::AUDIO_PROCESSOR) {
493                 if (type == ChangeType::DONE && film->audio_processor ()) {
494                         boost::mutex::scoped_lock lm (_mutex);
495                         _audio_processor = film->audio_processor()->clone(film->audio_frame_rate());
496                 }
497         } else if (p == Film::Property::AUDIO_CHANNELS) {
498                 if (type == ChangeType::DONE) {
499                         boost::mutex::scoped_lock lm (_mutex);
500                         _audio_merger.clear ();
501                 }
502         }
503 }
504
505
506 shared_ptr<PlayerVideo>
507 Player::black_player_video_frame (Eyes eyes) const
508 {
509         boost::mutex::scoped_lock lm(_black_image_mutex);
510
511         return std::make_shared<PlayerVideo> (
512                 std::make_shared<const RawImageProxy>(_black_image),
513                 Crop(),
514                 optional<double>(),
515                 _video_container_size,
516                 _video_container_size,
517                 eyes,
518                 Part::WHOLE,
519                 PresetColourConversion::all().front().conversion,
520                 VideoRange::FULL,
521                 std::weak_ptr<Content>(),
522                 boost::optional<Frame>(),
523                 false
524         );
525 }
526
527
528 Frame
529 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
530 {
531         auto film = _film.lock();
532         DCPOMATIC_ASSERT(film);
533
534         auto s = t - piece->content->position ();
535         s = min (piece->content->length_after_trim(film), s);
536         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
537
538         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
539            then convert that ContentTime to frames at the content's rate.  However this fails for
540            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
541            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
542
543            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
544         */
545         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
546 }
547
548
549 DCPTime
550 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
551 {
552         /* See comment in dcp_to_content_video */
553         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
554         return d + piece->content->position();
555 }
556
557
558 Frame
559 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
560 {
561         auto film = _film.lock();
562         DCPOMATIC_ASSERT(film);
563
564         auto s = t - piece->content->position ();
565         s = min (piece->content->length_after_trim(film), s);
566         /* See notes in dcp_to_content_video */
567         return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(film->audio_frame_rate());
568 }
569
570
571 DCPTime
572 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
573 {
574         auto film = _film.lock();
575         DCPOMATIC_ASSERT(film);
576
577         /* See comment in dcp_to_content_video */
578         return DCPTime::from_frames(f, film->audio_frame_rate())
579                 - DCPTime (piece->content->trim_start(), piece->frc)
580                 + piece->content->position();
581 }
582
583
584 ContentTime
585 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
586 {
587         auto film = _film.lock();
588         DCPOMATIC_ASSERT(film);
589
590         auto s = t - piece->content->position ();
591         s = min (piece->content->length_after_trim(film), s);
592         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
593 }
594
595
596 DCPTime
597 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
598 {
599         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
600 }
601
602
603 vector<shared_ptr<Font>>
604 Player::get_subtitle_fonts ()
605 {
606         boost::mutex::scoped_lock lm (_mutex);
607
608         vector<shared_ptr<Font>> fonts;
609         for (auto piece: _pieces) {
610                 for (auto text: piece->content->text) {
611                         auto text_fonts = text->fonts();
612                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
613                 }
614         }
615
616         return fonts;
617 }
618
619
620 /** Set this player never to produce any video data */
621 void
622 Player::set_ignore_video ()
623 {
624         ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_VIDEO);
625         _ignore_video = true;
626         setup_pieces();
627 }
628
629
630 void
631 Player::set_ignore_audio ()
632 {
633         ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_AUDIO);
634         _ignore_audio = true;
635         setup_pieces();
636 }
637
638
639 void
640 Player::set_ignore_text ()
641 {
642         ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_TEXT);
643         _ignore_text = true;
644         setup_pieces();
645 }
646
647
648 /** Set the player to always burn open texts into the image regardless of the content settings */
649 void
650 Player::set_always_burn_open_subtitles ()
651 {
652         ChangeSignaller<Player, int> cc(this, PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES);
653         _always_burn_open_subtitles = true;
654 }
655
656
657 /** Sets up the player to be faster, possibly at the expense of quality */
658 void
659 Player::set_fast ()
660 {
661         _fast = true;
662         setup_pieces();
663 }
664
665
666 void
667 Player::set_play_referenced ()
668 {
669         ChangeSignaller<Player, int> cc(this, PlayerProperty::PLAY_REFERENCED);
670         _play_referenced = true;
671         setup_pieces();
672 }
673
674
675 bool
676 Player::pass ()
677 {
678         boost::mutex::scoped_lock lm (_mutex);
679
680         if (_suspended) {
681                 /* We can't pass in this state */
682                 LOG_DEBUG_PLAYER_NC ("Player is suspended");
683                 return false;
684         }
685
686         auto film = _film.lock();
687
688         if (_playback_length.load() == DCPTime() || !film) {
689                 /* Special; just give one black frame */
690                 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
691                 return true;
692         }
693
694         /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
695
696         shared_ptr<Piece> earliest_content;
697         optional<DCPTime> earliest_time;
698
699         for (auto i: _pieces) {
700                 if (i->done) {
701                         continue;
702                 }
703
704                 auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
705                 if (t > i->content->end(film)) {
706                         i->done = true;
707                 } else {
708
709                         /* Given two choices at the same time, pick the one with texts so we see it before
710                            the video.
711                         */
712                         if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
713                                 earliest_time = t;
714                                 earliest_content = i;
715                         }
716                 }
717         }
718
719         bool done = false;
720
721         enum {
722                 NONE,
723                 CONTENT,
724                 BLACK,
725                 SILENT
726         } which = NONE;
727
728         if (earliest_content) {
729                 which = CONTENT;
730         }
731
732         if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
733                 earliest_time = _black.position ();
734                 which = BLACK;
735         }
736
737         if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
738                 earliest_time = _silent.position ();
739                 which = SILENT;
740         }
741
742         switch (which) {
743         case CONTENT:
744         {
745                 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
746                 earliest_content->done = earliest_content->decoder->pass ();
747                 auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
748                 if (dcp && !_play_referenced && dcp->reference_audio()) {
749                         /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
750                            to `hide' the fact that no audio was emitted during the referenced DCP (though
751                            we need to behave as though it was).
752                         */
753                         _next_audio_time = dcp->end(film);
754                 }
755                 break;
756         }
757         case BLACK:
758                 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
759                 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
760                 _black.set_position (_black.position() + one_video_frame());
761                 break;
762         case SILENT:
763         {
764                 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
765                 DCPTimePeriod period (_silent.period_at_position());
766                 if (_next_audio_time) {
767                         /* Sometimes the thing that happened last finishes fractionally before
768                            or after this silence.  Bodge the start time of the silence to fix it.
769                            I think this is nothing to worry about since we will just add or
770                            remove a little silence at the end of some content.
771                         */
772                         int64_t const error = labs(period.from.get() - _next_audio_time->get());
773                         /* Let's not worry about less than a frame at 24fps */
774                         int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
775                         if (error >= too_much_error) {
776                                 film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
777                         }
778                         DCPOMATIC_ASSERT (error < too_much_error);
779                         period.from = *_next_audio_time;
780                 }
781                 if (period.duration() > one_video_frame()) {
782                         period.to = period.from + one_video_frame();
783                 }
784                 fill_audio (period);
785                 _silent.set_position (period.to);
786                 break;
787         }
788         case NONE:
789                 done = true;
790                 break;
791         }
792
793         /* Emit any audio that is ready */
794
795         /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
796            of our streams, or the position of the _silent.  First, though we choose only streams that are less than
797            ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
798            behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
799            that will never come, causing bugs like #2101.
800         */
801         constexpr int ignore_streams_behind = 5;
802
803         using state_pair = std::pair<AudioStreamPtr, StreamState>;
804
805         /* Find streams that have pushed */
806         std::vector<state_pair> have_pushed;
807         std::copy_if(_stream_states.begin(), _stream_states.end(), std::back_inserter(have_pushed), [](state_pair const& a) { return static_cast<bool>(a.second.last_push_end); });
808
809         /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
810         auto latest_last_push_end = std::max_element(
811                 have_pushed.begin(),
812                 have_pushed.end(),
813                 [](state_pair const& a, state_pair const& b) { return a.second.last_push_end.get() < b.second.last_push_end.get(); }
814                 );
815
816         if (latest_last_push_end != have_pushed.end()) {
817                 LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end.get()));
818         }
819
820         /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
821         std::map<AudioStreamPtr, StreamState> alive_stream_states;
822         for (auto const& i: _stream_states) {
823                 if (!i.second.last_push_end || (latest_last_push_end->second.last_push_end.get() - i.second.last_push_end.get()) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
824                         alive_stream_states.insert(i);
825                 } else {
826                         LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
827                 }
828         }
829
830         auto pull_to = _playback_length.load();
831         for (auto const& i: alive_stream_states) {
832                 auto position = i.second.last_push_end.get_value_or(i.second.piece->content->position());
833                 if (!i.second.piece->done && position < pull_to) {
834                         pull_to = position;
835                 }
836         }
837         if (!_silent.done() && _silent.position() < pull_to) {
838                 pull_to = _silent.position();
839         }
840
841         LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
842         auto audio = _audio_merger.pull (pull_to);
843         for (auto i = audio.begin(); i != audio.end(); ++i) {
844                 if (_next_audio_time && i->second < *_next_audio_time) {
845                         /* This new data comes before the last we emitted (or the last seek); discard it */
846                         auto cut = discard_audio (i->first, i->second, *_next_audio_time);
847                         if (!cut.first) {
848                                 continue;
849                         }
850                         *i = cut;
851                 } else if (_next_audio_time && i->second > *_next_audio_time) {
852                         /* There's a gap between this data and the last we emitted; fill with silence */
853                         fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
854                 }
855
856                 emit_audio (i->first, i->second);
857         }
858
859         if (done) {
860                 if (_shuffler) {
861                         _shuffler->flush ();
862                 }
863                 for (auto const& i: _delay) {
864                         do_emit_video(i.first, i.second);
865                 }
866
867                 /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
868                  * However, if we have L and R video files, and one is shorter than the other,
869                  * the fill code in ::video mostly takes care of filling in the gaps.
870                  * However, since it fills at the point when it knows there is more video coming
871                  * at time t (so it should fill any gap up to t) it can't do anything right at the
872                  * end.  This is particularly bad news if the last frame emitted is a LEFT
873                  * eye, as the MXF writer will complain about the 3D sequence being wrong.
874                  * Here's a hack to workaround that particular case.
875                  */
876                 if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
877                         do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
878                 }
879         }
880
881         return done;
882 }
883
884
885 /** @return Open subtitles for the frame at the given time, converted to images */
886 optional<PositionImage>
887 Player::open_subtitles_for_frame (DCPTime time) const
888 {
889         auto film = _film.lock();
890         if (!film) {
891                 return {};
892         }
893
894         list<PositionImage> captions;
895         int const vfr = film->video_frame_rate();
896
897         for (
898                 auto j:
899                 _active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
900                 ) {
901
902                 /* Bitmap subtitles */
903                 for (auto i: j.bitmap) {
904                         if (!i.image) {
905                                 continue;
906                         }
907
908                         /* i.image will already have been scaled to fit _video_container_size */
909                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
910
911                         captions.push_back (
912                                 PositionImage (
913                                         i.image,
914                                         Position<int> (
915                                                 lrint(_video_container_size.load().width * i.rectangle.x),
916                                                 lrint(_video_container_size.load().height * i.rectangle.y)
917                                                 )
918                                         )
919                                 );
920                 }
921
922                 /* String subtitles (rendered to an image) */
923                 if (!j.string.empty()) {
924                         auto s = render_text(j.string, _video_container_size, time, vfr);
925                         copy (s.begin(), s.end(), back_inserter (captions));
926                 }
927         }
928
929         if (captions.empty()) {
930                 return {};
931         }
932
933         return merge (captions, _subtitle_alignment);
934 }
935
936
937 static
938 Eyes
939 increment_eyes (Eyes e)
940 {
941         if (e == Eyes::LEFT) {
942                 return Eyes::RIGHT;
943         }
944
945         return Eyes::LEFT;
946 }
947
948
/** Handle a frame of video arriving from a decoder: work out its DCP time, fill
 *  any gap since the last emitted frame with repeated or black frames, then emit
 *  it once per eye that the film requires.
 *  @param weak_piece Piece that the video came from.
 *  @param video Video frame from the decoder.
 */
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	if (_suspended) {
		/* We are in the middle of a change; don't process anything */
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	auto film = _film.lock();
	if (!film) {
		return;
	}

	/* frc.skip means alternate content frames are dropped to match the DCP rate */
	FrameRateChange frc(film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Decide which eyes to emit this frame for, mapping between 2D/3D content and
	   2D/3D films.
	*/
	vector<Eyes> eyes_to_emit;

	if (!film->three_d()) {
		if (video.eyes == Eyes::RIGHT) {
			/* 2D film, 3D content: discard right */
			return;
		} else if (video.eyes == Eyes::LEFT) {
			/* 2D film, 3D content: emit left as "both" */
			video.eyes = Eyes::BOTH;
			eyes_to_emit = { Eyes::BOTH };
		}
	} else {
		if (video.eyes == Eyes::BOTH) {
			/* 3D film, 2D content; emit "both" for left and right */
			eyes_to_emit = { Eyes::LEFT, Eyes::RIGHT };
		}
	}

	if (eyes_to_emit.empty()) {
		eyes_to_emit = { video.eyes };
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	/* Discard frames that fall inside any period we have been told to ignore */
	auto ignore_video = std::find_if(
		piece->ignore_video.begin(),
		piece->ignore_video.end(),
		[time](DCPTimePeriod period) { return period.contains(time); }
		);
	if (ignore_video != piece->ignore_video.end()) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min(time, piece->content->end(film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Prefer repeating this piece's last frame; fall back to black */
			auto last = _last_video.find (weak_piece);
			if (film->three_d()) {
				/* In 3D we must fill LEFT/RIGHT pairs, tracking which eye each
				   filler frame is for so the sequence stays L,R,L,R...
				*/
				auto fill_to_eyes = eyes_to_emit[0];
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == Eyes::RIGHT) {
						/* Time advances only once both eyes of a frame are done */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: one filler frame per missing frame time */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	/* Build a PlayerVideo for each required eye and emit it, repeating frames
	   (frc.repeat) as needed to match a higher DCP frame rate.
	*/
	for (auto eyes: eyes_to_emit) {
		_last_video[weak_piece] = std::make_shared<PlayerVideo>(
			video.image,
			content_video->actual_crop(),
			content_video->fade(film, video.frame),
			scale_for_display(
				content_video->scaled_size(film->frame_size()),
				_video_container_size,
				film->frame_size(),
				content_video->pixel_quanta()
				),
			_video_container_size,
			eyes,
			video.part,
			content_video->colour_conversion(),
			content_video->range(),
			piece->content,
			video.frame,
			false
			);

		DCPTime t = time;
		for (int i = 0; i < frc.repeat; ++i) {
			/* Don't emit repeats that would fall after the end of the content */
			if (t < piece->content->end(film)) {
				emit_video (_last_video[weak_piece], t);
			}
			t += one_video_frame ();
		}
	}
}
1102
1103
/** Handle a block of audio arriving from a decoder: trim it to the content's
 *  period, apply gain and fades, remap to the DCP channel layout, run any audio
 *  processor and push the result into the merger.
 *  @param weak_piece Piece that the audio came from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio Audio data and its frame position within the content.
 */
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	auto film = _film.lock();
	if (!film) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate(film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
	LOG_DEBUG_PLAYER("Received audio frame %1 covering %2 to %3 (%4)", content_audio.frame, to_string(time), to_string(end), piece->content->path(0).filename());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* Starts before the content: cut off the part that overhangs the start */
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(film)) {
		/* Ends after the content: keep only the part that fits */
		Frame const remaining_frames = DCPTime(piece->content->end(film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Work on a fresh copy of the buffers so we don't modify what the decoder gave us */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain in one pass over the samples */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap to the DCP's channel layout */

	content_audio.audio = remap(content_audio.audio, film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run(content_audio.audio, film->audio_channels());
	}

	/* Push into the merger, and record how far this stream has now got */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames(content_audio.audio->frames(), film->audio_frame_rate());
}
1197
1198
1199 void
1200 Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
1201 {
1202         if (_suspended) {
1203                 return;
1204         }
1205
1206         auto piece = weak_piece.lock ();
1207         auto content = weak_content.lock ();
1208         if (!piece || !content) {
1209                 return;
1210         }
1211
1212         PlayerText ps;
1213         for (auto& sub: subtitle.subs)
1214         {
1215                 /* Apply content's subtitle offsets */
1216                 sub.rectangle.x += content->x_offset ();
1217                 sub.rectangle.y += content->y_offset ();
1218
1219                 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1220                 sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
1221                 sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
1222
1223                 /* Apply content's subtitle scale */
1224                 sub.rectangle.width *= content->x_scale ();
1225                 sub.rectangle.height *= content->y_scale ();
1226
1227                 auto image = sub.image;
1228
1229                 /* We will scale the subtitle up to fit _video_container_size */
1230                 int const width = sub.rectangle.width * _video_container_size.load().width;
1231                 int const height = sub.rectangle.height * _video_container_size.load().height;
1232                 if (width == 0 || height == 0) {
1233                         return;
1234                 }
1235
1236                 dcp::Size scaled_size (width, height);
1237                 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
1238         }
1239
1240         DCPTime from(content_time_to_dcp(piece, subtitle.from()));
1241         _active_texts[content->type()].add_from(weak_content, ps, from);
1242 }
1243
1244
1245 void
1246 Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
1247 {
1248         if (_suspended) {
1249                 return;
1250         }
1251
1252         auto piece = weak_piece.lock ();
1253         auto content = weak_content.lock ();
1254         auto film = _film.lock();
1255         if (!piece || !content || !film) {
1256                 return;
1257         }
1258
1259         PlayerText ps;
1260         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
1261
1262         if (from > piece->content->end(film)) {
1263                 return;
1264         }
1265
1266         for (auto s: subtitle.subs) {
1267                 s.set_h_position (s.h_position() + content->x_offset());
1268                 s.set_v_position (s.v_position() + content->y_offset());
1269                 float const xs = content->x_scale();
1270                 float const ys = content->y_scale();
1271                 float size = s.size();
1272
1273                 /* Adjust size to express the common part of the scaling;
1274                    e.g. if xs = ys = 0.5 we scale size by 2.
1275                 */
1276                 if (xs > 1e-5 && ys > 1e-5) {
1277                         size *= 1 / min (1 / xs, 1 / ys);
1278                 }
1279                 s.set_size (size);
1280
1281                 /* Then express aspect ratio changes */
1282                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1283                         s.set_aspect_adjust (xs / ys);
1284                 }
1285
1286                 s.set_in (dcp::Time(from.seconds(), 1000));
1287                 ps.string.push_back (s);
1288         }
1289
1290         _active_texts[content->type()].add_from(weak_content, ps, from);
1291 }
1292
1293
1294 void
1295 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1296 {
1297         if (_suspended) {
1298                 return;
1299         }
1300
1301         auto content = weak_content.lock ();
1302         if (!content) {
1303                 return;
1304         }
1305
1306         if (!_active_texts[content->type()].have(weak_content)) {
1307                 return;
1308         }
1309
1310         auto piece = weak_piece.lock ();
1311         auto film = _film.lock();
1312         if (!piece || !film) {
1313                 return;
1314         }
1315
1316         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1317
1318         if (dcp_to > piece->content->end(film)) {
1319                 return;
1320         }
1321
1322         auto from = _active_texts[content->type()].add_to(weak_content, dcp_to);
1323
1324         bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1325         if (content->use() && !always && !content->burn()) {
1326                 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
1327         }
1328 }
1329
1330
/** Seek the player to a DCP time.
 *  @param time Time to seek to.
 *  @param accurate true to seek exactly to @p time (subsequent video/audio before
 *  it will be discarded); false to get there as quickly as possible, possibly
 *  ending up slightly early.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	auto film = _film.lock();
	if (!film) {
		return;
	}

	/* Throw away everything queued up from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	std::for_each(_active_texts.begin(), _active_texts.end(), [](ActiveText& a) { a.clear(); });

	/* Seek each piece's decoder appropriately for where the seek target falls
	   relative to that piece's period.
	*/
	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where the next data should be, so continuity can be checked */
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		/* We don't know exactly where we'll end up, so don't check continuity */
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	_last_video.clear ();

	/* Per-stream push positions are meaningless after a seek */
	for (auto& state: _stream_states) {
		state.second.last_push_end = boost::none;
	}
}
1398
1399
1400 void
1401 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1402 {
1403         auto film = _film.lock();
1404         DCPOMATIC_ASSERT(film);
1405
1406         if (!film->three_d()) {
1407                 if (pv->eyes() == Eyes::LEFT) {
1408                         /* Use left-eye images for both eyes... */
1409                         pv->set_eyes (Eyes::BOTH);
1410                 } else if (pv->eyes() == Eyes::RIGHT) {
1411                         /* ...and discard the right */
1412                         return;
1413                 }
1414         }
1415
1416         /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1417            player before the video that requires them.
1418         */
1419         _delay.push_back (make_pair (pv, time));
1420
1421         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1422                 _next_video_time = time + one_video_frame();
1423         }
1424         _next_video_eyes = increment_eyes (pv->eyes());
1425
1426         if (_delay.size() < 3) {
1427                 return;
1428         }
1429
1430         auto to_do = _delay.front();
1431         _delay.pop_front();
1432         do_emit_video (to_do.first, to_do.second);
1433 }
1434
1435
1436 void
1437 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1438 {
1439         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1440                 std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
1441         }
1442
1443         auto subtitles = open_subtitles_for_frame (time);
1444         if (subtitles) {
1445                 pv->set_text (subtitles.get ());
1446         }
1447
1448         Video (pv, time);
1449 }
1450
1451
1452 void
1453 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1454 {
1455         auto film = _film.lock();
1456         DCPOMATIC_ASSERT(film);
1457
1458         /* Log if the assert below is about to fail */
1459         if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1460                 film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1461         }
1462
1463         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1464         DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1465         Audio(data, time, film->audio_frame_rate());
1466         _next_audio_time = time + DCPTime::from_frames(data->frames(), film->audio_frame_rate());
1467 }
1468
1469
1470 void
1471 Player::fill_audio (DCPTimePeriod period)
1472 {
1473         auto film = _film.lock();
1474         DCPOMATIC_ASSERT(film);
1475
1476         if (period.from == period.to) {
1477                 return;
1478         }
1479
1480         DCPOMATIC_ASSERT (period.from < period.to);
1481
1482         DCPTime t = period.from;
1483         while (t < period.to) {
1484                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1485                 Frame const samples = block.frames_round(film->audio_frame_rate());
1486                 if (samples) {
1487                         auto silence = make_shared<AudioBuffers>(film->audio_channels(), samples);
1488                         silence->make_silent ();
1489                         emit_audio (silence, t);
1490                 }
1491                 t += block;
1492         }
1493 }
1494
1495
/** @return The duration of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	auto film = _film.lock();
	DCPOMATIC_ASSERT(film);

	return DCPTime::from_frames(1, film->video_frame_rate ());
}
1504
1505
1506 pair<shared_ptr<AudioBuffers>, DCPTime>
1507 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1508 {
1509         auto film = _film.lock();
1510         DCPOMATIC_ASSERT(film);
1511
1512         auto const discard_time = discard_to - time;
1513         auto const discard_frames = discard_time.frames_round(film->audio_frame_rate());
1514         auto remaining_frames = audio->frames() - discard_frames;
1515         if (remaining_frames <= 0) {
1516                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1517         }
1518         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1519         return make_pair(cut, time + discard_time);
1520 }
1521
1522
1523 void
1524 Player::set_dcp_decode_reduction (optional<int> reduction)
1525 {
1526         ChangeSignaller<Player, int> cc(this, PlayerProperty::DCP_DECODE_REDUCTION);
1527
1528         if (reduction == _dcp_decode_reduction.load()) {
1529                 cc.abort();
1530                 return;
1531         }
1532
1533         _dcp_decode_reduction = reduction;
1534         setup_pieces();
1535 }
1536
1537
1538 optional<DCPTime>
1539 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
1540 {
1541         boost::mutex::scoped_lock lm (_mutex);
1542
1543         for (auto i: _pieces) {
1544                 if (i->content == content) {
1545                         return content_time_to_dcp (i, t);
1546                 }
1547         }
1548
1549         /* We couldn't find this content; perhaps things are being changed over */
1550         return {};
1551 }
1552
1553
1554 optional<ContentTime>
1555 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
1556 {
1557         boost::mutex::scoped_lock lm (_mutex);
1558
1559         for (auto i: _pieces) {
1560                 if (i->content == content) {
1561                         return dcp_to_content_time (i, t);
1562                 }
1563         }
1564
1565         /* We couldn't find this content; perhaps things are being changed over */
1566         return {};
1567 }
1568
1569
1570 shared_ptr<const Playlist>
1571 Player::playlist () const
1572 {
1573         auto film = _film.lock();
1574         if (!film) {
1575                 return {};
1576         }
1577
1578         return _playlist ? _playlist : film->playlist();
1579 }
1580
1581
1582 void
1583 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1584 {
1585         if (_suspended) {
1586                 return;
1587         }
1588
1589         auto film = _film.lock();
1590         DCPOMATIC_ASSERT(film);
1591
1592         auto piece = weak_piece.lock ();
1593         DCPOMATIC_ASSERT (piece);
1594
1595         auto const vfr = film->video_frame_rate();
1596
1597         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1598         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(film))) {
1599                 return;
1600         }
1601
1602         Atmos (data.data, dcp_time, data.metadata);
1603 }
1604
1605
/** Emit the Change signal for the given change type and property.
 *  NOTE(review): the final argument is always false here — presumably the
 *  "frequent" flag; confirm against the Change signal's declaration.
 */
void
Player::signal_change(ChangeType type, int property)
{
	Change(type, property, false);
}
1611