Fix ignore_video checker for separate-3D content (#2246).
src/lib/player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "referenced_reel_asset.h"
51 #include "render_text.h"
52 #include "shuffler.h"
53 #include "text_content.h"
54 #include "text_decoder.h"
55 #include "timer.h"
56 #include "video_decoder.h"
57 #include <dcp/reel.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <dcp/reel_picture_asset.h>
60 #include <dcp/reel_sound_asset.h>
61 #include <dcp/reel_subtitle_asset.h>
62 #include <algorithm>
63 #include <cstdlib>
64 #include <iostream>
65 #include <stdint.h>
65
66 #include "i18n.h"
67
68
69 using std::copy;
70 using std::cout;
71 using std::dynamic_pointer_cast;
72 using std::list;
73 using std::make_pair;
74 using std::make_shared;
75 using std::max;
76 using std::min;
79 using std::pair;
80 using std::shared_ptr;
81 using std::vector;
82 using std::weak_ptr;
83 using boost::optional;
84 using boost::scoped_ptr;
85 #if BOOST_VERSION >= 106100
86 using namespace boost::placeholders;
87 #endif
88 using namespace dcpomatic;
89
90
91 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
92 int const PlayerProperty::PLAYLIST = 701;
93 int const PlayerProperty::FILM_CONTAINER = 702;
94 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
95 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
96 int const PlayerProperty::PLAYBACK_LENGTH = 705;
97
98
99 Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
100         : _film (film)
101         , _suspended (0)
102         , _tolerant (film->tolerant())
103         , _audio_merger (_film->audio_frame_rate())
104         , _subtitle_alignment (subtitle_alignment)
105 {
106         construct ();
107 }
108
109
110 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
111         : _film (film)
112         , _playlist (playlist_)
113         , _suspended (0)
114         , _tolerant (film->tolerant())
115         , _audio_merger (_film->audio_frame_rate())
116 {
117         construct ();
118 }
119
120
121 void
122 Player::construct ()
123 {
124         _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
125         /* The butler must hear about this first, so, as we are proxying the signal through
126            to the butler, we must connect at the front.
127         */
128         _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
129         _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
130         set_video_container_size (_film->frame_size ());
131
132         film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
133
134         setup_pieces ();
135         seek (DCPTime (), true);
136 }
137
138
139 void
140 Player::setup_pieces ()
141 {
142         boost::mutex::scoped_lock lm (_mutex);
143         setup_pieces_unlocked ();
144 }
145
146
147 bool
148 have_video (shared_ptr<const Content> content)
149 {
150         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
151 }
152
153
154 bool
155 have_audio (shared_ptr<const Content> content)
156 {
157         return static_cast<bool>(content->audio);
158 }
159
160
161 void
162 Player::setup_pieces_unlocked ()
163 {
164         _playback_length = _playlist ? _playlist->length(_film) : _film->length();
165
166         auto old_pieces = _pieces;
167         _pieces.clear ();
168
169         auto playlist_content = playlist()->content();
170         bool const have_threed = std::any_of(
171                 playlist_content.begin(),
172                 playlist_content.end(),
173                 [](shared_ptr<const Content> c) {
174                         return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
175                 });
176
178         if (have_threed) {
179                 _shuffler.reset(new Shuffler());
180                 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
181         }
182
183         for (auto i: playlist()->content()) {
184
185                 if (!i->paths_valid ()) {
186                         continue;
187                 }
188
189                 if (_ignore_video && _ignore_audio && i->text.empty()) {
190                         /* We're only interested in text and this content has none */
191                         continue;
192                 }
193
194                 shared_ptr<Decoder> old_decoder;
195                 for (auto j: old_pieces) {
196                         if (j->content == i) {
197                                 old_decoder = j->decoder;
198                                 break;
199                         }
200                 }
201
202                 auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
203                 DCPOMATIC_ASSERT (decoder);
204
205                 FrameRateChange frc (_film, i);
206
207                 if (decoder->video && _ignore_video) {
208                         decoder->video->set_ignore (true);
209                 }
210
211                 if (decoder->audio && _ignore_audio) {
212                         decoder->audio->set_ignore (true);
213                 }
214
215                 if (_ignore_text) {
216                         for (auto text: decoder->text) {
217                                 text->set_ignore (true);
218                         }
219                 }
220
221                 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
222                 if (dcp) {
223                         dcp->set_decode_referenced (_play_referenced);
224                         if (_play_referenced) {
225                                 dcp->set_forced_reduction (_dcp_decode_reduction);
226                         }
227                 }
228
229                 auto piece = make_shared<Piece>(i, decoder, frc);
230                 _pieces.push_back (piece);
231
232                 if (decoder->video) {
233                         if (have_threed) {
234                                 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
235                                 decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
236                         } else {
237                                 decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
238                         }
239                 }
240
241                 if (decoder->audio) {
242                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
243                 }
244
245                 for (auto text: decoder->text) {
246                         text->BitmapStart.connect (
247                                 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>(text->content()), _1)
248                                 );
249                         text->PlainStart.connect (
250                                 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>(text->content()), _1)
251                                 );
252                         text->Stop.connect (
253                                 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>(text->content()), _1)
254                                 );
255                 }
260
261                 if (decoder->atmos) {
262                         decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
263                 }
264         }
265
266         _stream_states.clear ();
267         for (auto i: _pieces) {
268                 if (i->content->audio) {
269                         for (auto j: i->content->audio->streams()) {
270                                 _stream_states[j] = StreamState (i, i->content->position ());
271                         }
272                 }
273         }
274
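        /* Work out periods where a piece's video should be ignored because content later
           in the playlist also has in-use video there.  Separate-3D content (THREE_D_LEFT /
           THREE_D_RIGHT) is excluded from this check, since its L and R halves legitimately
           occupy the same period (see #2246).
        */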
275         auto ignore_overlap = [](shared_ptr<VideoContent> v) {
276                 return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
277         };
278
279         for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
280                 if (ignore_overlap((*i)->content->video)) {
281                         /* Look for content later in the content list with in-use video that overlaps this */
282                         auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
283                         for (auto j = std::next(i); j != _pieces.end(); ++j) {
284                                 if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
285                                         (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
286                                 }
287                         }
288                 }
289         }
290
291         _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
292         _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
293
294         _next_video_time = boost::none;
295         _next_video_eyes = Eyes::BOTH;
296         _next_audio_time = boost::none;
297 }
298
299
300 void
301 Player::playlist_content_change (ChangeType type, int property, bool frequent)
302 {
303         if (property == VideoContentProperty::CROP) {
304                 if (type == ChangeType::DONE) {
305                         auto const vcs = video_container_size();
306                         boost::mutex::scoped_lock lm (_mutex);
307                         for (auto const& i: _delay) {
308                                 i.first->reset_metadata (_film, vcs);
309                         }
310                 }
311         } else {
312                 if (type == ChangeType::PENDING) {
313                         /* The player content is probably about to change, so we can't carry on
314                            until that has happened and we've rebuilt our pieces.  Stop pass()
315                            and seek() from working until then.
316                         */
317                         ++_suspended;
318                 } else if (type == ChangeType::DONE) {
319                         /* A change in our content has gone through.  Re-build our pieces. */
320                         setup_pieces ();
321                         --_suspended;
322                 } else if (type == ChangeType::CANCELLED) {
323                         --_suspended;
324                 }
325         }
326
327         Change (type, property, frequent);
328 }
329
330
331 void
332 Player::set_video_container_size (dcp::Size s)
333 {
334         Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
335
336         {
337                 boost::mutex::scoped_lock lm (_mutex);
338
339                 if (s == _video_container_size) {
340                         lm.unlock ();
341                         Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
342                         return;
343                 }
344
345                 _video_container_size = s;
346
347                 _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
348                 _black_image->make_black ();
349         }
350
351         Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
352 }
353
354
355 void
356 Player::playlist_change (ChangeType type)
357 {
358         if (type == ChangeType::DONE) {
359                 setup_pieces ();
360         }
361         Change (type, PlayerProperty::PLAYLIST, false);
362 }
363
364
365 void
366 Player::film_change (ChangeType type, Film::Property p)
367 {
368         /* Here we should notice Film properties that affect our output, and
369            alert listeners that our output now would be different to how it was
370            last time we were run.
371         */
372
373         if (p == Film::Property::CONTAINER) {
374                 Change (type, PlayerProperty::FILM_CONTAINER, false);
375         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
376                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
377                    so we need new pieces here.
378                 */
379                 if (type == ChangeType::DONE) {
380                         setup_pieces ();
381                 }
382                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
383         } else if (p == Film::Property::AUDIO_PROCESSOR) {
384                 if (type == ChangeType::DONE && _film->audio_processor ()) {
385                         boost::mutex::scoped_lock lm (_mutex);
386                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
387                 }
388         } else if (p == Film::Property::AUDIO_CHANNELS) {
389                 if (type == ChangeType::DONE) {
390                         boost::mutex::scoped_lock lm (_mutex);
391                         _audio_merger.clear ();
392                 }
393         }
394 }
395
396
397 shared_ptr<PlayerVideo>
398 Player::black_player_video_frame (Eyes eyes) const
399 {
400         return std::make_shared<PlayerVideo> (
401                 std::make_shared<const RawImageProxy>(_black_image),
402                 Crop(),
403                 optional<double>(),
404                 _video_container_size,
405                 _video_container_size,
406                 eyes,
407                 Part::WHOLE,
408                 PresetColourConversion::all().front().conversion,
409                 VideoRange::FULL,
410                 std::weak_ptr<Content>(),
411                 boost::optional<Frame>(),
412                 false
413         );
414 }
415
416
417 Frame
418 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
419 {
420         auto s = t - piece->content->position ();
421         s = min (piece->content->length_after_trim(_film), s);
422         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
423
424         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
425            then convert that ContentTime to frames at the content's rate.  However this fails for
426            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
427            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
428
429            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
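
           (As an illustration, assuming DCPTime's 96000 units per second, consistent with
           the 3200-unit figure above: one frame at 30fps is 96000 / 30 = 3200 units, while
           one frame at 29.9978733fps is ~3200.2 units, so after truncation to integer units
           the two rates are indistinguishable at small times.)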
430         */
431         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
432 }
433
434
435 DCPTime
436 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
437 {
438         /* See comment in dcp_to_content_video */
439         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
440         return d + piece->content->position();
441 }
442
443
444 Frame
445 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
446 {
447         auto s = t - piece->content->position ();
448         s = min (piece->content->length_after_trim(_film), s);
449         /* See notes in dcp_to_content_video */
450         return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
451 }
452
453
454 DCPTime
455 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
456 {
457         /* See comment in dcp_to_content_video */
458         return DCPTime::from_frames (f, _film->audio_frame_rate())
459                 - DCPTime (piece->content->trim_start(), piece->frc)
460                 + piece->content->position();
461 }
462
463
464 ContentTime
465 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
466 {
467         auto s = t - piece->content->position ();
468         s = min (piece->content->length_after_trim(_film), s);
469         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
470 }
471
472
473 DCPTime
474 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
475 {
476         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
477 }
478
479
480 vector<FontData>
481 Player::get_subtitle_fonts ()
482 {
483         boost::mutex::scoped_lock lm (_mutex);
484
485         vector<FontData> fonts;
486         for (auto i: _pieces) {
487                 /* XXX: things may go wrong if there are duplicate font IDs
488                    with different font files.
489                 */
490                 auto f = i->decoder->fonts ();
491                 copy (f.begin(), f.end(), back_inserter(fonts));
492         }
493
494         return fonts;
495 }
496
497
498 /** Set this player never to produce any video data */
499 void
500 Player::set_ignore_video ()
501 {
502         boost::mutex::scoped_lock lm (_mutex);
503         _ignore_video = true;
504         setup_pieces_unlocked ();
505 }
506
507
508 void
509 Player::set_ignore_audio ()
510 {
511         boost::mutex::scoped_lock lm (_mutex);
512         _ignore_audio = true;
513         setup_pieces_unlocked ();
514 }
515
516
517 void
518 Player::set_ignore_text ()
519 {
520         boost::mutex::scoped_lock lm (_mutex);
521         _ignore_text = true;
522         setup_pieces_unlocked ();
523 }
524
525
526 /** Set the player to always burn open texts into the image regardless of the content settings */
527 void
528 Player::set_always_burn_open_subtitles ()
529 {
530         boost::mutex::scoped_lock lm (_mutex);
531         _always_burn_open_subtitles = true;
532 }
533
534
535 /** Sets up the player to be faster, possibly at the expense of quality */
536 void
537 Player::set_fast ()
538 {
539         boost::mutex::scoped_lock lm (_mutex);
540         _fast = true;
541         setup_pieces_unlocked ();
542 }
543
544
545 void
546 Player::set_play_referenced ()
547 {
548         boost::mutex::scoped_lock lm (_mutex);
549         _play_referenced = true;
550         setup_pieces_unlocked ();
551 }
552
553
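/** Trim @a r by @a reel_trim_start and @a reel_trim_end frames and, if any of it remains,
 *  add it to @a a with a DCP period starting at @a from.
 *  @param ffr Film/DCP video frame rate.
 */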
554 static void
555 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
556 {
557         DCPOMATIC_ASSERT (r);
558         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
559         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
560         if (r->actual_duration() > 0) {
561                 a.push_back (
562                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
563                         );
564         }
565 }
566
567
568 list<ReferencedReelAsset>
569 Player::get_reel_assets ()
570 {
571         /* Does not require a lock on _mutex as it's only called from DCPEncoder */
572
573         list<ReferencedReelAsset> reel_assets;
574
575         for (auto content: playlist()->content()) {
576                 auto dcp = dynamic_pointer_cast<DCPContent>(content);
577                 if (!dcp) {
578                         continue;
579                 }
580
581                 if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
582                         continue;
583                 }
584
585                 scoped_ptr<DCPDecoder> decoder;
586                 try {
587                         decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
588                 } catch (...) {
589                         return reel_assets;
590                 }
591
592                 auto const frame_rate = _film->video_frame_rate();
593                 DCPOMATIC_ASSERT (dcp->video_frame_rate());
594                 /* We should only be referencing if the DCP rate is the same as the film rate */
595                 DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);
596
597                 Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
598                 Frame const trim_end = dcp->trim_end().frames_round(frame_rate);
599
600                 /* position in the asset from the start */
601                 int64_t offset_from_start = 0;
602                 /* position in the asset from the end */
603                 int64_t offset_from_end = 0;
604                 for (auto reel: decoder->reels()) {
605                         /* Assume that main picture duration is the length of the reel */
606                         offset_from_end += reel->main_picture()->actual_duration();
607                 }
608
609                 for (auto reel: decoder->reels()) {
610
611                         /* Assume that main picture duration is the length of the reel */
612                         int64_t const reel_duration = reel->main_picture()->actual_duration();
613
614                         /* See doc/design/trim_reels.svg */
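                        /* Worked example: with two 100-frame reels and trim_start = 120 frames,
                           reel 1 (offset_from_start = 0)   gets reel_trim_start = min(100, max(0, 120 - 0))   = 100 (fully trimmed);
                           reel 2 (offset_from_start = 100) gets reel_trim_start = min(100, max(0, 120 - 100)) = 20.
                        */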
615                         Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
616                         Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
617
618                         auto const from = max(DCPTime(), content->position() + DCPTime::from_frames(offset_from_start, frame_rate) - DCPTime::from_frames(trim_start, frame_rate));
619                         if (dcp->reference_video()) {
620                                 maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
621                         }
622
623                         if (dcp->reference_audio()) {
624                                 maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
625                         }
626
627                         if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
628                                 maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
629                         }
630
631                         if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
632                                 for (auto caption: reel->closed_captions()) {
633                                         maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
634                                 }
635                         }
636
637                         offset_from_start += reel_duration;
638                         offset_from_end -= reel_duration;
639                 }
640         }
641
642         return reel_assets;
643 }
644
645
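/** Find the piece (or black/silent filler) that is farthest behind and ask it to emit
 *  some data, then emit any audio that is consequently complete.
 *  @return true if there is no more data to come and playback is finished.
 */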
646 bool
647 Player::pass ()
648 {
649         boost::mutex::scoped_lock lm (_mutex);
650
651         if (_suspended) {
652                 /* We can't pass in this state */
653                 LOG_DEBUG_PLAYER_NC ("Player is suspended");
654                 return false;
655         }
656
657         if (_playback_length == DCPTime()) {
658                 /* Special; just give one black frame */
659                 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
660                 return true;
661         }
662
663         /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
664
665         shared_ptr<Piece> earliest_content;
666         optional<DCPTime> earliest_time;
667
668         for (auto i: _pieces) {
669                 if (i->done) {
670                         continue;
671                 }
672
673                 auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
674                 if (t > i->content->end(_film)) {
675                         i->done = true;
676                 } else {
677
678                         /* Given two choices at the same time, pick the one with texts so we see it before
679                            the video.
680                         */
681                         if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
682                                 earliest_time = t;
683                                 earliest_content = i;
684                         }
685                 }
686         }
687
688         bool done = false;
689
690         enum {
691                 NONE,
692                 CONTENT,
693                 BLACK,
694                 SILENT
695         } which = NONE;
696
697         if (earliest_content) {
698                 which = CONTENT;
699         }
700
701         if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
702                 earliest_time = _black.position ();
703                 which = BLACK;
704         }
705
706         if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
707                 earliest_time = _silent.position ();
708                 which = SILENT;
709         }
710
711         switch (which) {
712         case CONTENT:
713         {
714                 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
715                 earliest_content->done = earliest_content->decoder->pass ();
716                 auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
717                 if (dcp && !_play_referenced && dcp->reference_audio()) {
718                         /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
719                            to `hide' the fact that no audio was emitted during the referenced DCP (though
720                            we need to behave as though it was).
721                         */
722                         _next_audio_time = dcp->end (_film);
723                 }
724                 break;
725         }
726         case BLACK:
727                 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
728                 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
729                 _black.set_position (_black.position() + one_video_frame());
730                 break;
731         case SILENT:
732         {
733                 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
734                 DCPTimePeriod period (_silent.period_at_position());
735                 if (_next_audio_time) {
736                         /* Sometimes the thing that happened last finishes fractionally before
737                            or after this silence.  Bodge the start time of the silence to fix it.
738                            I think this is nothing to worry about since we will just add or
739                            remove a little silence at the end of some content.
740                         */
741                         int64_t const error = std::llabs(period.from.get() - _next_audio_time->get());
742                         /* Let's not worry about less than a frame at 24fps */
743                         int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
744                         if (error >= too_much_error) {
745                                 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
746                         }
747                         DCPOMATIC_ASSERT (error < too_much_error);
748                         period.from = *_next_audio_time;
749                 }
750                 if (period.duration() > one_video_frame()) {
751                         period.to = period.from + one_video_frame();
752                 }
753                 fill_audio (period);
754                 _silent.set_position (period.to);
755                 break;
756         }
757         case NONE:
758                 done = true;
759                 break;
760         }
761
762         /* Emit any audio that is ready */
763
764         /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
765            of our streams, or the position of _silent.  First, though, we choose only streams that are less than
766            ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
767            behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
768            that will never come, causing bugs like #2101.
769         */
770         constexpr int ignore_streams_behind = 5;
771
772         using state_pair = std::pair<AudioStreamPtr, StreamState>;
773
774         /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
775         auto latest_last_push_end = std::max_element(
776                 _stream_states.begin(),
777                 _stream_states.end(),
778                 [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
779                 );
780
781         if (latest_last_push_end != _stream_states.end()) {
782                 LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
783         }
784
785         /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
786         std::map<AudioStreamPtr, StreamState> alive_stream_states;
787         for (auto const& i: _stream_states) {
788                 if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
789                         alive_stream_states.insert(i);
790                 } else {
791                         LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
792                 }
793         }
794
795         auto pull_to = _playback_length;
796         for (auto const& i: alive_stream_states) {
797                 if (!i.second.piece->done && i.second.last_push_end < pull_to) {
798                         pull_to = i.second.last_push_end;
799                 }
800         }
801         if (!_silent.done() && _silent.position() < pull_to) {
802                 pull_to = _silent.position();
803         }
804
805         LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
806         auto audio = _audio_merger.pull (pull_to);
807         for (auto i = audio.begin(); i != audio.end(); ++i) {
808                 if (_next_audio_time && i->second < *_next_audio_time) {
809                         /* This new data comes before the last we emitted (or the last seek); discard it */
810                         auto cut = discard_audio (i->first, i->second, *_next_audio_time);
811                         if (!cut.first) {
812                                 continue;
813                         }
814                         *i = cut;
815                 } else if (_next_audio_time && i->second > *_next_audio_time) {
816                         /* There's a gap between this data and the last we emitted; fill with silence */
817                         fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
818                 }
819
820                 emit_audio (i->first, i->second);
821         }
822
823         if (done) {
824                 if (_shuffler) {
825                         _shuffler->flush ();
826                 }
827                 for (auto const& i: _delay) {
828                         do_emit_video(i.first, i.second);
829                 }
830
831                 /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
832                  * However, if we have L and R video files, and one is shorter than the other,
833                  * the fill code in ::video mostly takes care of filling in the gaps.
834                  * But since it fills at the point when it knows there is more video coming
835                  * at time t (so it should fill any gap up to t) it can't do anything right at the
836                  * end.  This is particularly bad news if the last frame emitted is a LEFT
837                  * eye, as the MXF writer will complain about the 3D sequence being wrong.
838                  * Here's a hack to work around that particular case.
839                  */
840                 if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
841                         do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
842                 }
843         }
844
845         return done;
846 }
847
848
849 /** @return Open subtitles for the frame at the given time, converted to images */
850 optional<PositionImage>
851 Player::open_subtitles_for_frame (DCPTime time) const
852 {
853         list<PositionImage> captions;
854         int const vfr = _film->video_frame_rate();
855
856         for (
857                 auto j:
858                 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
859                 ) {
860
861                 /* Bitmap subtitles */
862                 for (auto i: j.bitmap) {
863                         if (!i.image) {
864                                 continue;
865                         }
866
867                         /* i.image will already have been scaled to fit _video_container_size */
870                         captions.push_back (
871                                 PositionImage (
872                                         i.image,
873                                         Position<int> (
874                                                 lrint(_video_container_size.width * i.rectangle.x),
875                                                 lrint(_video_container_size.height * i.rectangle.y)
876                                                 )
877                                         )
878                                 );
879                 }
880
881                 /* String subtitles (rendered to an image) */
882                 if (!j.string.empty()) {
883                         auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
884                         copy (s.begin(), s.end(), back_inserter (captions));
885                 }
886         }
887
888         if (captions.empty()) {
889                 return {};
890         }
891
892         return merge (captions, _subtitle_alignment);
893 }
894
895
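/** Handle a video frame arriving from a decoder: discard it if it falls before the
 *  content's period, before the last accurate seek or in a period covered by other video;
 *  fill any gap since the last frame emitted (in 2D or 3D as appropriate); then emit it,
 *  repeated if the FrameRateChange requires.
 */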
896 void
897 Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
898 {
899         if (_suspended) {
900                 return;
901         }
902
903         auto piece = weak_piece.lock ();
904         if (!piece) {
905                 return;
906         }
907
908         if (!piece->content->video->use()) {
909                 return;
910         }
911
912         FrameRateChange frc (_film, piece->content);
913         if (frc.skip && (video.frame % 2) == 1) {
914                 return;
915         }
916
917         /* Time of the first frame we will emit */
918         DCPTime const time = content_video_to_dcp (piece, video.frame);
919         LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
920
921         /* Discard if it's before the content's period or the last accurate seek.  We can't discard
922            if it's after the content's period here as in that case we still need to fill any gap between
923            `now' and the end of the content's period.
924         */
925         if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
926                 return;
927         }
928
929         if (piece->ignore_video && piece->ignore_video->contains(time)) {
930                 return;
931         }
932
933         /* Fill gaps that we discover now that we have some video which needs to be emitted.
934            This is where we need to fill to.
935         */
936         DCPTime fill_to = min (time, piece->content->end(_film));
937
938         if (_next_video_time) {
939                 DCPTime fill_from = max (*_next_video_time, piece->content->position());
940
941                 /* Fill if we have more than half a frame to do */
942                 if ((fill_to - fill_from) > one_video_frame() / 2) {
943                         auto last = _last_video.find (weak_piece);
944                         if (_film->three_d()) {
945                                 auto fill_to_eyes = video.eyes;
946                                 if (fill_to_eyes == Eyes::BOTH) {
947                                         fill_to_eyes = Eyes::LEFT;
948                                 }
949                                 if (fill_to == piece->content->end(_film)) {
950                                         /* Don't fill after the end of the content */
951                                         fill_to_eyes = Eyes::LEFT;
952                                 }
953                                 auto j = fill_from;
954                                 auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
955                                 if (eyes == Eyes::BOTH) {
956                                         eyes = Eyes::LEFT;
957                                 }
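                                /* Alternate LEFT and RIGHT fill frames at each timestamp, advancing the
                                   time only after a RIGHT eye, and stop once we have reached both fill_to
                                   and the eye that the incoming frame will supply.
                                */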
958                                 while (j < fill_to || eyes != fill_to_eyes) {
959                                         if (last != _last_video.end()) {
960                                                 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
961                                                 auto copy = last->second->shallow_copy();
962                                                 copy->set_eyes (eyes);
963                                                 emit_video (copy, j);
964                                         } else {
965                                                 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
966                                                 emit_video (black_player_video_frame(eyes), j);
967                                         }
968                                         if (eyes == Eyes::RIGHT) {
969                                                 j += one_video_frame();
970                                         }
971                                         eyes = increment_eyes (eyes);
972                                 }
973                         } else {
974                                 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
975                                         if (last != _last_video.end()) {
976                                                 emit_video (last->second, j);
977                                         } else {
978                                                 emit_video (black_player_video_frame(Eyes::BOTH), j);
979                                         }
980                                 }
981                         }
982                 }
983         }
984
985         auto const content_video = piece->content->video;
986
987         _last_video[weak_piece] = std::make_shared<PlayerVideo>(
988                 video.image,
989                 content_video->actual_crop(),
990                 content_video->fade (_film, video.frame),
991                 scale_for_display(
992                         content_video->scaled_size(_film->frame_size()),
993                         _video_container_size,
994                         _film->frame_size(),
995                         content_video->pixel_quanta()
996                         ),
997                 _video_container_size,
998                 video.eyes,
999                 video.part,
1000                 content_video->colour_conversion(),
1001                 content_video->range(),
1002                 piece->content,
1003                 video.frame,
1004                 false
1005                 );
1006
1007         DCPTime t = time;
1008         for (int i = 0; i < frc.repeat; ++i) {
1009                 if (t < piece->content->end(_film)) {
1010                         emit_video (_last_video[weak_piece], t);
1011                 }
1012                 t += one_video_frame ();
1013         }
1014 }
1015
1016
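/** Handle a block of audio arriving from a decoder: trim it to the content's period,
 *  apply gain and any fade, remap it to the DCP channel layout, run it through the
 *  audio processor (if there is one) and push the result to the merger.
 */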
1017 void
1018 Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
1019 {
1020         if (_suspended) {
1021                 return;
1022         }
1023
1024         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
1025
1026         auto piece = weak_piece.lock ();
1027         if (!piece) {
1028                 return;
1029         }
1030
1031         auto content = piece->content->audio;
1032         DCPOMATIC_ASSERT (content);
1033
1034         int const rfr = content->resampled_frame_rate (_film);
1035
1036         /* Compute time in the DCP */
1037         auto time = resampled_audio_to_dcp (piece, content_audio.frame);
1038         LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
1039
1040         /* And the end of this block in the DCP */
1041         auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
1042
1043         /* Remove anything that comes before the start or after the end of the content */
1044         if (time < piece->content->position()) {
1045                 auto cut = discard_audio (content_audio.audio, time, piece->content->position());
1046                 if (!cut.first) {
1047                         /* This audio is entirely discarded */
1048                         return;
1049                 }
1050                 content_audio.audio = cut.first;
1051                 time = cut.second;
1052         } else if (time > piece->content->end(_film)) {
1053                 /* Discard it all */
1054                 return;
1055         } else if (end > piece->content->end(_film)) {
1056                 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
1057                 if (remaining_frames == 0) {
1058                         return;
1059                 }
1060                 content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
1061         }
1062
1063         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
1064
1065         /* Gain and fade */
1066
1067         auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
1068         if (content->gain() != 0 || !fade_coeffs.empty()) {
1069                 auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
1070                 if (!fade_coeffs.empty()) {
1071                         /* Apply both fade and gain */
1072                         DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
1073                         auto const channels = gain_buffers->channels();
1074                         auto const frames = fade_coeffs.size();
1075                         auto data = gain_buffers->data();
1076                         auto const gain = db_to_linear (content->gain());
1077                         for (auto channel = 0; channel < channels; ++channel) {
1078                                 for (auto frame = 0U; frame < frames; ++frame) {
1079                                         data[channel][frame] *= gain * fade_coeffs[frame];
1080                                 }
1081                         }
1082                 } else {
1083                         /* Just apply gain */
1084                         gain_buffers->apply_gain (content->gain());
1085                 }
1086                 content_audio.audio = gain_buffers;
1087         }
1088
1089         /* Remap */
1090
1091         content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
1092
1093         /* Process */
1094
1095         if (_audio_processor) {
1096                 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
1097         }
1098
1099         /* Push */
1100
1101         _audio_merger.push (content_audio.audio, time);
1102         DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
1103         _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
1104 }
1105
1106
1107 void
1108 Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
1109 {
1110         if (_suspended) {
1111                 return;
1112         }
1113
1114         auto piece = weak_piece.lock ();
1115         auto content = weak_content.lock ();
1116         if (!piece || !content) {
1117                 return;
1118         }
1119
1120         PlayerText ps;
1121         for (auto& sub: subtitle.subs) {
1123                 /* Apply content's subtitle offsets */
1124                 sub.rectangle.x += content->x_offset ();
1125                 sub.rectangle.y += content->y_offset ();
1126
1127                 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1128                 sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
1129                 sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
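                /* e.g. with x_scale = 2, a rectangle at x = 0.4 with width 0.2 (centre 0.5)
                   moves to x = 0.3 and is scaled below to width 0.4, so it remains centred at 0.5.
                */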
1130
1131                 /* Apply content's subtitle scale */
1132                 sub.rectangle.width *= content->x_scale ();
1133                 sub.rectangle.height *= content->y_scale ();
1134
1135                 auto image = sub.image;
1136
1137                 /* We will scale the subtitle up to fit _video_container_size */
1138                 int const width = sub.rectangle.width * _video_container_size.width;
1139                 int const height = sub.rectangle.height * _video_container_size.height;
1140                 if (width == 0 || height == 0) {
1141                         return;
1142                 }
1143
1144                 dcp::Size scaled_size (width, height);
1145                 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
1146         }
1147
1148         DCPTime from(content_time_to_dcp(piece, subtitle.from()));
1149         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1150 }
1151
1152
1153 void
1154 Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
1155 {
1156         if (_suspended) {
1157                 return;
1158         }
1159
1160         auto piece = weak_piece.lock ();
1161         auto content = weak_content.lock ();
1162         if (!piece || !content) {
1163                 return;
1164         }
1165
1166         PlayerText ps;
1167         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
1168
1169         if (from > piece->content->end(_film)) {
1170                 return;
1171         }
1172
1173         for (auto s: subtitle.subs) {
1174                 s.set_h_position (s.h_position() + content->x_offset());
1175                 s.set_v_position (s.v_position() + content->y_offset());
1176                 float const xs = content->x_scale();
1177                 float const ys = content->y_scale();
1178                 float size = s.size();
1179
1180                 /* Adjust size to express the common part of the scaling;
1181                    e.g. if xs = ys = 0.5 we scale size by 0.5.
1182                 */
1183                 if (xs > 1e-5 && ys > 1e-5) {
1184                         size *= 1 / min (1 / xs, 1 / ys);
1185                 }
1186                 s.set_size (size);
1187
1188                 /* Then express aspect ratio changes */
1189                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1190                         s.set_aspect_adjust (xs / ys);
1191                 }
1192
1193                 s.set_in (dcp::Time(from.seconds(), 1000));
1194                 ps.string.push_back (StringText (s, content->outline_width()));
1195                 ps.add_fonts (content->fonts ());
1196         }
1197
1198         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1199 }
1200
1201
1202 void
1203 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1204 {
1205         if (_suspended) {
1206                 return;
1207         }
1208
1209         auto content = weak_content.lock ();
1210         if (!content) {
1211                 return;
1212         }
1213
1214         if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
1215                 return;
1216         }
1217
1218         auto piece = weak_piece.lock ();
1219         if (!piece) {
1220                 return;
1221         }
1222
1223         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1224
1225         if (dcp_to > piece->content->end(_film)) {
1226                 return;
1227         }
1228
1229         auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
1230
1231         bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1232         if (content->use() && !always && !content->burn()) {
1233                 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
1234         }
1235 }
1236
1237
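/** Seek so that the next data emitted comes from @a time.  If @a accurate is true the
 *  decoders seek exactly and anything arriving from before @a time is discarded; if it
 *  is false they may resume from a convenient earlier point, such as a keyframe.
 */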
1238 void
1239 Player::seek (DCPTime time, bool accurate)
1240 {
1241         boost::mutex::scoped_lock lm (_mutex);
1242         LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1243
1244         if (_suspended) {
1245                 /* We can't seek in this state */
1246                 return;
1247         }
1248
1249         if (_shuffler) {
1250                 _shuffler->clear ();
1251         }
1252
1253         _delay.clear ();
1254
1255         if (_audio_processor) {
1256                 _audio_processor->flush ();
1257         }
1258
1259         _audio_merger.clear ();
1260         for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1261                 _active_texts[i].clear ();
1262         }
1263
1264         for (auto i: _pieces) {
1265                 if (time < i->content->position()) {
1266                         /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
1267                            we must seek this (following) content accurately, otherwise when we come to the end of the current
1268                            content we may not start right at the beginning of the next, causing a gap (if the next content has
1269                            been trimmed to a point between keyframes, or something).
1270                         */
1271                         i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1272                         i->done = false;
1273                 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1274                         /* During; seek to position */
1275                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
1276                         i->done = false;
1277                 } else {
1278                         /* After; this piece is done */
1279                         i->done = true;
1280                 }
1281         }
1282
1283         if (accurate) {
1284                 _next_video_time = time;
1285                 _next_video_eyes = Eyes::LEFT;
1286                 _next_audio_time = time;
1287         } else {
1288                 _next_video_time = boost::none;
1289                 _next_video_eyes = boost::none;
1290                 _next_audio_time = boost::none;
1291         }
1292
1293         _black.set_position (time);
1294         _silent.set_position (time);
1295
1296         _last_video.clear ();
1297 }
1298
1299
1300 void
1301 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1302 {
1303         if (!_film->three_d()) {
1304                 if (pv->eyes() == Eyes::LEFT) {
1305                         /* Use left-eye images for both eyes... */
1306                         pv->set_eyes (Eyes::BOTH);
1307                 } else if (pv->eyes() == Eyes::RIGHT) {
1308                         /* ...and discard the right */
1309                         return;
1310                 }
1311         }
1312
1313         /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1314            player before the video that requires them.
1315         */
1316         _delay.push_back (make_pair (pv, time));
1317
1318         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1319                 _next_video_time = time + one_video_frame();
1320         }
1321         _next_video_eyes = increment_eyes (pv->eyes());
1322
1323         if (_delay.size() < 3) {
1324                 return;
1325         }
1326
1327         auto to_do = _delay.front();
1328         _delay.pop_front();
1329         do_emit_video (to_do.first, to_do.second);
1330 }
1331
1332
1333 void
1334 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1335 {
1336         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1337                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1338                         _active_texts[i].clear_before (time);
1339                 }
1340         }
1341
1342         auto subtitles = open_subtitles_for_frame (time);
1343         if (subtitles) {
1344                 pv->set_text (subtitles.get ());
1345         }
1346
1347         Video (pv, time);
1348 }
1349
1350
1351 void
1352 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1353 {
1354         /* Log if the assert below is about to fail */
1355         if (_next_audio_time && std::llabs(time.get() - _next_audio_time->get()) > 1) {
1356                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1357         }
1358
1359         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1360         DCPOMATIC_ASSERT (!_next_audio_time || std::llabs(time.get() - _next_audio_time->get()) < 2);
1361         Audio (data, time, _film->audio_frame_rate());
1362         _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1363 }
1364
1365
1366 void
1367 Player::fill_audio (DCPTimePeriod period)
1368 {
1369         if (period.from == period.to) {
1370                 return;
1371         }
1372
1373         DCPOMATIC_ASSERT (period.from < period.to);
1374
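        /* Emit silence in blocks of at most half a second until the period is filled */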
1375         DCPTime t = period.from;
1376         while (t < period.to) {
1377                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1378                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1379                 if (samples) {
1380                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1381                         silence->make_silent ();
1382                         emit_audio (silence, t);
1383                 }
1384                 t += block;
1385         }
1386 }
1387
1388
1389 DCPTime
1390 Player::one_video_frame () const
1391 {
1392         return DCPTime::from_frames (1, _film->video_frame_rate ());
1393 }
1394
1395
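/** Discard the part of @a audio falling before @a discard_to, given that the block starts
 *  at @a time.
 *  @return The remaining audio and its start time, or a null pointer if nothing remains.
 */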
1396 pair<shared_ptr<AudioBuffers>, DCPTime>
1397 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1398 {
1399         auto const discard_time = discard_to - time;
1400         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1401         auto remaining_frames = audio->frames() - discard_frames;
1402         if (remaining_frames <= 0) {
1403                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1404         }
1405         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1406         return make_pair(cut, time + discard_time);
1407 }
1408
1409
1410 void
1411 Player::set_dcp_decode_reduction (optional<int> reduction)
1412 {
1413         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1414
1415         {
1416                 boost::mutex::scoped_lock lm (_mutex);
1417
1418                 if (reduction == _dcp_decode_reduction) {
1419                         lm.unlock ();
1420                         Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1421                         return;
1422                 }
1423
1424                 _dcp_decode_reduction = reduction;
1425                 setup_pieces_unlocked ();
1426         }
1427
1428         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1429 }
1430
1431
1432 optional<DCPTime>
1433 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
1434 {
1435         boost::mutex::scoped_lock lm (_mutex);
1436
1437         for (auto i: _pieces) {
1438                 if (i->content == content) {
1439                         return content_time_to_dcp (i, t);
1440                 }
1441         }
1442
1443         /* We couldn't find this content; perhaps things are being changed over */
1444         return {};
1445 }
1446
1447
1448 optional<ContentTime>
1449 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
1450 {
1451         boost::mutex::scoped_lock lm (_mutex);
1452
1453         for (auto i: _pieces) {
1454                 if (i->content == content) {
1455                         return dcp_to_content_time (i, t);
1456                 }
1457         }
1458
1459         /* We couldn't find this content; perhaps things are being changed over */
1460         return {};
1461 }
1462
1463
1464 shared_ptr<const Playlist>
1465 Player::playlist () const
1466 {
1467         return _playlist ? _playlist : _film->playlist();
1468 }
1469
1470
1471 void
1472 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1473 {
1474         if (_suspended) {
1475                 return;
1476         }
1477
1478         auto piece = weak_piece.lock ();
1479         DCPOMATIC_ASSERT (piece);
1480
1481         auto const vfr = _film->video_frame_rate();
1482
1483         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1484         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1485                 return;
1486         }
1487
1488         Atmos (data.data, dcp_time, data.metadata);
1489 }
1490