WIP: stuck — PlayerVideo depends on the render size, so this needs rethinking
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "referenced_reel_asset.h"
51 #include "render_text.h"
52 #include "shuffler.h"
53 #include "text_content.h"
54 #include "text_decoder.h"
55 #include "timer.h"
56 #include "video_decoder.h"
57 #include <dcp/reel.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <dcp/reel_picture_asset.h>
60 #include <dcp/reel_sound_asset.h>
61 #include <dcp/reel_subtitle_asset.h>
62 #include <algorithm>
63 #include <iostream>
64 #include <stdint.h>
65
66 #include "i18n.h"
67
68
69 using std::copy;
70 using std::cout;
71 using std::dynamic_pointer_cast;
72 using std::list;
73 using std::make_pair;
74 using std::make_shared;
75 using std::make_shared;
76 using std::max;
77 using std::min;
78 using std::min;
79 using std::pair;
80 using std::shared_ptr;
81 using std::vector;
82 using std::weak_ptr;
83 using boost::optional;
84 using boost::scoped_ptr;
85 #if BOOST_VERSION >= 106100
86 using namespace boost::placeholders;
87 #endif
88 using namespace dcpomatic;
89
90
/* Property codes passed with Player::Change to tell listeners which aspect of the
   player's output has changed (or is about to change).  Numbered from 700 so they
   are distinct from other property enumerations used with the same signal.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
97
98
/** Construct a Player which plays the given film using the film's own playlist.
 *  @param film Film to play.
 *  @param subtitle_alignment Alignment to use when rendering subtitle images.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}
108
109
/** Construct a Player which plays an explicit playlist rather than the film's own.
 *  @param film Film whose settings (frame rate, length etc.) are used.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
119
120
/** Shared second-stage construction: connect to film/playlist change signals,
 *  set up the initial video container size and pieces, then seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up any audio processor the film already has configured */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Accurate seek to time zero so the player is ready to pass() from the start */
	seek (DCPTime (), true);
}
137
138
/** Rebuild the pieces (content + decoder pairs), taking the state mutex first. */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
145
146
147 bool
148 have_video (shared_ptr<const Content> content)
149 {
150         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
151 }
152
153
154 bool
155 have_audio (shared_ptr<const Content> content)
156 {
157         return static_cast<bool>(content->audio) && content->can_be_played();
158 }
159
160
/** Rebuild _pieces from the playlist: create (or re-use) a decoder for each piece
 *  of content, wire its emission signals to our handlers, and reset the black/silence
 *  fillers and emission clocks.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		/* 3D L/R frames can arrive out of sequence; the Shuffler re-orders them
		   before forwarding to Player::video */
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			/* Skip content whose files are missing */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use the decoder from the old pieces if this content already had one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			/* NOTE: this inner `i' shadows the content `i' of the outer loop */
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		/* Connect decoder outputs to our handlers; weak_ptr<Piece> so a rebuilt
		   piece list does not keep stale pieces alive through the bindings */
		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* One StreamState per audio stream, starting at its content's position */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* 3D L/R halves are expected to overlap each other, so they don't count */
	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					/* NOTE(review): each overlapping j overwrites ignore_video, so only the
					   last overlap found is kept — confirm multiple overlaps cannot occur */
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Fillers covering the times where there is no video / no audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset the emission clocks */
	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}
298
299
/** Handle a change to some content in the playlist, then re-emit it as our own
 *  Change signal.  Non-crop changes suspend the player between PENDING and
 *  DONE/CANCELLED so that pass()/seek() cannot run against stale pieces.
 *  @param type Stage of the change (PENDING / DONE / CANCELLED).
 *  @param property Property that changed.
 *  @param frequent true if this change is likely to happen often.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Crop affects the metadata of video frames we have already queued
			   for emission, so refresh them in place */
			for (auto const& i: _delay) {
				i.first->reset_metadata(_film);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	Change (type, property, frequent);
}
328
329
/** Set the size of the container into which video will be scaled, rebuilding the
 *  cached black frame to match.  Emits PENDING then either DONE or (if the size
 *  is unchanged) CANCELLED for VIDEO_CONTAINER_SIZE.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* Unlock before emitting so listeners can call back into us */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Cached all-black frame at the new size, used to fill video gaps */
		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
		_black_image->make_black ();
	}

	/* Emit DONE outside the lock for the same re-entrancy reason */
	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
352
353
354 void
355 Player::playlist_change (ChangeType type)
356 {
357         if (type == ChangeType::DONE) {
358                 setup_pieces ();
359         }
360         Change (type, PlayerProperty::PLAYLIST, false);
361 }
362
363
/** Handle a change to a Film property, updating our state and notifying listeners
 *  where the change would alter our output.
 *  @param type Stage of the change.
 *  @param p Film property that changed.
 */
void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == ChangeType::DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && _film->audio_processor ()) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Clone the processor so we have our own instance at the film's rate */
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Pending merged audio has the old channel count; discard it */
			_audio_merger.clear ();
		}
	}
}
394
395
/** @param eyes Eyes that the frame is for (LEFT / RIGHT / BOTH).
 *  @return A black PlayerVideo frame at the current container size, used to fill
 *  gaps where there is no video content.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),        /* no fade */
		_video_container_size,     /* inter size */
		_video_container_size,     /* out size */
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),  /* no associated content */
		boost::optional<Frame>(),  /* no video frame number */
		false
	);
}
414
415
/** Convert a DCP time to a video frame index within a piece of content.
 *  @param piece Piece that the time falls within.
 *  @param t Time in the DCP.
 *  @return Frame index within the content, accounting for position, trim and
 *  frame-rate change.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
432
433
434 DCPTime
435 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
436 {
437         /* See comment in dcp_to_content_video */
438         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
439         return d + piece->content->position();
440 }
441
442
/** Convert a DCP time to a frame index in a piece's resampled audio.
 *  @param piece Piece that the time falls within.
 *  @param t Time in the DCP.
 *  @return Audio frame index at the film's audio rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}
451
452
453 DCPTime
454 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
455 {
456         /* See comment in dcp_to_content_video */
457         return DCPTime::from_frames (f, _film->audio_frame_rate())
458                 - DCPTime (piece->content->trim_start(), piece->frc)
459                 + piece->content->position();
460 }
461
462
/** Convert a DCP time to a ContentTime within a piece, accounting for position,
 *  trim and frame-rate change; clamped to be non-negative.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
470
471
472 DCPTime
473 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
474 {
475         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
476 }
477
478
479 vector<shared_ptr<Font>>
480 Player::get_subtitle_fonts ()
481 {
482         boost::mutex::scoped_lock lm (_mutex);
483
484         vector<shared_ptr<Font>> fonts;
485         for (auto piece: _pieces) {
486                 for (auto text: piece->content->text) {
487                         auto text_fonts = text->fonts();
488                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
489                 }
490         }
491
492         return fonts;
493 }
494
495
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Rebuild pieces so the decoders are told to ignore video */
	setup_pieces_unlocked ();
}
504
505
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Rebuild pieces so the decoders are told to ignore audio */
	setup_pieces_unlocked ();
}
513
514
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Rebuild pieces so the text decoders are told to ignore their data */
	setup_pieces_unlocked ();
}
522
523
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* No setup_pieces() here: the flag is consumed at emission time, not decode time */
	_always_burn_open_subtitles = true;
}
531
532
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Rebuild pieces so decoders are created with the fast flag */
	setup_pieces_unlocked ();
}
541
542
/** Set the player to decode and play content that would otherwise be referenced
 *  directly from an existing DCP (rather than being skipped).
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Rebuild pieces so DCP decoders are told to decode referenced assets */
	setup_pieces_unlocked ();
}
550
551
/** Apply reel trims to an asset and, if any duration remains, add it to a list.
 *  Note that the asset's entry point and duration are modified in place.
 *  @param a List to add to.
 *  @param r Reel asset to trim; must not be null.
 *  @param reel_trim_start Frames to trim from the start of the reel.
 *  @param reel_trim_end Frames to trim from the end of the reel.
 *  @param from Time at which the asset starts in the DCP.
 *  @param ffr Film frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
564
565
/** @return Reel assets from DCP content in the playlist that should be referenced
 *  directly in the output DCP (rather than re-encoded), with the playlist's trims
 *  applied to each reel.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> reel_assets;

	for (auto content: playlist()->content()) {
		auto dcp = dynamic_pointer_cast<DCPContent>(content);
		if (!dcp) {
			continue;
		}

		if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): a failed decoder silently truncates the result rather than
			   reporting an error — confirm this best-effort behaviour is intended */
			return reel_assets;
		}

		auto const frame_rate = _film->video_frame_rate();
		DCPOMATIC_ASSERT (dcp->video_frame_rate());
		/* We should only be referencing if the DCP rate is the same as the film rate */
		DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);

		Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
		Frame const trim_end = dcp->trim_end().frames_round(frame_rate);

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto reel: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += reel->main_picture()->actual_duration();
		}

		for (auto reel: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = reel->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
			if (dcp->reference_video()) {
				maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_audio()) {
				maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
				for (auto caption: reel->closed_captions()) {
					maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return reel_assets;
}
642
643
644 bool
645 Player::pass ()
646 {
647         boost::mutex::scoped_lock lm (_mutex);
648
649         if (_suspended) {
650                 /* We can't pass in this state */
651                 LOG_DEBUG_PLAYER_NC ("Player is suspended");
652                 return false;
653         }
654
655         if (_playback_length == DCPTime()) {
656                 /* Special; just give one black frame */
657                 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
658                 return true;
659         }
660
661         /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
662
663         shared_ptr<Piece> earliest_content;
664         optional<DCPTime> earliest_time;
665
666         for (auto i: _pieces) {
667                 if (i->done) {
668                         continue;
669                 }
670
671                 auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
672                 if (t > i->content->end(_film)) {
673                         i->done = true;
674                 } else {
675
676                         /* Given two choices at the same time, pick the one with texts so we see it before
677                            the video.
678                         */
679                         if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
680                                 earliest_time = t;
681                                 earliest_content = i;
682                         }
683                 }
684         }
685
686         bool done = false;
687
688         enum {
689                 NONE,
690                 CONTENT,
691                 BLACK,
692                 SILENT
693         } which = NONE;
694
695         if (earliest_content) {
696                 which = CONTENT;
697         }
698
699         if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
700                 earliest_time = _black.position ();
701                 which = BLACK;
702         }
703
704         if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
705                 earliest_time = _silent.position ();
706                 which = SILENT;
707         }
708
709         switch (which) {
710         case CONTENT:
711         {
712                 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
713                 earliest_content->done = earliest_content->decoder->pass ();
714                 auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
715                 if (dcp && !_play_referenced && dcp->reference_audio()) {
716                         /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
717                            to `hide' the fact that no audio was emitted during the referenced DCP (though
718                            we need to behave as though it was).
719                         */
720                         _next_audio_time = dcp->end (_film);
721                 }
722                 break;
723         }
724         case BLACK:
725                 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
726                 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
727                 _black.set_position (_black.position() + one_video_frame());
728                 break;
729         case SILENT:
730         {
731                 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
732                 DCPTimePeriod period (_silent.period_at_position());
733                 if (_next_audio_time) {
734                         /* Sometimes the thing that happened last finishes fractionally before
735                            or after this silence.  Bodge the start time of the silence to fix it.
736                            I think this is nothing to worry about since we will just add or
737                            remove a little silence at the end of some content.
738                         */
739                         int64_t const error = labs(period.from.get() - _next_audio_time->get());
740                         /* Let's not worry about less than a frame at 24fps */
741                         int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
742                         if (error >= too_much_error) {
743                                 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
744                         }
745                         DCPOMATIC_ASSERT (error < too_much_error);
746                         period.from = *_next_audio_time;
747                 }
748                 if (period.duration() > one_video_frame()) {
749                         period.to = period.from + one_video_frame();
750                 }
751                 fill_audio (period);
752                 _silent.set_position (period.to);
753                 break;
754         }
755         case NONE:
756                 done = true;
757                 break;
758         }
759
760         /* Emit any audio that is ready */
761
762         /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
763            of our streams, or the position of the _silent.  First, though we choose only streams that are less than
764            ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
765            behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
766            that will never come, causing bugs like #2101.
767         */
768         constexpr int ignore_streams_behind = 5;
769
770         using state_pair = std::pair<AudioStreamPtr, StreamState>;
771
772         /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
773         auto latest_last_push_end = std::max_element(
774                 _stream_states.begin(),
775                 _stream_states.end(),
776                 [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
777                 );
778
779         if (latest_last_push_end != _stream_states.end()) {
780                 LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
781         }
782
783         /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
784         std::map<AudioStreamPtr, StreamState> alive_stream_states;
785         for (auto const& i: _stream_states) {
786                 if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
787                         alive_stream_states.insert(i);
788                 } else {
789                         LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
790                 }
791         }
792
793         auto pull_to = _playback_length;
794         for (auto const& i: alive_stream_states) {
795                 if (!i.second.piece->done && i.second.last_push_end < pull_to) {
796                         pull_to = i.second.last_push_end;
797                 }
798         }
799         if (!_silent.done() && _silent.position() < pull_to) {
800                 pull_to = _silent.position();
801         }
802
803         LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
804         auto audio = _audio_merger.pull (pull_to);
805         for (auto i = audio.begin(); i != audio.end(); ++i) {
806                 if (_next_audio_time && i->second < *_next_audio_time) {
807                         /* This new data comes before the last we emitted (or the last seek); discard it */
808                         auto cut = discard_audio (i->first, i->second, *_next_audio_time);
809                         if (!cut.first) {
810                                 continue;
811                         }
812                         *i = cut;
813                 } else if (_next_audio_time && i->second > *_next_audio_time) {
814                         /* There's a gap between this data and the last we emitted; fill with silence */
815                         fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
816                 }
817
818                 emit_audio (i->first, i->second);
819         }
820
821         if (done) {
822                 if (_shuffler) {
823                         _shuffler->flush ();
824                 }
825                 for (auto const& i: _delay) {
826                         do_emit_video(i.first, i.second);
827                 }
828
829                 /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
830                  * However, if we have L and R video files, and one is shorter than the other,
831                  * the fill code in ::video mostly takes care of filling in the gaps.
832                  * However, since it fills at the point when it knows there is more video coming
833                  * at time t (so it should fill any gap up to t) it can't do anything right at the
834                  * end.  This is particularly bad news if the last frame emitted is a LEFT
835                  * eye, as the MXF writer will complain about the 3D sequence being wrong.
836                  * Here's a hack to workaround that particular case.
837                  */
838                 if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
839                         do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
840                 }
841         }
842
843         return done;
844 }
845
846
847 /** @return Open subtitles for the frame at the given time, converted to images */
848 optional<PositionImage>
849 Player::open_subtitles_for_frame (DCPTime time) const
850 {
851         list<PositionImage> captions;
852         int const vfr = _film->video_frame_rate();
853
854         for (
855                 auto j:
856                 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
857                 ) {
858
859                 /* Bitmap subtitles */
860                 for (auto i: j.bitmap) {
861                         if (!i.image) {
862                                 continue;
863                         }
864
865                         /* i.image will already have been scaled to fit _video_container_size */
866                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
867
868                         captions.push_back (
869                                 PositionImage (
870                                         i.image,
871                                         Position<int> (
872                                                 lrint(_video_container_size.width * i.rectangle.x),
873                                                 lrint(_video_container_size.height * i.rectangle.y)
874                                                 )
875                                         )
876                                 );
877                 }
878
879                 /* String subtitles (rendered to an image) */
880                 if (!j.string.empty()) {
881                         auto s = render_text(j.string, _video_container_size, time, vfr);
882                         copy (s.begin(), s.end(), back_inserter (captions));
883                 }
884         }
885
886         if (captions.empty()) {
887                 return {};
888         }
889
890         return merge (captions, _subtitle_alignment);
891 }
892
893
/** Handle a video frame decoded from a piece of content: discard it if it is out of
 *  range, fill any gap before it (with black, or a repeat of the last frame), wrap it
 *  in a PlayerVideo carrying crop/fade/scale information and emit it, repeating it if
 *  the frame rate change requires.
 *  @param weak_piece Piece that the video came from.
 *  @param video Frame and its metadata (frame index, eyes, part).
 */
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	/* When the rate change requires dropping every other frame, skip the odd ones */
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any; used instead of black when filling */
			auto last = _last_video.find (weak_piece);
			if (_film->three_d()) {
				/* In 3D we must emit whole LEFT/RIGHT pairs, tracking which eye comes next */
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Only advance time once both eyes of a frame have been emitted */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	/* Remember this frame so it can be repeated if we later need to fill a gap */
	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade (_film, video.frame),
		scale_for_display(
			content_video->scaled_size(_film->frame_size()),
			_video_container_size,
			_film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeating it if the rate change requires (e.g. doubling frames),
	   but never past the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}
1013
1014
/** Handle a block of audio decoded from a piece of content: trim it to the content's
 *  period, apply gain and any fade, remap it to the DCP channel layout, run it through
 *  any audio processor and push it to the merger, updating the stream's position.
 *  @param weak_piece Piece that the audio came from.
 *  @param stream Stream within the piece that the audio came from.
 *  @param content_audio Audio data and the (resampled) frame it starts at.
 */
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* Trim the block so that it finishes exactly at the end of the content */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Work on a copy so that the buffers we received are left unmodified */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap to the DCP's channel layout */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push to the merger and note how far this stream has now got */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1103
1104
1105 void
1106 Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
1107 {
1108         if (_suspended) {
1109                 return;
1110         }
1111
1112         auto piece = weak_piece.lock ();
1113         auto content = weak_content.lock ();
1114         if (!piece || !content) {
1115                 return;
1116         }
1117
1118         PlayerText ps;
1119         for (auto& sub: subtitle.subs)
1120         {
1121                 /* Apply content's subtitle offsets */
1122                 sub.rectangle.x += content->x_offset ();
1123                 sub.rectangle.y += content->y_offset ();
1124
1125                 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1126                 sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
1127                 sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
1128
1129                 /* Apply content's subtitle scale */
1130                 sub.rectangle.width *= content->x_scale ();
1131                 sub.rectangle.height *= content->y_scale ();
1132
1133                 auto image = sub.image;
1134
1135                 /* We will scale the subtitle up to fit _video_container_size */
1136                 int const width = sub.rectangle.width * _video_container_size.width;
1137                 int const height = sub.rectangle.height * _video_container_size.height;
1138                 if (width == 0 || height == 0) {
1139                         return;
1140                 }
1141
1142                 dcp::Size scaled_size (width, height);
1143                 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
1144         }
1145
1146         DCPTime from(content_time_to_dcp(piece, subtitle.from()));
1147         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1148 }
1149
1150
/** Handle some string subtitles decoded from a piece of content: apply the content's
 *  position offsets, scale and timing, then record them as active texts.
 *  @param weak_piece Piece that the subtitles came from.
 *  @param weak_content Text content within the piece.
 *  @param subtitle Subtitles and the time they start at.
 */
void
Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	/* Discard subtitles which start after the end of the content's period */
	if (from > piece->content->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		/* Apply content's subtitle position offsets */
		s.set_h_position (s.h_position() + content->x_offset());
		s.set_v_position (s.v_position() + content->y_offset());
		float const xs = content->x_scale();
		float const ys = content->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			/* 1 / min (1/xs, 1/ys) is the same as max (xs, ys) */
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* The subtitle's `in' time, expressed as a dcp::Time with a 1000Hz editable rate */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (s);
	}

	_active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}
1197
1198
1199 void
1200 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1201 {
1202         if (_suspended) {
1203                 return;
1204         }
1205
1206         auto content = weak_content.lock ();
1207         if (!content) {
1208                 return;
1209         }
1210
1211         if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
1212                 return;
1213         }
1214
1215         auto piece = weak_piece.lock ();
1216         if (!piece) {
1217                 return;
1218         }
1219
1220         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1221
1222         if (dcp_to > piece->content->end(_film)) {
1223                 return;
1224         }
1225
1226         auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
1227
1228         bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1229         if (content->use() && !always && !content->burn()) {
1230                 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
1231         }
1232 }
1233
1234
/** Seek the player to a given time, clearing all buffered state and seeking each piece
 *  as appropriate.  Does nothing if the player is suspended.
 *  @param time DCP time to seek to.
 *  @param accurate true to seek each decoder accurately to `time'; false to allow a
 *  quicker, approximate seek.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Throw away video waiting in the delay queue; it relates to the old position */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Likewise any pending merged audio and any active texts */
	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* After an accurate seek we know exactly what should be emitted next */
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		/* After an inaccurate seek we can't be sure, so leave the expectations unset */
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer suitable for gap-filling */
	_last_video.clear ();
}
1295
1296
1297 void
1298 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1299 {
1300         if (!_film->three_d()) {
1301                 if (pv->eyes() == Eyes::LEFT) {
1302                         /* Use left-eye images for both eyes... */
1303                         pv->set_eyes (Eyes::BOTH);
1304                 } else if (pv->eyes() == Eyes::RIGHT) {
1305                         /* ...and discard the right */
1306                         return;
1307                 }
1308         }
1309
1310         /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1311            player before the video that requires them.
1312         */
1313         _delay.push_back (make_pair (pv, time));
1314
1315         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1316                 _next_video_time = time + one_video_frame();
1317         }
1318         _next_video_eyes = increment_eyes (pv->eyes());
1319
1320         if (_delay.size() < 3) {
1321                 return;
1322         }
1323
1324         auto to_do = _delay.front();
1325         _delay.pop_front();
1326         do_emit_video (to_do.first, to_do.second);
1327 }
1328
1329
1330 void
1331 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1332 {
1333         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1334                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1335                         _active_texts[i].clear_before (time);
1336                 }
1337         }
1338
1339         auto subtitles = open_subtitles_for_frame (time);
1340         if (subtitles) {
1341                 pv->set_text (subtitles.get ());
1342         }
1343
1344         Video (pv, time);
1345 }
1346
1347
1348 void
1349 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1350 {
1351         /* Log if the assert below is about to fail */
1352         if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1353                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1354         }
1355
1356         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1357         DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1358         Audio (data, time, _film->audio_frame_rate());
1359         _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1360 }
1361
1362
1363 void
1364 Player::fill_audio (DCPTimePeriod period)
1365 {
1366         if (period.from == period.to) {
1367                 return;
1368         }
1369
1370         DCPOMATIC_ASSERT (period.from < period.to);
1371
1372         DCPTime t = period.from;
1373         while (t < period.to) {
1374                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1375                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1376                 if (samples) {
1377                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1378                         silence->make_silent ();
1379                         emit_audio (silence, t);
1380                 }
1381                 t += block;
1382         }
1383 }
1384
1385
1386 DCPTime
1387 Player::one_video_frame () const
1388 {
1389         return DCPTime::from_frames (1, _film->video_frame_rate ());
1390 }
1391
1392
1393 pair<shared_ptr<AudioBuffers>, DCPTime>
1394 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1395 {
1396         auto const discard_time = discard_to - time;
1397         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1398         auto remaining_frames = audio->frames() - discard_frames;
1399         if (remaining_frames <= 0) {
1400                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1401         }
1402         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1403         return make_pair(cut, time + discard_time);
1404 }
1405
1406
/** Set the decode reduction used for DCP content, rebuilding the pieces if it changed.
 *  Emits PENDING then DONE Change signals, or CANCELLED if the value is unchanged.
 *  @param reduction New reduction, or none for no reduction.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change: unlock before emitting the signal so that handlers can call
			   back into the Player without deadlocking on _mutex.
			*/
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* The scope above has released the lock before this signal is emitted */
	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1427
1428
1429 optional<DCPTime>
1430 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
1431 {
1432         boost::mutex::scoped_lock lm (_mutex);
1433
1434         for (auto i: _pieces) {
1435                 if (i->content == content) {
1436                         return content_time_to_dcp (i, t);
1437                 }
1438         }
1439
1440         /* We couldn't find this content; perhaps things are being changed over */
1441         return {};
1442 }
1443
1444
1445 optional<ContentTime>
1446 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
1447 {
1448         boost::mutex::scoped_lock lm (_mutex);
1449
1450         for (auto i: _pieces) {
1451                 if (i->content == content) {
1452                         return dcp_to_content_time (i, t);
1453                 }
1454         }
1455
1456         /* We couldn't find this content; perhaps things are being changed over */
1457         return {};
1458 }
1459
1460
1461 shared_ptr<const Playlist>
1462 Player::playlist () const
1463 {
1464         return _playlist ? _playlist : _film->playlist();
1465 }
1466
1467
1468 void
1469 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1470 {
1471         if (_suspended) {
1472                 return;
1473         }
1474
1475         auto piece = weak_piece.lock ();
1476         DCPOMATIC_ASSERT (piece);
1477
1478         auto const vfr = _film->video_frame_rate();
1479
1480         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1481         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1482                 return;
1483         }
1484
1485         Atmos (data.data, dcp_time, data.metadata);
1486 }
1487