/* src/lib/player.cc */
/*
    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/


#include "atmos_decoder.h"
#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "audio_processor.h"
#include "compose.hpp"
#include "config.h"
#include "content_audio.h"
#include "content_video.h"
#include "dcp_content.h"
#include "dcp_decoder.h"
#include "dcpomatic_log.h"
#include "decoder.h"
#include "decoder_factory.h"
#include "ffmpeg_content.h"
#include "film.h"
#include "frame_rate_change.h"
#include "image.h"
#include "image_decoder.h"
#include "job.h"
#include "log.h"
#include "maths_util.h"
#include "piece.h"
#include "player.h"
#include "player_video.h"
#include "playlist.h"
#include "ratio.h"
#include "raw_image_proxy.h"
#include "referenced_reel_asset.h"
#include "render_text.h"
#include "shuffler.h"
#include "text_content.h"
#include "text_decoder.h"
#include "timer.h"
#include "video_decoder.h"
#include <dcp/reel.h>
#include <dcp/reel_closed_caption_asset.h>
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <algorithm>
#include <iostream>
#include <stdint.h>

#include "i18n.h"


using std::copy;
using std::cout;
using std::dynamic_pointer_cast;
using std::list;
using std::make_pair;
using std::make_shared;
using std::max;
using std::min;
using std::pair;
using std::shared_ptr;
using std::vector;
using std::weak_ptr;
using boost::optional;
using boost::scoped_ptr;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
#endif
using namespace dcpomatic;


int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;


Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}


Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}


void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about playlist changes before anybody else, so since we are
	   proxying them through to the butler we must connect at the front of the signal.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}


void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}


bool
have_video (shared_ptr<const Content> content)
{
	return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
}


bool
have_audio (shared_ptr<const Content> content)
{
	return static_cast<bool>(content->audio) && content->can_be_played();
}


void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}


void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	Change (type, property, frequent);
}


void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
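

/* Every Player state change is announced with the PENDING / CANCELLED / DONE
 * protocol used above.  A minimal sketch of a listener (hypothetical names;
 * guarded out of the build) might look like this:
 */
#if 0
class ExampleObserver
{
public:
	explicit ExampleObserver (shared_ptr<Player> player)
	{
		_connection = player->Change.connect (bind(&ExampleObserver::player_change, this, _1, _2, _3));
	}

private:
	void player_change (ChangeType type, int property, bool frequent)
	{
		if (type == ChangeType::PENDING) {
			/* A change is coming; stop relying on the Player's output for now */
		} else if (type == ChangeType::DONE && property == PlayerProperty::VIDEO_CONTAINER_SIZE && !frequent) {
			/* Images from the Player will now be a different size */
		} else if (type == ChangeType::CANCELLED) {
			/* Nothing actually changed; carry on as before */
		}
	}

	boost::signals2::scoped_connection _connection;
};
#endif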


void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}


void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == ChangeType::DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && _film->audio_processor ()) {
			boost::mutex::scoped_lock lm (_mutex);
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			_audio_merger.clear ();
		}
	}
}


shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}


Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
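

/* A worked example of the conversion above (illustrative only, and assuming
 * that frc.factor() expresses the skip/repeat adjustment as its use here
 * implies): with 48fps content in a 24fps DCP alternate content frames are
 * skipped, so frc.dcp == 24 and frc.factor() == 0.5.  For s = 1 second,
 *
 *     s.frames_floor (24) / 0.5  =  24 / 0.5  =  48
 *
 * i.e. one second into the piece corresponds to content frame 48, as expected
 * for 48fps material.
 */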


DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
	return d + piece->content->position();
}


Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}


DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
		+ piece->content->position();
}


ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}


DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
}


vector<shared_ptr<Font>>
Player::get_subtitle_fonts ()
{
	boost::mutex::scoped_lock lm (_mutex);

	vector<shared_ptr<Font>> fonts;
	for (auto piece: _pieces) {
		for (auto text: piece->content->text) {
			auto text_fonts = text->fonts();
			copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
		}
	}

	return fonts;
}


/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
	setup_pieces();
}


void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	setup_pieces_unlocked ();
}


void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	setup_pieces_unlocked ();
}


/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
}


/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	setup_pieces_unlocked ();
}


void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
}

static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}


list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> reel_assets;

	for (auto content: playlist()->content()) {
		auto dcp = dynamic_pointer_cast<DCPContent>(content);
		if (!dcp) {
			continue;
		}

		if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			return reel_assets;
		}

		auto const frame_rate = _film->video_frame_rate();
		DCPOMATIC_ASSERT (dcp->video_frame_rate());
		/* We should only be referencing if the DCP rate is the same as the film rate */
		DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);

		Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
		Frame const trim_end = dcp->trim_end().frames_round(frame_rate);

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto reel: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += reel->main_picture()->actual_duration();
		}

		for (auto reel: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = reel->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
			if (dcp->reference_video()) {
				maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_audio()) {
				maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
				for (auto caption: reel->closed_captions()) {
					maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return reel_assets;
}
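

/* A worked example of the reel trimming above (illustrative numbers only):
 * for a referenced DCP with two 100-frame reels, trim_start = 30 and
 * trim_end = 20, we start with offset_from_start = 0 and offset_from_end = 200.
 *
 *   Reel 1: reel_trim_start = min(100, max(0, 30 - 0))           = 30
 *           reel_trim_end   = min(100, max(0, 100 - (200 - 20))) = 0
 *   Reel 2: reel_trim_start = min(100, max(0, 30 - 100))         = 0
 *           reel_trim_end   = min(100, max(0, 100 - (100 - 20))) = 20
 *
 * so the 30-frame head trim comes entirely out of the first reel and the
 * 20-frame tail trim entirely out of the second, as in doc/design/trim_reels.svg.
 */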


bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty (black/silent filler) which is farthest behind where we are, and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_next_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_next_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _next_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_next_audio_time;
		}
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of
	   one of our streams, or the position of _silent.  First, though, we choose only streams that are less than
	   ignore_streams_behind seconds behind the one that is furthest ahead (we assume that if a stream has fallen
	   that far behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from
	   a stream that will never come, causing bugs like #2101.
	*/
	constexpr int ignore_streams_behind = 5;

	using state_pair = std::pair<AudioStreamPtr, StreamState>;

	/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
	auto latest_last_push_end = std::max_element(
		_stream_states.begin(),
		_stream_states.end(),
		[](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
		);

	if (latest_last_push_end != _stream_states.end()) {
		LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
	}

	/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
	std::map<AudioStreamPtr, StreamState> alive_stream_states;
	for (auto const& i: _stream_states) {
		if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
			alive_stream_states.insert(i);
		} else {
			LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
		}
	}

	auto pull_to = _playback_length;
	for (auto const& i: alive_stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_next_audio_time && i->second < *_next_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_next_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_next_audio_time && i->second > *_next_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		if (_shuffler) {
			_shuffler->flush ();
		}
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}

		/* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
		 * If we have L and R video files, and one is shorter than the other, the fill
		 * code in ::video mostly takes care of filling in the gaps.  But since it fills
		 * at the point when it knows there is more video coming at time t (so it should
		 * fill any gap up to t) it can't do anything right at the end.  This is
		 * particularly bad news if the last frame emitted is a LEFT eye, as the MXF
		 * writer will complain about the 3D sequence being wrong.  Here's a hack to
		 * work around that particular case.
		 */
		if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
			do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
		}
	}

	return done;
}
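

/* pass() is intended to be called repeatedly until it returns true, with the
 * caller consuming the Video / Audio / Text signals as they are emitted.  A
 * minimal sketch of such a driver (hypothetical, and guarded out of the build;
 * the real callers are the likes of the Butler and the DCP encoder):
 */
#if 0
static void
example_drive (shared_ptr<const Film> film)
{
	auto player = make_shared<Player>(film, Image::Alignment::PADDED);

	player->Video.connect ([](shared_ptr<PlayerVideo> video, DCPTime time) {
		/* Consume one frame of video for DCP time `time' */
	});

	player->Audio.connect ([](shared_ptr<AudioBuffers> audio, DCPTime time, int frame_rate) {
		/* Consume a block of audio starting at DCP time `time' */
	});

	player->seek (DCPTime(), true);
	while (!player->pass()) {}
}
#endif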


/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text(j.string, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	return merge (captions, _subtitle_alignment);
}


void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			auto last = _last_video.find (weak_piece);
			if (_film->three_d()) {
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade (_film, video.frame),
		scale_for_display(
			content_video->scaled_size(_film->frame_size()),
			_video_container_size,
			_film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}
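

/* An illustrative trace of the 3D fill logic above: suppose _next_video_time
 * is frame 100 and _next_video_eyes is RIGHT, and a LEFT frame arrives for
 * frame 102.  The fill loop emits RIGHT@100, LEFT@101 and RIGHT@101 (copies
 * of the last video, or black) and stops with eyes == LEFT at 102, so the
 * incoming frame continues the L/R sequence that the MXF writer expects.
 */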


void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
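

/* For example (illustrative numbers): a content gain of -6dB gives
 * db_to_linear(-6) of about 0.5, so during a fade each sample is multiplied
 * by roughly half its original value times that frame's fade coefficient,
 * while outside a fade apply_gain(-6) applies the same factor on its own.
 */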


void
Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	for (auto& sub: subtitle.subs)
	{
		/* Apply content's subtitle offsets */
		sub.rectangle.x += content->x_offset ();
		sub.rectangle.y += content->y_offset ();

		/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
		sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
		sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);

		/* Apply content's subtitle scale */
		sub.rectangle.width *= content->x_scale ();
		sub.rectangle.height *= content->y_scale ();

		auto image = sub.image;

		/* We will scale the subtitle up to fit _video_container_size */
		int const width = sub.rectangle.width * _video_container_size.width;
		int const height = sub.rectangle.height * _video_container_size.height;
		if (width == 0 || height == 0) {
			return;
		}

		dcp::Size scaled_size (width, height);
		ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
	}

	DCPTime from(content_time_to_dcp(piece, subtitle.from()));
	_active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}


void
Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + content->x_offset());
		s.set_v_position (s.v_position() + content->y_offset());
		float const xs = content->x_scale();
		float const ys = content->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we halve the size.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (s);
	}

	_active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}
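

/* A worked example of the scaling above: with xs = 0.5 and ys = 1.0 the
 * common part of the scale is 1 / min(1/0.5, 1/1.0) = 1, so the point size
 * is unchanged and the difference is expressed as an aspect adjustment of
 * xs / ys = 0.5.  With xs = ys = 0.5 the size is halved and no aspect
 * adjustment is needed.
 */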


void
Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
{
	if (_suspended) {
		return;
	}

	auto content = weak_content.lock ();
	if (!content) {
		return;
	}

	if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	DCPTime const dcp_to = content_time_to_dcp (piece, to);

	if (dcp_to > piece->content->end(_film)) {
		return;
	}

	auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);

	bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (content->use() && !always && !content->burn()) {
		Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
	}
}


void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	_last_video.clear ();
}


void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	if (!_film->three_d()) {
		if (pv->eyes() == Eyes::LEFT) {
			/* Use left-eye images for both eyes... */
			pv->set_eyes (Eyes::BOTH);
		} else if (pv->eyes() == Eyes::RIGHT) {
			/* ...and discard the right */
			return;
		}
	}

	/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
	   player before the video that requires them.
	*/
	_delay.push_back (make_pair (pv, time));

	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		_next_video_time = time + one_video_frame();
	}
	_next_video_eyes = increment_eyes (pv->eyes());

	if (_delay.size() < 3) {
		return;
	}

	auto to_do = _delay.front();
	_delay.pop_front();
	do_emit_video (to_do.first, to_do.second);
}


void
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
			_active_texts[i].clear_before (time);
		}
	}

	auto subtitles = open_subtitles_for_frame (time);
	if (subtitles) {
		pv->set_text (subtitles.get ());
	}

	Video (pv, time);
}


void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	/* Log if the assert below is about to fail */
	if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
	}

	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
	DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
	Audio (data, time, _film->audio_frame_rate());
	_next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}


void
Player::fill_audio (DCPTimePeriod period)
{
	if (period.from == period.to) {
		return;
	}

	DCPOMATIC_ASSERT (period.from < period.to);

	DCPTime t = period.from;
	while (t < period.to) {
		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
		Frame const samples = block.frames_round(_film->audio_frame_rate());
		if (samples) {
			auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
			silence->make_silent ();
			emit_audio (silence, t);
		}
		t += block;
	}
}


DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}


pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
{
	auto const discard_time = discard_to - time;
	auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
	auto remaining_frames = audio->frames() - discard_frames;
	if (remaining_frames <= 0) {
		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
	}
	auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
	return make_pair(cut, time + discard_time);
}
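

/* A worked example of discard_audio(): with a 48kHz DCP, audio arriving at
 * time = 10s that must be discarded up to discard_to = 10.5s loses
 * discard_frames = 24000 frames from its start; the returned buffer holds
 * the remaining frames and is timestamped 10.5s.
 */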


void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}


optional<DCPTime>
Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
{
	boost::mutex::scoped_lock lm (_mutex);

	for (auto i: _pieces) {
		if (i->content == content) {
			return content_time_to_dcp (i, t);
		}
	}

	/* We couldn't find this content; perhaps things are being changed over */
	return {};
}


optional<ContentTime>
Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
{
	boost::mutex::scoped_lock lm (_mutex);

	for (auto i: _pieces) {
		if (i->content == content) {
			return dcp_to_content_time (i, t);
		}
	}

	/* We couldn't find this content; perhaps things are being changed over */
	return {};
}


shared_ptr<const Playlist>
Player::playlist () const
{
	return _playlist ? _playlist : _film->playlist();
}


void
Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	DCPOMATIC_ASSERT (piece);

	auto const vfr = _film->video_frame_rate();

	DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
	if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
		return;
	}

	Atmos (data.data, dcp_time, data.metadata);
}