/*
    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/


#include "atmos_decoder.h"
#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "audio_processor.h"
#include "compose.hpp"
#include "config.h"
#include "content_audio.h"
#include "content_video.h"
#include "dcp_content.h"
#include "dcp_decoder.h"
#include "dcpomatic_log.h"
#include "decoder.h"
#include "decoder_factory.h"
#include "ffmpeg_content.h"
#include "film.h"
#include "frame_rate_change.h"
#include "image.h"
#include "image_decoder.h"
#include "job.h"
#include "log.h"
#include "piece.h"
#include "player.h"
#include "player_video.h"
#include "playlist.h"
#include "ratio.h"
#include "raw_image_proxy.h"
#include "referenced_reel_asset.h"
#include "render_text.h"
#include "shuffler.h"
#include "text_content.h"
#include "text_decoder.h"
#include "timer.h"
#include "video_decoder.h"
#include <dcp/reel.h>
#include <dcp/reel_closed_caption_asset.h>
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <algorithm>
#include <iostream>
#include <stdint.h>

#include "i18n.h"


using std::copy;
using std::cout;
using std::dynamic_pointer_cast;
using std::list;
using std::make_pair;
using std::make_shared;
using std::max;
using std::min;
using std::pair;
using std::shared_ptr;
using std::vector;
using std::weak_ptr;
using boost::optional;
using boost::scoped_ptr;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
#endif
using namespace dcpomatic;


int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;


Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
        : _film (film)
        , _suspended (0)
        , _tolerant (film->tolerant())
        , _audio_merger (_film->audio_frame_rate())
        , _subtitle_alignment (subtitle_alignment)
{
        construct ();
}


Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
        : _film (film)
        , _playlist (playlist_)
        , _suspended (0)
        , _tolerant (film->tolerant())
        , _audio_merger (_film->audio_frame_rate())
{
        construct ();
}


void
Player::construct ()
{
        _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
        /* The butler must hear about changes before anyone else; since we proxy the
           playlist's Change signal through to the butler, our connection must be at the front.
        */
        _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
        _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
        set_video_container_size (_film->frame_size ());

        film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

        setup_pieces ();
        seek (DCPTime (), true);
}


void
Player::setup_pieces ()
{
        boost::mutex::scoped_lock lm (_mutex);
        setup_pieces_unlocked ();
}


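/** @return true if the content has video and that video is marked to be used */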
bool
have_video (shared_ptr<const Content> content)
{
        return static_cast<bool>(content->video) && content->video->use();
}


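/** @return true if the content has audio */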
bool
have_audio (shared_ptr<const Content> content)
{
        return static_cast<bool>(content->audio);
}


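/** Rebuild our list of Pieces from the playlist's content, reconnecting decoder
 *  signals and recalculating the periods of black and silence that we must fill.
 *  The caller must hold a lock on _mutex.
 */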
void
Player::setup_pieces_unlocked ()
{
        _playback_length = _playlist ? _playlist->length(_film) : _film->length();

        auto old_pieces = _pieces;
        _pieces.clear ();

        _shuffler.reset (new Shuffler());
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));

        for (auto i: playlist()->content()) {

                if (!i->paths_valid ()) {
                        continue;
                }

                if (_ignore_video && _ignore_audio && i->text.empty()) {
                        /* We're only interested in text and this content has none */
                        continue;
                }

                shared_ptr<Decoder> old_decoder;
                for (auto j: old_pieces) {
                        if (j->content == i) {
                                old_decoder = j->decoder;
                                break;
                        }
                }

                auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
                DCPOMATIC_ASSERT (decoder);

                FrameRateChange frc (_film, i);

                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
                }

                if (decoder->audio && _ignore_audio) {
                        decoder->audio->set_ignore (true);
                }

                if (_ignore_text) {
                        for (auto i: decoder->text) {
                                i->set_ignore (true);
                        }
                }

                auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp) {
                        dcp->set_decode_referenced (_play_referenced);
                        if (_play_referenced) {
                                dcp->set_forced_reduction (_dcp_decode_reduction);
                        }
                }

                auto piece = make_shared<Piece>(i, decoder, frc);
                _pieces.push_back (piece);

                if (decoder->video) {
                        if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
                                decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
                        } else {
                                decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
                }

                if (decoder->audio) {
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }

                auto j = decoder->text.begin();

                while (j != decoder->text.end()) {
                        (*j)->BitmapStart.connect (
                                bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->PlainStart.connect (
                                bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->Stop.connect (
                                bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );

                        ++j;
                }

                if (decoder->atmos) {
                        decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
                }
        }

        _stream_states.clear ();
        for (auto i: _pieces) {
                if (i->content->audio) {
                        for (auto j: i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }

        for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
                if (auto video = (*i)->content->video) {
                        if (video->use() && video->frame_type() != VideoFrameType::THREE_D_LEFT && video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
                                /* Look for content later in the content list with in-use video that overlaps this */
                                auto period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
                                auto j = i;
                                ++j;
                                for (; j != _pieces.end(); ++j) {
                                        if ((*j)->content->video && (*j)->content->video->use()) {
                                                (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
                                        }
                                }
                        }
                }
        }

        _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
        _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

        _next_video_time = boost::none;
        _next_video_eyes = Eyes::BOTH;
        _next_audio_time = boost::none;
}


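/** Respond to a change in our playlist's content: suspend pass() and seek()
 *  while the change is pending, and rebuild our pieces when it has happened.
 */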
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
        if (property == VideoContentProperty::CROP) {
                if (type == ChangeType::DONE) {
                        auto const vcs = video_container_size();
                        boost::mutex::scoped_lock lm (_mutex);
                        for (auto const& i: _delay) {
                                i.first->reset_metadata (_film, vcs);
                        }
                }
        } else {
                if (type == ChangeType::PENDING) {
                        /* The player content is probably about to change, so we can't carry on
                           until that has happened and we've rebuilt our pieces.  Stop pass()
                           and seek() from working until then.
                        */
                        ++_suspended;
                } else if (type == ChangeType::DONE) {
                        /* A change in our content has gone through.  Re-build our pieces. */
                        setup_pieces ();
                        --_suspended;
                } else if (type == ChangeType::CANCELLED) {
                        --_suspended;
                }
        }

        Change (type, property, frequent);
}


void
Player::set_video_container_size (dcp::Size s)
{
        Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

        {
                boost::mutex::scoped_lock lm (_mutex);

                if (s == _video_container_size) {
                        lm.unlock ();
                        Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
                        return;
                }

                _video_container_size = s;

                _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
                _black_image->make_black ();
        }

        Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}


void
Player::playlist_change (ChangeType type)
{
        if (type == ChangeType::DONE) {
                setup_pieces ();
        }
        Change (type, PlayerProperty::PLAYLIST, false);
}


void
Player::film_change (ChangeType type, Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::Property::CONTAINER) {
                Change (type, PlayerProperty::FILM_CONTAINER, false);
        } else if (p == Film::Property::VIDEO_FRAME_RATE) {
                /* Pieces contain a FrameRateChange which contains the DCP frame rate,
                   so we need new pieces here.
                */
                if (type == ChangeType::DONE) {
                        setup_pieces ();
                }
                Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
        } else if (p == Film::Property::AUDIO_PROCESSOR) {
                if (type == ChangeType::DONE && _film->audio_processor ()) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
                }
        } else if (p == Film::Property::AUDIO_CHANNELS) {
                if (type == ChangeType::DONE) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_merger.clear ();
                }
        }
}


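/** @return A black frame for the given eyes, filling the whole video container */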
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
        return std::make_shared<PlayerVideo> (
                std::make_shared<const RawImageProxy>(_black_image),
                Crop(),
                optional<double>(),
                _video_container_size,
                _video_container_size,
                eyes,
                Part::WHOLE,
                PresetColourConversion::all().front().conversion,
                VideoRange::FULL,
                std::weak_ptr<Content>(),
                boost::optional<Frame>(),
                false
        );
}


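/** @param piece A Piece from our list.
 *  @param t Time relative to the start of the DCP.
 *  @return The index of the piece's content video frame at that time.
 */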
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        auto s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

        /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
           then convert that ContentTime to frames at the content's rate.  However this fails for
           situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
           enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

           Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
        */
        return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}


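/** The inverse of dcp_to_content_video: @return the DCP time of frame f of the piece's content */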
422
423 DCPTime
424 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
425 {
426         /* See comment in dcp_to_content_video */
427         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
428         return d + piece->content->position();
429 }
430
431
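/** @return The index of the resampled audio frame (at the film's audio rate) corresponding to DCP time t within the piece */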
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        auto s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        /* See notes in dcp_to_content_video */
        return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}


DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        /* See comment in dcp_to_content_video */
        return DCPTime::from_frames (f, _film->audio_frame_rate())
                - DCPTime (piece->content->trim_start(), piece->frc)
                + piece->content->position();
}


ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
        auto s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}


DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
        return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
}


vector<FontData>
Player::get_subtitle_fonts ()
{
        boost::mutex::scoped_lock lm (_mutex);

        vector<FontData> fonts;
        for (auto i: _pieces) {
                /* XXX: things may go wrong if there are duplicate font IDs
                   with different font files.
                */
                auto f = i->decoder->fonts ();
                copy (f.begin(), f.end(), back_inserter(fonts));
        }

        return fonts;
}


/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_video = true;
        setup_pieces_unlocked ();
}


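/** Set this player never to produce any audio data */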
void
Player::set_ignore_audio ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_audio = true;
        setup_pieces_unlocked ();
}


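/** Set this player never to produce any text data */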
void
Player::set_ignore_text ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_text = true;
        setup_pieces_unlocked ();
}


/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _always_burn_open_subtitles = true;
}


/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _fast = true;
        setup_pieces_unlocked ();
}


void
Player::set_play_referenced ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _play_referenced = true;
        setup_pieces_unlocked ();
}


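/** Trim r by reel_trim_start and reel_trim_end frames and, if anything remains,
 *  add it to a with a DCP period starting at from.  ffr is the film's video frame rate.
 */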
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
        DCPOMATIC_ASSERT (r);
        r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
        r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
        if (r->actual_duration() > 0) {
                a.push_back (
                        ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
                        );
        }
}


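/** @return Reel assets from DCP content which the film is set to reference rather than re-encode */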
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
        /* Does not require a lock on _mutex as it's only called from DCPEncoder */

        list<ReferencedReelAsset> reel_assets;

        for (auto content: playlist()->content()) {
                auto dcp = dynamic_pointer_cast<DCPContent>(content);
                if (!dcp) {
                        continue;
                }

                if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
                        continue;
                }

                scoped_ptr<DCPDecoder> decoder;
                try {
                        decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
                } catch (...) {
                        return reel_assets;
                }

                auto const frame_rate = _film->video_frame_rate();
                DCPOMATIC_ASSERT (dcp->video_frame_rate());
                /* We should only be referencing if the DCP rate is the same as the film rate */
                DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);

                Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
                Frame const trim_end = dcp->trim_end().frames_round(frame_rate);

                /* position in the asset from the start */
                int64_t offset_from_start = 0;
                /* position in the asset from the end */
                int64_t offset_from_end = 0;
                for (auto reel: decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += reel->main_picture()->actual_duration();
                }

                for (auto reel: decoder->reels()) {

                        /* Assume that main picture duration is the length of the reel */
                        int64_t const reel_duration = reel->main_picture()->actual_duration();

                        /* See doc/design/trim_reels.svg */
                        Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
                        Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

                        auto const from = max(DCPTime(), content->position() + DCPTime::from_frames(offset_from_start, frame_rate) - DCPTime::from_frames(trim_start, frame_rate));
                        if (dcp->reference_video()) {
                                maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
                        }

                        if (dcp->reference_audio()) {
                                maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
                        }

                        if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
                                maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
                        }

                        if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
                                for (auto caption: reel->closed_captions()) {
                                        maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
                                }
                        }

                        offset_from_start += reel_duration;
                        offset_from_end -= reel_duration;
                }
        }

        return reel_assets;
}


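/** Ask the decoder (or the black/silence filler) which is furthest behind to
 *  emit some more data, then emit any audio which is now complete.
 *  @return true if playback has reached the end of everything.
 */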
bool
Player::pass ()
{
        boost::mutex::scoped_lock lm (_mutex);

        if (_suspended) {
                /* We can't pass in this state */
                LOG_DEBUG_PLAYER_NC ("Player is suspended");
                return false;
        }

        if (_playback_length == DCPTime()) {
                /* Special; just give one black frame */
                emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
                return true;
        }

        /* Find the decoder or empty which is farthest behind where we are and make it emit some data */

        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;

        for (auto i: _pieces) {
                if (i->done) {
                        continue;
                }

                auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
                if (t > i->content->end(_film)) {
                        i->done = true;
                } else {

                        /* Given two choices at the same time, pick the one with texts so we see it before
                           the video.
                        */
                        if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
                                earliest_time = t;
                                earliest_content = i;
                        }
                }
        }

        bool done = false;

        enum {
                NONE,
                CONTENT,
                BLACK,
                SILENT
        } which = NONE;

        if (earliest_content) {
                which = CONTENT;
        }

        if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
                earliest_time = _black.position ();
                which = BLACK;
        }

        if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
                which = SILENT;
        }

        switch (which) {
        case CONTENT:
        {
                LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
                auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
                        /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
                           to `hide' the fact that no audio was emitted during the referenced DCP (though
                           we need to behave as though it was).
                        */
                        _next_audio_time = dcp->end (_film);
                }
                break;
        }
        case BLACK:
                LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
                emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
                LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
                DCPTimePeriod period (_silent.period_at_position());
                if (_next_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
                           or after this silence.  Bodge the start time of the silence to fix it.
                           I think this is nothing to worry about since we will just add or
                           remove a little silence at the end of some content.
                        */
                        int64_t const error = labs(period.from.get() - _next_audio_time->get());
                        /* Let's not worry about less than a frame at 24fps */
                        int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
                        if (error >= too_much_error) {
                                _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
                        }
                        DCPOMATIC_ASSERT (error < too_much_error);
                        period.from = *_next_audio_time;
                }
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
                fill_audio (period);
                _silent.set_position (period.to);
                break;
        }
        case NONE:
                done = true;
                break;
        }

        /* Emit any audio that is ready */

        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of _silent.  First, though, we choose only streams that are fewer than
           ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
           behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
           that will never come, causing bugs like #2101.
        */
        constexpr int ignore_streams_behind = 5;

        using state_pair = std::pair<AudioStreamPtr, StreamState>;

        /* Find the 'leading' stream (i.e. the one whose pushed audio extends furthest) */
        auto latest_last_push_end = std::max_element(
                _stream_states.begin(),
                _stream_states.end(),
                [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
                );

        if (latest_last_push_end != _stream_states.end()) {
                LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
        }

        /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
        std::map<AudioStreamPtr, StreamState> alive_stream_states;
        for (auto const& i: _stream_states) {
                if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
                        alive_stream_states.insert(i);
                } else {
                        LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
                }
        }

        auto pull_to = _playback_length;
        for (auto const& i: alive_stream_states) {
                if (!i.second.piece->done && i.second.last_push_end < pull_to) {
                        pull_to = i.second.last_push_end;
                }
        }
        if (!_silent.done() && _silent.position() < pull_to) {
                pull_to = _silent.position();
        }

        LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
        auto audio = _audio_merger.pull (pull_to);
        for (auto i = audio.begin(); i != audio.end(); ++i) {
                if (_next_audio_time && i->second < *_next_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
                        auto cut = discard_audio (i->first, i->second, *_next_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
                } else if (_next_audio_time && i->second > *_next_audio_time) {
                        /* There's a gap between this data and the last we emitted; fill with silence */
                        fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
                }

                emit_audio (i->first, i->second);
        }

        if (done) {
                _shuffler->flush ();
                for (auto const& i: _delay) {
                        do_emit_video(i.first, i.second);
                }

                /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
                 * If we have L and R video files, and one is shorter than the other,
                 * the fill code in ::video mostly takes care of filling in the gaps.
                 * However, since it fills at the point when it knows there is more video coming
                 * at time t (so it should fill any gap up to t) it can't do anything right at the
                 * end.  This is particularly bad news if the last frame emitted is a LEFT
                 * eye, as the MXF writer will complain about the 3D sequence being wrong.
                 * Here's a hack to work around that particular case.
                 */
                if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
                        do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
                }
        }

        return done;
}


/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
        list<PositionImage> captions;
        int const vfr = _film->video_frame_rate();

        for (
                auto j:
                _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {

                /* Bitmap subtitles */
                for (auto i: j.bitmap) {
                        if (!i.image) {
                                continue;
                        }

                        /* i.image will already have been scaled to fit _video_container_size */
                        dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

                        captions.push_back (
                                PositionImage (
                                        i.image,
                                        Position<int> (
                                                lrint(_video_container_size.width * i.rectangle.x),
                                                lrint(_video_container_size.height * i.rectangle.y)
                                                )
                                        )
                                );
                }

                /* String subtitles (rendered to an image) */
                if (!j.string.empty()) {
                        auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
                        copy (s.begin(), s.end(), back_inserter (captions));
                }
        }

        if (captions.empty()) {
                return {};
        }

        return merge (captions, _subtitle_alignment);
}


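/** Handle a video frame from one of our decoders (possibly via the Shuffler),
 *  filling any gap since the last frame emitted before emitting this one.
 */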
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
        if (_suspended) {
                return;
        }

        auto piece = wp.lock ();
        if (!piece) {
                return;
        }

        if (!piece->content->video->use()) {
                return;
        }

        FrameRateChange frc (_film, piece->content);
        if (frc.skip && (video.frame % 2) == 1) {
                return;
        }

        /* Time of the first frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);
        LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

        /* Discard if it's before the content's period or the last accurate seek.  We can't discard
           if it's after the content's period here as in that case we still need to fill any gap between
           `now' and the end of the content's period.
        */
        if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
                return;
        }

        if (piece->ignore_video && piece->ignore_video->contains(time)) {
                return;
        }

        /* Fill gaps that we discover now that we have some video which needs to be emitted.
           This is where we need to fill to.
        */
        DCPTime fill_to = min (time, piece->content->end(_film));

        if (_next_video_time) {
                DCPTime fill_from = max (*_next_video_time, piece->content->position());

                /* Fill if we have more than half a frame to do */
                if ((fill_to - fill_from) > one_video_frame() / 2) {
                        auto last = _last_video.find (wp);
                        if (_film->three_d()) {
                                auto fill_to_eyes = video.eyes;
                                if (fill_to_eyes == Eyes::BOTH) {
                                        fill_to_eyes = Eyes::LEFT;
                                }
                                if (fill_to == piece->content->end(_film)) {
                                        /* Don't fill after the end of the content */
                                        fill_to_eyes = Eyes::LEFT;
                                }
                                auto j = fill_from;
                                auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
                                if (eyes == Eyes::BOTH) {
                                        eyes = Eyes::LEFT;
                                }
                                while (j < fill_to || eyes != fill_to_eyes) {
                                        if (last != _last_video.end()) {
                                                LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
                                                auto copy = last->second->shallow_copy();
                                                copy->set_eyes (eyes);
                                                emit_video (copy, j);
                                        } else {
                                                LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
                                                emit_video (black_player_video_frame(eyes), j);
                                        }
                                        if (eyes == Eyes::RIGHT) {
                                                j += one_video_frame();
                                        }
                                        eyes = increment_eyes (eyes);
                                }
                        } else {
                                for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
                                        if (last != _last_video.end()) {
                                                emit_video (last->second, j);
                                        } else {
                                                emit_video (black_player_video_frame(Eyes::BOTH), j);
                                        }
                                }
                        }
                }
        }

        auto const content_video = piece->content->video;

        _last_video[wp] = std::make_shared<PlayerVideo>(
                video.image,
                content_video->actual_crop(),
                content_video->fade (_film, video.frame),
                scale_for_display(
                        content_video->scaled_size(_film->frame_size()),
                        _video_container_size,
                        _film->frame_size(),
                        content_video->pixel_quanta()
                        ),
                _video_container_size,
                video.eyes,
                video.part,
                content_video->colour_conversion(),
                content_video->range(),
                piece->content,
                video.frame,
                false
                );

        DCPTime t = time;
        for (int i = 0; i < frc.repeat; ++i) {
                if (t < piece->content->end(_film)) {
                        emit_video (_last_video[wp], t);
                }
                t += one_video_frame ();
        }
}


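/** Handle some audio from one of our decoders: trim it to the content's period,
 *  apply gain, remap and process it, then push it to the merger.
 */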
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
        if (_suspended) {
                return;
        }

        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        auto piece = wp.lock ();
        if (!piece) {
                return;
        }

        auto content = piece->content->audio;
        DCPOMATIC_ASSERT (content);

        int const rfr = content->resampled_frame_rate (_film);

        /* Compute time in the DCP */
        auto time = resampled_audio_to_dcp (piece, content_audio.frame);
        LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

        /* And the end of this block in the DCP */
        auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

        /* Remove anything that comes before the start or after the end of the content */
        if (time < piece->content->position()) {
                auto cut = discard_audio (content_audio.audio, time, piece->content->position());
                if (!cut.first) {
                        /* This audio is entirely discarded */
                        return;
                }
                content_audio.audio = cut.first;
                time = cut.second;
        } else if (time > piece->content->end(_film)) {
                /* Discard it all */
                return;
        } else if (end > piece->content->end(_film)) {
                Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
                if (remaining_frames == 0) {
                        return;
                }
                content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
        }

        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        /* Gain */

        if (content->gain() != 0) {
                auto gain = make_shared<AudioBuffers>(content_audio.audio);
                gain->apply_gain (content->gain());
                content_audio.audio = gain;
        }

        /* Remap */

        content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

        /* Process */

        if (_audio_processor) {
                content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
        }

        /* Push */

        _audio_merger.push (content_audio.audio, time);
        DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
        _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}


void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
{
        if (_suspended) {
                return;
        }

        auto piece = wp.lock ();
        auto text = wc.lock ();
        if (!piece || !text) {
                return;
        }

        /* Apply content's subtitle offsets */
        subtitle.sub.rectangle.x += text->x_offset ();
        subtitle.sub.rectangle.y += text->y_offset ();

        /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
        subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
        subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

        /* Apply content's subtitle scale */
        subtitle.sub.rectangle.width *= text->x_scale ();
        subtitle.sub.rectangle.height *= text->y_scale ();

        PlayerText ps;
        auto image = subtitle.sub.image;

        /* We will scale the subtitle up to fit _video_container_size */
        int const width = subtitle.sub.rectangle.width * _video_container_size.width;
        int const height = subtitle.sub.rectangle.height * _video_container_size.height;
        if (width == 0 || height == 0) {
                return;
        }

        dcp::Size scaled_size (width, height);
        ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), subtitle.sub.rectangle));
        DCPTime from (content_time_to_dcp (piece, subtitle.from()));

        _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}


void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
        if (_suspended) {
                return;
        }

        auto piece = wp.lock ();
        auto text = wc.lock ();
        if (!piece || !text) {
                return;
        }

        PlayerText ps;
        DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

        if (from > piece->content->end(_film)) {
                return;
        }

        for (auto s: subtitle.subs) {
                s.set_h_position (s.h_position() + text->x_offset ());
                s.set_v_position (s.v_position() + text->y_offset ());
                float const xs = text->x_scale();
                float const ys = text->y_scale();
                float size = s.size();

                /* Adjust size to express the common part of the scaling;
                   e.g. if xs = ys = 0.5 we scale size by 0.5.
                */
                if (xs > 1e-5 && ys > 1e-5) {
                        size *= 1 / min (1 / xs, 1 / ys);
                }
                s.set_size (size);

                /* Then express aspect ratio changes */
                if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
                        s.set_aspect_adjust (xs / ys);
                }

                s.set_in (dcp::Time(from.seconds(), 1000));
                ps.string.push_back (StringText (s, text->outline_width()));
                ps.add_fonts (text->fonts ());
        }

        _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}


void
Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
{
        if (_suspended) {
                return;
        }

        auto text = wc.lock ();
        if (!text) {
                return;
        }

        if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
                return;
        }

        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                return;
        }

        DCPTime const dcp_to = content_time_to_dcp (piece, to);

        if (dcp_to > piece->content->end(_film)) {
                return;
        }

        auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);

        bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
        if (text->use() && !always && !text->burn()) {
                Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
        }
}


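/** Seek the player to a new position.
 *  @param time Position to seek to.
 *  @param accurate true to seek exactly to time; false to seek to somewhere
 *  convenient near it (e.g. a nearby keyframe).
 */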
void
Player::seek (DCPTime time, bool accurate)
{
        boost::mutex::scoped_lock lm (_mutex);
        LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

        if (_suspended) {
                /* We can't seek in this state */
                return;
        }

        if (_shuffler) {
                _shuffler->clear ();
        }

        _delay.clear ();

        if (_audio_processor) {
                _audio_processor->flush ();
        }

        _audio_merger.clear ();
        for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
                _active_texts[i].clear ();
        }

        for (auto i: _pieces) {
                if (time < i->content->position()) {
                        /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
                           we must seek this (following) content accurately, otherwise when we come to the end of the current
                           content we may not start right at the beginning of the next, causing a gap (if the next content has
                           been trimmed to a point between keyframes, or something).
                        */
                        i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
                        i->done = false;
                } else if (i->content->position() <= time && time < i->content->end(_film)) {
                        /* During; seek to position */
                        i->decoder->seek (dcp_to_content_time (i, time), accurate);
                        i->done = false;
                } else {
                        /* After; this piece is done */
                        i->done = true;
                }
        }

        if (accurate) {
                _next_video_time = time;
                _next_video_eyes = Eyes::LEFT;
                _next_audio_time = time;
        } else {
                _next_video_time = boost::none;
                _next_video_eyes = boost::none;
                _next_audio_time = boost::none;
        }

        _black.set_position (time);
        _silent.set_position (time);

        _last_video.clear ();
}


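/** Queue a video frame for emission.  Frames are delayed slightly so that any
 *  subtitles which arrive just after their video can still be attached to it.
 */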
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
        if (!_film->three_d()) {
                if (pv->eyes() == Eyes::LEFT) {
                        /* Use left-eye images for both eyes... */
                        pv->set_eyes (Eyes::BOTH);
                } else if (pv->eyes() == Eyes::RIGHT) {
                        /* ...and discard the right */
                        return;
                }
        }

        /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
           player before the video that requires them.
        */
        _delay.push_back (make_pair (pv, time));

        if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
                _next_video_time = time + one_video_frame();
        }
        _next_video_eyes = increment_eyes (pv->eyes());

        if (_delay.size() < 3) {
                return;
        }

        auto to_do = _delay.front();
        _delay.pop_front();
        do_emit_video (to_do.first, to_do.second);
}


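/** Attach any open subtitles for this frame to pv and emit it via the Video signal */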
void
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
        if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
                for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
                        _active_texts[i].clear_before (time);
                }
        }

        auto subtitles = open_subtitles_for_frame (time);
        if (subtitles) {
                pv->set_text (subtitles.get ());
        }

        Video (pv, time);
}


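/** Emit some audio via the Audio signal, checking that it follows on from what we emitted before */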
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
        /* Log if the assert below is about to fail */
        if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
                _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
        }

        /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
        DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
        Audio (data, time, _film->audio_frame_rate());
        _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}


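/** Emit silence to fill the given period, in blocks of up to half a second */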
void
Player::fill_audio (DCPTimePeriod period)
{
        if (period.from == period.to) {
                return;
        }

        DCPOMATIC_ASSERT (period.from < period.to);

        DCPTime t = period.from;
        while (t < period.to) {
                DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
                Frame const samples = block.frames_round(_film->audio_frame_rate());
                if (samples) {
                        auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
                        silence->make_silent ();
                        emit_audio (silence, t);
                }
                t += block;
        }
}


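/** @return The length of one video frame at the film's video frame rate */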
DCPTime
Player::one_video_frame () const
{
        return DCPTime::from_frames (1, _film->video_frame_rate ());
}


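/** Discard the part of some audio that falls before discard_to.
 *  @return The remaining audio and the DCP time at which it now starts, or a null
 *  pointer if nothing remains.
 */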
pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
{
        auto const discard_time = discard_to - time;
        auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
        auto remaining_frames = audio->frames() - discard_frames;
        if (remaining_frames <= 0) {
                return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
        }
        auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
        return make_pair(cut, time + discard_time);
}


void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
        Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

        {
                boost::mutex::scoped_lock lm (_mutex);

                if (reduction == _dcp_decode_reduction) {
                        lm.unlock ();
                        Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
                        return;
                }

                _dcp_decode_reduction = reduction;
                setup_pieces_unlocked ();
        }

        Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}


optional<DCPTime>
Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
{
        boost::mutex::scoped_lock lm (_mutex);

        for (auto i: _pieces) {
                if (i->content == content) {
                        return content_time_to_dcp (i, t);
                }
        }

        /* We couldn't find this content; perhaps things are being changed over */
        return {};
}


shared_ptr<const Playlist>
Player::playlist () const
{
        return _playlist ? _playlist : _film->playlist();
}


void
Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
{
        if (_suspended) {
                return;
        }

        auto piece = weak_piece.lock ();
        DCPOMATIC_ASSERT (piece);

        auto const vfr = _film->video_frame_rate();

        DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
        if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
                return;
        }

        Atmos (data.data, dcp_time, data.metadata);
}