2793bd62a9e70583b5961f08506fa05f08e2d017
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "referenced_reel_asset.h"
51 #include "render_text.h"
52 #include "shuffler.h"
53 #include "text_content.h"
54 #include "text_decoder.h"
55 #include "timer.h"
56 #include "video_decoder.h"
57 #include <dcp/reel.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <dcp/reel_picture_asset.h>
60 #include <dcp/reel_sound_asset.h>
61 #include <dcp/reel_subtitle_asset.h>
62 #include <algorithm>
63 #include <iostream>
64 #include <stdint.h>
65
66 #include "i18n.h"
67
68
69 using std::copy;
70 using std::cout;
71 using std::dynamic_pointer_cast;
72 using std::list;
73 using std::make_pair;
74 using std::make_shared;
75 using std::make_shared;
76 using std::max;
77 using std::min;
78 using std::min;
79 using std::pair;
80 using std::shared_ptr;
81 using std::vector;
82 using std::weak_ptr;
83 using boost::optional;
84 using boost::scoped_ptr;
85 #if BOOST_VERSION >= 106100
86 using namespace boost::placeholders;
87 #endif
88 using namespace dcpomatic;
89
90
/* Identifiers for Player properties, passed to observers via the Change signal.
   Values start at 700 — presumably to keep them distinct from other property
   code ranges used elsewhere in the project (TODO confirm).
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
97
98
/** Construct a Player which plays the whole of a film's own playlist.
 *  @param film Film to play.
 *  @param subtitle_alignment Alignment to use for images of burnt-in subtitles.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}
114
115
/** Construct a Player which plays an explicitly-given playlist rather than
 *  the film's own (used, for example, when only part of the content should play).
 *  @param film Film whose settings (frame rate etc.) to use.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
131
132
/** Shared second-stage construction: wire up change signals, size the video
 *  container, build the pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor, if any, as if it had just been set */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
155 }
156
157
158 bool
159 have_audio (shared_ptr<const Content> content)
160 {
161         return static_cast<bool>(content->audio) && content->can_be_played();
162 }
163
164
/** Rebuild the list of Pieces (content + decoder pairs) from the playlist,
 *  re-using decoders from the old pieces where the content is unchanged.
 *  Also resets stream states, black/silence fillers and the next-emission times.
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);

	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		/* 3D L/R video data can arrive out of sequence; the Shuffler re-orders it */
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		/* Skip content whose files are missing */
		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use this content's decoder from the previous setup, if there was one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		/* DCP content needs to know whether referenced parts should actually be decoded */
		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		/* Connect decoder outputs to our handlers; pieces are passed as weak_ptr
		   so a queued emission cannot keep a dead piece alive.
		*/
		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record, per audio stream, which piece it belongs to and where it starts */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Video from a piece is suppressed while later content's video overlaps it;
	   3D L/R eye content is exempt since both eyes must play together.
	*/
	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					/* NOTE(review): each later overlapping piece overwrites ignore_video,
					   so only the last overlap found is kept — confirm this is intended. */
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Fillers for the periods where no content provides video/audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}
304
305
306 void
307 Player::playlist_content_change (ChangeType type, int property, bool frequent)
308 {
309         if (property == VideoContentProperty::CROP) {
310                 if (type == ChangeType::DONE) {
311                         auto const vcs = video_container_size();
312                         boost::mutex::scoped_lock lm (_mutex);
313                         for (auto const& i: _delay) {
314                                 i.first->reset_metadata (_film, vcs);
315                         }
316                 }
317         } else {
318                 if (type == ChangeType::PENDING) {
319                         /* The player content is probably about to change, so we can't carry on
320                            until that has happened and we've rebuilt our pieces.  Stop pass()
321                            and seek() from working until then.
322                         */
323                         ++_suspended;
324                 } else if (type == ChangeType::DONE) {
325                         /* A change in our content has gone through.  Re-build our pieces. */
326                         setup_pieces ();
327                         --_suspended;
328                 } else if (type == ChangeType::CANCELLED) {
329                         --_suspended;
330                 }
331         }
332
333         Change (type, property, frequent);
334 }
335
336
337 void
338 Player::set_video_container_size (dcp::Size s)
339 {
340         Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
341
342         if (s == _video_container_size) {
343                 Change(ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
344                 return;
345         }
346
347         _video_container_size = s;
348
349         {
350                 boost::mutex::scoped_lock lm (_mutex);
351                 _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
352                 _black_image->make_black ();
353         }
354
355         Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
356 }
357
358
359 void
360 Player::playlist_change (ChangeType type)
361 {
362         if (type == ChangeType::DONE) {
363                 setup_pieces ();
364         }
365         Change (type, PlayerProperty::PLAYLIST, false);
366 }
367
368
369 void
370 Player::film_change (ChangeType type, Film::Property p)
371 {
372         /* Here we should notice Film properties that affect our output, and
373            alert listeners that our output now would be different to how it was
374            last time we were run.
375         */
376
377         if (p == Film::Property::CONTAINER) {
378                 Change (type, PlayerProperty::FILM_CONTAINER, false);
379         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
380                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
381                    so we need new pieces here.
382                 */
383                 if (type == ChangeType::DONE) {
384                         setup_pieces ();
385                 }
386                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
387         } else if (p == Film::Property::AUDIO_PROCESSOR) {
388                 if (type == ChangeType::DONE && _film->audio_processor ()) {
389                         boost::mutex::scoped_lock lm (_mutex);
390                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
391                 }
392         } else if (p == Film::Property::AUDIO_CHANNELS) {
393                 if (type == ChangeType::DONE) {
394                         boost::mutex::scoped_lock lm (_mutex);
395                         _audio_merger.clear ();
396                 }
397         }
398 }
399
400
401 shared_ptr<PlayerVideo>
402 Player::black_player_video_frame (Eyes eyes) const
403 {
404         return std::make_shared<PlayerVideo> (
405                 std::make_shared<const RawImageProxy>(_black_image),
406                 Crop(),
407                 optional<double>(),
408                 _video_container_size,
409                 _video_container_size,
410                 eyes,
411                 Part::WHOLE,
412                 PresetColourConversion::all().front().conversion,
413                 VideoRange::FULL,
414                 std::weak_ptr<Content>(),
415                 boost::optional<Frame>(),
416                 false
417         );
418 }
419
420
421 Frame
422 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
423 {
424         auto s = t - piece->content->position ();
425         s = min (piece->content->length_after_trim(_film), s);
426         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
427
428         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
429            then convert that ContentTime to frames at the content's rate.  However this fails for
430            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
431            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
432
433            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
434         */
435         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
436 }
437
438
439 DCPTime
440 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
441 {
442         /* See comment in dcp_to_content_video */
443         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
444         return d + piece->content->position();
445 }
446
447
448 Frame
449 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
450 {
451         auto s = t - piece->content->position ();
452         s = min (piece->content->length_after_trim(_film), s);
453         /* See notes in dcp_to_content_video */
454         return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
455 }
456
457
458 DCPTime
459 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
460 {
461         /* See comment in dcp_to_content_video */
462         return DCPTime::from_frames (f, _film->audio_frame_rate())
463                 - DCPTime (piece->content->trim_start(), piece->frc)
464                 + piece->content->position();
465 }
466
467
468 ContentTime
469 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
470 {
471         auto s = t - piece->content->position ();
472         s = min (piece->content->length_after_trim(_film), s);
473         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
474 }
475
476
477 DCPTime
478 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
479 {
480         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
481 }
482
483
484 vector<shared_ptr<Font>>
485 Player::get_subtitle_fonts ()
486 {
487         boost::mutex::scoped_lock lm (_mutex);
488
489         vector<shared_ptr<Font>> fonts;
490         for (auto piece: _pieces) {
491                 for (auto text: piece->content->text) {
492                         auto text_fonts = text->fonts();
493                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
494                 }
495         }
496
497         return fonts;
498 }
499
500
501 /** Set this player never to produce any video data */
502 void
503 Player::set_ignore_video ()
504 {
505         _ignore_video = true;
506         setup_pieces();
507 }
508
509
510 void
511 Player::set_ignore_audio ()
512 {
513         _ignore_audio = true;
514         setup_pieces();
515 }
516
517
518 void
519 Player::set_ignore_text ()
520 {
521         _ignore_text = true;
522         setup_pieces();
523 }
524
525
526 /** Set the player to always burn open texts into the image regardless of the content settings */
527 void
528 Player::set_always_burn_open_subtitles ()
529 {
530         _always_burn_open_subtitles = true;
531 }
532
533
534 /** Sets up the player to be faster, possibly at the expense of quality */
535 void
536 Player::set_fast ()
537 {
538         _fast = true;
539         setup_pieces();
540 }
541
542
543 void
544 Player::set_play_referenced ()
545 {
546         _play_referenced = true;
547         setup_pieces();
548 }
549
550
551 static void
552 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
553 {
554         DCPOMATIC_ASSERT (r);
555         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
556         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
557         if (r->actual_duration() > 0) {
558                 a.push_back (
559                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
560                         );
561         }
562 }
563
564
/** @return the reel assets from any DCP content which is marked as referenced
 *  (video/audio/subtitle/caption), each trimmed and positioned per the content's
 *  trim and position settings.  Returns what has been gathered so far if a DCP
 *  fails to be read.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> reel_assets;

	for (auto content: playlist()->content()) {
		auto dcp = dynamic_pointer_cast<DCPContent>(content);
		if (!dcp) {
			continue;
		}

		/* Only DCPs which reference at least one asset type are of interest */
		if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* Could not read this DCP; give up and return what we have so far */
			return reel_assets;
		}

		auto const frame_rate = _film->video_frame_rate();
		DCPOMATIC_ASSERT (dcp->video_frame_rate());
		/* We should only be referencing if the DCP rate is the same as the film rate */
		DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);

		Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
		Frame const trim_end = dcp->trim_end().frames_round(frame_rate);

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto reel: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += reel->main_picture()->actual_duration();
		}

		for (auto reel: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = reel->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* Where this reel starts in the DCP, accounting for the start trim */
			auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
			if (dcp->reference_video()) {
				maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_audio()) {
				maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
				for (auto caption: reel->closed_captions()) {
					maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return reel_assets;
}
641
642
643 bool
644 Player::pass ()
645 {
646         boost::mutex::scoped_lock lm (_mutex);
647
648         if (_suspended) {
649                 /* We can't pass in this state */
650                 LOG_DEBUG_PLAYER_NC ("Player is suspended");
651                 return false;
652         }
653
654         if (_playback_length == DCPTime()) {
655                 /* Special; just give one black frame */
656                 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
657                 return true;
658         }
659
660         /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
661
662         shared_ptr<Piece> earliest_content;
663         optional<DCPTime> earliest_time;
664
665         for (auto i: _pieces) {
666                 if (i->done) {
667                         continue;
668                 }
669
670                 auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
671                 if (t > i->content->end(_film)) {
672                         i->done = true;
673                 } else {
674
675                         /* Given two choices at the same time, pick the one with texts so we see it before
676                            the video.
677                         */
678                         if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
679                                 earliest_time = t;
680                                 earliest_content = i;
681                         }
682                 }
683         }
684
685         bool done = false;
686
687         enum {
688                 NONE,
689                 CONTENT,
690                 BLACK,
691                 SILENT
692         } which = NONE;
693
694         if (earliest_content) {
695                 which = CONTENT;
696         }
697
698         if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
699                 earliest_time = _black.position ();
700                 which = BLACK;
701         }
702
703         if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
704                 earliest_time = _silent.position ();
705                 which = SILENT;
706         }
707
708         switch (which) {
709         case CONTENT:
710         {
711                 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
712                 earliest_content->done = earliest_content->decoder->pass ();
713                 auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
714                 if (dcp && !_play_referenced && dcp->reference_audio()) {
715                         /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
716                            to `hide' the fact that no audio was emitted during the referenced DCP (though
717                            we need to behave as though it was).
718                         */
719                         _next_audio_time = dcp->end (_film);
720                 }
721                 break;
722         }
723         case BLACK:
724                 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
725                 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
726                 _black.set_position (_black.position() + one_video_frame());
727                 break;
728         case SILENT:
729         {
730                 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
731                 DCPTimePeriod period (_silent.period_at_position());
732                 if (_next_audio_time) {
733                         /* Sometimes the thing that happened last finishes fractionally before
734                            or after this silence.  Bodge the start time of the silence to fix it.
735                            I think this is nothing to worry about since we will just add or
736                            remove a little silence at the end of some content.
737                         */
738                         int64_t const error = labs(period.from.get() - _next_audio_time->get());
739                         /* Let's not worry about less than a frame at 24fps */
740                         int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
741                         if (error >= too_much_error) {
742                                 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
743                         }
744                         DCPOMATIC_ASSERT (error < too_much_error);
745                         period.from = *_next_audio_time;
746                 }
747                 if (period.duration() > one_video_frame()) {
748                         period.to = period.from + one_video_frame();
749                 }
750                 fill_audio (period);
751                 _silent.set_position (period.to);
752                 break;
753         }
754         case NONE:
755                 done = true;
756                 break;
757         }
758
759         /* Emit any audio that is ready */
760
761         /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
762            of our streams, or the position of the _silent.  First, though we choose only streams that are less than
763            ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
764            behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
765            that will never come, causing bugs like #2101.
766         */
767         constexpr int ignore_streams_behind = 5;
768
769         using state_pair = std::pair<AudioStreamPtr, StreamState>;
770
771         /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
772         auto latest_last_push_end = std::max_element(
773                 _stream_states.begin(),
774                 _stream_states.end(),
775                 [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
776                 );
777
778         if (latest_last_push_end != _stream_states.end()) {
779                 LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
780         }
781
782         /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
783         std::map<AudioStreamPtr, StreamState> alive_stream_states;
784         for (auto const& i: _stream_states) {
785                 if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
786                         alive_stream_states.insert(i);
787                 } else {
788                         LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
789                 }
790         }
791
792         auto pull_to = _playback_length;
793         for (auto const& i: alive_stream_states) {
794                 if (!i.second.piece->done && i.second.last_push_end < pull_to) {
795                         pull_to = i.second.last_push_end;
796                 }
797         }
798         if (!_silent.done() && _silent.position() < pull_to) {
799                 pull_to = _silent.position();
800         }
801
802         LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
803         auto audio = _audio_merger.pull (pull_to);
804         for (auto i = audio.begin(); i != audio.end(); ++i) {
805                 if (_next_audio_time && i->second < *_next_audio_time) {
806                         /* This new data comes before the last we emitted (or the last seek); discard it */
807                         auto cut = discard_audio (i->first, i->second, *_next_audio_time);
808                         if (!cut.first) {
809                                 continue;
810                         }
811                         *i = cut;
812                 } else if (_next_audio_time && i->second > *_next_audio_time) {
813                         /* There's a gap between this data and the last we emitted; fill with silence */
814                         fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
815                 }
816
817                 emit_audio (i->first, i->second);
818         }
819
820         if (done) {
821                 if (_shuffler) {
822                         _shuffler->flush ();
823                 }
824                 for (auto const& i: _delay) {
825                         do_emit_video(i.first, i.second);
826                 }
827
828                 /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
829                  * However, if we have L and R video files, and one is shorter than the other,
830                  * the fill code in ::video mostly takes care of filling in the gaps.
831                  * However, since it fills at the point when it knows there is more video coming
832                  * at time t (so it should fill any gap up to t) it can't do anything right at the
833                  * end.  This is particularly bad news if the last frame emitted is a LEFT
834                  * eye, as the MXF writer will complain about the 3D sequence being wrong.
835                  * Here's a hack to workaround that particular case.
836                  */
837                 if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
838                         do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
839                 }
840         }
841
842         return done;
843 }
844
845
846 /** @return Open subtitles for the frame at the given time, converted to images */
847 optional<PositionImage>
848 Player::open_subtitles_for_frame (DCPTime time) const
849 {
850         list<PositionImage> captions;
851         int const vfr = _film->video_frame_rate();
852
853         for (
854                 auto j:
855                 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
856                 ) {
857
858                 /* Bitmap subtitles */
859                 for (auto i: j.bitmap) {
860                         if (!i.image) {
861                                 continue;
862                         }
863
864                         /* i.image will already have been scaled to fit _video_container_size */
865                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
866
867                         captions.push_back (
868                                 PositionImage (
869                                         i.image,
870                                         Position<int> (
871                                                 lrint(_video_container_size.load().width * i.rectangle.x),
872                                                 lrint(_video_container_size.load().height * i.rectangle.y)
873                                                 )
874                                         )
875                                 );
876                 }
877
878                 /* String subtitles (rendered to an image) */
879                 if (!j.string.empty()) {
880                         auto s = render_text(j.string, _video_container_size, time, vfr);
881                         copy (s.begin(), s.end(), back_inserter (captions));
882                 }
883         }
884
885         if (captions.empty()) {
886                 return {};
887         }
888
889         return merge (captions, _subtitle_alignment);
890 }
891
892
/** @brief Handle a video frame arriving from a piece's decoder.
 *
 *  Discards frames which should not be emitted (suspended player, unused video,
 *  skipped frames for rate conversion, frames before the content's period or
 *  before the last accurate seek), fills any gap since the last emitted frame
 *  with repeated or black frames, then emits this frame (repeated as required
 *  by the frame-rate conversion).
 *
 *  @param weak_piece Piece that the video came from.
 *  @param video The video frame and its metadata.
 */
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		/* The piece has gone away (e.g. pieces are being rebuilt) */
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* Frame-rate conversion says to drop every other source frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_next_video_time) {
		/* Never fill before the content starts */
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any; used to repeat rather than go black */
			auto last = _last_video.find (weak_piece);
			if (_film->three_d()) {
				/* In 3D we must keep the L/R eye sequence intact, so fill eye-by-eye
				   up to (but not including) the eye of the incoming frame.
				*/
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Loop continues until both the time and the eye sequence line up */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == Eyes::RIGHT) {
						/* Only advance time after a complete L/R pair */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: simply repeat the last frame (or black) once per missing frame */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	/* Remember this frame so future gaps for this piece can be filled with it */
	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade (_film, video.frame),
		scale_for_display(
			content_video->scaled_size(_film->frame_size()),
			_video_container_size,
			_film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeated as required by the frame-rate conversion, but
	   never past the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}
1012
1013
/** @brief Handle some audio data arriving from a piece's decoder.
 *
 *  The data is trimmed to the content's period, has gain/fade applied, is
 *  remapped to the film's channel layout, optionally run through the audio
 *  processor, and finally pushed to the merger.  The order of these steps
 *  matters: trimming must happen before gain/fade so that fade coefficients
 *  line up with the frames that survive.
 *
 *  @param weak_piece Piece that the audio came from.
 *  @param stream Stream within the piece's content.
 *  @param content_audio The audio data and its frame position within the content.
 */
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		/* The piece has gone away (e.g. pieces are being rebuilt) */
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* Straddles the end of the content: keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Copy the buffers before modifying them, as content_audio.audio may be shared */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record how far this stream has got, so pass() knows when audio can safely be emitted */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1102
1103
1104 void
1105 Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
1106 {
1107         if (_suspended) {
1108                 return;
1109         }
1110
1111         auto piece = weak_piece.lock ();
1112         auto content = weak_content.lock ();
1113         if (!piece || !content) {
1114                 return;
1115         }
1116
1117         PlayerText ps;
1118         for (auto& sub: subtitle.subs)
1119         {
1120                 /* Apply content's subtitle offsets */
1121                 sub.rectangle.x += content->x_offset ();
1122                 sub.rectangle.y += content->y_offset ();
1123
1124                 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1125                 sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
1126                 sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
1127
1128                 /* Apply content's subtitle scale */
1129                 sub.rectangle.width *= content->x_scale ();
1130                 sub.rectangle.height *= content->y_scale ();
1131
1132                 auto image = sub.image;
1133
1134                 /* We will scale the subtitle up to fit _video_container_size */
1135                 int const width = sub.rectangle.width * _video_container_size.load().width;
1136                 int const height = sub.rectangle.height * _video_container_size.load().height;
1137                 if (width == 0 || height == 0) {
1138                         return;
1139                 }
1140
1141                 dcp::Size scaled_size (width, height);
1142                 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
1143         }
1144
1145         DCPTime from(content_time_to_dcp(piece, subtitle.from()));
1146         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1147 }
1148
1149
1150 void
1151 Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
1152 {
1153         if (_suspended) {
1154                 return;
1155         }
1156
1157         auto piece = weak_piece.lock ();
1158         auto content = weak_content.lock ();
1159         if (!piece || !content) {
1160                 return;
1161         }
1162
1163         PlayerText ps;
1164         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
1165
1166         if (from > piece->content->end(_film)) {
1167                 return;
1168         }
1169
1170         for (auto s: subtitle.subs) {
1171                 s.set_h_position (s.h_position() + content->x_offset());
1172                 s.set_v_position (s.v_position() + content->y_offset());
1173                 float const xs = content->x_scale();
1174                 float const ys = content->y_scale();
1175                 float size = s.size();
1176
1177                 /* Adjust size to express the common part of the scaling;
1178                    e.g. if xs = ys = 0.5 we scale size by 2.
1179                 */
1180                 if (xs > 1e-5 && ys > 1e-5) {
1181                         size *= 1 / min (1 / xs, 1 / ys);
1182                 }
1183                 s.set_size (size);
1184
1185                 /* Then express aspect ratio changes */
1186                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1187                         s.set_aspect_adjust (xs / ys);
1188                 }
1189
1190                 s.set_in (dcp::Time(from.seconds(), 1000));
1191                 ps.string.push_back (s);
1192         }
1193
1194         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1195 }
1196
1197
1198 void
1199 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1200 {
1201         if (_suspended) {
1202                 return;
1203         }
1204
1205         auto content = weak_content.lock ();
1206         if (!content) {
1207                 return;
1208         }
1209
1210         if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
1211                 return;
1212         }
1213
1214         auto piece = weak_piece.lock ();
1215         if (!piece) {
1216                 return;
1217         }
1218
1219         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1220
1221         if (dcp_to > piece->content->end(_film)) {
1222                 return;
1223         }
1224
1225         auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
1226
1227         bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1228         if (content->use() && !always && !content->burn()) {
1229                 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
1230         }
1231 }
1232
1233
/** @brief Seek the player to a new position.
 *
 *  Clears all buffered state (shuffler, video delay queue, audio processor,
 *  audio merger, active texts), seeks each piece's decoder as appropriate and
 *  resets the black/silence fillers and next-emission bookkeeping.
 *
 *  @param time Position to seek to, in DCP time.
 *  @param accurate true to try to seek to the exact frame; false to accept
 *  something close (e.g. the nearest keyframe).
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Drop any video waiting in the delay queue */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	/* For an accurate seek we know exactly where the next video/audio must start,
	   so anything arriving before that can be discarded; for an inaccurate seek we
	   must accept whatever the decoders give us first.
	*/
	if (accurate) {
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer valid for gap-filling */
	_last_video.clear ();
}
1294
1295
1296 void
1297 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1298 {
1299         if (!_film->three_d()) {
1300                 if (pv->eyes() == Eyes::LEFT) {
1301                         /* Use left-eye images for both eyes... */
1302                         pv->set_eyes (Eyes::BOTH);
1303                 } else if (pv->eyes() == Eyes::RIGHT) {
1304                         /* ...and discard the right */
1305                         return;
1306                 }
1307         }
1308
1309         /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1310            player before the video that requires them.
1311         */
1312         _delay.push_back (make_pair (pv, time));
1313
1314         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1315                 _next_video_time = time + one_video_frame();
1316         }
1317         _next_video_eyes = increment_eyes (pv->eyes());
1318
1319         if (_delay.size() < 3) {
1320                 return;
1321         }
1322
1323         auto to_do = _delay.front();
1324         _delay.pop_front();
1325         do_emit_video (to_do.first, to_do.second);
1326 }
1327
1328
1329 void
1330 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1331 {
1332         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1333                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1334                         _active_texts[i].clear_before (time);
1335                 }
1336         }
1337
1338         auto subtitles = open_subtitles_for_frame (time);
1339         if (subtitles) {
1340                 pv->set_text (subtitles.get ());
1341         }
1342
1343         Video (pv, time);
1344 }
1345
1346
1347 void
1348 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1349 {
1350         /* Log if the assert below is about to fail */
1351         if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1352                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1353         }
1354
1355         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1356         DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1357         Audio (data, time, _film->audio_frame_rate());
1358         _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1359 }
1360
1361
1362 void
1363 Player::fill_audio (DCPTimePeriod period)
1364 {
1365         if (period.from == period.to) {
1366                 return;
1367         }
1368
1369         DCPOMATIC_ASSERT (period.from < period.to);
1370
1371         DCPTime t = period.from;
1372         while (t < period.to) {
1373                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1374                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1375                 if (samples) {
1376                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1377                         silence->make_silent ();
1378                         emit_audio (silence, t);
1379                 }
1380                 t += block;
1381         }
1382 }
1383
1384
1385 DCPTime
1386 Player::one_video_frame () const
1387 {
1388         return DCPTime::from_frames (1, _film->video_frame_rate ());
1389 }
1390
1391
1392 pair<shared_ptr<AudioBuffers>, DCPTime>
1393 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1394 {
1395         auto const discard_time = discard_to - time;
1396         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1397         auto remaining_frames = audio->frames() - discard_frames;
1398         if (remaining_frames <= 0) {
1399                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1400         }
1401         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1402         return make_pair(cut, time + discard_time);
1403 }
1404
1405
1406 void
1407 Player::set_dcp_decode_reduction (optional<int> reduction)
1408 {
1409         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1410
1411         if (reduction == _dcp_decode_reduction.load()) {
1412                 Change(ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1413                 return;
1414         }
1415
1416         _dcp_decode_reduction = reduction;
1417         setup_pieces();
1418
1419         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1420 }
1421
1422
1423 optional<DCPTime>
1424 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
1425 {
1426         boost::mutex::scoped_lock lm (_mutex);
1427
1428         for (auto i: _pieces) {
1429                 if (i->content == content) {
1430                         return content_time_to_dcp (i, t);
1431                 }
1432         }
1433
1434         /* We couldn't find this content; perhaps things are being changed over */
1435         return {};
1436 }
1437
1438
1439 optional<ContentTime>
1440 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
1441 {
1442         boost::mutex::scoped_lock lm (_mutex);
1443
1444         for (auto i: _pieces) {
1445                 if (i->content == content) {
1446                         return dcp_to_content_time (i, t);
1447                 }
1448         }
1449
1450         /* We couldn't find this content; perhaps things are being changed over */
1451         return {};
1452 }
1453
1454
1455 shared_ptr<const Playlist>
1456 Player::playlist () const
1457 {
1458         return _playlist ? _playlist : _film->playlist();
1459 }
1460
1461
1462 void
1463 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1464 {
1465         if (_suspended) {
1466                 return;
1467         }
1468
1469         auto piece = weak_piece.lock ();
1470         DCPOMATIC_ASSERT (piece);
1471
1472         auto const vfr = _film->video_frame_rate();
1473
1474         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1475         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1476                 return;
1477         }
1478
1479         Atmos (data.data, dcp_time, data.metadata);
1480 }
1481