Use atomic for _fast.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "referenced_reel_asset.h"
51 #include "render_text.h"
52 #include "shuffler.h"
53 #include "text_content.h"
54 #include "text_decoder.h"
55 #include "timer.h"
56 #include "video_decoder.h"
57 #include <dcp/reel.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <dcp/reel_picture_asset.h>
60 #include <dcp/reel_sound_asset.h>
61 #include <dcp/reel_subtitle_asset.h>
62 #include <algorithm>
63 #include <iostream>
64 #include <stdint.h>
65
66 #include "i18n.h"
67
68
69 using std::copy;
70 using std::cout;
71 using std::dynamic_pointer_cast;
72 using std::list;
73 using std::make_pair;
74 using std::make_shared;
75 using std::make_shared;
76 using std::max;
77 using std::min;
78 using std::min;
79 using std::pair;
80 using std::shared_ptr;
81 using std::vector;
82 using std::weak_ptr;
83 using boost::optional;
84 using boost::scoped_ptr;
85 #if BOOST_VERSION >= 106100
86 using namespace boost::placeholders;
87 #endif
88 using namespace dcpomatic;
89
90
/* Identifiers for the properties that the Player reports via its Change signal.
   The 700-range keeps them distinct from other property enumerations used with
   the same signal machinery.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
97
98
/** Construct a Player which plays the whole of a film's own playlist.
 *  @param film Film to play.
 *  @param subtitle_alignment Alignment to use when rendering subtitles.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}
113
114
/** Construct a Player which plays an explicitly-given playlist rather than
 *  the film's own one (leaving _playlist unset means "use the film's").
 *  @param film Film whose parameters (frame rates etc.) to use.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
129
130
/** Shared second-stage construction: connect change signals, size the output,
 *  build the pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up any existing audio processor configured on the film */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
147
148
/** Thread-safe wrapper: take _mutex then rebuild the pieces. */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
155
156
157 bool
158 have_video (shared_ptr<const Content> content)
159 {
160         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
161 }
162
163
164 bool
165 have_audio (shared_ptr<const Content> content)
166 {
167         return static_cast<bool>(content->audio) && content->can_be_played();
168 }
169
170
/** Rebuild _pieces (one Piece per playable content item), wiring each decoder's
 *  output signals to our handlers, and reset playback state.  Caller must hold
 *  _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		/* The Shuffler re-orders 3D L/R frames before passing them to Player::video */
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use this content's decoder from before the rebuild, if there was one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		/* Connect the decoder's outputs to our handlers */
		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record the starting state (piece and position) of every audio stream */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Only in-use 2D video takes part in the overlap calculation; 3D L/R halves are excluded */
	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Trackers for gaps where we must emit black video / silence */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset the emission positions */
	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}
308
309
/** Handle a change to some content in the playlist.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param property Property that changed.
 *  @param frequent true if this change is one of a rapid series (e.g. from a slider drag).
 *
 *  PENDING/CANCELLED bracket a change with a suspend count so that pass() and
 *  seek() do nothing while our pieces may be out of date.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			/* Crop changes don't need new pieces; just update the metadata of any video we are delaying */
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Proxy the change on to our own listeners (e.g. the butler) */
	Change (type, property, frequent);
}
339
340
/** Set the size of the container that we emit video into, rebuilding the
 *  black frame to match.  Emits PENDING then DONE (or CANCELLED if the size
 *  is unchanged).
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; unlock before emitting so listeners can call back into us */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Pre-render a black frame at the new size for gap filling */
		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
363
364
365 void
366 Player::playlist_change (ChangeType type)
367 {
368         if (type == ChangeType::DONE) {
369                 setup_pieces ();
370         }
371         Change (type, PlayerProperty::PLAYLIST, false);
372 }
373
374
/** Handle a change to a Film property, rebuilding state and notifying
 *  listeners as appropriate.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param p Property that changed.
 */
void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == ChangeType::DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && _film->audio_processor ()) {
			/* Take a fresh clone of the film's processor at our sample rate */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			/* Pending merged audio has the wrong channel count now; discard it */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_merger.clear ();
		}
	}
}
405
406
/** @return A black PlayerVideo frame at the current container size, used to
 *  fill gaps where there is no video content.
 *  @param eyes Eye(s) that the frame is for.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		/* error */ false
	);
}
425
426
/** Convert a DCP timeline time to a frame index within some piece's video content.
 *  @param piece Piece containing the content.
 *  @param t Time on the DCP timeline.
 *  @return Frame index in the content, accounting for position, trim and frame-rate change.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
443
444
445 DCPTime
446 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
447 {
448         /* See comment in dcp_to_content_video */
449         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
450         return d + piece->content->position();
451 }
452
453
/** Convert a DCP timeline time to an audio frame index within some piece's content,
 *  at the film's audio sample rate.
 *  @param piece Piece containing the content.
 *  @param t Time on the DCP timeline.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}
462
463
464 DCPTime
465 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
466 {
467         /* See comment in dcp_to_content_video */
468         return DCPTime::from_frames (f, _film->audio_frame_rate())
469                 - DCPTime (piece->content->trim_start(), piece->frc)
470                 + piece->content->position();
471 }
472
473
/** Convert a DCP timeline time to a ContentTime within some piece's content,
 *  clamping to the content's trimmed extent.
 *  @param piece Piece containing the content.
 *  @param t Time on the DCP timeline.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
481
482
483 DCPTime
484 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
485 {
486         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
487 }
488
489
490 vector<shared_ptr<Font>>
491 Player::get_subtitle_fonts ()
492 {
493         boost::mutex::scoped_lock lm (_mutex);
494
495         vector<shared_ptr<Font>> fonts;
496         for (auto piece: _pieces) {
497                 for (auto text: piece->content->text) {
498                         auto text_fonts = text->fonts();
499                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
500                 }
501         }
502
503         return fonts;
504 }
505
506
/** Set this player never to produce any video data.
 *  Rebuilds the pieces so that decoders pick up the new flag.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
	setup_pieces();
}
514
515
/** Set this player never to produce any audio data.
 *  Rebuilds the pieces so that decoders pick up the new flag.
 */
void
Player::set_ignore_audio ()
{
	_ignore_audio = true;
	setup_pieces();
}
522
523
/** Set this player never to produce any text (subtitle/caption) data.
 *  Rebuilds the pieces so that decoders pick up the new flag.
 */
void
Player::set_ignore_text ()
{
	_ignore_text = true;
	setup_pieces();
}
530
531
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  No piece rebuild is needed; the flag is consulted at emit time.
 */
void
Player::set_always_burn_open_subtitles ()
{
	_always_burn_open_subtitles = true;
}
538
539
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Rebuilds the pieces so that decoders are created in fast mode.
 */
void
Player::set_fast ()
{
	_fast = true;
	setup_pieces();
}
547
548
/** Make the player decode and play content from referenced DCPs, rather than
 *  skipping it on the assumption that the referenced assets will be used directly.
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
}
556
557
/** Trim a reel asset and add it to a list, if any of it remains after trimming.
 *  Note that this modifies the asset's entry point and duration in place.
 *  @param a List to add to.
 *  @param r Asset to trim and add.
 *  @param reel_trim_start Frames to trim from the start of the reel.
 *  @param reel_trim_end Frames to trim from the end of the reel.
 *  @param from Time on the DCP timeline at which the (trimmed) asset starts.
 *  @param ffr Film video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
570
571
/** @return Details of all the reel assets from DCP content which are marked to be
 *  referenced (re-used directly) rather than re-encoded, with their trims applied
 *  and their positions on the DCP timeline.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> reel_assets;

	for (auto content: playlist()->content()) {
		auto dcp = dynamic_pointer_cast<DCPContent>(content);
		if (!dcp) {
			continue;
		}

		/* Only DCP content with at least one referenced asset type is of interest */
		if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* If we can't decode this DCP we just return what we have so far */
			return reel_assets;
		}

		auto const frame_rate = _film->video_frame_rate();
		DCPOMATIC_ASSERT (dcp->video_frame_rate());
		/* We should only be referencing if the DCP rate is the same as the film rate */
		DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);

		Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
		Frame const trim_end = dcp->trim_end().frames_round(frame_rate);

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto reel: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += reel->main_picture()->actual_duration();
		}

		for (auto reel: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = reel->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
			if (dcp->reference_video()) {
				maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_audio()) {
				maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
				for (auto caption: reel->closed_captions()) {
					maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return reel_assets;
}
648
649
650 bool
651 Player::pass ()
652 {
653         boost::mutex::scoped_lock lm (_mutex);
654
655         if (_suspended) {
656                 /* We can't pass in this state */
657                 LOG_DEBUG_PLAYER_NC ("Player is suspended");
658                 return false;
659         }
660
661         if (_playback_length == DCPTime()) {
662                 /* Special; just give one black frame */
663                 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
664                 return true;
665         }
666
667         /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
668
669         shared_ptr<Piece> earliest_content;
670         optional<DCPTime> earliest_time;
671
672         for (auto i: _pieces) {
673                 if (i->done) {
674                         continue;
675                 }
676
677                 auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
678                 if (t > i->content->end(_film)) {
679                         i->done = true;
680                 } else {
681
682                         /* Given two choices at the same time, pick the one with texts so we see it before
683                            the video.
684                         */
685                         if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
686                                 earliest_time = t;
687                                 earliest_content = i;
688                         }
689                 }
690         }
691
692         bool done = false;
693
694         enum {
695                 NONE,
696                 CONTENT,
697                 BLACK,
698                 SILENT
699         } which = NONE;
700
701         if (earliest_content) {
702                 which = CONTENT;
703         }
704
705         if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
706                 earliest_time = _black.position ();
707                 which = BLACK;
708         }
709
710         if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
711                 earliest_time = _silent.position ();
712                 which = SILENT;
713         }
714
715         switch (which) {
716         case CONTENT:
717         {
718                 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
719                 earliest_content->done = earliest_content->decoder->pass ();
720                 auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
721                 if (dcp && !_play_referenced && dcp->reference_audio()) {
722                         /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
723                            to `hide' the fact that no audio was emitted during the referenced DCP (though
724                            we need to behave as though it was).
725                         */
726                         _next_audio_time = dcp->end (_film);
727                 }
728                 break;
729         }
730         case BLACK:
731                 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
732                 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
733                 _black.set_position (_black.position() + one_video_frame());
734                 break;
735         case SILENT:
736         {
737                 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
738                 DCPTimePeriod period (_silent.period_at_position());
739                 if (_next_audio_time) {
740                         /* Sometimes the thing that happened last finishes fractionally before
741                            or after this silence.  Bodge the start time of the silence to fix it.
742                            I think this is nothing to worry about since we will just add or
743                            remove a little silence at the end of some content.
744                         */
745                         int64_t const error = labs(period.from.get() - _next_audio_time->get());
746                         /* Let's not worry about less than a frame at 24fps */
747                         int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
748                         if (error >= too_much_error) {
749                                 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
750                         }
751                         DCPOMATIC_ASSERT (error < too_much_error);
752                         period.from = *_next_audio_time;
753                 }
754                 if (period.duration() > one_video_frame()) {
755                         period.to = period.from + one_video_frame();
756                 }
757                 fill_audio (period);
758                 _silent.set_position (period.to);
759                 break;
760         }
761         case NONE:
762                 done = true;
763                 break;
764         }
765
766         /* Emit any audio that is ready */
767
768         /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
769            of our streams, or the position of the _silent.  First, though we choose only streams that are less than
770            ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
771            behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
772            that will never come, causing bugs like #2101.
773         */
774         constexpr int ignore_streams_behind = 5;
775
776         using state_pair = std::pair<AudioStreamPtr, StreamState>;
777
778         /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
779         auto latest_last_push_end = std::max_element(
780                 _stream_states.begin(),
781                 _stream_states.end(),
782                 [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
783                 );
784
785         if (latest_last_push_end != _stream_states.end()) {
786                 LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
787         }
788
789         /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
790         std::map<AudioStreamPtr, StreamState> alive_stream_states;
791         for (auto const& i: _stream_states) {
792                 if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
793                         alive_stream_states.insert(i);
794                 } else {
795                         LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
796                 }
797         }
798
799         auto pull_to = _playback_length;
800         for (auto const& i: alive_stream_states) {
801                 if (!i.second.piece->done && i.second.last_push_end < pull_to) {
802                         pull_to = i.second.last_push_end;
803                 }
804         }
805         if (!_silent.done() && _silent.position() < pull_to) {
806                 pull_to = _silent.position();
807         }
808
809         LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
810         auto audio = _audio_merger.pull (pull_to);
811         for (auto i = audio.begin(); i != audio.end(); ++i) {
812                 if (_next_audio_time && i->second < *_next_audio_time) {
813                         /* This new data comes before the last we emitted (or the last seek); discard it */
814                         auto cut = discard_audio (i->first, i->second, *_next_audio_time);
815                         if (!cut.first) {
816                                 continue;
817                         }
818                         *i = cut;
819                 } else if (_next_audio_time && i->second > *_next_audio_time) {
820                         /* There's a gap between this data and the last we emitted; fill with silence */
821                         fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
822                 }
823
824                 emit_audio (i->first, i->second);
825         }
826
827         if (done) {
828                 if (_shuffler) {
829                         _shuffler->flush ();
830                 }
831                 for (auto const& i: _delay) {
832                         do_emit_video(i.first, i.second);
833                 }
834
835                 /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
836                  * However, if we have L and R video files, and one is shorter than the other,
837                  * the fill code in ::video mostly takes care of filling in the gaps.
838                  * However, since it fills at the point when it knows there is more video coming
839                  * at time t (so it should fill any gap up to t) it can't do anything right at the
840                  * end.  This is particularly bad news if the last frame emitted is a LEFT
841                  * eye, as the MXF writer will complain about the 3D sequence being wrong.
842                  * Here's a hack to workaround that particular case.
843                  */
844                 if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
845                         do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
846                 }
847         }
848
849         return done;
850 }
851
852
853 /** @return Open subtitles for the frame at the given time, converted to images */
854 optional<PositionImage>
855 Player::open_subtitles_for_frame (DCPTime time) const
856 {
857         list<PositionImage> captions;
858         int const vfr = _film->video_frame_rate();
859
860         for (
861                 auto j:
862                 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
863                 ) {
864
865                 /* Bitmap subtitles */
866                 for (auto i: j.bitmap) {
867                         if (!i.image) {
868                                 continue;
869                         }
870
871                         /* i.image will already have been scaled to fit _video_container_size */
872                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
873
874                         captions.push_back (
875                                 PositionImage (
876                                         i.image,
877                                         Position<int> (
878                                                 lrint(_video_container_size.width * i.rectangle.x),
879                                                 lrint(_video_container_size.height * i.rectangle.y)
880                                                 )
881                                         )
882                                 );
883                 }
884
885                 /* String subtitles (rendered to an image) */
886                 if (!j.string.empty()) {
887                         auto s = render_text(j.string, _video_container_size, time, vfr);
888                         copy (s.begin(), s.end(), back_inserter (captions));
889                 }
890         }
891
892         if (captions.empty()) {
893                 return {};
894         }
895
896         return merge (captions, _subtitle_alignment);
897 }
898
899
/** Handler for video data arriving from a decoder.
 *  @param weak_piece Piece that the data came from.
 *  @param video Video data itself.
 */
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	if (_suspended) {
		/* We are in the middle of a change; discard this data */
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* The frame-rate change requires frames to be skipped; drop odd-numbered ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* The last frame this piece emitted, if any, to be repeated as filler */
			auto last = _last_video.find (weak_piece);
			if (_film->three_d()) {
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Emit left/right frames until we reach both fill_to and the required eye */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == Eyes::RIGHT) {
						/* Time only advances once a full left/right pair has gone out */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						/* Repeat the last frame this piece emitted... */
						emit_video (last->second, j);
					} else {
						/* ...or black if it hasn't emitted anything yet */
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	/* Build the PlayerVideo for this frame and remember it so that it can be used to
	   fill any future gap from this piece.
	*/
	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade (_film, video.frame),
		scale_for_display(
			content_video->scaled_size(_film->frame_size()),
			_video_container_size,
			_film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeated as the frame-rate change requires, but never past the
	   end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}
1019
1020
/** Handler for audio data arriving from a decoder.
 *  @param weak_piece Piece that the data came from.
 *  @param stream Stream within the piece that the data belongs to.
 *  @param content_audio Audio data itself.
 */
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	if (_suspended) {
		/* We are in the middle of a change; discard this data */
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this content after resampling for the film */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* Keep only the part that falls before the end of the content */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Take a copy before modifying the samples */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record how far this stream has got so that ::pass can decide how much merged audio is ready */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1109
1110
1111 void
1112 Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
1113 {
1114         if (_suspended) {
1115                 return;
1116         }
1117
1118         auto piece = weak_piece.lock ();
1119         auto content = weak_content.lock ();
1120         if (!piece || !content) {
1121                 return;
1122         }
1123
1124         PlayerText ps;
1125         for (auto& sub: subtitle.subs)
1126         {
1127                 /* Apply content's subtitle offsets */
1128                 sub.rectangle.x += content->x_offset ();
1129                 sub.rectangle.y += content->y_offset ();
1130
1131                 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1132                 sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
1133                 sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
1134
1135                 /* Apply content's subtitle scale */
1136                 sub.rectangle.width *= content->x_scale ();
1137                 sub.rectangle.height *= content->y_scale ();
1138
1139                 auto image = sub.image;
1140
1141                 /* We will scale the subtitle up to fit _video_container_size */
1142                 int const width = sub.rectangle.width * _video_container_size.width;
1143                 int const height = sub.rectangle.height * _video_container_size.height;
1144                 if (width == 0 || height == 0) {
1145                         return;
1146                 }
1147
1148                 dcp::Size scaled_size (width, height);
1149                 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
1150         }
1151
1152         DCPTime from(content_time_to_dcp(piece, subtitle.from()));
1153         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1154 }
1155
1156
1157 void
1158 Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
1159 {
1160         if (_suspended) {
1161                 return;
1162         }
1163
1164         auto piece = weak_piece.lock ();
1165         auto content = weak_content.lock ();
1166         if (!piece || !content) {
1167                 return;
1168         }
1169
1170         PlayerText ps;
1171         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
1172
1173         if (from > piece->content->end(_film)) {
1174                 return;
1175         }
1176
1177         for (auto s: subtitle.subs) {
1178                 s.set_h_position (s.h_position() + content->x_offset());
1179                 s.set_v_position (s.v_position() + content->y_offset());
1180                 float const xs = content->x_scale();
1181                 float const ys = content->y_scale();
1182                 float size = s.size();
1183
1184                 /* Adjust size to express the common part of the scaling;
1185                    e.g. if xs = ys = 0.5 we scale size by 2.
1186                 */
1187                 if (xs > 1e-5 && ys > 1e-5) {
1188                         size *= 1 / min (1 / xs, 1 / ys);
1189                 }
1190                 s.set_size (size);
1191
1192                 /* Then express aspect ratio changes */
1193                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1194                         s.set_aspect_adjust (xs / ys);
1195                 }
1196
1197                 s.set_in (dcp::Time(from.seconds(), 1000));
1198                 ps.string.push_back (s);
1199         }
1200
1201         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1202 }
1203
1204
/** Handler for a decoder signalling the end of a period of text.
 *  @param weak_piece Piece that the data came from.
 *  @param weak_content Text content within the piece.
 *  @param to Time (in the content) at which the text ends.
 */
void
Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
{
	if (_suspended) {
		/* We are in the middle of a change; discard this data */
		return;
	}

	auto content = weak_content.lock ();
	if (!content) {
		return;
	}

	if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
		/* We have no record of this text starting, so there is nothing to stop */
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	DCPTime const dcp_to = content_time_to_dcp (piece, to);

	if (dcp_to > piece->content->end(_film)) {
		return;
	}

	auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);

	/* Emit the now-complete text over the Text signal, unless it is going to be burnt
	   into the image instead.
	*/
	bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (content->use() && !always && !content->burn()) {
		Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
	}
}
1239
1240
/** Seek the player to a DCP time.
 *  @param time Time to seek to.
 *  @param accurate true to seek to exactly the given time; false if the seek may land
 *  earlier (e.g. on a keyframe), in which case the `next time' trackers are cleared.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Discard all in-flight data from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We expect the next data to be at exactly this time */
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		/* We can't say where the decoders will resume */
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer suitable for gap-filling */
	_last_video.clear ();
}
1301
1302
/** Queue a frame of video for emission, slightly delayed so that any related
 *  subtitles have a chance to arrive before it goes out.
 *  @param pv Video frame.
 *  @param time Time at which it should be emitted.
 */
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	if (!_film->three_d()) {
		if (pv->eyes() == Eyes::LEFT) {
			/* Use left-eye images for both eyes... */
			pv->set_eyes (Eyes::BOTH);
		} else if (pv->eyes() == Eyes::RIGHT) {
			/* ...and discard the right */
			return;
		}
	}

	/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
	   player before the video that requires them.
	*/
	_delay.push_back (make_pair (pv, time));

	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		/* This frame completes the video at `time', so the next emission is one frame later */
		_next_video_time = time + one_video_frame();
	}
	_next_video_eyes = increment_eyes (pv->eyes());

	/* Keep the two most recent frames back in _delay */
	if (_delay.size() < 3) {
		return;
	}

	auto to_do = _delay.front();
	_delay.pop_front();
	do_emit_video (to_do.first, to_do.second);
}
1334
1335
1336 void
1337 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1338 {
1339         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1340                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1341                         _active_texts[i].clear_before (time);
1342                 }
1343         }
1344
1345         auto subtitles = open_subtitles_for_frame (time);
1346         if (subtitles) {
1347                 pv->set_text (subtitles.get ());
1348         }
1349
1350         Video (pv, time);
1351 }
1352
1353
1354 void
1355 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1356 {
1357         /* Log if the assert below is about to fail */
1358         if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1359                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1360         }
1361
1362         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1363         DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1364         Audio (data, time, _film->audio_frame_rate());
1365         _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1366 }
1367
1368
1369 void
1370 Player::fill_audio (DCPTimePeriod period)
1371 {
1372         if (period.from == period.to) {
1373                 return;
1374         }
1375
1376         DCPOMATIC_ASSERT (period.from < period.to);
1377
1378         DCPTime t = period.from;
1379         while (t < period.to) {
1380                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1381                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1382                 if (samples) {
1383                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1384                         silence->make_silent ();
1385                         emit_audio (silence, t);
1386                 }
1387                 t += block;
1388         }
1389 }
1390
1391
1392 DCPTime
1393 Player::one_video_frame () const
1394 {
1395         return DCPTime::from_frames (1, _film->video_frame_rate ());
1396 }
1397
1398
1399 pair<shared_ptr<AudioBuffers>, DCPTime>
1400 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1401 {
1402         auto const discard_time = discard_to - time;
1403         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1404         auto remaining_frames = audio->frames() - discard_frames;
1405         if (remaining_frames <= 0) {
1406                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1407         }
1408         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1409         return make_pair(cut, time + discard_time);
1410 }
1411
1412
/** Set the decode-resolution reduction to use for DCP content, rebuilding the
 *  pieces if it changes.  Emits Change(PENDING) first, then Change(DONE), or
 *  Change(CANCELLED) if the value is unchanged.
 *  @param reduction New reduction, or boost::none for no reduction.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; release the lock before emitting the signal so we don't
			   call out with _mutex held.
			*/
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1433
1434
1435 optional<DCPTime>
1436 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
1437 {
1438         boost::mutex::scoped_lock lm (_mutex);
1439
1440         for (auto i: _pieces) {
1441                 if (i->content == content) {
1442                         return content_time_to_dcp (i, t);
1443                 }
1444         }
1445
1446         /* We couldn't find this content; perhaps things are being changed over */
1447         return {};
1448 }
1449
1450
1451 optional<ContentTime>
1452 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
1453 {
1454         boost::mutex::scoped_lock lm (_mutex);
1455
1456         for (auto i: _pieces) {
1457                 if (i->content == content) {
1458                         return dcp_to_content_time (i, t);
1459                 }
1460         }
1461
1462         /* We couldn't find this content; perhaps things are being changed over */
1463         return {};
1464 }
1465
1466
1467 shared_ptr<const Playlist>
1468 Player::playlist () const
1469 {
1470         return _playlist ? _playlist : _film->playlist();
1471 }
1472
1473
1474 void
1475 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1476 {
1477         if (_suspended) {
1478                 return;
1479         }
1480
1481         auto piece = weak_piece.lock ();
1482         DCPOMATIC_ASSERT (piece);
1483
1484         auto const vfr = _film->video_frame_rate();
1485
1486         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1487         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1488                 return;
1489         }
1490
1491         Atmos (data.data, dcp_time, data.metadata);
1492 }
1493