Use atomic for _video_container_size.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "referenced_reel_asset.h"
51 #include "render_text.h"
52 #include "shuffler.h"
53 #include "text_content.h"
54 #include "text_decoder.h"
55 #include "timer.h"
56 #include "video_decoder.h"
57 #include <dcp/reel.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <dcp/reel_picture_asset.h>
60 #include <dcp/reel_sound_asset.h>
61 #include <dcp/reel_subtitle_asset.h>
62 #include <algorithm>
63 #include <iostream>
64 #include <stdint.h>
65
66 #include "i18n.h"
67
68
69 using std::copy;
70 using std::cout;
71 using std::dynamic_pointer_cast;
72 using std::list;
73 using std::make_pair;
74 using std::make_shared;
75 using std::make_shared;
76 using std::max;
77 using std::min;
78 using std::min;
79 using std::pair;
80 using std::shared_ptr;
81 using std::vector;
82 using std::weak_ptr;
83 using boost::optional;
84 using boost::scoped_ptr;
85 #if BOOST_VERSION >= 106100
86 using namespace boost::placeholders;
87 #endif
88 using namespace dcpomatic;
89
90
/* Property codes passed to Player::Change; kept in the 700 range so they cannot
   collide with the codes used by other Change-emitting classes. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
97
98
/** Construct a Player which plays all the content of a film.
 *  @param film Film whose content is to be played.
 *  @param subtitle_alignment Alignment to use when positioning rendered subtitle images.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}
114
115
/** Construct a Player which plays only the content of a given playlist.
 *  @param film Film that provides global parameters (frame rate, audio rate etc.).
 *  @param playlist_ Playlist to play, used in preference to the film's own content.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
131
132
/** Shared second-stage construction: connect change signals, size the black
 *  frame to the film's container, build the pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	/* Also creates _black_image at the right size */
	set_video_container_size (_film->frame_size ());

	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
149
150
151 void
152 Player::setup_pieces ()
153 {
154         boost::mutex::scoped_lock lm (_mutex);
155         setup_pieces_unlocked ();
156 }
157
158
159 bool
160 have_video (shared_ptr<const Content> content)
161 {
162         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
163 }
164
165
166 bool
167 have_audio (shared_ptr<const Content> content)
168 {
169         return static_cast<bool>(content->audio) && content->can_be_played();
170 }
171
172
/** Rebuild _pieces (one Piece per playable content item), re-wire decoder signals,
 *  recompute the black/silent gap-fillers and reset playback positions.
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be re-used where possible */
	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		/* 3D L/R video data can arrive out of sequence, so route it through a Shuffler */
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			/* Content files are missing; skip this item */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use an existing decoder for this content if we have one, to avoid
		   expensive decoder re-creation.
		*/
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Connect the three text-event signals for every text decoder */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record the starting position of every audio stream so pass() can track
	   how far each one has been pushed.
	*/
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Empty objects describing the periods with no video / no audio, which
	   pass() will fill with black frames / silence.
	*/
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission state */
	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}
310
311
312 void
313 Player::playlist_content_change (ChangeType type, int property, bool frequent)
314 {
315         if (property == VideoContentProperty::CROP) {
316                 if (type == ChangeType::DONE) {
317                         auto const vcs = video_container_size();
318                         boost::mutex::scoped_lock lm (_mutex);
319                         for (auto const& i: _delay) {
320                                 i.first->reset_metadata (_film, vcs);
321                         }
322                 }
323         } else {
324                 if (type == ChangeType::PENDING) {
325                         /* The player content is probably about to change, so we can't carry on
326                            until that has happened and we've rebuilt our pieces.  Stop pass()
327                            and seek() from working until then.
328                         */
329                         ++_suspended;
330                 } else if (type == ChangeType::DONE) {
331                         /* A change in our content has gone through.  Re-build our pieces. */
332                         setup_pieces ();
333                         --_suspended;
334                 } else if (type == ChangeType::CANCELLED) {
335                         --_suspended;
336                 }
337         }
338
339         Change (type, property, frequent);
340 }
341
342
343 void
344 Player::set_video_container_size (dcp::Size s)
345 {
346         Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
347
348         if (s == _video_container_size) {
349                 Change(ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
350                 return;
351         }
352
353         _video_container_size = s;
354
355         {
356                 boost::mutex::scoped_lock lm (_mutex);
357                 _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
358                 _black_image->make_black ();
359         }
360
361         Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
362 }
363
364
365 void
366 Player::playlist_change (ChangeType type)
367 {
368         if (type == ChangeType::DONE) {
369                 setup_pieces ();
370         }
371         Change (type, PlayerProperty::PLAYLIST, false);
372 }
373
374
375 void
376 Player::film_change (ChangeType type, Film::Property p)
377 {
378         /* Here we should notice Film properties that affect our output, and
379            alert listeners that our output now would be different to how it was
380            last time we were run.
381         */
382
383         if (p == Film::Property::CONTAINER) {
384                 Change (type, PlayerProperty::FILM_CONTAINER, false);
385         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
386                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
387                    so we need new pieces here.
388                 */
389                 if (type == ChangeType::DONE) {
390                         setup_pieces ();
391                 }
392                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
393         } else if (p == Film::Property::AUDIO_PROCESSOR) {
394                 if (type == ChangeType::DONE && _film->audio_processor ()) {
395                         boost::mutex::scoped_lock lm (_mutex);
396                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
397                 }
398         } else if (p == Film::Property::AUDIO_CHANNELS) {
399                 if (type == ChangeType::DONE) {
400                         boost::mutex::scoped_lock lm (_mutex);
401                         _audio_merger.clear ();
402                 }
403         }
404 }
405
406
/** @return a PlayerVideo wrapping our all-black image, sized to the video container,
 *  for the given eyes (used to fill gaps where there is no video content).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		/* not an error frame */
		false
	);
}
425
426
/** Convert a DCP time to a video frame index within some content.
 *  @param piece Piece containing the content.
 *  @param t Time on the DCP timeline.
 *  @return Corresponding frame index in the content, at the content's video rate.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Time from the start of the content, clamped to its trimmed length */
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
443
444
445 DCPTime
446 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
447 {
448         /* See comment in dcp_to_content_video */
449         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
450         return d + piece->content->position();
451 }
452
453
454 Frame
455 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
456 {
457         auto s = t - piece->content->position ();
458         s = min (piece->content->length_after_trim(_film), s);
459         /* See notes in dcp_to_content_video */
460         return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
461 }
462
463
464 DCPTime
465 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
466 {
467         /* See comment in dcp_to_content_video */
468         return DCPTime::from_frames (f, _film->audio_frame_rate())
469                 - DCPTime (piece->content->trim_start(), piece->frc)
470                 + piece->content->position();
471 }
472
473
474 ContentTime
475 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
476 {
477         auto s = t - piece->content->position ();
478         s = min (piece->content->length_after_trim(_film), s);
479         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
480 }
481
482
483 DCPTime
484 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
485 {
486         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
487 }
488
489
490 vector<shared_ptr<Font>>
491 Player::get_subtitle_fonts ()
492 {
493         boost::mutex::scoped_lock lm (_mutex);
494
495         vector<shared_ptr<Font>> fonts;
496         for (auto piece: _pieces) {
497                 for (auto text: piece->content->text) {
498                         auto text_fonts = text->fonts();
499                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
500                 }
501         }
502
503         return fonts;
504 }
505
506
/** Set this player never to produce any video data.
 *  Rebuilds the pieces so decoders pick up the new setting.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
	setup_pieces();
}
514
515
/** Set this player never to produce any audio data.
 *  Rebuilds the pieces so decoders pick up the new setting.
 */
void
Player::set_ignore_audio ()
{
	_ignore_audio = true;
	setup_pieces();
}
522
523
/** Set this player never to produce any text (subtitle/caption) data.
 *  Rebuilds the pieces so decoders pick up the new setting.
 */
void
Player::set_ignore_text ()
{
	_ignore_text = true;
	setup_pieces();
}
530
531
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	/* No setup_pieces() needed: this flag is read at emission time, not decode time */
	_always_burn_open_subtitles = true;
}
538
539
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Rebuilds the pieces so decoders are created with the fast flag.
 */
void
Player::set_fast ()
{
	_fast = true;
	setup_pieces();
}
547
548
/** Set the player to decode and play content from referenced DCPs
 *  (rather than skipping it).  Rebuilds the pieces so DCP decoders
 *  pick up the new setting.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	setup_pieces();
}
555
556
557 static void
558 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
559 {
560         DCPOMATIC_ASSERT (r);
561         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
562         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
563         if (r->actual_duration() > 0) {
564                 a.push_back (
565                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
566                         );
567         }
568 }
569
570
/** @return the assets from any referenced DCPs in the playlist, each trimmed
 *  and positioned on the DCP timeline, for re-use in the DCP we are making.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> reel_assets;

	for (auto content: playlist()->content()) {
		auto dcp = dynamic_pointer_cast<DCPContent>(content);
		if (!dcp) {
			continue;
		}

		if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
			/* Nothing in this DCP is being referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* Could not read the DCP; give up with what we have so far */
			return reel_assets;
		}

		auto const frame_rate = _film->video_frame_rate();
		DCPOMATIC_ASSERT (dcp->video_frame_rate());
		/* We should only be referencing if the DCP rate is the same as the film rate */
		DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);

		Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
		Frame const trim_end = dcp->trim_end().frames_round(frame_rate);

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto reel: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += reel->main_picture()->actual_duration();
		}

		for (auto reel: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = reel->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* Where this reel starts on the DCP timeline */
			auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
			if (dcp->reference_video()) {
				maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_audio()) {
				maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
				for (auto caption: reel->closed_captions()) {
					maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return reel_assets;
}
647
648
649 bool
650 Player::pass ()
651 {
652         boost::mutex::scoped_lock lm (_mutex);
653
654         if (_suspended) {
655                 /* We can't pass in this state */
656                 LOG_DEBUG_PLAYER_NC ("Player is suspended");
657                 return false;
658         }
659
660         if (_playback_length == DCPTime()) {
661                 /* Special; just give one black frame */
662                 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
663                 return true;
664         }
665
666         /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
667
668         shared_ptr<Piece> earliest_content;
669         optional<DCPTime> earliest_time;
670
671         for (auto i: _pieces) {
672                 if (i->done) {
673                         continue;
674                 }
675
676                 auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
677                 if (t > i->content->end(_film)) {
678                         i->done = true;
679                 } else {
680
681                         /* Given two choices at the same time, pick the one with texts so we see it before
682                            the video.
683                         */
684                         if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
685                                 earliest_time = t;
686                                 earliest_content = i;
687                         }
688                 }
689         }
690
691         bool done = false;
692
693         enum {
694                 NONE,
695                 CONTENT,
696                 BLACK,
697                 SILENT
698         } which = NONE;
699
700         if (earliest_content) {
701                 which = CONTENT;
702         }
703
704         if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
705                 earliest_time = _black.position ();
706                 which = BLACK;
707         }
708
709         if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
710                 earliest_time = _silent.position ();
711                 which = SILENT;
712         }
713
714         switch (which) {
715         case CONTENT:
716         {
717                 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
718                 earliest_content->done = earliest_content->decoder->pass ();
719                 auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
720                 if (dcp && !_play_referenced && dcp->reference_audio()) {
721                         /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
722                            to `hide' the fact that no audio was emitted during the referenced DCP (though
723                            we need to behave as though it was).
724                         */
725                         _next_audio_time = dcp->end (_film);
726                 }
727                 break;
728         }
729         case BLACK:
730                 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
731                 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
732                 _black.set_position (_black.position() + one_video_frame());
733                 break;
734         case SILENT:
735         {
736                 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
737                 DCPTimePeriod period (_silent.period_at_position());
738                 if (_next_audio_time) {
739                         /* Sometimes the thing that happened last finishes fractionally before
740                            or after this silence.  Bodge the start time of the silence to fix it.
741                            I think this is nothing to worry about since we will just add or
742                            remove a little silence at the end of some content.
743                         */
744                         int64_t const error = labs(period.from.get() - _next_audio_time->get());
745                         /* Let's not worry about less than a frame at 24fps */
746                         int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
747                         if (error >= too_much_error) {
748                                 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
749                         }
750                         DCPOMATIC_ASSERT (error < too_much_error);
751                         period.from = *_next_audio_time;
752                 }
753                 if (period.duration() > one_video_frame()) {
754                         period.to = period.from + one_video_frame();
755                 }
756                 fill_audio (period);
757                 _silent.set_position (period.to);
758                 break;
759         }
760         case NONE:
761                 done = true;
762                 break;
763         }
764
765         /* Emit any audio that is ready */
766
767         /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
768            of our streams, or the position of the _silent.  First, though we choose only streams that are less than
769            ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
770            behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
771            that will never come, causing bugs like #2101.
772         */
773         constexpr int ignore_streams_behind = 5;
774
775         using state_pair = std::pair<AudioStreamPtr, StreamState>;
776
777         /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
778         auto latest_last_push_end = std::max_element(
779                 _stream_states.begin(),
780                 _stream_states.end(),
781                 [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
782                 );
783
784         if (latest_last_push_end != _stream_states.end()) {
785                 LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
786         }
787
788         /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
789         std::map<AudioStreamPtr, StreamState> alive_stream_states;
790         for (auto const& i: _stream_states) {
791                 if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
792                         alive_stream_states.insert(i);
793                 } else {
794                         LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
795                 }
796         }
797
798         auto pull_to = _playback_length;
799         for (auto const& i: alive_stream_states) {
800                 if (!i.second.piece->done && i.second.last_push_end < pull_to) {
801                         pull_to = i.second.last_push_end;
802                 }
803         }
804         if (!_silent.done() && _silent.position() < pull_to) {
805                 pull_to = _silent.position();
806         }
807
808         LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
809         auto audio = _audio_merger.pull (pull_to);
810         for (auto i = audio.begin(); i != audio.end(); ++i) {
811                 if (_next_audio_time && i->second < *_next_audio_time) {
812                         /* This new data comes before the last we emitted (or the last seek); discard it */
813                         auto cut = discard_audio (i->first, i->second, *_next_audio_time);
814                         if (!cut.first) {
815                                 continue;
816                         }
817                         *i = cut;
818                 } else if (_next_audio_time && i->second > *_next_audio_time) {
819                         /* There's a gap between this data and the last we emitted; fill with silence */
820                         fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
821                 }
822
823                 emit_audio (i->first, i->second);
824         }
825
826         if (done) {
827                 if (_shuffler) {
828                         _shuffler->flush ();
829                 }
830                 for (auto const& i: _delay) {
831                         do_emit_video(i.first, i.second);
832                 }
833
834                 /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
835                  * However, if we have L and R video files, and one is shorter than the other,
836                  * the fill code in ::video mostly takes care of filling in the gaps.
837                  * However, since it fills at the point when it knows there is more video coming
838                  * at time t (so it should fill any gap up to t) it can't do anything right at the
839                  * end.  This is particularly bad news if the last frame emitted is a LEFT
840                  * eye, as the MXF writer will complain about the 3D sequence being wrong.
841                  * Here's a hack to workaround that particular case.
842                  */
843                 if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
844                         do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
845                 }
846         }
847
848         return done;
849 }
850
851
852 /** @return Open subtitles for the frame at the given time, converted to images */
853 optional<PositionImage>
854 Player::open_subtitles_for_frame (DCPTime time) const
855 {
856         list<PositionImage> captions;
857         int const vfr = _film->video_frame_rate();
858
859         for (
860                 auto j:
861                 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
862                 ) {
863
864                 /* Bitmap subtitles */
865                 for (auto i: j.bitmap) {
866                         if (!i.image) {
867                                 continue;
868                         }
869
870                         /* i.image will already have been scaled to fit _video_container_size */
871                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
872
873                         captions.push_back (
874                                 PositionImage (
875                                         i.image,
876                                         Position<int> (
877                                                 lrint(_video_container_size.load().width * i.rectangle.x),
878                                                 lrint(_video_container_size.load().height * i.rectangle.y)
879                                                 )
880                                         )
881                                 );
882                 }
883
884                 /* String subtitles (rendered to an image) */
885                 if (!j.string.empty()) {
886                         auto s = render_text(j.string, _video_container_size, time, vfr);
887                         copy (s.begin(), s.end(), back_inserter (captions));
888                 }
889         }
890
891         if (captions.empty()) {
892                 return {};
893         }
894
895         return merge (captions, _subtitle_alignment);
896 }
897
898
899 void
900 Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
901 {
902         if (_suspended) {
903                 return;
904         }
905
906         auto piece = weak_piece.lock ();
907         if (!piece) {
908                 return;
909         }
910
911         if (!piece->content->video->use()) {
912                 return;
913         }
914
915         FrameRateChange frc (_film, piece->content);
916         if (frc.skip && (video.frame % 2) == 1) {
917                 return;
918         }
919
920         /* Time of the first frame we will emit */
921         DCPTime const time = content_video_to_dcp (piece, video.frame);
922         LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
923
924         /* Discard if it's before the content's period or the last accurate seek.  We can't discard
925            if it's after the content's period here as in that case we still need to fill any gap between
926            `now' and the end of the content's period.
927         */
928         if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
929                 return;
930         }
931
932         if (piece->ignore_video && piece->ignore_video->contains(time)) {
933                 return;
934         }
935
936         /* Fill gaps that we discover now that we have some video which needs to be emitted.
937            This is where we need to fill to.
938         */
939         DCPTime fill_to = min (time, piece->content->end(_film));
940
941         if (_next_video_time) {
942                 DCPTime fill_from = max (*_next_video_time, piece->content->position());
943
944                 /* Fill if we have more than half a frame to do */
945                 if ((fill_to - fill_from) > one_video_frame() / 2) {
946                         auto last = _last_video.find (weak_piece);
947                         if (_film->three_d()) {
948                                 auto fill_to_eyes = video.eyes;
949                                 if (fill_to_eyes == Eyes::BOTH) {
950                                         fill_to_eyes = Eyes::LEFT;
951                                 }
952                                 if (fill_to == piece->content->end(_film)) {
953                                         /* Don't fill after the end of the content */
954                                         fill_to_eyes = Eyes::LEFT;
955                                 }
956                                 auto j = fill_from;
957                                 auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
958                                 if (eyes == Eyes::BOTH) {
959                                         eyes = Eyes::LEFT;
960                                 }
961                                 while (j < fill_to || eyes != fill_to_eyes) {
962                                         if (last != _last_video.end()) {
963                                                 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
964                                                 auto copy = last->second->shallow_copy();
965                                                 copy->set_eyes (eyes);
966                                                 emit_video (copy, j);
967                                         } else {
968                                                 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
969                                                 emit_video (black_player_video_frame(eyes), j);
970                                         }
971                                         if (eyes == Eyes::RIGHT) {
972                                                 j += one_video_frame();
973                                         }
974                                         eyes = increment_eyes (eyes);
975                                 }
976                         } else {
977                                 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
978                                         if (last != _last_video.end()) {
979                                                 emit_video (last->second, j);
980                                         } else {
981                                                 emit_video (black_player_video_frame(Eyes::BOTH), j);
982                                         }
983                                 }
984                         }
985                 }
986         }
987
988         auto const content_video = piece->content->video;
989
990         _last_video[weak_piece] = std::make_shared<PlayerVideo>(
991                 video.image,
992                 content_video->actual_crop(),
993                 content_video->fade (_film, video.frame),
994                 scale_for_display(
995                         content_video->scaled_size(_film->frame_size()),
996                         _video_container_size,
997                         _film->frame_size(),
998                         content_video->pixel_quanta()
999                         ),
1000                 _video_container_size,
1001                 video.eyes,
1002                 video.part,
1003                 content_video->colour_conversion(),
1004                 content_video->range(),
1005                 piece->content,
1006                 video.frame,
1007                 false
1008                 );
1009
1010         DCPTime t = time;
1011         for (int i = 0; i < frc.repeat; ++i) {
1012                 if (t < piece->content->end(_film)) {
1013                         emit_video (_last_video[weak_piece], t);
1014                 }
1015                 t += one_video_frame ();
1016         }
1017 }
1018
1019
/** Handle a block of decoded audio from a piece of content: trim it to the
 *  content's period, apply gain/fade, remap to the DCP channel layout, run any
 *  audio processor and push the result into the merger.
 */
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Ignore anything that arrives while we are suspended */
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		/* The piece has gone; probably things are being re-arranged */
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this stream once resampled for the DCP */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content: keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Work on a copy so that we don't modify the decoder's buffers */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record when this stream last produced data; ::pass uses this to decide how much audio is safe to emit */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1108
1109
1110 void
1111 Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
1112 {
1113         if (_suspended) {
1114                 return;
1115         }
1116
1117         auto piece = weak_piece.lock ();
1118         auto content = weak_content.lock ();
1119         if (!piece || !content) {
1120                 return;
1121         }
1122
1123         PlayerText ps;
1124         for (auto& sub: subtitle.subs)
1125         {
1126                 /* Apply content's subtitle offsets */
1127                 sub.rectangle.x += content->x_offset ();
1128                 sub.rectangle.y += content->y_offset ();
1129
1130                 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1131                 sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
1132                 sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
1133
1134                 /* Apply content's subtitle scale */
1135                 sub.rectangle.width *= content->x_scale ();
1136                 sub.rectangle.height *= content->y_scale ();
1137
1138                 auto image = sub.image;
1139
1140                 /* We will scale the subtitle up to fit _video_container_size */
1141                 int const width = sub.rectangle.width * _video_container_size.load().width;
1142                 int const height = sub.rectangle.height * _video_container_size.load().height;
1143                 if (width == 0 || height == 0) {
1144                         return;
1145                 }
1146
1147                 dcp::Size scaled_size (width, height);
1148                 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
1149         }
1150
1151         DCPTime from(content_time_to_dcp(piece, subtitle.from()));
1152         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1153 }
1154
1155
1156 void
1157 Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
1158 {
1159         if (_suspended) {
1160                 return;
1161         }
1162
1163         auto piece = weak_piece.lock ();
1164         auto content = weak_content.lock ();
1165         if (!piece || !content) {
1166                 return;
1167         }
1168
1169         PlayerText ps;
1170         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
1171
1172         if (from > piece->content->end(_film)) {
1173                 return;
1174         }
1175
1176         for (auto s: subtitle.subs) {
1177                 s.set_h_position (s.h_position() + content->x_offset());
1178                 s.set_v_position (s.v_position() + content->y_offset());
1179                 float const xs = content->x_scale();
1180                 float const ys = content->y_scale();
1181                 float size = s.size();
1182
1183                 /* Adjust size to express the common part of the scaling;
1184                    e.g. if xs = ys = 0.5 we scale size by 2.
1185                 */
1186                 if (xs > 1e-5 && ys > 1e-5) {
1187                         size *= 1 / min (1 / xs, 1 / ys);
1188                 }
1189                 s.set_size (size);
1190
1191                 /* Then express aspect ratio changes */
1192                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1193                         s.set_aspect_adjust (xs / ys);
1194                 }
1195
1196                 s.set_in (dcp::Time(from.seconds(), 1000));
1197                 ps.string.push_back (s);
1198         }
1199
1200         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1201 }
1202
1203
1204 void
1205 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1206 {
1207         if (_suspended) {
1208                 return;
1209         }
1210
1211         auto content = weak_content.lock ();
1212         if (!content) {
1213                 return;
1214         }
1215
1216         if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
1217                 return;
1218         }
1219
1220         auto piece = weak_piece.lock ();
1221         if (!piece) {
1222                 return;
1223         }
1224
1225         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1226
1227         if (dcp_to > piece->content->end(_film)) {
1228                 return;
1229         }
1230
1231         auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
1232
1233         bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1234         if (content->use() && !always && !content->burn()) {
1235                 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
1236         }
1237 }
1238
1239
1240 void
1241 Player::seek (DCPTime time, bool accurate)
1242 {
1243         boost::mutex::scoped_lock lm (_mutex);
1244         LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1245
1246         if (_suspended) {
1247                 /* We can't seek in this state */
1248                 return;
1249         }
1250
1251         if (_shuffler) {
1252                 _shuffler->clear ();
1253         }
1254
1255         _delay.clear ();
1256
1257         if (_audio_processor) {
1258                 _audio_processor->flush ();
1259         }
1260
1261         _audio_merger.clear ();
1262         for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1263                 _active_texts[i].clear ();
1264         }
1265
1266         for (auto i: _pieces) {
1267                 if (time < i->content->position()) {
1268                         /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
1269                            we must seek this (following) content accurately, otherwise when we come to the end of the current
1270                            content we may not start right at the beginning of the next, causing a gap (if the next content has
1271                            been trimmed to a point between keyframes, or something).
1272                         */
1273                         i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1274                         i->done = false;
1275                 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1276                         /* During; seek to position */
1277                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
1278                         i->done = false;
1279                 } else {
1280                         /* After; this piece is done */
1281                         i->done = true;
1282                 }
1283         }
1284
1285         if (accurate) {
1286                 _next_video_time = time;
1287                 _next_video_eyes = Eyes::LEFT;
1288                 _next_audio_time = time;
1289         } else {
1290                 _next_video_time = boost::none;
1291                 _next_video_eyes = boost::none;
1292                 _next_audio_time = boost::none;
1293         }
1294
1295         _black.set_position (time);
1296         _silent.set_position (time);
1297
1298         _last_video.clear ();
1299 }
1300
1301
1302 void
1303 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1304 {
1305         if (!_film->three_d()) {
1306                 if (pv->eyes() == Eyes::LEFT) {
1307                         /* Use left-eye images for both eyes... */
1308                         pv->set_eyes (Eyes::BOTH);
1309                 } else if (pv->eyes() == Eyes::RIGHT) {
1310                         /* ...and discard the right */
1311                         return;
1312                 }
1313         }
1314
1315         /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1316            player before the video that requires them.
1317         */
1318         _delay.push_back (make_pair (pv, time));
1319
1320         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1321                 _next_video_time = time + one_video_frame();
1322         }
1323         _next_video_eyes = increment_eyes (pv->eyes());
1324
1325         if (_delay.size() < 3) {
1326                 return;
1327         }
1328
1329         auto to_do = _delay.front();
1330         _delay.pop_front();
1331         do_emit_video (to_do.first, to_do.second);
1332 }
1333
1334
1335 void
1336 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1337 {
1338         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1339                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1340                         _active_texts[i].clear_before (time);
1341                 }
1342         }
1343
1344         auto subtitles = open_subtitles_for_frame (time);
1345         if (subtitles) {
1346                 pv->set_text (subtitles.get ());
1347         }
1348
1349         Video (pv, time);
1350 }
1351
1352
1353 void
1354 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1355 {
1356         /* Log if the assert below is about to fail */
1357         if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1358                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1359         }
1360
1361         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1362         DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1363         Audio (data, time, _film->audio_frame_rate());
1364         _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1365 }
1366
1367
1368 void
1369 Player::fill_audio (DCPTimePeriod period)
1370 {
1371         if (period.from == period.to) {
1372                 return;
1373         }
1374
1375         DCPOMATIC_ASSERT (period.from < period.to);
1376
1377         DCPTime t = period.from;
1378         while (t < period.to) {
1379                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1380                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1381                 if (samples) {
1382                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1383                         silence->make_silent ();
1384                         emit_audio (silence, t);
1385                 }
1386                 t += block;
1387         }
1388 }
1389
1390
1391 DCPTime
1392 Player::one_video_frame () const
1393 {
1394         return DCPTime::from_frames (1, _film->video_frame_rate ());
1395 }
1396
1397
1398 pair<shared_ptr<AudioBuffers>, DCPTime>
1399 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1400 {
1401         auto const discard_time = discard_to - time;
1402         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1403         auto remaining_frames = audio->frames() - discard_frames;
1404         if (remaining_frames <= 0) {
1405                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1406         }
1407         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1408         return make_pair(cut, time + discard_time);
1409 }
1410
1411
1412 void
1413 Player::set_dcp_decode_reduction (optional<int> reduction)
1414 {
1415         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1416
1417         {
1418                 boost::mutex::scoped_lock lm (_mutex);
1419
1420                 if (reduction == _dcp_decode_reduction) {
1421                         lm.unlock ();
1422                         Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1423                         return;
1424                 }
1425
1426                 _dcp_decode_reduction = reduction;
1427                 setup_pieces_unlocked ();
1428         }
1429
1430         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1431 }
1432
1433
1434 optional<DCPTime>
1435 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
1436 {
1437         boost::mutex::scoped_lock lm (_mutex);
1438
1439         for (auto i: _pieces) {
1440                 if (i->content == content) {
1441                         return content_time_to_dcp (i, t);
1442                 }
1443         }
1444
1445         /* We couldn't find this content; perhaps things are being changed over */
1446         return {};
1447 }
1448
1449
1450 optional<ContentTime>
1451 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
1452 {
1453         boost::mutex::scoped_lock lm (_mutex);
1454
1455         for (auto i: _pieces) {
1456                 if (i->content == content) {
1457                         return dcp_to_content_time (i, t);
1458                 }
1459         }
1460
1461         /* We couldn't find this content; perhaps things are being changed over */
1462         return {};
1463 }
1464
1465
1466 shared_ptr<const Playlist>
1467 Player::playlist () const
1468 {
1469         return _playlist ? _playlist : _film->playlist();
1470 }
1471
1472
1473 void
1474 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1475 {
1476         if (_suspended) {
1477                 return;
1478         }
1479
1480         auto piece = weak_piece.lock ();
1481         DCPOMATIC_ASSERT (piece);
1482
1483         auto const vfr = _film->video_frame_rate();
1484
1485         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1486         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1487                 return;
1488         }
1489
1490         Atmos (data.data, dcp_time, data.metadata);
1491 }
1492