Use atomic for _ignore_text.
src/lib/player.cc
/*
    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/


#include "atmos_decoder.h"
#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "audio_processor.h"
#include "compose.hpp"
#include "config.h"
#include "content_audio.h"
#include "content_video.h"
#include "dcp_content.h"
#include "dcp_decoder.h"
#include "dcpomatic_log.h"
#include "decoder.h"
#include "decoder_factory.h"
#include "ffmpeg_content.h"
#include "film.h"
#include "frame_rate_change.h"
#include "image.h"
#include "image_decoder.h"
#include "job.h"
#include "log.h"
#include "maths_util.h"
#include "piece.h"
#include "player.h"
#include "player_video.h"
#include "playlist.h"
#include "ratio.h"
#include "raw_image_proxy.h"
#include "referenced_reel_asset.h"
#include "render_text.h"
#include "shuffler.h"
#include "text_content.h"
#include "text_decoder.h"
#include "timer.h"
#include "video_decoder.h"
#include <dcp/reel.h>
#include <dcp/reel_closed_caption_asset.h>
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <algorithm>
#include <iostream>
#include <stdint.h>

#include "i18n.h"


using std::copy;
using std::cout;
using std::dynamic_pointer_cast;
using std::list;
using std::make_pair;
using std::make_shared;
using std::max;
using std::min;
using std::pair;
using std::shared_ptr;
using std::vector;
using std::weak_ptr;
using boost::optional;
using boost::scoped_ptr;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
#endif
using namespace dcpomatic;


int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;


Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}


Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}


void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}


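/** Rebuild the Pieces that we use for playback, taking a lock on _mutex */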
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}


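/** @return true if the given content has video which is marked for use and can be played */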
bool
have_video (shared_ptr<const Content> content)
{
	return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
}


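/** @return true if the given content has audio and can be played */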
bool
have_audio (shared_ptr<const Content> content)
{
	return static_cast<bool>(content->audio) && content->can_be_played();
}


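/** Rebuild _pieces to reflect the current playlist: create a decoder for each piece of content,
 *  connect the decoders' signals to our handlers and reset the black/silence fillers.
 *  Caller must hold a lock on _mutex.
 */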
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});

	if (have_threed) {
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto text: decoder->text) {
				text->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		for (auto text: decoder->text) {
			text->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>(text->content()), _1)
				);
			text->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>(text->content()), _1)
				);
			text->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>(text->content()), _1)
				);
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}


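/** Respond to a change in some content in our playlist: suspend pass() and seek() while the
 *  change is pending, rebuild our pieces when it is done, and pass the change on to our own
 *  listeners.
 */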
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	Change (type, property, frequent);
}


void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}


void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}


void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == ChangeType::DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && _film->audio_processor ()) {
			boost::mutex::scoped_lock lm (_mutex);
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			_audio_merger.clear ();
		}
	}
}


shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}


Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}


DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
	return d + piece->content->position();
}


Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}


DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
		+ piece->content->position();
}


ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}


DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
}


vector<shared_ptr<Font>>
Player::get_subtitle_fonts ()
{
	boost::mutex::scoped_lock lm (_mutex);

	vector<shared_ptr<Font>> fonts;
	for (auto piece: _pieces) {
		for (auto text: piece->content->text) {
			auto text_fonts = text->fonts();
			copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
		}
	}

	return fonts;
}


/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
	setup_pieces();
}


void
Player::set_ignore_audio ()
{
	_ignore_audio = true;
	setup_pieces();
}


void
Player::set_ignore_text ()
{
	_ignore_text = true;
	setup_pieces();
}


/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
}


/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	setup_pieces_unlocked ();
}


void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
}


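/* Trim the given reel asset by reel_trim_start/reel_trim_end and add it to the list if any of it remains */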
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}


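/** @return reel assets from any DCP content which is marked to be referenced rather than re-encoded */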
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> reel_assets;

	for (auto content: playlist()->content()) {
		auto dcp = dynamic_pointer_cast<DCPContent>(content);
		if (!dcp) {
			continue;
		}

		if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			return reel_assets;
		}

		auto const frame_rate = _film->video_frame_rate();
		DCPOMATIC_ASSERT (dcp->video_frame_rate());
		/* We should only be referencing if the DCP rate is the same as the film rate */
		DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);

		Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
		Frame const trim_end = dcp->trim_end().frames_round(frame_rate);

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto reel: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += reel->main_picture()->actual_duration();
		}

		for (auto reel: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = reel->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
			if (dcp->reference_video()) {
				maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_audio()) {
				maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
				for (auto caption: reel->closed_captions()) {
					maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return reel_assets;
}


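/** Make the player emit some more data: video, audio or text from whichever piece (or black/silent
 *  filler) is furthest behind.
 *  @return true if there is no more content to emit.
 */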
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_next_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_next_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _next_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_next_audio_time;
		}
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of _silent.  First, though, we choose only streams that are less than
	   ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
	   behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
	   that will never come, causing bugs like #2101.
	*/
	constexpr int ignore_streams_behind = 5;

	using state_pair = std::pair<AudioStreamPtr, StreamState>;

	/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
	auto latest_last_push_end = std::max_element(
		_stream_states.begin(),
		_stream_states.end(),
		[](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
		);

	if (latest_last_push_end != _stream_states.end()) {
		LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
	}

	/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
	std::map<AudioStreamPtr, StreamState> alive_stream_states;
	for (auto const& i: _stream_states) {
		if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
			alive_stream_states.insert(i);
		} else {
			LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
		}
	}

	auto pull_to = _playback_length;
	for (auto const& i: alive_stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_next_audio_time && i->second < *_next_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_next_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_next_audio_time && i->second > *_next_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		if (_shuffler) {
			_shuffler->flush ();
		}
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}

		/* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
		 * However, if we have L and R video files, and one is shorter than the other,
		 * the fill code in ::video mostly takes care of filling in the gaps.
		 * However, since it fills at the point when it knows there is more video coming
		 * at time t (so it should fill any gap up to t) it can't do anything right at the
		 * end.  This is particularly bad news if the last frame emitted is a LEFT
		 * eye, as the MXF writer will complain about the 3D sequence being wrong.
		 * Here's a hack to work around that particular case.
		 */
		if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
			do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
		}
	}

	return done;
}


/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text(j.string, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	return merge (captions, _subtitle_alignment);
}


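/** Handle a video frame coming from a decoder: discard it if it should not be shown, fill any
 *  gap between the last video we emitted and this frame, then emit it (repeating as required by
 *  any frame-rate change).
 */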
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			auto last = _last_video.find (weak_piece);
			if (_film->three_d()) {
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade (_film, video.frame),
		scale_for_display(
			content_video->scaled_size(_film->frame_size()),
			_video_container_size,
			_film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}


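/** Handle some audio coming from a decoder: trim it to the content's period, apply gain and
 *  fades, remap it to the DCP channel layout, run any audio processor and push the result to
 *  the merger.
 */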
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}


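/** Handle the start of a bitmap subtitle from a decoder: apply the content's offsets and scaling
 *  and add it to the appropriate list of active texts.
 */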
void
Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	for (auto& sub: subtitle.subs)
	{
		/* Apply content's subtitle offsets */
		sub.rectangle.x += content->x_offset ();
		sub.rectangle.y += content->y_offset ();

		/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
		sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
		sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);

		/* Apply content's subtitle scale */
		sub.rectangle.width *= content->x_scale ();
		sub.rectangle.height *= content->y_scale ();

		auto image = sub.image;

		/* We will scale the subtitle up to fit _video_container_size */
		int const width = sub.rectangle.width * _video_container_size.width;
		int const height = sub.rectangle.height * _video_container_size.height;
		if (width == 0 || height == 0) {
			return;
		}

		dcp::Size scaled_size (width, height);
		ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
	}

	DCPTime from(content_time_to_dcp(piece, subtitle.from()));
	_active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}


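/** Handle the start of a string (plain text) subtitle from a decoder: apply the content's
 *  offsets and scaling and add it to the appropriate list of active texts.
 */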
void
Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + content->x_offset());
		s.set_v_position (s.v_position() + content->y_offset());
		float const xs = content->x_scale();
		float const ys = content->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we halve the size.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (s);
	}

	_active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}


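/** Handle the end time of a subtitle from a decoder, emitting the finished text via the Text
 *  signal if it is not going to be burnt into the image.
 */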
void
Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
{
	if (_suspended) {
		return;
	}

	auto content = weak_content.lock ();
	if (!content) {
		return;
	}

	if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	DCPTime const dcp_to = content_time_to_dcp (piece, to);

	if (dcp_to > piece->content->end(_film)) {
		return;
	}

	auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);

	bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (content->use() && !always && !content->burn()) {
		Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
	}
}


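/** Seek the player.
 *  @param time Time to seek to.
 *  @param accurate true to discard any data from before the seek time once decoding resumes;
 *  false to accept slightly early data (faster).
 */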
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	_last_video.clear ();
}


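/** Queue a video frame for emission.  Frames are delayed a little so that subtitles have a
 *  chance to arrive at the player before the video which needs them.
 */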
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	if (!_film->three_d()) {
		if (pv->eyes() == Eyes::LEFT) {
			/* Use left-eye images for both eyes... */
			pv->set_eyes (Eyes::BOTH);
		} else if (pv->eyes() == Eyes::RIGHT) {
			/* ...and discard the right */
			return;
		}
	}

	/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
	   player before the video that requires them.
	*/
	_delay.push_back (make_pair (pv, time));

	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		_next_video_time = time + one_video_frame();
	}
	_next_video_eyes = increment_eyes (pv->eyes());

	if (_delay.size() < 3) {
		return;
	}

	auto to_do = _delay.front();
	_delay.pop_front();
	do_emit_video (to_do.first, to_do.second);
}


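/** Actually emit a video frame, first attaching any open subtitles which should be burnt into it */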
void
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
			_active_texts[i].clear_before (time);
		}
	}

	auto subtitles = open_subtitles_for_frame (time);
	if (subtitles) {
		pv->set_text (subtitles.get ());
	}

	Video (pv, time);
}


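/** Emit some audio, checking that it follows on from whatever we emitted last */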
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	/* Log if the assert below is about to fail */
	if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
	}

	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
	DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
	Audio (data, time, _film->audio_frame_rate());
	_next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}


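/** Emit silence to fill the given period, in blocks of up to half a second */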
void
Player::fill_audio (DCPTimePeriod period)
{
	if (period.from == period.to) {
		return;
	}

	DCPOMATIC_ASSERT (period.from < period.to);

	DCPTime t = period.from;
	while (t < period.to) {
		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
		Frame const samples = block.frames_round(_film->audio_frame_rate());
		if (samples) {
			auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
			silence->make_silent ();
			emit_audio (silence, t);
		}
		t += block;
	}
}


DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}


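/** Discard the start of some audio, up to discard_to.
 *  @return the remaining audio and the DCP time at which it now starts, or a null pointer if
 *  nothing remains after the discard.
 */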
pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
{
	auto const discard_time = discard_to - time;
	auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
	auto remaining_frames = audio->frames() - discard_frames;
	if (remaining_frames <= 0) {
		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
	}
	auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
	return make_pair(cut, time + discard_time);
}


void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}


optional<DCPTime>
Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
{
	boost::mutex::scoped_lock lm (_mutex);

	for (auto i: _pieces) {
		if (i->content == content) {
			return content_time_to_dcp (i, t);
		}
	}

	/* We couldn't find this content; perhaps things are being changed over */
	return {};
}


optional<ContentTime>
Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
{
	boost::mutex::scoped_lock lm (_mutex);

	for (auto i: _pieces) {
		if (i->content == content) {
			return dcp_to_content_time (i, t);
		}
	}

	/* We couldn't find this content; perhaps things are being changed over */
	return {};
}


shared_ptr<const Playlist>
Player::playlist () const
{
	return _playlist ? _playlist : _film->playlist();
}


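/** Handle some Atmos data coming from a decoder, passing it on if it lies within the content's period */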
void
Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	DCPOMATIC_ASSERT (piece);

	auto const vfr = _film->video_frame_rate();

	DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
	if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
		return;
	}

	Atmos (data.data, dcp_time, data.metadata);
}