Use atomic for _play_referenced.
src/lib/player.cc
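A minimal sketch of the declaration the commit title implies; player.h is not shown here, so the member's exact surroundings are an assumption:

	// player.h (hypothetical excerpt).  Player::set_play_referenced() writes this
	// flag without taking _mutex, so an atomic makes the unguarded write safe
	// against concurrent reads in setup_pieces_unlocked().
	#include <atomic>

	std::atomic<bool> _play_referenced;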
/*
    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/


#include "atmos_decoder.h"
#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "audio_processor.h"
#include "compose.hpp"
#include "config.h"
#include "content_audio.h"
#include "content_video.h"
#include "dcp_content.h"
#include "dcp_decoder.h"
#include "dcpomatic_log.h"
#include "decoder.h"
#include "decoder_factory.h"
#include "ffmpeg_content.h"
#include "film.h"
#include "frame_rate_change.h"
#include "image.h"
#include "image_decoder.h"
#include "job.h"
#include "log.h"
#include "maths_util.h"
#include "piece.h"
#include "player.h"
#include "player_video.h"
#include "playlist.h"
#include "ratio.h"
#include "raw_image_proxy.h"
#include "referenced_reel_asset.h"
#include "render_text.h"
#include "shuffler.h"
#include "text_content.h"
#include "text_decoder.h"
#include "timer.h"
#include "video_decoder.h"
#include <dcp/reel.h>
#include <dcp/reel_closed_caption_asset.h>
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <algorithm>
#include <iostream>
#include <stdint.h>

#include "i18n.h"

using std::copy;
using std::cout;
using std::dynamic_pointer_cast;
using std::list;
using std::make_pair;
using std::make_shared;
using std::max;
using std::min;
using std::pair;
using std::shared_ptr;
using std::vector;
using std::weak_ptr;
using boost::optional;
using boost::scoped_ptr;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
#endif
using namespace dcpomatic;


int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;


Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}


Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}


void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, and since we are proxying the signal
	   through to the butler our connection must be at the front.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}


void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}


bool
have_video (shared_ptr<const Content> content)
{
	return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
}


bool
have_audio (shared_ptr<const Content> content)
{
	return static_cast<bool>(content->audio) && content->can_be_played();
}


void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}


void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	Change (type, property, frequent);
}


void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}


void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}


void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == ChangeType::DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && _film->audio_processor ()) {
			boost::mutex::scoped_lock lm (_mutex);
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			_audio_merger.clear ();
		}
	}
}


shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}


Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
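	/* A hedged worked example (invented numbers, and assuming factor() is the
	   repeat count, or 0.5 when frames are skipped, as the division below implies):
	   for 25fps content in a 50fps DCP, frc.dcp is 50 and frc.factor() is 2, so
	   2 seconds gives s.frames_floor(50) == 100 and content frame 100 / 2 == 50.
	   For 50fps content in a 25fps DCP, factor() is 0.5 and the division doubles
	   the frame count instead.
	*/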
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}


DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
	return d + piece->content->position();
}


Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}


DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
		+ piece->content->position();
}


ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}


DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
}


vector<shared_ptr<Font>>
Player::get_subtitle_fonts ()
{
	boost::mutex::scoped_lock lm (_mutex);

	vector<shared_ptr<Font>> fonts;
	for (auto piece: _pieces) {
		for (auto text: piece->content->text) {
			auto text_fonts = text->fonts();
			copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
		}
	}

	return fonts;
}


/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
	setup_pieces();
}


void
Player::set_ignore_audio ()
{
	_ignore_audio = true;
	setup_pieces();
}


void
Player::set_ignore_text ()
{
	_ignore_text = true;
	setup_pieces();
}


/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	_always_burn_open_subtitles = true;
}


/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	_fast = true;
	setup_pieces();
}


void
Player::set_play_referenced ()
{
	_play_referenced = true;
	setup_pieces();
}


static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}


list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> reel_assets;

	for (auto content: playlist()->content()) {
		auto dcp = dynamic_pointer_cast<DCPContent>(content);
		if (!dcp) {
			continue;
		}

		if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			return reel_assets;
		}

		auto const frame_rate = _film->video_frame_rate();
		DCPOMATIC_ASSERT (dcp->video_frame_rate());
		/* We should only be referencing if the DCP rate is the same as the film rate */
		DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);

		Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
		Frame const trim_end = dcp->trim_end().frames_round(frame_rate);

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto reel: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += reel->main_picture()->actual_duration();
		}

		for (auto reel: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = reel->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
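			/* Worked example of the trims above (invented numbers): two 100-frame
			   reels with trim_start = 30 and trim_end = 20.  Reel 1 sees
			   offset_from_start = 0 and offset_from_end = 200, giving
			   reel_trim_start = min(100, max(0, 30 - 0)) = 30 and
			   reel_trim_end = min(100, max(0, 100 - (200 - 20))) = 0.  Reel 2 sees
			   offsets 100 and 100, giving reel_trim_start = 0 and
			   reel_trim_end = min(100, max(0, 100 - (100 - 20))) = 20.
			*/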

			auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
			if (dcp->reference_video()) {
				maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_audio()) {
				maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
			}

			if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
				for (auto caption: reel->closed_captions()) {
					maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return reel_assets;
}


bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_next_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_next_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _next_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_next_audio_time;
		}
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.  First, though we choose only streams that are less than
	   ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
	   behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
	   that will never come, causing bugs like #2101.
	*/
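	/* Illustration: if the leading stream has pushed audio up to 60s and another
	   stream has only reached 54s, the 6s gap is not less than ignore_streams_behind
	   (5s), so the slow stream is treated as finished and no longer holds back
	   pull_to below.
	*/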
	constexpr int ignore_streams_behind = 5;

	using state_pair = std::pair<AudioStreamPtr, StreamState>;

	/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
	auto latest_last_push_end = std::max_element(
		_stream_states.begin(),
		_stream_states.end(),
		[](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
		);

	if (latest_last_push_end != _stream_states.end()) {
		LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
	}

	/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
	std::map<AudioStreamPtr, StreamState> alive_stream_states;
	for (auto const& i: _stream_states) {
		if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
			alive_stream_states.insert(i);
		} else {
			LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
		}
	}

	auto pull_to = _playback_length;
	for (auto const& i: alive_stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_next_audio_time && i->second < *_next_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_next_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_next_audio_time && i->second > *_next_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		if (_shuffler) {
			_shuffler->flush ();
		}
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}

		/* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
		 * However, if we have L and R video files, and one is shorter than the other,
		 * the fill code in ::video mostly takes care of filling in the gaps.
		 * But since it fills at the point when it knows there is more video coming
		 * at time t (so it should fill any gap up to t) it can't do anything right at the
		 * end.  This is particularly bad news if the last frame emitted is a LEFT
		 * eye, as the MXF writer will complain about the 3D sequence being wrong.
		 * Here's a hack to work around that particular case.
		 */
		if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
			do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
		}
	}

	return done;
}


/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text(j.string, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	return merge (captions, _subtitle_alignment);
}


void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			auto last = _last_video.find (weak_piece);
			if (_film->three_d()) {
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade (_film, video.frame),
		scale_for_display(
			content_video->scaled_size(_film->frame_size()),
			_video_container_size,
			_film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}


void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}


void
Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	for (auto& sub: subtitle.subs)
	{
		/* Apply content's subtitle offsets */
		sub.rectangle.x += content->x_offset ();
		sub.rectangle.y += content->y_offset ();

		/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
		sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
		sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
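		/* Worked example of the correction above (invented numbers): with
		   x_scale() == 1.2 and width w, x becomes x - 0.1w; after the width is
		   scaled to 1.2w the centre is (x - 0.1w) + 0.6w == x + 0.5w, i.e.
		   unchanged.
		*/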

		/* Apply content's subtitle scale */
		sub.rectangle.width *= content->x_scale ();
		sub.rectangle.height *= content->y_scale ();

		auto image = sub.image;

		/* We will scale the subtitle up to fit _video_container_size */
		int const width = sub.rectangle.width * _video_container_size.width;
		int const height = sub.rectangle.height * _video_container_size.height;
		if (width == 0 || height == 0) {
			return;
		}

		dcp::Size scaled_size (width, height);
		ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
	}

	DCPTime from(content_time_to_dcp(piece, subtitle.from()));
	_active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}


void
Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + content->x_offset());
		s.set_v_position (s.v_position() + content->y_offset());
		float const xs = content->x_scale();
		float const ys = content->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 0.5 (the expression below
		   works out to size *= max(xs, ys)).
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (s);
	}

	_active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}


void
Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
{
	if (_suspended) {
		return;
	}

	auto content = weak_content.lock ();
	if (!content) {
		return;
	}

	if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	DCPTime const dcp_to = content_time_to_dcp (piece, to);

	if (dcp_to > piece->content->end(_film)) {
		return;
	}

	auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);

	bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (content->use() && !always && !content->burn()) {
		Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
	}
}


void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	_last_video.clear ();
}


void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	if (!_film->three_d()) {
		if (pv->eyes() == Eyes::LEFT) {
			/* Use left-eye images for both eyes... */
			pv->set_eyes (Eyes::BOTH);
		} else if (pv->eyes() == Eyes::RIGHT) {
			/* ...and discard the right */
			return;
		}
	}

	/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
	   player before the video that requires them.
	*/
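	/* Illustration: the queue is only drained once it holds 3 frames, so the frame
	   emitted below left the decoders two video frames ago; a subtitle arriving in
	   that window is still attached by do_emit_video() before its frame leaves.
	*/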
	_delay.push_back (make_pair (pv, time));

	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		_next_video_time = time + one_video_frame();
	}
	_next_video_eyes = increment_eyes (pv->eyes());

	if (_delay.size() < 3) {
		return;
	}

	auto to_do = _delay.front();
	_delay.pop_front();
	do_emit_video (to_do.first, to_do.second);
}


void
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
			_active_texts[i].clear_before (time);
		}
	}

	auto subtitles = open_subtitles_for_frame (time);
	if (subtitles) {
		pv->set_text (subtitles.get ());
	}

	Video (pv, time);
}


void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	/* Log if the assert below is about to fail */
	if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
	}

	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
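	/* Assuming DCPTime ticks at 96kHz (twice 48kHz), a difference of 1 unit is
	   half a 48kHz sample, so `< 2' allows exactly that leeway.
	*/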
	DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
	Audio (data, time, _film->audio_frame_rate());
	_next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}


void
Player::fill_audio (DCPTimePeriod period)
{
	if (period.from == period.to) {
		return;
	}

	DCPOMATIC_ASSERT (period.from < period.to);

	DCPTime t = period.from;
	while (t < period.to) {
		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
		Frame const samples = block.frames_round(_film->audio_frame_rate());
		if (samples) {
			auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
			silence->make_silent ();
			emit_audio (silence, t);
		}
		t += block;
	}
}


DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}


pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
{
	auto const discard_time = discard_to - time;
	auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
	auto remaining_frames = audio->frames() - discard_frames;
	if (remaining_frames <= 0) {
		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
	}
	auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
	return make_pair(cut, time + discard_time);
}


void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}


optional<DCPTime>
Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
{
	boost::mutex::scoped_lock lm (_mutex);

	for (auto i: _pieces) {
		if (i->content == content) {
			return content_time_to_dcp (i, t);
		}
	}

	/* We couldn't find this content; perhaps things are being changed over */
	return {};
}


optional<ContentTime>
Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
{
	boost::mutex::scoped_lock lm (_mutex);

	for (auto i: _pieces) {
		if (i->content == content) {
			return dcp_to_content_time (i, t);
		}
	}

	/* We couldn't find this content; perhaps things are being changed over */
	return {};
}


shared_ptr<const Playlist>
Player::playlist () const
{
	return _playlist ? _playlist : _film->playlist();
}


void
Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	DCPOMATIC_ASSERT (piece);

	auto const vfr = _film->video_frame_rate();

	DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
	if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
		return;
	}

	Atmos (data.data, dcp_time, data.metadata);
}
1494