Make the _film member of Player a weak_ptr.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "render_text.h"
51 #include "shuffler.h"
52 #include "text_content.h"
53 #include "text_decoder.h"
54 #include "timer.h"
55 #include "video_decoder.h"
56 #include <dcp/reel.h>
57 #include <dcp/reel_closed_caption_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_sound_asset.h>
60 #include <dcp/reel_subtitle_asset.h>
61 #include <algorithm>
62 #include <iostream>
63 #include <stdint.h>
64
65 #include "i18n.h"
66
67
68 using std::copy;
69 using std::cout;
70 using std::dynamic_pointer_cast;
71 using std::list;
72 using std::make_pair;
73 using std::make_shared;
74 using std::make_shared;
75 using std::max;
76 using std::min;
77 using std::min;
78 using std::pair;
79 using std::shared_ptr;
80 using std::vector;
81 using std::weak_ptr;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Property codes passed to listeners via the Player's Change signal.
   NOTE(review): values start at 700, presumably to keep them distinct from
   other property code ranges used on the same signal -- confirm with callers.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
int const PlayerProperty::IGNORE_VIDEO = 706;
int const PlayerProperty::IGNORE_AUDIO = 707;
int const PlayerProperty::IGNORE_TEXT = 708;
int const PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES = 709;
int const PlayerProperty::PLAY_REFERENCED = 710;
101
102
/** Construct a Player for a whole Film.
 *  @param film Film to play.  Only a weak reference is kept, but film must be
 *  non-null here since it is dereferenced for tolerant() and audio_frame_rate().
 *  @param subtitle_alignment Alignment to use when rendering subtitles.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger(film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}
118
119
/** Construct a Player for a particular Playlist.
 *  @param film Film.  Only a weak reference is kept, but film must be non-null
 *  here since it is dereferenced for tolerant() and audio_frame_rate().
 *  @param playlist_ Playlist to use; presumably taken in preference to the
 *  film's own content (see playlist() and setup_pieces()) -- confirm.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger(film->audio_frame_rate())
{
	construct ();
}
135
136
/** Shared set-up called by both constructors: connect to film/playlist change
 *  signals, size the video container, set up the audio processor, build the
 *  pieces and seek to the start.
 */
void
Player::construct ()
{
	auto film = _film.lock();
	/* Both constructors were just passed a live shared_ptr, so the lock must succeed */
	DCPOMATIC_ASSERT(film);

	_film_changed_connection = film->Change.connect(bind(&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size(film->frame_size());

	/* Clone the film's audio processor into _audio_processor, if it has one
	   (see the AUDIO_PROCESSOR case in film_change()) */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
156
157
158 bool
159 have_video (shared_ptr<const Content> content)
160 {
161         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
162 }
163
164
165 bool
166 have_audio (shared_ptr<const Content> content)
167 {
168         return static_cast<bool>(content->audio) && content->can_be_played();
169 }
170
171
/** Rebuild _pieces from the current playlist: create (or re-use) a decoder for
 *  each piece of content, wire the decoders' output signals to this Player,
 *  recalculate _playback_length and the black/silent filler periods, and reset
 *  the next-emission state.  Takes _mutex for the duration.
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);

	/* Keep the old pieces around so that their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	auto film = _film.lock();
	if (!film) {
		return;
	}

	_playback_length = _playlist ? _playlist->length(film) : film->length();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use the old decoder for this content, if there was one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory(film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc(film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		/* Connect decoder outputs to this Player; pieces are passed as weak_ptr
		   so a queued emission cannot keep a stale Piece alive */
		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record, for each audio stream, the piece it belongs to and that piece's position */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(film)).overlap(period);
				}
			}
		}
	}

	/* Periods with no video / no audio; pass() fills these with black / silence */
	_black = Empty(film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);

	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}
316
317
/** Respond to a change in one of the pieces of content in our playlist.
 *  @param type Stage of the change (PENDING / DONE / CANCELLED).
 *  @param property Code of the property that is changing.
 *  @param frequent true if this change is happening often (passed on to our listeners).
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	auto film = _film.lock();
	if (!film) {
		return;
	}

	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Refresh the metadata of frames queued in _delay; presumably so the
			   new crop is applied to them -- confirm with PlayerVideo::reset_metadata */
			for (auto const& i: _delay) {
				i.first->reset_metadata(film, _video_container_size);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Pass the change on to our own listeners */
	Change (type, property, frequent);
}
351
352
/** Set the size of the container into which output video is placed, rebuilding
 *  the cached black frame at the new size.  A VIDEO_CONTAINER_SIZE change is
 *  signalled via the ChangeSignaller unless the size is unchanged.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	ChangeSignaller<Player, int> cc(this, PlayerProperty::VIDEO_CONTAINER_SIZE);

	if (s == _video_container_size) {
		/* No change: suppress the signal */
		cc.abort();
		return;
	}

	_video_container_size = s;

	{
		boost::mutex::scoped_lock lm(_black_image_mutex);
		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
		_black_image->make_black ();
	}
}
371
372
/** Respond to a change in our playlist: rebuild the pieces once the change has
 *  completed, then pass the change on to listeners as PlayerProperty::PLAYLIST.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
381
382
383 void
384 Player::film_change (ChangeType type, Film::Property p)
385 {
386         /* Here we should notice Film properties that affect our output, and
387            alert listeners that our output now would be different to how it was
388            last time we were run.
389         */
390
391         auto film = _film.lock();
392         if (!film) {
393                 return;
394         }
395
396         if (p == Film::Property::CONTAINER) {
397                 Change (type, PlayerProperty::FILM_CONTAINER, false);
398         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
399                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
400                    so we need new pieces here.
401                 */
402                 if (type == ChangeType::DONE) {
403                         setup_pieces ();
404                 }
405                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
406         } else if (p == Film::Property::AUDIO_PROCESSOR) {
407                 if (type == ChangeType::DONE && film->audio_processor ()) {
408                         boost::mutex::scoped_lock lm (_mutex);
409                         _audio_processor = film->audio_processor()->clone(film->audio_frame_rate());
410                 }
411         } else if (p == Film::Property::AUDIO_CHANNELS) {
412                 if (type == ChangeType::DONE) {
413                         boost::mutex::scoped_lock lm (_mutex);
414                         _audio_merger.clear ();
415                 }
416         }
417 }
418
419
/** @return a PlayerVideo wrapping our cached black image, sized to the current
 *  video container, for the given eyes; used to fill gaps where there is no video.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	boost::mutex::scoped_lock lm(_black_image_mutex);

	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
440
441
/** Convert a DCP time to a video frame index within a piece of content.
 *  @param piece Piece to use.
 *  @param t DCP time; clamped to the piece's trimmed extent.
 *  @return Index of the video frame within the content.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto film = _film.lock();
	DCPOMATIC_ASSERT(film);

	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
461
462
463 DCPTime
464 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
465 {
466         /* See comment in dcp_to_content_video */
467         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
468         return d + piece->content->position();
469 }
470
471
/** Convert a DCP time to a frame count at the film's audio sample rate,
 *  measured from the start of the (untrimmed) content in the given piece.
 *  @param piece Piece to use.
 *  @param t DCP time; clamped to the piece's trimmed extent.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto film = _film.lock();
	DCPOMATIC_ASSERT(film);

	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(film->audio_frame_rate());
}
483
484
485 DCPTime
486 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
487 {
488         auto film = _film.lock();
489         DCPOMATIC_ASSERT(film);
490
491         /* See comment in dcp_to_content_video */
492         return DCPTime::from_frames(f, film->audio_frame_rate())
493                 - DCPTime (piece->content->trim_start(), piece->frc)
494                 + piece->content->position();
495 }
496
497
/** Convert a DCP time to a ContentTime within the given piece.
 *  The result includes the piece's start trim and is clamped to be >= 0.
 *  @param piece Piece to use.
 *  @param t DCP time; clamped to the piece's trimmed extent.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto film = _film.lock();
	DCPOMATIC_ASSERT(film);

	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
508
509
510 DCPTime
511 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
512 {
513         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
514 }
515
516
517 vector<shared_ptr<Font>>
518 Player::get_subtitle_fonts ()
519 {
520         boost::mutex::scoped_lock lm (_mutex);
521
522         vector<shared_ptr<Font>> fonts;
523         for (auto piece: _pieces) {
524                 for (auto text: piece->content->text) {
525                         auto text_fonts = text->fonts();
526                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
527                 }
528         }
529
530         return fonts;
531 }
532
533
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	/* RAII signaller: announces a change to IGNORE_VIDEO when it goes out of scope */
	ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_VIDEO);
	_ignore_video = true;
	/* Rebuild pieces so the decoders are told to ignore video (see setup_pieces()) */
	setup_pieces();
}
542
543
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	/* RAII signaller: announces a change to IGNORE_AUDIO when it goes out of scope */
	ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_AUDIO);
	_ignore_audio = true;
	/* Rebuild pieces so the decoders are told to ignore audio (see setup_pieces()) */
	setup_pieces();
}
551
552
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	/* RAII signaller: announces a change to IGNORE_TEXT when it goes out of scope */
	ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_TEXT);
	_ignore_text = true;
	/* Rebuild pieces so the text decoders are told to ignore their content */
	setup_pieces();
}
560
561
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	/* RAII signaller: announces the property change when it goes out of scope.
	   No setup_pieces() is needed here as the flag is only read at render time. */
	ChangeSignaller<Player, int> cc(this, PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES);
	_always_burn_open_subtitles = true;
}
569
570
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	/* Note: no ChangeSignaller here; _fast is passed to decoder_factory in
	   setup_pieces() rather than being announced as a property change */
	_fast = true;
	setup_pieces();
}
578
579
/** Set the player to decode referenced DCP content rather than skipping it
 *  (see the DCPDecoder handling in setup_pieces()).
 */
void
Player::set_play_referenced ()
{
	/* RAII signaller: announces a change to PLAY_REFERENCED when it goes out of scope */
	ChangeSignaller<Player, int> cc(this, PlayerProperty::PLAY_REFERENCED);
	_play_referenced = true;
	setup_pieces();
}
587
588
/** Run one step of the player: find the piece (or black/silent filler) which
 *  is furthest behind and make it emit some data, then emit any audio which is
 *  now complete.
 *  @return true if playback is finished (nothing left to emit), otherwise false.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	auto film = _film.lock();

	if (_playback_length.load() == DCPTime() || !film) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing will emit data on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black or silent gap earlier than the earliest content takes priority */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_next_audio_time = dcp->end(film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_next_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _next_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_next_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.  First, though we choose only streams that are less than
	   ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
	   behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
	   that will never come, causing bugs like #2101.
	*/
	constexpr int ignore_streams_behind = 5;

	using state_pair = std::pair<AudioStreamPtr, StreamState>;

	/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
	auto latest_last_push_end = std::max_element(
		_stream_states.begin(),
		_stream_states.end(),
		[](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
		);

	if (latest_last_push_end != _stream_states.end()) {
		LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
	}

	/* Now make a list of those streams that are less than ignore_streams_behind behind the leader.
	   (The dereference of latest_last_push_end here is safe: the loop body only runs when
	   _stream_states is non-empty, in which case max_element did not return end().)
	*/
	std::map<AudioStreamPtr, StreamState> alive_stream_states;
	for (auto const& i: _stream_states) {
		if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
			alive_stream_states.insert(i);
		} else {
			LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
		}
	}

	auto pull_to = _playback_length.load();
	for (auto const& i: alive_stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_next_audio_time && i->second < *_next_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_next_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_next_audio_time && i->second > *_next_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush out anything still queued */
		if (_shuffler) {
			_shuffler->flush ();
		}
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}

		/* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
		 * However, if we have L and R video files, and one is shorter than the other,
		 * the fill code in ::video mostly takes care of filling in the gaps.
		 * However, since it fills at the point when it knows there is more video coming
		 * at time t (so it should fill any gap up to t) it can't do anything right at the
		 * end.  This is particularly bad news if the last frame emitted is a LEFT
		 * eye, as the MXF writer will complain about the 3D sequence being wrong.
		 * Here's a hack to workaround that particular case.
		 */
		if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
			do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
		}
	}

	return done;
}
792
793
/** @return Open subtitles for the frame at the given time, converted to images.
 *  @param time DCP time of the frame.
 *  Both bitmap and string subtitles are rendered, positioned within the video
 *  container and merged into a single PositionImage; an empty optional is
 *  returned if there are no subtitles at this time (or no film).
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	auto film = _film.lock();
	if (!film) {
		return {};
	}

	list<PositionImage> captions;
	int const vfr = film->video_frame_rate();

	/* Consider texts which are active during this one frame */
	for (
		auto j:
		_active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);

			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.load().width * i.rectangle.x),
						lrint(_video_container_size.load().height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text(j.string, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	return merge (captions, _subtitle_alignment);
}
844
845
/** Handle a video frame coming from a decoder.
 *  Discards frames that are before the content's period or before the last accurate
 *  seek point, fills any gap since the last emitted frame (repeating the previous
 *  frame, or using black), then emits this frame — repeated as the frame-rate
 *  change requires — via emit_video().
 *  @param weak_piece Piece that the video came from.
 *  @param video Video frame.
 */
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	auto film = _film.lock();
	if (!film) {
		return;
	}

	FrameRateChange frc(film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* Frame-rate conversion drops every other content frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min(time, piece->content->end(film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Previous frame from this piece, if any, to repeat into the gap */
			auto last = _last_video.find (weak_piece);
			if (film->three_d()) {
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Emit alternating L/R frames until both the time and the required eye are reached */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == Eyes::RIGHT) {
						/* Time only advances after the RIGHT eye of a pair */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	/* Remember this frame against its piece so that later gaps can be filled by repeating it */
	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade(film, video.frame),
		scale_for_display(
			content_video->scaled_size(film->frame_size()),
			_video_container_size,
			film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeated as the frame-rate change requires, but never past the content's end */
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}
970
971
/** Handle a block of audio coming from a decoder.
 *  Trims the block to the content's period, applies gain and fades, remaps channels,
 *  optionally runs the audio processor and then pushes the result into the merger.
 *  @param weak_piece Piece that the audio came from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio The audio data and its frame position within the content.
 */
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	auto film = _film.lock();
	if (!film) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this content after resampling to suit the film */
	int const rfr = content->resampled_frame_rate(film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(film)) {
		/* The block straddles the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end(film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Work on a copy so the decoder's buffers are not modified */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap the content's channels onto the film's channel layout */

	content_audio.audio = remap(content_audio.audio, film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run(content_audio.audio, film->audio_channels());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames(content_audio.audio->frames(), film->audio_frame_rate());
}
1065
1066
1067 void
1068 Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
1069 {
1070         if (_suspended) {
1071                 return;
1072         }
1073
1074         auto piece = weak_piece.lock ();
1075         auto content = weak_content.lock ();
1076         if (!piece || !content) {
1077                 return;
1078         }
1079
1080         PlayerText ps;
1081         for (auto& sub: subtitle.subs)
1082         {
1083                 /* Apply content's subtitle offsets */
1084                 sub.rectangle.x += content->x_offset ();
1085                 sub.rectangle.y += content->y_offset ();
1086
1087                 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1088                 sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
1089                 sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
1090
1091                 /* Apply content's subtitle scale */
1092                 sub.rectangle.width *= content->x_scale ();
1093                 sub.rectangle.height *= content->y_scale ();
1094
1095                 auto image = sub.image;
1096
1097                 /* We will scale the subtitle up to fit _video_container_size */
1098                 int const width = sub.rectangle.width * _video_container_size.load().width;
1099                 int const height = sub.rectangle.height * _video_container_size.load().height;
1100                 if (width == 0 || height == 0) {
1101                         return;
1102                 }
1103
1104                 dcp::Size scaled_size (width, height);
1105                 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
1106         }
1107
1108         DCPTime from(content_time_to_dcp(piece, subtitle.from()));
1109         _active_texts[content->type()].add_from(weak_content, ps, from);
1110 }
1111
1112
1113 void
1114 Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
1115 {
1116         if (_suspended) {
1117                 return;
1118         }
1119
1120         auto piece = weak_piece.lock ();
1121         auto content = weak_content.lock ();
1122         auto film = _film.lock();
1123         if (!piece || !content || !film) {
1124                 return;
1125         }
1126
1127         PlayerText ps;
1128         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
1129
1130         if (from > piece->content->end(film)) {
1131                 return;
1132         }
1133
1134         for (auto s: subtitle.subs) {
1135                 s.set_h_position (s.h_position() + content->x_offset());
1136                 s.set_v_position (s.v_position() + content->y_offset());
1137                 float const xs = content->x_scale();
1138                 float const ys = content->y_scale();
1139                 float size = s.size();
1140
1141                 /* Adjust size to express the common part of the scaling;
1142                    e.g. if xs = ys = 0.5 we scale size by 2.
1143                 */
1144                 if (xs > 1e-5 && ys > 1e-5) {
1145                         size *= 1 / min (1 / xs, 1 / ys);
1146                 }
1147                 s.set_size (size);
1148
1149                 /* Then express aspect ratio changes */
1150                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1151                         s.set_aspect_adjust (xs / ys);
1152                 }
1153
1154                 s.set_in (dcp::Time(from.seconds(), 1000));
1155                 ps.string.push_back (s);
1156         }
1157
1158         _active_texts[content->type()].add_from(weak_content, ps, from);
1159 }
1160
1161
1162 void
1163 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1164 {
1165         if (_suspended) {
1166                 return;
1167         }
1168
1169         auto content = weak_content.lock ();
1170         if (!content) {
1171                 return;
1172         }
1173
1174         if (!_active_texts[content->type()].have(weak_content)) {
1175                 return;
1176         }
1177
1178         auto piece = weak_piece.lock ();
1179         auto film = _film.lock();
1180         if (!piece || !film) {
1181                 return;
1182         }
1183
1184         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1185
1186         if (dcp_to > piece->content->end(film)) {
1187                 return;
1188         }
1189
1190         auto from = _active_texts[content->type()].add_to(weak_content, dcp_to);
1191
1192         bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1193         if (content->use() && !always && !content->burn()) {
1194                 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
1195         }
1196 }
1197
1198
/** Seek the player to a given time.
 *  Clears all buffered state (shuffler, video delay, audio merger/processor,
 *  active texts) and seeks each piece's decoder as appropriate.
 *  @param time DCP time to seek to.
 *  @param accurate true to seek exactly to @p time (decoders then discard output
 *  up to that point); false to seek to a convenient nearby point (e.g. a keyframe).
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	auto film = _film.lock();
	if (!film) {
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	std::for_each(_active_texts.begin(), _active_texts.end(), [](ActiveText& a) { a.clear(); });

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	/* After an accurate seek we know exactly what should come next; otherwise we don't */
	if (accurate) {
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer suitable for gap-filling */
	_last_video.clear ();
}
1262
1263
1264 void
1265 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1266 {
1267         auto film = _film.lock();
1268         DCPOMATIC_ASSERT(film);
1269
1270         if (!film->three_d()) {
1271                 if (pv->eyes() == Eyes::LEFT) {
1272                         /* Use left-eye images for both eyes... */
1273                         pv->set_eyes (Eyes::BOTH);
1274                 } else if (pv->eyes() == Eyes::RIGHT) {
1275                         /* ...and discard the right */
1276                         return;
1277                 }
1278         }
1279
1280         /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1281            player before the video that requires them.
1282         */
1283         _delay.push_back (make_pair (pv, time));
1284
1285         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1286                 _next_video_time = time + one_video_frame();
1287         }
1288         _next_video_eyes = increment_eyes (pv->eyes());
1289
1290         if (_delay.size() < 3) {
1291                 return;
1292         }
1293
1294         auto to_do = _delay.front();
1295         _delay.pop_front();
1296         do_emit_video (to_do.first, to_do.second);
1297 }
1298
1299
1300 void
1301 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1302 {
1303         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1304                 std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
1305         }
1306
1307         auto subtitles = open_subtitles_for_frame (time);
1308         if (subtitles) {
1309                 pv->set_text (subtitles.get ());
1310         }
1311
1312         Video (pv, time);
1313 }
1314
1315
1316 void
1317 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1318 {
1319         auto film = _film.lock();
1320         DCPOMATIC_ASSERT(film);
1321
1322         /* Log if the assert below is about to fail */
1323         if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1324                 film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1325         }
1326
1327         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1328         DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1329         Audio(data, time, film->audio_frame_rate());
1330         _next_audio_time = time + DCPTime::from_frames(data->frames(), film->audio_frame_rate());
1331 }
1332
1333
1334 void
1335 Player::fill_audio (DCPTimePeriod period)
1336 {
1337         auto film = _film.lock();
1338         DCPOMATIC_ASSERT(film);
1339
1340         if (period.from == period.to) {
1341                 return;
1342         }
1343
1344         DCPOMATIC_ASSERT (period.from < period.to);
1345
1346         DCPTime t = period.from;
1347         while (t < period.to) {
1348                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1349                 Frame const samples = block.frames_round(film->audio_frame_rate());
1350                 if (samples) {
1351                         auto silence = make_shared<AudioBuffers>(film->audio_channels(), samples);
1352                         silence->make_silent ();
1353                         emit_audio (silence, t);
1354                 }
1355                 t += block;
1356         }
1357 }
1358
1359
1360 DCPTime
1361 Player::one_video_frame () const
1362 {
1363         auto film = _film.lock();
1364         DCPOMATIC_ASSERT(film);
1365
1366         return DCPTime::from_frames(1, film->video_frame_rate ());
1367 }
1368
1369
1370 pair<shared_ptr<AudioBuffers>, DCPTime>
1371 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1372 {
1373         auto film = _film.lock();
1374         DCPOMATIC_ASSERT(film);
1375
1376         auto const discard_time = discard_to - time;
1377         auto const discard_frames = discard_time.frames_round(film->audio_frame_rate());
1378         auto remaining_frames = audio->frames() - discard_frames;
1379         if (remaining_frames <= 0) {
1380                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1381         }
1382         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1383         return make_pair(cut, time + discard_time);
1384 }
1385
1386
1387 void
1388 Player::set_dcp_decode_reduction (optional<int> reduction)
1389 {
1390         ChangeSignaller<Player, int> cc(this, PlayerProperty::DCP_DECODE_REDUCTION);
1391
1392         if (reduction == _dcp_decode_reduction.load()) {
1393                 cc.abort();
1394                 return;
1395         }
1396
1397         _dcp_decode_reduction = reduction;
1398         setup_pieces();
1399 }
1400
1401
1402 optional<DCPTime>
1403 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
1404 {
1405         boost::mutex::scoped_lock lm (_mutex);
1406
1407         for (auto i: _pieces) {
1408                 if (i->content == content) {
1409                         return content_time_to_dcp (i, t);
1410                 }
1411         }
1412
1413         /* We couldn't find this content; perhaps things are being changed over */
1414         return {};
1415 }
1416
1417
1418 optional<ContentTime>
1419 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
1420 {
1421         boost::mutex::scoped_lock lm (_mutex);
1422
1423         for (auto i: _pieces) {
1424                 if (i->content == content) {
1425                         return dcp_to_content_time (i, t);
1426                 }
1427         }
1428
1429         /* We couldn't find this content; perhaps things are being changed over */
1430         return {};
1431 }
1432
1433
1434 shared_ptr<const Playlist>
1435 Player::playlist () const
1436 {
1437         auto film = _film.lock();
1438         if (!film) {
1439                 return {};
1440         }
1441
1442         return _playlist ? _playlist : film->playlist();
1443 }
1444
1445
1446 void
1447 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1448 {
1449         if (_suspended) {
1450                 return;
1451         }
1452
1453         auto film = _film.lock();
1454         DCPOMATIC_ASSERT(film);
1455
1456         auto piece = weak_piece.lock ();
1457         DCPOMATIC_ASSERT (piece);
1458
1459         auto const vfr = film->video_frame_rate();
1460
1461         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1462         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(film))) {
1463                 return;
1464         }
1465
1466         Atmos (data.data, dcp_time, data.metadata);
1467 }
1468
1469
/** Emit the Change signal for a property of this Player.
 *  @param type Type of change (e.g. pending/done/cancelled).
 *  @param property Property that changed (a PlayerProperty value).
 *  NOTE(review): the final `false' argument's meaning is defined by the Change
 *  signal's declaration (not visible here) — presumably "not frequent"; confirm.
 */
void
Player::signal_change(ChangeType type, int property)
{
	Change(type, property, false);
}
1475