Add some missing change signals in the Player.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "render_text.h"
51 #include "shuffler.h"
52 #include "text_content.h"
53 #include "text_decoder.h"
54 #include "timer.h"
55 #include "video_decoder.h"
56 #include <dcp/reel.h>
57 #include <dcp/reel_closed_caption_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_sound_asset.h>
60 #include <dcp/reel_subtitle_asset.h>
61 #include <algorithm>
62 #include <iostream>
63 #include <stdint.h>
64
65 #include "i18n.h"
66
67
68 using std::copy;
69 using std::cout;
70 using std::dynamic_pointer_cast;
71 using std::list;
72 using std::make_pair;
73 using std::make_shared;
74 using std::make_shared;
75 using std::max;
76 using std::min;
77 using std::min;
78 using std::pair;
79 using std::shared_ptr;
80 using std::vector;
81 using std::weak_ptr;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Property codes passed with the Player's Change signal so that listeners can
   tell which aspect of the player's output has (or is about to be) changed.
   The 7xx range keeps them distinct from other property codes in the project.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
int const PlayerProperty::IGNORE_VIDEO = 706;
int const PlayerProperty::IGNORE_AUDIO = 707;
int const PlayerProperty::IGNORE_TEXT = 708;
int const PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES = 709;
int const PlayerProperty::PLAY_REFERENCED = 710;
101
102
/** Construct a Player which plays the whole of a Film.
 *  @param film Film to play.
 *  @param subtitle_alignment Alignment to use when positioning burnt-in subtitles.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}
118
119
/** Construct a Player which plays a given Playlist (rather than the whole Film).
 *  @param film Film that the playlist refers to.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
135
136
/** Shared second-stage construction: connect to film/playlist change signals,
 *  set up the initial container size, pieces and seek position.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	/* at_front ensures this handler runs before any others connected later */
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pretend the audio processor has just been set up so that _audio_processor is initialised */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
153
154
155 bool
156 have_video (shared_ptr<const Content> content)
157 {
158         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
159 }
160
161
162 bool
163 have_audio (shared_ptr<const Content> content)
164 {
165         return static_cast<bool>(content->audio) && content->can_be_played();
166 }
167
168
/** Rebuild the list of Pieces (content + decoder pairs) from the playlist,
 *  re-wiring all decoder output signals to this Player.  Existing decoders are
 *  re-used where the content is unchanged.  Also rebuilds the audio stream
 *  state map, the overlap-ignore periods and the black/silence fillers, and
 *  resets the next-emission times.
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);

	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use the old decoder for this content if there was one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			/* NOTE: this inner `i' shadows the loop variable above */
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Rebuild the per-audio-stream state, starting each at its content's position */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Only in-use 2D video takes part in the overlap calculation below */
	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					/* NOTE(review): each later overlapping piece overwrites ignore_video, so
					   only the last one examined is kept — confirm this is intended when
					   more than two pieces overlap.
					*/
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Fillers for periods with no video / no audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}
308
309
/** Handler for a change in some piece of content in our playlist.
 *  @param type Stage of the change (PENDING / DONE / CANCELLED).
 *  @param property Code of the property that changed.
 *  @param frequent true if this change is likely to happen often (e.g. during a drag).
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Crop changes do not need new pieces; just update the metadata of
			   any video frames we are holding in the delay queue.
			*/
			for (auto const& i: _delay) {
				i.first->reset_metadata(_film, _video_container_size);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Pass the change on to our own listeners (the butler hears this first; see construct()) */
	Change (type, property, frequent);
}
338
339
/** Set the size of the container that output video frames will be scaled into,
 *  re-making the black filler image to match.  Emits a VIDEO_CONTAINER_SIZE
 *  Change signal (via the ChangeSignaller's destructor) unless the size is
 *  unchanged, in which case the signal is aborted.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	ChangeSignaller<Player, int> cc(this, PlayerProperty::VIDEO_CONTAINER_SIZE);

	if (s == _video_container_size) {
		cc.abort();
		return;
	}

	_video_container_size = s;

	{
		boost::mutex::scoped_lock lm(_black_image_mutex);
		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
		_black_image->make_black ();
	}
}
358
359
/** Handler for a change to the playlist itself (content added/removed/moved).
 *  Re-builds our pieces once the change is complete and proxies the signal
 *  on to our own listeners as a PLAYLIST change.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
368
369
370 void
371 Player::film_change (ChangeType type, Film::Property p)
372 {
373         /* Here we should notice Film properties that affect our output, and
374            alert listeners that our output now would be different to how it was
375            last time we were run.
376         */
377
378         if (p == Film::Property::CONTAINER) {
379                 Change (type, PlayerProperty::FILM_CONTAINER, false);
380         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
381                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
382                    so we need new pieces here.
383                 */
384                 if (type == ChangeType::DONE) {
385                         setup_pieces ();
386                 }
387                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
388         } else if (p == Film::Property::AUDIO_PROCESSOR) {
389                 if (type == ChangeType::DONE && _film->audio_processor ()) {
390                         boost::mutex::scoped_lock lm (_mutex);
391                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
392                 }
393         } else if (p == Film::Property::AUDIO_CHANNELS) {
394                 if (type == ChangeType::DONE) {
395                         boost::mutex::scoped_lock lm (_mutex);
396                         _audio_merger.clear ();
397                 }
398         }
399 }
400
401
402 shared_ptr<PlayerVideo>
403 Player::black_player_video_frame (Eyes eyes) const
404 {
405         boost::mutex::scoped_lock lm(_black_image_mutex);
406
407         return std::make_shared<PlayerVideo> (
408                 std::make_shared<const RawImageProxy>(_black_image),
409                 Crop(),
410                 optional<double>(),
411                 _video_container_size,
412                 _video_container_size,
413                 eyes,
414                 Part::WHOLE,
415                 PresetColourConversion::all().front().conversion,
416                 VideoRange::FULL,
417                 std::weak_ptr<Content>(),
418                 boost::optional<Frame>(),
419                 false
420         );
421 }
422
423
/** Convert a DCP time to a frame index in some piece of video content.
 *  @param piece Piece containing the video content.
 *  @param t DCP time.
 *  @return Frame index within the content, clamped to the content's trimmed extent.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
440
441
442 DCPTime
443 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
444 {
445         /* See comment in dcp_to_content_video */
446         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
447         return d + piece->content->position();
448 }
449
450
/** Convert a DCP time to a frame index in some piece's resampled audio.
 *  @param piece Piece containing the audio content.
 *  @param t DCP time.
 *  @return Audio frame index at the film's audio frame rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}
459
460
461 DCPTime
462 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
463 {
464         /* See comment in dcp_to_content_video */
465         return DCPTime::from_frames (f, _film->audio_frame_rate())
466                 - DCPTime (piece->content->trim_start(), piece->frc)
467                 + piece->content->position();
468 }
469
470
/** Convert a DCP time to a time within some piece of content.
 *  @param piece Piece to use.
 *  @param t DCP time.
 *  @return ContentTime, clamped to the content's trimmed extent.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
478
479
480 DCPTime
481 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
482 {
483         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
484 }
485
486
487 vector<shared_ptr<Font>>
488 Player::get_subtitle_fonts ()
489 {
490         boost::mutex::scoped_lock lm (_mutex);
491
492         vector<shared_ptr<Font>> fonts;
493         for (auto piece: _pieces) {
494                 for (auto text: piece->content->text) {
495                         auto text_fonts = text->fonts();
496                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
497                 }
498         }
499
500         return fonts;
501 }
502
503
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	/* ChangeSignaller emits an IGNORE_VIDEO Change signal when it goes out of scope */
	ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_VIDEO);
	_ignore_video = true;
	setup_pieces();
}
512
513
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	/* ChangeSignaller emits an IGNORE_AUDIO Change signal when it goes out of scope */
	ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_AUDIO);
	_ignore_audio = true;
	setup_pieces();
}
521
522
/** Set this player never to produce any text data */
void
Player::set_ignore_text ()
{
	/* ChangeSignaller emits an IGNORE_TEXT Change signal when it goes out of scope */
	ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_TEXT);
	_ignore_text = true;
	setup_pieces();
}
530
531
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	/* ChangeSignaller emits an ALWAYS_BURN_OPEN_SUBTITLES Change signal when it goes out of scope.
	   No setup_pieces() is needed here: this only affects how subtitles are rendered, not the pieces.
	*/
	ChangeSignaller<Player, int> cc(this, PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES);
	_always_burn_open_subtitles = true;
}
539
540
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	/* NOTE(review): unlike the other set_* methods this emits no Change signal
	   (there is no PlayerProperty code for it) — confirm that is intended.
	*/
	_fast = true;
	setup_pieces();
}
548
549
/** Set the player to decode and play content which would otherwise be referenced
 *  from an existing DCP rather than re-encoded.
 */
void
Player::set_play_referenced ()
{
	/* ChangeSignaller emits a PLAY_REFERENCED Change signal when it goes out of scope */
	ChangeSignaller<Player, int> cc(this, PlayerProperty::PLAY_REFERENCED);
	_play_referenced = true;
	setup_pieces();
}
557
558
/** Ask the most-behind decoder (or black/silence filler) to emit some data,
 *  then push out any audio which is now complete.
 *  @return true if playback has finished (there is nothing left to emit).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length.load() == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing we will emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black/silent gap earlier than any content takes priority */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_next_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_next_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			/* NOTE(review): labs() takes long, which is 32-bit on some platforms
			   (e.g. 64-bit Windows) — consider llabs() for an int64_t argument.
			*/
			int64_t const error = labs(period.from.get() - _next_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_next_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.  First, though we choose only streams that are less than
	   ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
	   behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
	   that will never come, causing bugs like #2101.
	*/
	constexpr int ignore_streams_behind = 5;

	using state_pair = std::pair<AudioStreamPtr, StreamState>;

	/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
	auto latest_last_push_end = std::max_element(
		_stream_states.begin(),
		_stream_states.end(),
		[](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
		);

	if (latest_last_push_end != _stream_states.end()) {
		LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
	}

	/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
	/* (dereferencing latest_last_push_end here is safe: if _stream_states is empty this loop does not run) */
	std::map<AudioStreamPtr, StreamState> alive_stream_states;
	for (auto const& i: _stream_states) {
		if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
			alive_stream_states.insert(i);
		} else {
			LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
		}
	}

	auto pull_to = _playback_length.load();
	for (auto const& i: alive_stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_next_audio_time && i->second < *_next_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_next_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_next_audio_time && i->second > *_next_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush out anything still buffered */
		if (_shuffler) {
			_shuffler->flush ();
		}
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}

		/* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
		 * However, if we have L and R video files, and one is shorter than the other,
		 * the fill code in ::video mostly takes care of filling in the gaps.
		 * However, since it fills at the point when it knows there is more video coming
		 * at time t (so it should fill any gap up to t) it can't do anything right at the
		 * end.  This is particularly bad news if the last frame emitted is a LEFT
		 * eye, as the MXF writer will complain about the 3D sequence being wrong.
		 * Here's a hack to workaround that particular case.
		 */
		if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
			do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
		}
	}

	return done;
}
760
761
762 /** @return Open subtitles for the frame at the given time, converted to images */
763 optional<PositionImage>
764 Player::open_subtitles_for_frame (DCPTime time) const
765 {
766         list<PositionImage> captions;
767         int const vfr = _film->video_frame_rate();
768
769         for (
770                 auto j:
771                 _active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
772                 ) {
773
774                 /* Bitmap subtitles */
775                 for (auto i: j.bitmap) {
776                         if (!i.image) {
777                                 continue;
778                         }
779
780                         /* i.image will already have been scaled to fit _video_container_size */
781                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
782
783                         captions.push_back (
784                                 PositionImage (
785                                         i.image,
786                                         Position<int> (
787                                                 lrint(_video_container_size.load().width * i.rectangle.x),
788                                                 lrint(_video_container_size.load().height * i.rectangle.y)
789                                                 )
790                                         )
791                                 );
792                 }
793
794                 /* String subtitles (rendered to an image) */
795                 if (!j.string.empty()) {
796                         auto s = render_text(j.string, _video_container_size, time, vfr);
797                         copy (s.begin(), s.end(), back_inserter (captions));
798                 }
799         }
800
801         if (captions.empty()) {
802                 return {};
803         }
804
805         return merge (captions, _subtitle_alignment);
806 }
807
808
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	/* Handle a frame of video arriving from a decoder: discard it if it is out of
	   range, fill any gap between the last emitted frame and this one (repeating
	   the previous image, or black), then emit the new frame — repeated as the
	   frame-rate change requires.
	*/

	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame-rate conversion drops every other content frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any; it is repeated to fill */
			auto last = _last_video.find (weak_piece);
			if (_film->three_d()) {
				/* In 3D we must fill whole LEFT/RIGHT pairs, so track which eye we
				   are filling with as well as the time.
				*/
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Loop until we reach fill_to AND the eye sequence lines up with the
				   incoming frame's eye, so we never leave a dangling LEFT.
				*/
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Time only advances after the RIGHT eye of a pair */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill frame-by-frame with the last image, or black */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	/* Remember this frame so that it can be repeated for gap-filling later */
	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade (_film, video.frame),
		scale_for_display(
			content_video->scaled_size(_film->frame_size()),
			_video_container_size,
			_film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeating it if the frame-rate change requires (e.g. 24fps
	   content in a 48fps DCP), but never beyond the end of the content.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}
928
929
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle a block of audio arriving from a decoder: trim it to the content's
	   period, apply gain/fade, remap channels, run any audio processor, then push
	   it into the merger.
	*/

	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* Block straddles the start: cut off the part before the content begins */
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* Block straddles the end: keep only the part before the content ends */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Take a copy so we don't modify the buffers we were given */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record how far this stream has got, for pass()'s bookkeeping */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1018
1019
void
Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
{
	/* Handle the start of a set of bitmap subtitles from a decoder: apply the
	   content's offset/scale settings, scale the images to the container size and
	   register them as active from the corresponding DCP time.
	*/

	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	for (auto& sub: subtitle.subs)
	{
		/* Apply content's subtitle offsets */
		sub.rectangle.x += content->x_offset ();
		sub.rectangle.y += content->y_offset ();

		/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
		sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
		sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);

		/* Apply content's subtitle scale */
		sub.rectangle.width *= content->x_scale ();
		sub.rectangle.height *= content->y_scale ();

		auto image = sub.image;

		/* We will scale the subtitle up to fit _video_container_size */
		int const width = sub.rectangle.width * _video_container_size.load().width;
		int const height = sub.rectangle.height * _video_container_size.load().height;
		if (width == 0 || height == 0) {
			/* NOTE(review): this `return' abandons the whole set of subtitles
			   (including the add_from below) when a single one is degenerate;
			   `continue' (skipping just this one) may be the intent — confirm.
			*/
			return;
		}

		dcp::Size scaled_size (width, height);
		ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
	}

	DCPTime from(content_time_to_dcp(piece, subtitle.from()));
	_active_texts[content->type()].add_from(weak_content, ps, from);
}
1064
1065
void
Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
{
	/* Handle the start of a set of string subtitles from a decoder: apply the
	   content's position/scale settings and register them as active from the
	   corresponding DCP time.
	*/

	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + content->x_offset());
		s.set_v_position (s.v_position() + content->y_offset());
		float const xs = content->x_scale();
		float const ys = content->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   1 / min(1/xs, 1/ys) is max(xs, ys), so e.g. if xs = ys = 0.5
		   we scale size by 0.5.  Any difference between xs and ys is
		   expressed by the aspect-ratio adjustment below.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (s);
	}

	_active_texts[content->type()].add_from(weak_content, ps, from);
}
1112
1113
void
Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
{
	/* Handle the end time of a subtitle from a decoder: close off the active text
	   and, if it is not being burnt in, emit it via the Text signal.
	*/

	if (_suspended) {
		return;
	}

	auto content = weak_content.lock ();
	if (!content) {
		return;
	}

	/* Ignore a stop for a subtitle we never saw start */
	if (!_active_texts[content->type()].have(weak_content)) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	DCPTime const dcp_to = content_time_to_dcp (piece, to);

	if (dcp_to > piece->content->end(_film)) {
		return;
	}

	/* Close the active text; `from' gives us the subtitle and its start time */
	auto from = _active_texts[content->type()].add_to(weak_content, dcp_to);

	/* Only emit the Text signal when the subtitle is to be passed through rather
	   than burnt into the image.
	*/
	bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (content->use() && !always && !content->burn()) {
		Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
	}
}
1148
1149
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the whole player to a DCP time.  An accurate seek must produce exactly
	   the frame at `time'; an inaccurate one may start a little early or late.
	*/

	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away all buffered/pending output from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	std::for_each(_active_texts.begin(), _active_texts.end(), [](ActiveText& a) { a.clear(); });

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	/* After an accurate seek we know exactly when the next video/audio must be;
	   after an inaccurate one we don't, so leave the times unset.
	*/
	if (accurate) {
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	_last_video.clear ();
}
1208
1209
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	/* Queue a frame for output, updating the expected time/eyes of the next frame.
	   Frames are buffered briefly in _delay before do_emit_video() sends them out.
	*/

	if (!_film->three_d()) {
		if (pv->eyes() == Eyes::LEFT) {
			/* Use left-eye images for both eyes... */
			pv->set_eyes (Eyes::BOTH);
		} else if (pv->eyes() == Eyes::RIGHT) {
			/* ...and discard the right */
			return;
		}
	}

	/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
	   player before the video that requires them.
	*/
	_delay.push_back (make_pair (pv, time));

	/* Time only advances once a complete frame (BOTH, or the RIGHT of a pair) is queued */
	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		_next_video_time = time + one_video_frame();
	}
	_next_video_eyes = increment_eyes (pv->eyes());

	/* Keep up to 3 frames buffered before emitting the oldest */
	if (_delay.size() < 3) {
		return;
	}

	auto to_do = _delay.front();
	_delay.pop_front();
	do_emit_video (to_do.first, to_do.second);
}
1241
1242
1243 void
1244 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1245 {
1246         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1247                 std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
1248         }
1249
1250         auto subtitles = open_subtitles_for_frame (time);
1251         if (subtitles) {
1252                 pv->set_text (subtitles.get ());
1253         }
1254
1255         Video (pv, time);
1256 }
1257
1258
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	/* Send a block of audio out via the Audio signal, checking that it follows on
	   contiguously from the last block we sent.
	*/

	/* Log if the assert below is about to fail */
	if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
	}

	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
	DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
	Audio (data, time, _film->audio_frame_rate());
	/* Remember where the next block must start */
	_next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
1272
1273
1274 void
1275 Player::fill_audio (DCPTimePeriod period)
1276 {
1277         if (period.from == period.to) {
1278                 return;
1279         }
1280
1281         DCPOMATIC_ASSERT (period.from < period.to);
1282
1283         DCPTime t = period.from;
1284         while (t < period.to) {
1285                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1286                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1287                 if (samples) {
1288                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1289                         silence->make_silent ();
1290                         emit_audio (silence, t);
1291                 }
1292                 t += block;
1293         }
1294 }
1295
1296
1297 DCPTime
1298 Player::one_video_frame () const
1299 {
1300         return DCPTime::from_frames (1, _film->video_frame_rate ());
1301 }
1302
1303
1304 pair<shared_ptr<AudioBuffers>, DCPTime>
1305 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1306 {
1307         auto const discard_time = discard_to - time;
1308         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1309         auto remaining_frames = audio->frames() - discard_frames;
1310         if (remaining_frames <= 0) {
1311                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1312         }
1313         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1314         return make_pair(cut, time + discard_time);
1315 }
1316
1317
1318 void
1319 Player::set_dcp_decode_reduction (optional<int> reduction)
1320 {
1321         ChangeSignaller<Player, int> cc(this, PlayerProperty::DCP_DECODE_REDUCTION);
1322
1323         if (reduction == _dcp_decode_reduction.load()) {
1324                 cc.abort();
1325                 return;
1326         }
1327
1328         _dcp_decode_reduction = reduction;
1329         setup_pieces();
1330 }
1331
1332
1333 optional<DCPTime>
1334 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
1335 {
1336         boost::mutex::scoped_lock lm (_mutex);
1337
1338         for (auto i: _pieces) {
1339                 if (i->content == content) {
1340                         return content_time_to_dcp (i, t);
1341                 }
1342         }
1343
1344         /* We couldn't find this content; perhaps things are being changed over */
1345         return {};
1346 }
1347
1348
1349 optional<ContentTime>
1350 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
1351 {
1352         boost::mutex::scoped_lock lm (_mutex);
1353
1354         for (auto i: _pieces) {
1355                 if (i->content == content) {
1356                         return dcp_to_content_time (i, t);
1357                 }
1358         }
1359
1360         /* We couldn't find this content; perhaps things are being changed over */
1361         return {};
1362 }
1363
1364
1365 shared_ptr<const Playlist>
1366 Player::playlist () const
1367 {
1368         return _playlist ? _playlist : _film->playlist();
1369 }
1370
1371
1372 void
1373 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1374 {
1375         if (_suspended) {
1376                 return;
1377         }
1378
1379         auto piece = weak_piece.lock ();
1380         DCPOMATIC_ASSERT (piece);
1381
1382         auto const vfr = _film->video_frame_rate();
1383
1384         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1385         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1386                 return;
1387         }
1388
1389         Atmos (data.data, dcp_time, data.metadata);
1390 }
1391
1392
void
Player::signal_change(ChangeType type, int property)
{
	/* Emit the Change signal for the given player property.  The third argument
	   appears to be a `frequent' flag (false here) — confirm against the signal's
	   declaration in player.h.
	*/
	Change(type, property, false);
}
1398