/*
    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
38 #include "frame_rate_change.h"
40 #include "image_decoder.h"
43 #include "maths_util.h"
46 #include "player_video.h"
49 #include "raw_image_proxy.h"
50 #include "render_text.h"
52 #include "text_content.h"
53 #include "text_decoder.h"
55 #include "video_decoder.h"
57 #include <dcp/reel_closed_caption_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_sound_asset.h>
60 #include <dcp/reel_subtitle_asset.h>
70 using std::dynamic_pointer_cast;
73 using std::make_shared;
74 using std::make_shared;
79 using std::shared_ptr;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
87 using namespace dcpomatic;
/* Identifiers for properties of the Player's state; these are passed to the
 * Change signal so observers can tell what kind of change happened.
 */
90 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
91 int const PlayerProperty::PLAYLIST = 701;
92 int const PlayerProperty::FILM_CONTAINER = 702;
93 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
94 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
95 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player for the whole of the given film's own playlist.
 * @param film Film to play.
 * @param subtitle_alignment Alignment used when burnt-in subtitle images are merged.
 */
98 Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
101 , _ignore_video(false)
102 , _ignore_audio(false)
103 , _ignore_text(false)
104 , _always_burn_open_subtitles(false)
106 , _tolerant (film->tolerant())
107 , _play_referenced(false)
108 , _audio_merger (_film->audio_frame_rate())
109 , _subtitle_alignment (subtitle_alignment)
/* Construct a Player for a specific playlist (rather than the film's own). */
115 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
117 , _playlist (playlist_)
119 , _ignore_video(false)
120 , _ignore_audio(false)
121 , _ignore_text(false)
122 , _always_burn_open_subtitles(false)
124 , _tolerant (film->tolerant())
125 , _play_referenced(false)
126 , _audio_merger (_film->audio_frame_rate())
/* Shared construction: wire up change signals from the film and playlist,
 * initialise derived state, then seek accurately to the start so all
 * decoders begin in a known state.
 */
135 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
136 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front ensures our handler runs before other observers of the playlist's Change signal */
139 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
140 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
141 set_video_container_size (_film->frame_size ());
/* Pick up any audio processor the film already has configured */
143 film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
146 seek (DCPTime (), true);
/* Predicate: true if this content has video which is in use and playable */
151 have_video (shared_ptr<const Content> content)
153 return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
/* Predicate: true if this content has audio and is playable */
158 have_audio (shared_ptr<const Content> content)
160 return static_cast<bool>(content->audio) && content->can_be_played();
/* Rebuild the list of Pieces (content + decoder + frame-rate-change triples)
 * from the current playlist.  Called on construction and whenever the playlist
 * or film changes in a way that invalidates the existing pieces.  Holds _mutex
 * throughout.
 */
165 Player::setup_pieces ()
167 boost::mutex::scoped_lock lm (_mutex);
169 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so their decoders can be re-used below where possible */
171 auto old_pieces = _pieces;
174 auto playlist_content = playlist()->content();
/* Is there any 3D (left/right-eye) video in the playlist? */
175 bool const have_threed = std::any_of(
176 playlist_content.begin(),
177 playlist_content.end(),
178 [](shared_ptr<const Content> c) {
179 return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
184 _shuffler.reset(new Shuffler());
185 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
188 for (auto i: playlist()->content()) {
/* Skip content whose files are missing */
190 if (!i->paths_valid ()) {
194 if (_ignore_video && _ignore_audio && i->text.empty()) {
195 /* We're only interested in text and this content has none */
/* Re-use the old decoder for this content if we had one, to avoid losing its state */
199 shared_ptr<Decoder> old_decoder;
200 for (auto j: old_pieces) {
201 if (j->content == i) {
202 old_decoder = j->decoder;
207 auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
208 DCPOMATIC_ASSERT (decoder);
210 FrameRateChange frc (_film, i);
212 if (decoder->video && _ignore_video) {
213 decoder->video->set_ignore (true);
216 if (decoder->audio && _ignore_audio) {
217 decoder->audio->set_ignore (true);
221 for (auto i: decoder->text) {
222 i->set_ignore (true);
/* DCP decoders need to know whether referenced assets should actually be decoded */
226 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
228 dcp->set_decode_referenced (_play_referenced);
229 if (_play_referenced) {
230 dcp->set_forced_reduction (_dcp_decode_reduction);
234 auto piece = make_shared<Piece>(i, decoder, frc);
235 _pieces.push_back (piece);
/* Connect the decoder's emission signals through to our handlers */
237 if (decoder->video) {
239 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
240 decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
242 decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
246 if (decoder->audio) {
247 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
250 auto j = decoder->text.begin();
252 while (j != decoder->text.end()) {
253 (*j)->BitmapStart.connect (
254 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
256 (*j)->PlainStart.connect (
257 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
260 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
266 if (decoder->atmos) {
267 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Record, per audio stream, which piece it belongs to and where it starts */
271 _stream_states.clear ();
272 for (auto i: _pieces) {
273 if (i->content->audio) {
274 for (auto j: i->content->audio->streams()) {
275 _stream_states[j] = StreamState (i, i->content->position ());
/* A piece's video is ignored wherever later in-use 2D video overlaps it */
280 auto ignore_overlap = [](shared_ptr<VideoContent> v) {
281 return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
284 for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
285 if (ignore_overlap((*i)->content->video)) {
286 /* Look for content later in the content list with in-use video that overlaps this */
287 auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
288 for (auto j = std::next(i); j != _pieces.end(); ++j) {
289 if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
290 (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
/* _black and _silent track the periods with no video / no audio that we must fill */
296 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
297 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
/* Reset emission bookkeeping */
299 _next_video_time = boost::none;
300 _next_video_eyes = Eyes::BOTH;
301 _next_audio_time = boost::none;
/* Handle a change to some property of a piece of content in the playlist.
 * PENDING suspends pass()/seek() until the change completes; DONE rebuilds
 * the pieces; CANCELLED resumes.  The change is then re-emitted to our own
 * observers via the Change signal.
 */
306 Player::playlist_content_change (ChangeType type, int property, bool frequent)
308 if (property == VideoContentProperty::CROP) {
309 if (type == ChangeType::DONE) {
310 boost::mutex::scoped_lock lm (_mutex);
/* Crop changed: update the metadata of any video frames queued in _delay */
311 for (auto const& i: _delay) {
312 i.first->reset_metadata(_film, _video_container_size);
316 if (type == ChangeType::PENDING) {
317 /* The player content is probably about to change, so we can't carry on
318 until that has happened and we've rebuilt our pieces. Stop pass()
319 and seek() from working until then.
322 } else if (type == ChangeType::DONE) {
323 /* A change in our content has gone through. Re-build our pieces. */
326 } else if (type == ChangeType::CANCELLED) {
/* Proxy the change notification on to our own observers */
331 Change (type, property, frequent);
/* Set the size of the "container" into which video will be scaled, emitting
 * PENDING/DONE (or CANCELLED when the size is unchanged) around the change.
 */
336 Player::set_video_container_size (dcp::Size s)
338 Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* No-op if the size is unchanged */
340 if (s == _video_container_size) {
341 Change(ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
345 _video_container_size = s;
/* Rebuild the cached black frame at the new size; guarded by its own mutex
   as it is also read by black_player_video_frame() */
348 boost::mutex::scoped_lock lm(_black_image_mutex);
349 _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
350 _black_image->make_black ();
353 Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a wholesale change to the playlist: rebuild pieces when the change
 * has completed, then notify our observers.
 */
358 Player::playlist_change (ChangeType type)
360 if (type == ChangeType::DONE) {
363 Change (type, PlayerProperty::PLAYLIST, false);
/* Handle a change to a property of the Film, translating it into the
 * corresponding PlayerProperty notification and updating any dependent state.
 */
368 Player::film_change (ChangeType type, Film::Property p)
370 /* Here we should notice Film properties that affect our output, and
371 alert listeners that our output now would be different to how it was
372 last time we were run.
375 if (p == Film::Property::CONTAINER) {
376 Change (type, PlayerProperty::FILM_CONTAINER, false);
377 } else if (p == Film::Property::VIDEO_FRAME_RATE) {
378 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
379 so we need new pieces here.
381 if (type == ChangeType::DONE) {
384 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
385 } else if (p == Film::Property::AUDIO_PROCESSOR) {
/* (Re-)create our copy of the film's audio processor at the film's audio rate */
386 if (type == ChangeType::DONE && _film->audio_processor ()) {
387 boost::mutex::scoped_lock lm (_mutex);
388 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
390 } else if (p == Film::Property::AUDIO_CHANNELS) {
/* Channel count changed: any merged-but-unemitted audio is now invalid */
391 if (type == ChangeType::DONE) {
392 boost::mutex::scoped_lock lm (_mutex);
393 _audio_merger.clear ();
/** @return A black PlayerVideo at the current container size, for the given eyes. */
399 shared_ptr<PlayerVideo>
400 Player::black_player_video_frame (Eyes eyes) const
/* _black_image is rebuilt by set_video_container_size(); lock while we wrap it */
402 boost::mutex::scoped_lock lm(_black_image_mutex);
404 return std::make_shared<PlayerVideo> (
405 std::make_shared<const RawImageProxy>(_black_image),
408 _video_container_size,
409 _video_container_size,
412 PresetColourConversion::all().front().conversion,
/* No originating content or frame for a synthesised black frame */
414 std::weak_ptr<Content>(),
415 boost::optional<Frame>(),
/* Convert a DCP time to a video frame index within the given piece's content,
 * accounting for the piece's position, trim and frame-rate change.
 */
422 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
424 auto s = t - piece->content->position ();
425 s = min (piece->content->length_after_trim(_film), s);
426 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
428 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
429 then convert that ContentTime to frames at the content's rate. However this fails for
430 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
431 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
433 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
435 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content video frame index to a DCP time. */
440 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
442 /* See comment in dcp_to_content_video */
443 auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
444 return d + piece->content->position();
/* Convert a DCP time to an audio frame index at the film's audio rate,
 * within the given piece (accounting for position and trim).
 */
449 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
451 auto s = t - piece->content->position ();
452 s = min (piece->content->length_after_trim(_film), s);
453 /* See notes in dcp_to_content_video */
454 return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
/* Inverse of dcp_to_resampled_audio: convert a resampled audio frame index to a DCP time. */
459 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
461 /* See comment in dcp_to_content_video */
462 return DCPTime::from_frames (f, _film->audio_frame_rate())
463 - DCPTime (piece->content->trim_start(), piece->frc)
464 + piece->content->position();
/* Convert a DCP time to a ContentTime within the given piece, clamped to the
 * piece's trimmed length and never before the trimmed start.
 */
469 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
471 auto s = t - piece->content->position ();
472 s = min (piece->content->length_after_trim(_film), s);
473 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within the given piece to a DCP time, clamped to be non-negative. */
478 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
480 return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every text (subtitle/caption) in every piece.
 * The returned list may contain duplicates.
 */
484 vector<shared_ptr<Font>>
485 Player::get_subtitle_fonts ()
487 boost::mutex::scoped_lock lm (_mutex);
489 vector<shared_ptr<Font>> fonts;
490 for (auto piece: _pieces) {
491 for (auto text: piece->content->text) {
492 auto text_fonts = text->fonts();
493 copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
501 /** Set this player never to produce any video data */
503 Player::set_ignore_video ()
505 _ignore_video = true;
/** Set this player never to produce any audio data */
511 Player::set_ignore_audio ()
513 _ignore_audio = true;
/** Set this player never to produce any text (subtitle/caption) data */
519 Player::set_ignore_text ()
526 /** Set the player to always burn open texts into the image regardless of the content settings */
528 Player::set_always_burn_open_subtitles ()
530 _always_burn_open_subtitles = true;
534 /** Sets up the player to be faster, possibly at the expense of quality */
/** Set the player to decode referenced DCP content rather than skipping it */
544 Player::set_play_referenced ()
546 _play_referenced = true;
/* NOTE(review): the signature line is not visible in this chunk, but from the
 * LOG_DEBUG_PLAYER messages and the decoder->pass() call this is evidently
 * Player::pass(): make some progress — ask the most-behind decoder (or the
 * black/silence filler) to emit data, then flush any audio and delayed video
 * that is now ready.  TODO confirm signature against the full file.
 */
554 boost::mutex::scoped_lock lm (_mutex);
557 /* We can't pass in this state */
558 LOG_DEBUG_PLAYER_NC ("Player is suspended");
562 if (_playback_length.load() == DCPTime()) {
563 /* Special; just give one black frame */
564 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
568 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
570 shared_ptr<Piece> earliest_content;
571 optional<DCPTime> earliest_time;
573 for (auto i: _pieces) {
578 auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
579 if (t > i->content->end(_film)) {
583 /* Given two choices at the same time, pick the one with texts so we see it before
586 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
588 earliest_content = i;
602 if (earliest_content) {
/* A black/silent gap earlier than any content takes priority */
606 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
607 earliest_time = _black.position ();
611 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
612 earliest_time = _silent.position ();
/* CONTENT case: pump the chosen piece's decoder */
619 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
620 earliest_content->done = earliest_content->decoder->pass ();
621 auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
622 if (dcp && !_play_referenced && dcp->reference_audio()) {
623 /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
624 to `hide' the fact that no audio was emitted during the referenced DCP (though
625 we need to behave as though it was).
627 _next_audio_time = dcp->end (_film);
/* BLACK case: emit one black frame to fill a video gap */
632 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
633 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
634 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: emit up to one frame's worth of silence to fill an audio gap */
638 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
639 DCPTimePeriod period (_silent.period_at_position());
640 if (_next_audio_time) {
641 /* Sometimes the thing that happened last finishes fractionally before
642 or after this silence. Bodge the start time of the silence to fix it.
643 I think this is nothing to worry about since we will just add or
644 remove a little silence at the end of some content.
646 int64_t const error = labs(period.from.get() - _next_audio_time->get());
647 /* Let's not worry about less than a frame at 24fps */
648 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
649 if (error >= too_much_error) {
650 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
652 DCPOMATIC_ASSERT (error < too_much_error);
653 period.from = *_next_audio_time;
655 if (period.duration() > one_video_frame()) {
656 period.to = period.from + one_video_frame();
659 _silent.set_position (period.to);
667 /* Emit any audio that is ready */
669 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
670 of our streams, or the position of the _silent. First, though we choose only streams that are less than
671 ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
672 behind it has finished). This is so that we don't withhold audio indefinitely awaiting data from a stream
673 that will never come, causing bugs like #2101.
675 constexpr int ignore_streams_behind = 5;
677 using state_pair = std::pair<AudioStreamPtr, StreamState>;
679 /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
680 auto latest_last_push_end = std::max_element(
681 _stream_states.begin(),
682 _stream_states.end(),
683 [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
686 if (latest_last_push_end != _stream_states.end()) {
687 LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
690 /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
691 std::map<AudioStreamPtr, StreamState> alive_stream_states;
692 for (auto const& i: _stream_states) {
693 if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
694 alive_stream_states.insert(i);
696 LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
/* pull_to = earliest point before which all live streams have definitely pushed their audio */
700 auto pull_to = _playback_length.load();
701 for (auto const& i: alive_stream_states) {
702 if (!i.second.piece->done && i.second.last_push_end < pull_to) {
703 pull_to = i.second.last_push_end;
706 if (!_silent.done() && _silent.position() < pull_to) {
707 pull_to = _silent.position();
710 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
711 auto audio = _audio_merger.pull (pull_to);
712 for (auto i = audio.begin(); i != audio.end(); ++i) {
713 if (_next_audio_time && i->second < *_next_audio_time) {
714 /* This new data comes before the last we emitted (or the last seek); discard it */
715 auto cut = discard_audio (i->first, i->second, *_next_audio_time);
720 } else if (_next_audio_time && i->second > *_next_audio_time) {
721 /* There's a gap between this data and the last we emitted; fill with silence */
722 fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
725 emit_audio (i->first, i->second);
/* Flush any video frames held in the _delay queue */
732 for (auto const& i: _delay) {
733 do_emit_video(i.first, i.second);
736 /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
737 * However, if we have L and R video files, and one is shorter than the other,
738 * the fill code in ::video mostly takes care of filling in the gaps.
739 * However, since it fills at the point when it knows there is more video coming
740 * at time t (so it should fill any gap up to t) it can't do anything right at the
741 * end. This is particularly bad news if the last frame emitted is a LEFT
742 * eye, as the MXF writer will complain about the 3D sequence being wrong.
743 * Here's a hack to workaround that particular case.
745 if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
746 do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
754 /** @return Open subtitles for the frame at the given time, converted to images */
755 optional<PositionImage>
756 Player::open_subtitles_for_frame (DCPTime time) const
758 list<PositionImage> captions;
759 int const vfr = _film->video_frame_rate();
/* Consider active open subtitles that should be burnt in during this one-frame period */
763 _active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
766 /* Bitmap subtitles */
767 for (auto i: j.bitmap) {
772 /* i.image will already have been scaled to fit _video_container_size */
/* Rectangle coordinates are fractions of the container; convert to pixels */
773 dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
779 lrint(_video_container_size.load().width * i.rectangle.x),
780 lrint(_video_container_size.load().height * i.rectangle.y)
786 /* String subtitles (rendered to an image) */
787 if (!j.string.empty()) {
788 auto s = render_text(j.string, _video_container_size, time, vfr);
789 copy (s.begin(), s.end(), back_inserter (captions));
/* Nothing to burn for this frame */
793 if (captions.empty()) {
/* Merge all caption images into a single positioned image */
797 return merge (captions, _subtitle_alignment);
/* Handler for video data emitted by a piece's decoder (possibly via the
 * Shuffler in the 3D case).  Discards out-of-range frames, fills any gap
 * since the last emission, then caches and emits this frame (repeating it
 * if the frame-rate change requires).
 */
802 Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
808 auto piece = weak_piece.lock ();
813 if (!piece->content->video->use()) {
817 FrameRateChange frc (_film, piece->content);
/* When skipping (content rate > DCP rate) drop every other frame */
818 if (frc.skip && (video.frame % 2) == 1) {
822 /* Time of the first frame we will emit */
823 DCPTime const time = content_video_to_dcp (piece, video.frame);
824 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
826 /* Discard if it's before the content's period or the last accurate seek. We can't discard
827 if it's after the content's period here as in that case we still need to fill any gap between
828 `now' and the end of the content's period.
830 if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
/* This piece's video is overlapped by later content here; skip it */
834 if (piece->ignore_video && piece->ignore_video->contains(time)) {
838 /* Fill gaps that we discover now that we have some video which needs to be emitted.
839 This is where we need to fill to.
841 DCPTime fill_to = min (time, piece->content->end(_film));
843 if (_next_video_time) {
844 DCPTime fill_from = max (*_next_video_time, piece->content->position());
846 /* Fill if we have more than half a frame to do */
847 if ((fill_to - fill_from) > one_video_frame() / 2) {
848 auto last = _last_video.find (weak_piece);
/* 3D: fill eye-by-eye so the L/R sequence stays valid */
849 if (_film->three_d()) {
850 auto fill_to_eyes = video.eyes;
851 if (fill_to_eyes == Eyes::BOTH) {
852 fill_to_eyes = Eyes::LEFT;
854 if (fill_to == piece->content->end(_film)) {
855 /* Don't fill after the end of the content */
856 fill_to_eyes = Eyes::LEFT;
859 auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
860 if (eyes == Eyes::BOTH) {
863 while (j < fill_to || eyes != fill_to_eyes) {
864 if (last != _last_video.end()) {
865 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
866 auto copy = last->second->shallow_copy();
867 copy->set_eyes (eyes);
868 emit_video (copy, j);
870 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
871 emit_video (black_player_video_frame(eyes), j);
/* Advance the time only after emitting the RIGHT eye of a pair */
873 if (eyes == Eyes::RIGHT) {
874 j += one_video_frame();
876 eyes = increment_eyes (eyes);
/* 2D: fill frame-by-frame with the last frame seen, or black */
879 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
880 if (last != _last_video.end()) {
881 emit_video (last->second, j);
883 emit_video (black_player_video_frame(Eyes::BOTH), j);
/* Build the PlayerVideo for this frame and remember it for gap-filling */
890 auto const content_video = piece->content->video;
892 _last_video[weak_piece] = std::make_shared<PlayerVideo>(
894 content_video->actual_crop(),
895 content_video->fade (_film, video.frame),
897 content_video->scaled_size(_film->frame_size()),
898 _video_container_size,
900 content_video->pixel_quanta()
902 _video_container_size,
905 content_video->colour_conversion(),
906 content_video->range(),
/* Emit the frame, repeated as required by the frame-rate change */
913 for (int i = 0; i < frc.repeat; ++i) {
914 if (t < piece->content->end(_film)) {
915 emit_video (_last_video[weak_piece], t);
917 t += one_video_frame ();
/* Handler for audio data emitted by a piece's decoder.  Trims the block to
 * the content's period, applies gain/fade, remaps channels, runs any audio
 * processor and pushes the result into the merger.
 */
923 Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
929 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
931 auto piece = weak_piece.lock ();
936 auto content = piece->content->audio;
937 DCPOMATIC_ASSERT (content);
939 int const rfr = content->resampled_frame_rate (_film);
941 /* Compute time in the DCP */
942 auto time = resampled_audio_to_dcp (piece, content_audio.frame);
943 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
945 /* And the end of this block in the DCP */
946 auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
948 /* Remove anything that comes before the start or after the end of the content */
949 if (time < piece->content->position()) {
950 auto cut = discard_audio (content_audio.audio, time, piece->content->position());
952 /* This audio is entirely discarded */
955 content_audio.audio = cut.first;
957 } else if (time > piece->content->end(_film)) {
960 } else if (end > piece->content->end(_film)) {
/* Truncate the block so that it finishes at the content's end */
961 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
962 if (remaining_frames == 0) {
965 content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
968 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain and fade: work on a copy so the decoder's buffers are untouched */
972 auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
973 if (content->gain() != 0 || !fade_coeffs.empty()) {
974 auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
975 if (!fade_coeffs.empty()) {
976 /* Apply both fade and gain */
977 DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
978 auto const channels = gain_buffers->channels();
979 auto const frames = fade_coeffs.size();
980 auto data = gain_buffers->data();
981 auto const gain = db_to_linear (content->gain());
982 for (auto channel = 0; channel < channels; ++channel) {
983 for (auto frame = 0U; frame < frames; ++frame) {
984 data[channel][frame] *= gain * fade_coeffs[frame];
988 /* Just apply gain */
989 gain_buffers->apply_gain (content->gain());
991 content_audio.audio = gain_buffers;
/* Remap the stream's channels into the film's channel layout */
996 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
1000 if (_audio_processor) {
1001 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and record how far this stream has got */
1006 _audio_merger.push (content_audio.audio, time);
1007 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
1008 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of a bitmap (image-based) subtitle: apply the
 * content's offset/scale, rescale the image to the container size, and add
 * it to the active texts.
 */
1013 Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
1019 auto piece = weak_piece.lock ();
1020 auto content = weak_content.lock ();
1021 if (!piece || !content) {
1026 for (auto& sub: subtitle.subs)
1028 /* Apply content's subtitle offsets */
1029 sub.rectangle.x += content->x_offset ();
1030 sub.rectangle.y += content->y_offset ();
1032 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1033 sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
1034 sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
1036 /* Apply content's subtitle scale */
1037 sub.rectangle.width *= content->x_scale ();
1038 sub.rectangle.height *= content->y_scale ();
1040 auto image = sub.image;
1042 /* We will scale the subtitle up to fit _video_container_size */
1043 int const width = sub.rectangle.width * _video_container_size.load().width;
1044 int const height = sub.rectangle.height * _video_container_size.load().height;
/* A zero-sized subtitle cannot be rendered; skip it */
1045 if (width == 0 || height == 0) {
1049 dcp::Size scaled_size (width, height);
1050 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
/* Register the subtitle as active from its DCP start time */
1053 DCPTime from(content_time_to_dcp(piece, subtitle.from()));
1054 _active_texts[content->type()].add_from(weak_content, ps, from);
/* Handler for the start of a plain (string-based) subtitle: apply the
 * content's position offsets and scaling, then add it to the active texts.
 */
1059 Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
1065 auto piece = weak_piece.lock ();
1066 auto content = weak_content.lock ();
1067 if (!piece || !content) {
1072 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles starting after the content's end */
1074 if (from > piece->content->end(_film)) {
1078 for (auto s: subtitle.subs) {
1079 s.set_h_position (s.h_position() + content->x_offset());
1080 s.set_v_position (s.v_position() + content->y_offset());
1081 float const xs = content->x_scale();
1082 float const ys = content->y_scale();
1083 float size = s.size();
1085 /* Adjust size to express the common part of the scaling;
1086 e.g. if xs = ys = 0.5 we scale size by 2.
1088 if (xs > 1e-5 && ys > 1e-5) {
1089 size *= 1 / min (1 / xs, 1 / ys);
1093 /* Then express aspect ratio changes */
1094 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1095 s.set_aspect_adjust (xs / ys);
/* dcp::Time with editable-units-per-second of 1000 (i.e. milliseconds) */
1098 s.set_in (dcp::Time(from.seconds(), 1000));
1099 ps.string.push_back (s);
1102 _active_texts[content->type()].add_from(weak_content, ps, from);
/* Handler for the end of a subtitle: close the matching active text and, if
 * the subtitle is not being burnt in, emit it via the Text signal.
 */
1107 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1113 auto content = weak_content.lock ();
/* Nothing to stop if we never saw this text start */
1118 if (!_active_texts[content->type()].have(weak_content)) {
1122 auto piece = weak_piece.lock ();
1127 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1129 if (dcp_to > piece->content->end(_film)) {
1133 auto from = _active_texts[content->type()].add_to(weak_content, dcp_to);
/* Only emit the text if it is in use and not burnt into the image */
1135 bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1136 if (content->use() && !always && !content->burn()) {
1137 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
/* Seek the player to the given DCP time.
 * @param time Target time.
 * @param accurate true to seek precisely to `time'; false to allow a nearby
 *        (e.g. keyframe) position.
 */
1143 Player::seek (DCPTime time, bool accurate)
1145 boost::mutex::scoped_lock lm (_mutex);
1146 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1149 /* We can't seek in this state */
/* Discard any pending state from before the seek */
1154 _shuffler->clear ();
1159 if (_audio_processor) {
1160 _audio_processor->flush ();
1163 _audio_merger.clear ();
1164 std::for_each(_active_texts.begin(), _active_texts.end(), [](ActiveText& a) { a.clear(); });
1166 for (auto i: _pieces) {
1167 if (time < i->content->position()) {
1168 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1169 we must seek this (following) content accurately, otherwise when we come to the end of the current
1170 content we may not start right at the beginning of the next, causing a gap (if the next content has
1171 been trimmed to a point between keyframes, or something).
1173 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1175 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1176 /* During; seek to position */
1177 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1180 /* After; this piece is done */
/* After an accurate seek we know exactly what should come next; otherwise we don't */
1186 _next_video_time = time;
1187 _next_video_eyes = Eyes::LEFT;
1188 _next_audio_time = time;
1190 _next_video_time = boost::none;
1191 _next_video_eyes = boost::none;
1192 _next_audio_time = boost::none;
1195 _black.set_position (time);
1196 _silent.set_position (time);
1198 _last_video.clear ();
/* Queue a video frame for emission.  In 2D, left-eye frames are promoted to
 * BOTH and right-eye frames dropped.  Frames pass through a small _delay
 * queue so that subtitles have time to arrive before the frames that need them.
 */
1203 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1205 if (!_film->three_d()) {
1206 if (pv->eyes() == Eyes::LEFT) {
1207 /* Use left-eye images for both eyes... */
1208 pv->set_eyes (Eyes::BOTH);
1209 } else if (pv->eyes() == Eyes::RIGHT) {
1210 /* ...and discard the right */
1215 /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1216 player before the video that requires them.
1218 _delay.push_back (make_pair (pv, time));
/* Advance the expected next video time/eyes */
1220 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1221 _next_video_time = time + one_video_frame();
1223 _next_video_eyes = increment_eyes (pv->eyes());
/* Hold up to three frames in the delay queue before emitting the oldest */
1225 if (_delay.size() < 3) {
1229 auto to_do = _delay.front();
1231 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: attach any open subtitles for this time and
 * expire active texts that have finished.
 */
1236 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Only expire texts once per frame-time (i.e. not on a lone LEFT eye) */
1238 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1239 std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
1242 auto subtitles = open_subtitles_for_frame (time);
1244 pv->set_text (subtitles.get ());
/* Emit a block of audio via the Audio signal and advance _next_audio_time.
 * Audio must be contiguous: `time' may differ from the expected next time by
 * at most one sample (asserted below).
 */
1252 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1254 /* Log if the assert below is about to fail */
1255 if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1256 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1259 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1260 DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1261 Audio (data, time, _film->audio_frame_rate());
1262 _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover the given period, in blocks of at most half a second. */
1267 Player::fill_audio (DCPTimePeriod period)
/* Empty period: nothing to do */
1269 if (period.from == period.to) {
1273 DCPOMATIC_ASSERT (period.from < period.to);
1275 DCPTime t = period.from;
1276 while (t < period.to) {
1277 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1278 Frame const samples = block.frames_round(_film->audio_frame_rate());
1280 auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1281 silence->make_silent ();
1282 emit_audio (silence, t);
/** @return The duration of one video frame at the film's video frame rate. */
1290 Player::one_video_frame () const
1292 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of `audio' (which starts at `time') that falls before
 * `discard_to'.
 * @return The remaining audio (null if it was all discarded) and its new
 *         start time.
 */
1296 pair<shared_ptr<AudioBuffers>, DCPTime>
1297 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1299 auto const discard_time = discard_to - time;
1300 auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1301 auto remaining_frames = audio->frames() - discard_frames;
1302 if (remaining_frames <= 0) {
1303 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1305 auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1306 return make_pair(cut, time + discard_time);
/* Set the resolution reduction used when decoding DCP content, emitting
 * PENDING/DONE (or CANCELLED if unchanged) around the change.
 */
1311 Player::set_dcp_decode_reduction (optional<int> reduction)
1313 Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1315 if (reduction == _dcp_decode_reduction.load()) {
1316 Change(ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1320 _dcp_decode_reduction = reduction;
1323 Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime in the given content to a DCP time,
 * looking the content up among our pieces.  Returns no value if the content
 * is not (or no longer) in the playlist.
 */
1328 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
1330 boost::mutex::scoped_lock lm (_mutex);
1332 for (auto i: _pieces) {
1333 if (i->content == content) {
1334 return content_time_to_dcp (i, t);
1338 /* We couldn't find this content; perhaps things are being changed over */
/* Public overload: convert a DCP time to a ContentTime in the given content.
 * Returns no value if the content is not (or no longer) in the playlist.
 */
1343 optional<ContentTime>
1344 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
1346 boost::mutex::scoped_lock lm (_mutex);
1348 for (auto i: _pieces) {
1349 if (i->content == content) {
1350 return dcp_to_content_time (i, t);
1354 /* We couldn't find this content; perhaps things are being changed over */
/** @return The playlist we are using: the explicitly-given one, falling back to the film's. */
1359 shared_ptr<const Playlist>
1360 Player::playlist () const
1362 return _playlist ? _playlist : _film->playlist();
/* Handler for Atmos data emitted by a piece's decoder: translate to DCP time,
 * drop data outside the content's period, and re-emit via the Atmos signal.
 */
1367 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1373 auto piece = weak_piece.lock ();
1374 DCPOMATIC_ASSERT (piece);
1376 auto const vfr = _film->video_frame_rate();
/* Atmos frames are at the video frame rate; account for the content's start trim */
1378 DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1379 if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1383 Atmos (data.data, dcp_time, data.metadata);