2 Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
38 #include "frame_rate_change.h"
40 #include "image_decoder.h"
43 #include "maths_util.h"
46 #include "player_video.h"
49 #include "raw_image_proxy.h"
50 #include "render_text.h"
52 #include "text_content.h"
53 #include "text_decoder.h"
55 #include "video_decoder.h"
57 #include <dcp/reel_closed_caption_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_sound_asset.h>
60 #include <dcp/reel_subtitle_asset.h>
70 using std::dynamic_pointer_cast;
73 using std::make_shared;
74 using std::make_shared;
79 using std::shared_ptr;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
87 using namespace dcpomatic;
/* Identifiers for Player properties, reported to observers via the Change signal.
 * Numbered from 700 — presumably to keep them distinct from other property id
 * ranges used elsewhere in the project (TODO confirm).
 */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
int const PlayerProperty::IGNORE_VIDEO = 706;
int const PlayerProperty::IGNORE_AUDIO = 707;
int const PlayerProperty::IGNORE_TEXT = 708;
int const PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES = 709;
int const PlayerProperty::PLAY_REFERENCED = 710;
/* Construct a Player for a film, burning subtitles with the given alignment.
 * NOTE(review): this initializer list is truncated in this copy of the file
 * (no member-initializer for _film, no body) — verify against upstream.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	/* Behaviour flags all start "off"; they are enabled via the set_* methods */
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger(film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
/* Construct a Player for an explicit playlist (rather than the film's own).
 * NOTE(review): truncated in this copy of the file (no body visible).
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	, _playlist (playlist_)
	/* Behaviour flags all start "off"; they are enabled via the set_* methods */
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger(film->audio_frame_rate())
	/* Shared constructor body (the enclosing signature is not visible in this
	   copy of the file — presumably Player::construct(); verify upstream).
	   _film is held as a weak_ptr, so lock and assert it is still alive. */
	auto film = _film.lock();
	DCPOMATIC_ASSERT(film);
	set_video_container_size(film->frame_size());
	/* Pick up the film's audio processor configuration immediately */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
	/* Accurate seek to the start so the player is ready to run */
	seek (DCPTime (), true);
	/* Signal-connection body (enclosing signature not visible in this copy of
	   the file — presumably Player::connect(); verify upstream). */
	auto film = _film.lock();
	DCPOMATIC_ASSERT(film);
	_film_changed_connection = film->Change.connect(bind(&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
/* Move constructor.  Atomics cannot be moved, so atomic members are copied via
 * .load(); everything else is std::move'd.
 * NOTE(review): truncated in this copy of the file (no _film initializer or
 * body visible) — verify against upstream.
 */
Player::Player(Player&& other)
	, _playlist(std::move(other._playlist))
	, _suspended(other._suspended.load())
	, _pieces(std::move(other._pieces))
	, _video_container_size(other._video_container_size.load())
	, _black_image(std::move(other._black_image))
	, _ignore_video(other._ignore_video.load())
	, _ignore_audio(other._ignore_audio.load())
	, _ignore_text(other._ignore_text.load())
	, _always_burn_open_subtitles(other._always_burn_open_subtitles.load())
	, _fast(other._fast.load())
	, _tolerant(other._tolerant)
	, _play_referenced(other._play_referenced.load())
	, _next_video_time(other._next_video_time)
	, _next_audio_time(other._next_audio_time)
	, _dcp_decode_reduction(other._dcp_decode_reduction.load())
	, _last_video(std::move(other._last_video))
	, _audio_merger(std::move(other._audio_merger))
	, _shuffler(std::move(other._shuffler))
	, _delay(std::move(other._delay))
	, _stream_states(std::move(other._stream_states))
	, _black(std::move(other._black))
	, _silent(std::move(other._silent))
	, _active_texts(std::move(other._active_texts))
	, _audio_processor(std::move(other._audio_processor))
	, _playback_length(other._playback_length.load())
	, _subtitle_alignment(other._subtitle_alignment)
/* Move assignment; mirrors the move constructor (atomics copied via .load(),
 * the rest moved).  Self-assignment is guarded.
 * NOTE(review): truncated in this copy of the file (return type, braces and
 * the `return *this;` are not visible) — verify against upstream.
 */
Player::operator=(Player&& other)
	if (this == &other) {
	_film = std::move(other._film);
	_playlist = std::move(other._playlist);
	_suspended = other._suspended.load();
	_pieces = std::move(other._pieces);
	_video_container_size = other._video_container_size.load();
	_black_image = std::move(other._black_image);
	_ignore_video = other._ignore_video.load();
	_ignore_audio = other._ignore_audio.load();
	_ignore_text = other._ignore_text.load();
	_always_burn_open_subtitles = other._always_burn_open_subtitles.load();
	_fast = other._fast.load();
	_tolerant = other._tolerant;
	_play_referenced = other._play_referenced.load();
	_next_video_time = other._next_video_time;
	_next_audio_time = other._next_audio_time;
	_dcp_decode_reduction = other._dcp_decode_reduction.load();
	_last_video = std::move(other._last_video);
	_audio_merger = std::move(other._audio_merger);
	_shuffler = std::move(other._shuffler);
	_delay = std::move(other._delay);
	_stream_states = std::move(other._stream_states);
	_black = std::move(other._black);
	_silent = std::move(other._silent);
	_active_texts = std::move(other._active_texts);
	_audio_processor = std::move(other._audio_processor);
	_playback_length = other._playback_length.load();
	_subtitle_alignment = other._subtitle_alignment;
243 have_video (shared_ptr<const Content> content)
245 return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
250 have_audio (shared_ptr<const Content> content)
252 return static_cast<bool>(content->audio) && content->can_be_played();
/* Rebuild the Piece list (content + decoder + frame-rate-change) from the
 * playlist.  Re-uses old decoders where the content is unchanged, wires up
 * decoder signals, and resets the black/silence "Empty" trackers and emission
 * positions.
 * NOTE(review): this function is heavily truncated in this copy of the file
 * (many braces, `continue`s and statements are missing) — verify against
 * upstream before editing the logic.
 */
Player::setup_pieces ()
	boost::mutex::scoped_lock lm (_mutex);
	/* Keep the old pieces so their decoders can be re-used below */
	auto old_pieces = _pieces;
	auto film = _film.lock();
	_playback_length = _playlist ? _playlist->length(film) : film->length();
	auto playlist_content = playlist()->content();
	/* Is any content 3D with separate L/R streams?  (Presumably used to decide
	   whether a Shuffler is needed — the use site is not visible here.) */
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
	_shuffler.reset(new Shuffler());
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	for (auto content: playlist()->content()) {
		if (!content->paths_valid()) {
		if (_ignore_video && _ignore_audio && content->text.empty()) {
			/* We're only interested in text and this content has none */
		/* Re-use an existing decoder for this content if we have one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == content) {
				old_decoder = j->decoder;
		auto decoder = decoder_factory(film, content, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);
		FrameRateChange frc(film, content);
		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		for (auto i: decoder->text) {
			i->set_ignore (true);
		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
		auto piece = make_shared<Piece>(content, decoder, frc);
		_pieces.push_back (piece);
		if (decoder->video) {
			/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
			decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		/* Connect text decoders' start/stop signals through to our handlers */
		auto j = decoder->text.begin();
		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
	/* Rebuild the per-audio-stream state table */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState(i);
	/* Video content whose period is covered by later in-use 2D video is ignored */
	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	for (auto piece = _pieces.begin(); piece != _pieces.end(); ++piece) {
		if (ignore_overlap((*piece)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = (*piece)->content->period(film);
			for (auto later_piece = std::next(piece); later_piece != _pieces.end(); ++later_piece) {
				if (ignore_overlap((*later_piece)->content->video)) {
					if (auto overlap = (*later_piece)->content->period(film).overlap(period)) {
						(*piece)->ignore_video.push_back(*overlap);
	/* Trackers for periods with no video / no audio, used by pass() to emit black/silence */
	_black = Empty(film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);
	/* Reset emission state */
	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
/* Handle a change to a piece of content in the playlist.  PENDING suspends
 * pass()/seek(); DONE rebuilds pieces; CANCELLED resumes.  The signal is then
 * re-emitted as our own Change.
 * NOTE(review): truncated in this copy of the file (suspend/resume calls and
 * several braces are missing) — verify against upstream.
 */
Player::playlist_content_change (ChangeType type, int property, bool frequent)
	auto film = _film.lock();
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Update queued-but-not-yet-emitted frames so the new crop takes effect promptly */
			for (auto const& i: _delay) {
				i.first->reset_metadata(film, _video_container_size);
	if (type == ChangeType::PENDING) {
		/* The player content is probably about to change, so we can't carry on
		   until that has happened and we've rebuilt our pieces. Stop pass()
		   and seek() from working until then.
	} else if (type == ChangeType::DONE) {
		/* A change in our content has gone through. Re-build our pieces. */
	} else if (type == ChangeType::CANCELLED) {
	Change (type, property, frequent);
/* Set the size of the "container" into which video is placed, regenerating the
 * cached black frame to match.  No-op if the size is unchanged.
 * NOTE(review): truncated in this copy of the file (early-return body and
 * closing braces are missing) — verify against upstream.
 */
Player::set_video_container_size (dcp::Size s)
	ChangeSignaller<Player, int> cc(this, PlayerProperty::VIDEO_CONTAINER_SIZE);
	if (s == _video_container_size) {
	_video_container_size = s;
		/* Rebuild the cached black frame at the new size */
		boost::mutex::scoped_lock lm(_black_image_mutex);
		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
		_black_image->make_black ();
/* Handle a change to the playlist itself: rebuild pieces when the change is
 * DONE, then re-emit as our own PLAYLIST Change.
 * NOTE(review): truncated in this copy of the file — verify against upstream.
 */
Player::playlist_change (ChangeType type)
	if (type == ChangeType::DONE) {
	Change (type, PlayerProperty::PLAYLIST, false);
/* Handle a change to a Film property that affects our output.
 * NOTE(review): truncated in this copy of the file (several braces and a
 * setup_pieces() call appear to be missing) — verify against upstream.
 */
Player::film_change (ChangeType type, Film::Property p)
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	auto film = _film.lock();
	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		if (type == ChangeType::DONE) {
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && film->audio_processor ()) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Take a fresh clone of the processor at the film's audio rate */
			_audio_processor = film->audio_processor()->clone(film->audio_frame_rate());
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Channel count changed: pending merged audio is no longer valid */
			_audio_merger.clear ();
/* Make a PlayerVideo wrapping the cached black frame, for the given eyes.
 * NOTE(review): several constructor arguments are missing from this copy of
 * the file — verify against upstream.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
	/* _black_image is rebuilt by set_video_container_size(), so guard access */
	boost::mutex::scoped_lock lm(_black_image_mutex);
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		_video_container_size,
		_video_container_size,
		PresetColourConversion::all().front().conversion,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
529 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
531 auto film = _film.lock();
532 DCPOMATIC_ASSERT(film);
534 auto s = t - piece->content->position ();
535 s = min (piece->content->length_after_trim(film), s);
536 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
538 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
539 then convert that ContentTime to frames at the content's rate. However this fails for
540 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
541 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
543 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
545 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
550 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
552 /* See comment in dcp_to_content_video */
553 auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
554 return d + piece->content->position();
559 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
561 auto film = _film.lock();
562 DCPOMATIC_ASSERT(film);
564 auto s = t - piece->content->position ();
565 s = min (piece->content->length_after_trim(film), s);
566 /* See notes in dcp_to_content_video */
567 return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(film->audio_frame_rate());
572 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
574 auto film = _film.lock();
575 DCPOMATIC_ASSERT(film);
577 /* See comment in dcp_to_content_video */
578 return DCPTime::from_frames(f, film->audio_frame_rate())
579 - DCPTime (piece->content->trim_start(), piece->frc)
580 + piece->content->position();
585 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
587 auto film = _film.lock();
588 DCPOMATIC_ASSERT(film);
590 auto s = t - piece->content->position ();
591 s = min (piece->content->length_after_trim(film), s);
592 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
597 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
599 return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
603 vector<shared_ptr<Font>>
604 Player::get_subtitle_fonts ()
606 boost::mutex::scoped_lock lm (_mutex);
608 vector<shared_ptr<Font>> fonts;
609 for (auto piece: _pieces) {
610 for (auto text: piece->content->text) {
611 auto text_fonts = text->fonts();
612 copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
620 /** Set this player never to produce any video data */
622 Player::set_ignore_video ()
624 ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_VIDEO);
625 _ignore_video = true;
631 Player::set_ignore_audio ()
633 ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_AUDIO);
634 _ignore_audio = true;
640 Player::set_ignore_text ()
642 ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_TEXT);
648 /** Set the player to always burn open texts into the image regardless of the content settings */
650 Player::set_always_burn_open_subtitles ()
652 ChangeSignaller<Player, int> cc(this, PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES);
653 _always_burn_open_subtitles = true;
657 /** Sets up the player to be faster, possibly at the expense of quality */
667 Player::set_play_referenced ()
669 ChangeSignaller<Player, int> cc(this, PlayerProperty::PLAY_REFERENCED);
670 _play_referenced = true;
	/* Main "do some work" body (the enclosing signature is not visible in this
	   copy of the file — presumably bool Player::pass()).
	   NOTE(review): heavily truncated here (many braces, continues and returns
	   missing) — verify against upstream before editing logic. */
	boost::mutex::scoped_lock lm (_mutex);
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
	auto film = _film.lock();
	if (_playback_length.load() == DCPTime() || !film) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */
	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;
	for (auto i: _pieces) {
		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(film)) {
			/* Given two choices at the same time, pick the one with texts so we see it before
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_content = i;
	if (earliest_content) {
	/* Black/silent gap fillers also compete to be the next thing emitted */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			_next_audio_time = dcp->end(film);
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_next_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			int64_t const error = labs(period.from.get() - _next_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_next_audio_time;
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		_silent.set_position (period.to);
	/* Emit any audio that is ready */
	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.  First, though we choose only streams that are less than
	   ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
	   behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
	   that will never come, causing bugs like #2101.
	constexpr int ignore_streams_behind = 5;
	using state_pair = std::pair<AudioStreamPtr, StreamState>;
	/* Find streams that have pushed */
	std::vector<state_pair> have_pushed;
	std::copy_if(_stream_states.begin(), _stream_states.end(), std::back_inserter(have_pushed), [](state_pair const& a) { return static_cast<bool>(a.second.last_push_end); });
	/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
	auto latest_last_push_end = std::max_element(
		[](state_pair const& a, state_pair const& b) { return a.second.last_push_end.get() < b.second.last_push_end.get(); }
	if (latest_last_push_end != have_pushed.end()) {
		LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end.get()));
	/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
	std::map<AudioStreamPtr, StreamState> alive_stream_states;
	for (auto const& i: _stream_states) {
		if (!i.second.last_push_end || (latest_last_push_end->second.last_push_end.get() - i.second.last_push_end.get()) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
			alive_stream_states.insert(i);
			LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
	auto pull_to = _playback_length.load();
	for (auto const& i: alive_stream_states) {
		auto position = i.second.last_push_end.get_value_or(i.second.piece->content->position());
		if (!i.second.piece->done && position < pull_to) {
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_next_audio_time && i->second < *_next_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_next_audio_time);
		} else if (_next_audio_time && i->second > *_next_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
		emit_audio (i->first, i->second);
	/* Flush any delayed video */
	for (auto const& i: _delay) {
		do_emit_video(i.first, i.second);
	/* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
	 * However, if we have L and R video files, and one is shorter than the other,
	 * the fill code in ::video mostly takes care of filling in the gaps.
	 * However, since it fills at the point when it knows there is more video coming
	 * at time t (so it should fill any gap up to t) it can't do anything right at the
	 * end.  This is particularly bad news if the last frame emitted is a LEFT
	 * eye, as the MXF writer will complain about the 3D sequence being wrong.
	 * Here's a hack to workaround that particular case.
	if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
		do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
885 /** @return Open subtitles for the frame at the given time, converted to images */
/* Collect all burnt-in open subtitles active for the one-frame period starting
 * at `time', scale/position them and merge to a single image.
 * NOTE(review): truncated in this copy of the file (the loop header over
 * get_burnt()'s result and several braces are missing) — verify upstream.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
	auto film = _film.lock();
	list<PositionImage> captions;
	int const vfr = film->video_frame_rate();
		_active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
					lrint(_video_container_size.load().width * i.rectangle.x),
					lrint(_video_container_size.load().height * i.rectangle.y)
		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text(j.string, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
	if (captions.empty()) {
	return merge (captions, _subtitle_alignment);
/* Step to the next Eyes value when emitting 3D frames (LEFT -> RIGHT, ...).
 * NOTE(review): the return type and most of the body are missing from this
 * copy of the file — verify against upstream.
 */
increment_eyes (Eyes e)
	if (e == Eyes::LEFT) {
/* Handle a video frame emitted by a decoder: map content eyes to film 2D/3D,
 * discard out-of-period frames, fill any gap since the last emitted frame
 * (with the previous frame or black), then build and emit PlayerVideo(s).
 * NOTE(review): heavily truncated in this copy of the file (returns, braces
 * and some PlayerVideo constructor arguments are missing) — verify upstream.
 */
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
	auto piece = weak_piece.lock ();
	if (!piece->content->video->use()) {
	auto film = _film.lock();
	FrameRateChange frc(film, piece->content);
	/* When skipping (content faster than DCP) drop every other frame */
	if (frc.skip && (video.frame % 2) == 1) {
	vector<Eyes> eyes_to_emit;
	if (!film->three_d()) {
		if (video.eyes == Eyes::RIGHT) {
			/* 2D film, 3D content: discard right */
		} else if (video.eyes == Eyes::LEFT) {
			/* 2D film, 3D content: emit left as "both" */
			video.eyes = Eyes::BOTH;
			eyes_to_emit = { Eyes::BOTH };
		if (video.eyes == Eyes::BOTH) {
			/* 3D film, 2D content; emit "both" for left and right */
			eyes_to_emit = { Eyes::LEFT, Eyes::RIGHT };
	if (eyes_to_emit.empty()) {
		eyes_to_emit = { video.eyes };
	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
	/* Periods of this piece's video that are covered by later overlapping video */
	auto ignore_video = std::find_if(
		piece->ignore_video.begin(),
		piece->ignore_video.end(),
		[time](DCPTimePeriod period) { return period.contains(time); }
	if (ignore_video != piece->ignore_video.end()) {
	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	DCPTime fill_to = min(time, piece->content->end(film));
	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());
		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			auto last = _last_video.find (weak_piece);
			if (film->three_d()) {
				auto fill_to_eyes = eyes_to_emit[0];
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				if (fill_to == piece->content->end(film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
				/* Fill alternating L/R frames until we reach fill_to */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					eyes = increment_eyes (eyes);
				/* 2D: repeat the last frame (or black) for each missing frame time */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
						emit_video (black_player_video_frame(Eyes::BOTH), j);
	auto const content_video = piece->content->video;
	for (auto eyes: eyes_to_emit) {
		_last_video[weak_piece] = std::make_shared<PlayerVideo>(
			content_video->actual_crop(),
			content_video->fade(film, video.frame),
			content_video->scaled_size(film->frame_size()),
			_video_container_size,
			content_video->pixel_quanta()
			_video_container_size,
			content_video->colour_conversion(),
			content_video->range(),
	/* Emit the frame, repeated as required by the frame-rate change */
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(film)) {
			emit_video (_last_video[weak_piece], t);
		t += one_video_frame ();
/* Handle audio emitted by a decoder: trim to the content's period, apply
 * fade/gain, remap to the film's channel layout, run any audio processor and
 * push into the merger, recording the stream's last_push_end.
 * NOTE(review): truncated in this copy of the file (returns and braces are
 * missing) — verify against upstream.
 */
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
	auto piece = weak_piece.lock ();
	auto film = _film.lock();
	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);
	int const rfr = content->resampled_frame_rate(film);
	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
	LOG_DEBUG_PLAYER("Received audio frame %1 covering %2 to %3 (%4)", content_audio.frame, to_string(time), to_string(end), piece->content->path(0).filename());
	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
			/* This audio is entirely discarded */
		content_audio.audio = cut.first;
	} else if (time > piece->content->end(film)) {
		/* Discard it all */
	} else if (end > piece->content->end(film)) {
		Frame const remaining_frames = DCPTime(piece->content->end(film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
	/* Gain and fade */
	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Work on a copy so the decoder's buffers are not modified */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		content_audio.audio = gain_buffers;
	/* Remap to the film's channel layout */
	content_audio.audio = remap(content_audio.audio, film->audio_channels(), stream->mapping());
	/* Process (e.g. upmix), if the film has an audio processor */
	if (_audio_processor) {
		content_audio.audio = _audio_processor->run(content_audio.audio, film->audio_channels());
	/* Push into the merger and note how far this stream has got */
	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames(content_audio.audio->frames(), film->audio_frame_rate());
/* Handle a bitmap subtitle emitted by a text decoder: apply the content's
 * offset/scale settings, scale the image to the container size and record it
 * as an active text starting at the corresponding DCP time.
 * NOTE(review): truncated in this copy of the file (the declaration of `ps`
 * and several braces are missing) — verify against upstream.
 */
Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
	for (auto& sub: subtitle.subs)
		/* Apply content's subtitle offsets */
		sub.rectangle.x += content->x_offset ();
		sub.rectangle.y += content->y_offset ();
		/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
		sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
		sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
		/* Apply content's subtitle scale */
		sub.rectangle.width *= content->x_scale ();
		sub.rectangle.height *= content->y_scale ();
		auto image = sub.image;
		/* We will scale the subtitle up to fit _video_container_size */
		int const width = sub.rectangle.width * _video_container_size.load().width;
		int const height = sub.rectangle.height * _video_container_size.load().height;
		/* A zero-sized subtitle cannot be scaled or displayed */
		if (width == 0 || height == 0) {
		dcp::Size scaled_size (width, height);
		ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
	DCPTime from(content_time_to_dcp(piece, subtitle.from()));
	_active_texts[content->type()].add_from(weak_content, ps, from);
/* Handler for a plain (string) subtitle arriving from a decoder: apply the
 * TextContent's position offsets, scale and aspect adjustments, stamp the
 * "in" time, and register the result as an active text.
 * NOTE(review): elided lines (numbering gaps) hide the return type, braces,
 * guard-clause bodies and the declaration of `ps` — confirm against upstream.
 */
1246 Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
/* Bail out if the piece, content or film has gone away (bodies elided). */
1252 auto piece = weak_piece.lock ();
1253 auto content = weak_content.lock ();
1254 auto film = _film.lock();
1255 if (!piece || !content || !film) {
1260 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after this content's end (body elided —
   presumably an early return). */
1262 if (from > piece->content->end(film)) {
1266 for (auto s: subtitle.subs) {
1267 s.set_h_position (s.h_position() + content->x_offset());
1268 s.set_v_position (s.v_position() + content->y_offset());
1269 float const xs = content->x_scale();
1270 float const ys = content->y_scale();
1271 float size = s.size();
1273 /* Adjust size to express the common part of the scaling;
1274 e.g. if xs = ys = 0.5 we scale size by 2.
/* Guard against division by ~zero scales before computing the common factor. */
1276 if (xs > 1e-5 && ys > 1e-5) {
1277 size *= 1 / min (1 / xs, 1 / ys);
1281 /* Then express aspect ratio changes */
/* Only set an aspect adjustment when it differs from 1 by more than libdcp's
   epsilon, to avoid writing no-op adjustments. */
1282 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1283 s.set_aspect_adjust (xs / ys);
/* Stamp the subtitle's start time (dcp::Time with an editable rate of 1000). */
1286 s.set_in (dcp::Time(from.seconds(), 1000));
1287 ps.string.push_back (s);
1290 _active_texts[content->type()].add_from(weak_content, ps, from);
/* Handler for the end of a subtitle: close the matching active text at the
 * corresponding DCP time and, when the subtitle is not being burnt into the
 * image, emit it via the Text signal.
 * NOTE(review): elided lines (numbering gaps) hide the return type, braces and
 * the guard-clause bodies — confirm against upstream.
 */
1295 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1301 auto content = weak_content.lock ();
/* Nothing to do if there is no active text from this content (body elided). */
1306 if (!_active_texts[content->type()].have(weak_content)) {
1310 auto piece = weak_piece.lock ();
1311 auto film = _film.lock();
1312 if (!piece || !film) {
1316 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops that fall after this content's end (body elided). */
1318 if (dcp_to > piece->content->end(film)) {
/* Close the active text; add_to returns the (content, start-time) pair it was
   opened with, used below to build the emitted period. */
1322 auto from = _active_texts[content->type()].add_to(weak_content, dcp_to);
/* Open subtitles may be forced to burn-in globally via
   _always_burn_open_subtitles; burnt-in subtitles are not emitted as Text. */
1324 bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1325 if (content->use() && !always && !content->burn()) {
1326 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
/* Seek the player to `time`.  `accurate` asks decoders to land exactly on the
 * target rather than on a convenient earlier point (e.g. a keyframe).
 * Flushes/clears all pipeline state, seeks each piece, then resets the
 * expected next video/audio emission times.
 * NOTE(review): elided lines (numbering gaps) hide the return type, braces,
 * several branch conditions (e.g. the "can't seek" condition near the top and
 * the branch that chooses between setting and clearing the _next_* members,
 * presumably on `accurate`) — confirm against upstream before relying on the
 * exact control flow described here.
 */
1332 Player::seek (DCPTime time, bool accurate)
/* Hold the state mutex for the whole seek. */
1334 boost::mutex::scoped_lock lm (_mutex);
1335 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1338 /* We can't seek in this state */
1342 auto film = _film.lock();
/* Discard any 3D-frame pairing state. */
1348 _shuffler->clear ();
/* Flush any processor so stale audio cannot leak across the seek. */
1353 if (_audio_processor) {
1354 _audio_processor->flush ();
1357 _audio_merger.clear ();
/* Drop all currently-active subtitles/captions. */
1358 std::for_each(_active_texts.begin(), _active_texts.end(), [](ActiveText& a) { a.clear(); });
1360 for (auto i: _pieces) {
1361 if (time < i->content->position()) {
1362 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1363 we must seek this (following) content accurately, otherwise when we come to the end of the current
1364 content we may not start right at the beginning of the next, causing a gap (if the next content has
1365 been trimmed to a point between keyframes, or something).
1367 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1369 } else if (i->content->position() <= time && time < i->content->end(film)) {
1370 /* During; seek to position */
1371 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1374 /* After; this piece is done */
/* These three set the expected next-emission state to the seek target... */
1380 _next_video_time = time;
1381 _next_video_eyes = Eyes::LEFT;
1382 _next_audio_time = time;
/* ...and these clear it (the selecting condition is on an elided line). */
1384 _next_video_time = boost::none;
1385 _next_video_eyes = boost::none;
1386 _next_audio_time = boost::none;
/* Reposition the black/silence fillers and forget the last-emitted video. */
1389 _black.set_position (time);
1390 _silent.set_position (time);
1392 _last_video.clear ();
/* Forget per-stream push positions so audio sequencing restarts cleanly. */
1394 for (auto& state: _stream_states) {
1395 state.second.last_push_end = boost::none;
/* Queue a video frame for emission.  Frames pass through a small delay queue
 * so that subtitles which arrive slightly later than their video can still be
 * attached before the frame is emitted (see do_emit_video()).
 * NOTE(review): elided lines (numbering gaps) hide the return type, braces and
 * at least one early return (the 2D discard of right-eye frames) — confirm
 * against upstream.
 */
1401 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1403 auto film = _film.lock();
1404 DCPOMATIC_ASSERT(film);
/* In a 2D film, map left-eye frames to BOTH and drop right-eye frames
   (the drop itself is on an elided line). */
1406 if (!film->three_d()) {
1407 if (pv->eyes() == Eyes::LEFT) {
1408 /* Use left-eye images for both eyes... */
1409 pv->set_eyes (Eyes::BOTH);
1410 } else if (pv->eyes() == Eyes::RIGHT) {
1411 /* ...and discard the right */
1416 /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1417 player before the video that requires them.
1419 _delay.push_back (make_pair (pv, time));
/* Only advance the frame clock once a whole frame (BOTH, or the RIGHT of a
   L/R pair) has been queued. */
1421 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1422 _next_video_time = time + one_video_frame();
1424 _next_video_eyes = increment_eyes (pv->eyes());
/* Hold frames until the delay queue has at least 3 entries (the early return
   for a shorter queue is on an elided line), then emit the oldest. */
1426 if (_delay.size() < 3) {
1430 auto to_do = _delay.front();
1432 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: retire expired active texts, attach any open
 * subtitles to the frame, and (on an elided line, presumably) fire the Video
 * signal.
 * NOTE(review): return type, braces and the final emission statement are on
 * elided lines — confirm against upstream.
 */
1437 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Only prune once per complete frame (BOTH, or the RIGHT of a L/R pair). */
1439 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1440 std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
/* `subtitles` appears to be optional; the guard around set_text() is on an
   elided line. */
1443 auto subtitles = open_subtitles_for_frame (time);
1445 pv->set_text (subtitles.get ());
/* Emit a block of audio via the Audio signal and advance the expected
 * next-audio time.  Audio must be contiguous: at most 1 sample of slack
 * against the previously emitted block is tolerated.
 * NOTE(review): return type and braces are on elided lines.
 */
1453 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1455 auto film = _film.lock();
1456 DCPOMATIC_ASSERT(film);
1458 /* Log if the assert below is about to fail */
1459 if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1460 film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1463 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1464 DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
/* Fire the Audio signal, then move the expected next time past this block. */
1465 Audio(data, time, film->audio_frame_rate());
1466 _next_audio_time = time + DCPTime::from_frames(data->frames(), film->audio_frame_rate());
/* Fill `period` with silence, emitted in blocks of at most 0.5 seconds so no
 * single AudioBuffers allocation is excessively large.
 * NOTE(review): return type, braces, the empty-period early return and the
 * loop-advance statement (presumably `t += block`) are on elided lines.
 */
1471 Player::fill_audio (DCPTimePeriod period)
1473 auto film = _film.lock();
1474 DCPOMATIC_ASSERT(film);
/* An empty period needs no filling (early-return body elided). */
1476 if (period.from == period.to) {
1480 DCPOMATIC_ASSERT (period.from < period.to);
1482 DCPTime t = period.from;
1483 while (t < period.to) {
1484 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1485 Frame const samples = block.frames_round(film->audio_frame_rate());
1487 auto silence = make_shared<AudioBuffers>(film->audio_channels(), samples);
1488 silence->make_silent ();
1489 emit_audio (silence, t);
/* Return the DCPTime duration of a single video frame at the film's video
 * frame rate.
 * NOTE(review): the return-type line (presumably DCPTime) and braces are on
 * elided lines.
 */
1497 Player::one_video_frame () const
1499 auto film = _film.lock();
1500 DCPOMATIC_ASSERT(film);
1502 return DCPTime::from_frames(1, film->video_frame_rate ());
/* Drop the leading part of `audio` (which starts at `time`) up to
 * `discard_to`, returning the remaining samples and the DCP time at which
 * they now start.  If nothing remains, a null buffer and a default DCPTime
 * are returned.
 * NOTE(review): braces and the early-return's closing brace are on elided lines.
 */
1506 pair<shared_ptr<AudioBuffers>, DCPTime>
1507 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1509 auto film = _film.lock();
1510 DCPOMATIC_ASSERT(film);
1512 auto const discard_time = discard_to - time;
1513 auto const discard_frames = discard_time.frames_round(film->audio_frame_rate());
1514 auto remaining_frames = audio->frames() - discard_frames;
/* Everything discarded: signal "nothing left" with a null buffer. */
1515 if (remaining_frames <= 0) {
1516 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
/* Copy `remaining_frames` starting at offset `discard_frames`. */
1518 auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1519 return make_pair(cut, time + discard_time);
/* Set the decode-resolution reduction applied to DCP content, notifying
 * listeners via the DCP_DECODE_REDUCTION property change.  The ChangeSignaller
 * presumably emits PENDING on construction and DONE on destruction — verify.
 * NOTE(review): return type, braces and the no-change early return's body
 * (which presumably aborts the signaller) are on elided lines.
 */
1524 Player::set_dcp_decode_reduction (optional<int> reduction)
1526 ChangeSignaller<Player, int> cc(this, PlayerProperty::DCP_DECODE_REDUCTION);
/* No-op if the value is unchanged (body elided). */
1528 if (reduction == _dcp_decode_reduction.load()) {
1533 _dcp_decode_reduction = reduction;
/* Public overload: convert a ContentTime within `content` to DCP time by
 * finding the matching piece and delegating to the piece-based overload.
 * Presumably returns an empty optional when the content is not found — the
 * return type and final return are on elided lines.
 */
1539 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
1541 boost::mutex::scoped_lock lm (_mutex);
1543 for (auto i: _pieces) {
1544 if (i->content == content) {
1545 return content_time_to_dcp (i, t);
1549 /* We couldn't find this content; perhaps things are being changed over */
/* Public overload: convert a DCPTime to a ContentTime within `content` by
 * finding the matching piece and delegating to the piece-based overload.
 * Returns an empty optional when the content is not found (the final return
 * is on an elided line).
 */
1554 optional<ContentTime>
1555 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
1557 boost::mutex::scoped_lock lm (_mutex);
1559 for (auto i: _pieces) {
1560 if (i->content == content) {
1561 return dcp_to_content_time (i, t);
1565 /* We couldn't find this content; perhaps things are being changed over */
/* Return the playlist in use: an explicitly-set override (_playlist) if
 * present, otherwise the film's own playlist.
 * NOTE(review): braces and, presumably, a null-film guard between the lock
 * and the return are on elided lines — verify.
 */
1570 shared_ptr<const Playlist>
1571 Player::playlist () const
1573 auto film = _film.lock();
1578 return _playlist ? _playlist : film->playlist();
/* Handler for Atmos data arriving from a decoder: convert its frame number to
 * DCP time (accounting for the content's start trim), drop data outside the
 * content's extent on the timeline, and emit the rest via the Atmos signal.
 * NOTE(review): return type, braces and the out-of-range early return are on
 * elided lines.
 */
1583 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1589 auto film = _film.lock();
1590 DCPOMATIC_ASSERT(film);
1592 auto piece = weak_piece.lock ();
1593 DCPOMATIC_ASSERT (piece);
1595 auto const vfr = film->video_frame_rate();
/* Shift by trim_start so frame 0 of the asset maps to the content's position;
   FrameRateChange(vfr, vfr) is an identity rate conversion. */
1597 DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
/* Ignore Atmos data that falls outside the content's playing period. */
1598 if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(film))) {
1602 Atmos (data.data, dcp_time, data.metadata);
1607 Player::signal_change(ChangeType type, int property)
1609 Change(type, property, false);