2 Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
22 #include "atmos_decoder.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
31 #include "raw_image_proxy.h"
34 #include "render_text.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
69 using std::dynamic_pointer_cast;
72 using std::make_shared;
78 using std::shared_ptr;
81 using std::make_shared;  /* NOTE(review): duplicate of the declaration above (line 72) — harmless, could be removed */
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
87 using namespace dcpomatic;
/* Property identifiers emitted via Player's Change signal so listeners can
   tell which aspect of the player's output has (or is about to) change.
   Values 700+ keep them distinct from other property-ID ranges used elsewhere
   in the project (presumably — the other ranges are not visible here). */
90 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
91 int const PlayerProperty::PLAYLIST = 701;
92 int const PlayerProperty::FILM_CONTAINER = 702;
93 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
94 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
95 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player that plays the given film using the film's own playlist.
   Copies the film's tolerant flag and sizes the audio merger to the film's
   audio frame rate.
   NOTE(review): this extract is missing lines — the full member-initialiser
   list and constructor body are partly elided here. */
98 Player::Player (shared_ptr<const Film> film)
101 , _tolerant (film->tolerant())
102 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player that plays the given film but using an explicit playlist
   (which may differ from the film's own).  Wires up change notifications from
   the film and playlist, sets the initial container size, applies the film's
   audio-processor setting, and performs an initial accurate seek to time zero.
   NOTE(review): some constructor lines are elided in this extract. */
108 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
110 , _playlist (playlist_)
112 , _tolerant (film->tolerant())
113 , _audio_merger (_film->audio_frame_rate())
122 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
123 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front ensures our handler runs before other Change subscribers. */
126 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
127 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
128 set_video_container_size (_film->frame_size ());
/* Pick up the film's audio processor as if it had just been set. */
130 film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
/* Accurate seek to the start so the first pass() emits from time zero. */
133 seek (DCPTime (), true);
/* Public, thread-safe wrapper: take the player mutex and rebuild the list of
   Pieces (decoder/content pairings) via setup_pieces_unlocked(). */
144 Player::setup_pieces ()
146 boost::mutex::scoped_lock lm (_mutex);
147 setup_pieces_unlocked ();
/* @return true if this content has video and that video is set to be used.
   Used as a predicate when building the _black (video gap) Empty below. */
152 have_video (shared_ptr<const Content> content)
154 return static_cast<bool>(content->video) && content->video->use();
/* @return true if this content has any audio.  Predicate for building the
   _silent (audio gap) Empty below. */
159 have_audio (shared_ptr<const Content> content)
161 return static_cast<bool>(content->audio);
/* Rebuild the Pieces (content + decoder + frame-rate-change) that drive
   playback.  Caller must hold _mutex.  Re-uses decoders from the old piece
   list where possible, hooks every decoder's output signals into the Player,
   works out which video periods are obscured by later overlapping content,
   and resets the gap-tracking (_black/_silent) and last-time state.
   NOTE(review): several lines (closing braces, skip/continue branches) are
   elided in this extract. */
166 Player::setup_pieces_unlocked ()
168 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so their decoders can be re-used for unchanged content. */
170 auto old_pieces = _pieces;
174 _shuffler = new Shuffler();
175 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
177 for (auto i: playlist()->content()) {
/* Skip content whose files have gone missing. */
179 if (!i->paths_valid ()) {
183 if (_ignore_video && _ignore_audio && i->text.empty()) {
184 /* We're only interested in text and this content has none */
/* Look for an existing decoder for this content so decode state survives. */
188 shared_ptr<Decoder> old_decoder;
189 for (auto j: old_pieces) {
190 auto decoder = j->decoder_for(i);
192 old_decoder = decoder;
197 auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
198 DCPOMATIC_ASSERT (decoder);
200 FrameRateChange frc (_film, i);
202 if (decoder->video && _ignore_video) {
203 decoder->video->set_ignore (true);
206 if (decoder->audio && _ignore_audio) {
207 decoder->audio->set_ignore (true);
/* NOTE(review): presumably guarded by _ignore_text in an elided line. */
211 for (auto i: decoder->text) {
212 i->set_ignore (true);
216 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
218 dcp->set_decode_referenced (_play_referenced);
219 if (_play_referenced) {
220 dcp->set_forced_reduction (_dcp_decode_reduction);
224 auto piece = make_shared<Piece>(i, decoder, frc);
225 _pieces.push_back (piece);
227 if (decoder->video) {
228 if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
229 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
230 decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
232 decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
236 if (decoder->audio) {
237 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals to our subtitle handlers. */
240 auto j = decoder->text.begin();
242 while (j != decoder->text.end()) {
243 (*j)->BitmapStart.connect (
244 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
246 (*j)->PlainStart.connect (
247 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
250 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
256 if (decoder->atmos) {
257 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Mark video that is completely covered by later in-use video so it is
   not emitted (2D overlap only; 3D L/R content is excluded). */
261 for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
262 if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
263 /* Look for content later in the content list with in-use video that overlaps this */
264 auto period = DCPTimePeriod((*i)->position(), (*i)->end(_film));
267 for (; j != _pieces.end(); ++j) {
268 if ((*j)->use_video()) {
269 (*i)->ignore_video = DCPTimePeriod((*j)->position(), (*j)->end(_film)).overlap(period);
/* Trackers for gaps in video (_black) and audio (_silent). */
275 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
276 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
/* Forget emission history; the piece list has changed. */
278 _last_video_time = boost::optional<dcpomatic::DCPTime>();
279 _last_video_eyes = Eyes::BOTH;
280 _last_audio_time = boost::optional<dcpomatic::DCPTime>();
/* Convert a time within a piece of content to DCP (timeline) time by asking
   each Piece in turn; returns nothing if the content is not found (e.g. while
   the piece list is being rebuilt).  Thread-safe. */
285 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
287 boost::mutex::scoped_lock lm (_mutex);
289 for (auto i: _pieces) {
290 auto dcp = i->content_time_to_dcp(content, t);
296 /* We couldn't find this content; perhaps things are being changed over */
/* Handle a change to some content in the playlist.  A pure crop change only
   needs the queued (delayed) video frames' metadata refreshing; any other
   change suspends the player while PENDING and rebuilds the pieces on DONE.
   Finally the change is re-broadcast to our own listeners. */
302 Player::playlist_content_change (ChangeType type, int property, bool frequent)
304 if (property == VideoContentProperty::CROP) {
305 if (type == ChangeType::DONE) {
306 auto const vcs = video_container_size();
307 boost::mutex::scoped_lock lm (_mutex);
/* Update the frames already queued in _delay so they pick up the new crop. */
308 for (auto const& i: _delay) {
309 i.first->reset_metadata (_film, vcs);
313 if (type == ChangeType::PENDING) {
314 /* The player content is probably about to change, so we can't carry on
315 until that has happened and we've rebuilt our pieces. Stop pass()
316 and seek() from working until then.
319 } else if (type == ChangeType::DONE) {
320 /* A change in our content has gone through. Re-build our pieces. */
323 } else if (type == ChangeType::CANCELLED) {
/* Proxy the change notification on to our own Change listeners. */
328 Change (type, property, frequent);
/* Set the size of the video "container" (output frame).  Emits the standard
   PENDING / CANCELLED / DONE Change sequence; a no-op change is cancelled.
   Also rebuilds the cached black frame at the new size. */
333 Player::set_video_container_size (dcp::Size s)
335 Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
338 boost::mutex::scoped_lock lm (_mutex);
340 if (s == _video_container_size) {
342 Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
346 _video_container_size = s;
/* Re-make the black frame used to fill video gaps at the new size. */
348 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
349 _black_image->make_black ();
352 Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a change in the playlist itself (content added/removed/moved):
   rebuild pieces when the change has gone through, then notify listeners.
   NOTE(review): the setup_pieces() call on DONE is elided in this extract. */
357 Player::playlist_change (ChangeType type)
359 if (type == ChangeType::DONE) {
362 Change (type, PlayerProperty::PLAYLIST, false);
/* Handle a change in a Film property that affects our output, translating it
   into the corresponding PlayerProperty notification and updating internal
   state (pieces, audio processor, audio merger) as needed. */
367 Player::film_change (ChangeType type, Film::Property p)
369 /* Here we should notice Film properties that affect our output, and
370 alert listeners that our output now would be different to how it was
371 last time we were run.
374 if (p == Film::Property::CONTAINER) {
375 Change (type, PlayerProperty::FILM_CONTAINER, false);
376 } else if (p == Film::Property::VIDEO_FRAME_RATE) {
377 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
378 so we need new pieces here.
380 if (type == ChangeType::DONE) {
383 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
384 } else if (p == Film::Property::AUDIO_PROCESSOR) {
385 if (type == ChangeType::DONE && _film->audio_processor ()) {
386 boost::mutex::scoped_lock lm (_mutex);
/* Clone the processor so we have our own instance at the film's rate. */
387 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
389 } else if (p == Film::Property::AUDIO_CHANNELS) {
390 if (type == ChangeType::DONE) {
391 boost::mutex::scoped_lock lm (_mutex);
/* Channel count changed: pending merged audio is no longer valid. */
392 _audio_merger.clear ();
/* @return a PlayerVideo wrapping the cached black image, sized to the current
   container, for the given eye(s).  Used to fill gaps where there is no video.
   NOTE(review): some constructor arguments are elided in this extract. */
398 shared_ptr<PlayerVideo>
399 Player::black_player_video_frame (Eyes eyes) const
401 return std::make_shared<PlayerVideo> (
402 std::make_shared<const RawImageProxy>(_black_image),
405 _video_container_size,
406 _video_container_size,
409 PresetColourConversion::all().front().conversion,
/* No originating content and no frame number for synthetic black. */
411 std::weak_ptr<Content>(),
412 boost::optional<Frame>(),
/* Collect the fonts used by every piece's decoder, for embedding/rendering
   subtitles.  Thread-safe.  Duplicate font IDs are not resolved (see XXX). */
419 Player::get_subtitle_fonts ()
421 boost::mutex::scoped_lock lm (_mutex);
423 vector<FontData> fonts;
424 for (auto i: _pieces) {
425 /* XXX: things may go wrong if there are duplicate font IDs
426 with different font files.
428 auto f = i->decoder->fonts ();
429 copy (f.begin(), f.end(), back_inserter(fonts));
436 /** Set this player never to produce any video data */
438 Player::set_ignore_video ()
440 boost::mutex::scoped_lock lm (_mutex);
441 _ignore_video = true;
/* Pieces capture the ignore flags at construction, so rebuild them. */
442 setup_pieces_unlocked ();
/* Set this player never to produce any audio data; rebuilds pieces so the
   flag reaches the decoders. */
447 Player::set_ignore_audio ()
449 boost::mutex::scoped_lock lm (_mutex);
450 _ignore_audio = true;
451 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): the line setting the _ignore_text flag is elided here. */
456 Player::set_ignore_text ()
458 boost::mutex::scoped_lock lm (_mutex);
460 setup_pieces_unlocked ();
464 /** Set the player to always burn open texts into the image regardless of the content settings */
466 Player::set_always_burn_open_subtitles ()
468 boost::mutex::scoped_lock lm (_mutex);
469 _always_burn_open_subtitles = true;
473 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function header and the line setting the _fast flag are
   elided in this extract; only the lock and the piece rebuild are visible. */
477 boost::mutex::scoped_lock lm (_mutex);
479 setup_pieces_unlocked ();
/* Make the player decode and play content from referenced DCPs itself, rather
   than leaving it to be taken directly from those DCPs' assets. */
484 Player::set_play_referenced ()
486 boost::mutex::scoped_lock lm (_mutex);
487 _play_referenced = true;
488 setup_pieces_unlocked ();
/* Apply per-reel trims to a reel asset and, if anything remains, append it to
   the list with its DCP-timeline period.
   @param reel_trim_start frames to trim from the start of this reel
   @param reel_trim_end   frames to trim from the end of this reel
   @param from            DCP time at which this reel starts
   @param ffr             film video frame rate used to convert frames to time */
493 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
495 DCPOMATIC_ASSERT (r);
/* Move the entry point forward and shorten the duration by the trims. */
496 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
497 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
498 if (r->actual_duration() > 0) {
500 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
506 list<ReferencedReelAsset>
/* Gather the reel assets (picture/sound/subtitle/caption) that are referenced
   from DCP content rather than re-encoded, applying the content's trims by
   distributing them across the reels (see doc/design/trim_reels.svg). */
507 Player::get_reel_assets ()
509 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
511 list<ReferencedReelAsset> a;
513 for (auto i: playlist()->content()) {
/* Only DCP content can reference reel assets. */
514 auto j = dynamic_pointer_cast<DCPContent> (i);
519 scoped_ptr<DCPDecoder> decoder;
521 decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
526 DCPOMATIC_ASSERT (j->video_frame_rate ());
527 double const cfr = j->video_frame_rate().get();
528 Frame const trim_start = j->trim_start().frames_round (cfr);
529 Frame const trim_end = j->trim_end().frames_round (cfr);
530 int const ffr = _film->video_frame_rate ();
532 /* position in the asset from the start */
533 int64_t offset_from_start = 0;
534 /* position in the asset from the end */
535 int64_t offset_from_end = 0;
536 for (auto k: decoder->reels()) {
537 /* Assume that main picture duration is the length of the reel */
538 offset_from_end += k->main_picture()->actual_duration();
541 for (auto k: decoder->reels()) {
543 /* Assume that main picture duration is the length of the reel */
544 int64_t const reel_duration = k->main_picture()->actual_duration();
546 /* See doc/design/trim_reels.svg */
/* Portion of the content trims that falls inside this particular reel. */
547 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
548 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
550 auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
551 if (j->reference_video ()) {
552 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
555 if (j->reference_audio ()) {
556 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
559 if (j->reference_text (TextType::OPEN_SUBTITLE)) {
560 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
563 if (j->reference_text (TextType::CLOSED_CAPTION)) {
564 for (auto l: k->closed_captions()) {
565 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
/* Advance the running offsets to the next reel. */
569 offset_from_start += reel_duration;
570 offset_from_end -= reel_duration;
/* One step of the playback engine (Player::pass — the function header is
   elided in this extract).  Finds whichever source is furthest behind — a
   piece of content, a video gap (_black) or an audio gap (_silent) — makes it
   emit some data, then flushes any audio that is definitely complete and any
   video frames whose subtitle-delay has expired. */
581 boost::mutex::scoped_lock lm (_mutex);
584 /* We can't pass in this state */
585 LOG_DEBUG_PLAYER_NC ("Player is suspended");
589 if (_playback_length == DCPTime()) {
590 /* Special; just give one black frame */
591 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
595 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
597 shared_ptr<Piece> earliest_content;
598 optional<DCPTime> earliest_time;
600 for (auto i: _pieces) {
605 auto const t = i->decoder_position ();
/* Pieces past their own end are no longer candidates. */
606 if (t > i->end(_film)) {
610 /* Given two choices at the same time, pick the one with texts so we see it before
613 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->has_text())) {
615 earliest_content = i;
629 if (earliest_content) {
/* A black (video gap) or silent (audio gap) position may be even earlier. */
633 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
634 earliest_time = _black.position ();
638 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
639 earliest_time = _silent.position ();
646 earliest_content->pass();
647 if (!_play_referenced && earliest_content->reference_dcp_audio()) {
648 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
649 to `hide' the fact that no audio was emitted during the referenced DCP (though
650 we need to behave as though it was).
652 _last_audio_time = earliest_content->end (_film);
657 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
658 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
659 _black.set_position (_black.position() + one_video_frame());
663 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
664 DCPTimePeriod period (_silent.period_at_position());
665 if (_last_audio_time) {
666 /* Sometimes the thing that happened last finishes fractionally before
667 or after this silence. Bodge the start time of the silence to fix it.
668 I think this is nothing to worry about since we will just add or
669 remove a little silence at the end of some content.
671 int64_t const error = labs(period.from.get() - _last_audio_time->get());
672 /* Let's not worry about less than a frame at 24fps */
673 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
674 if (error >= too_much_error) {
675 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
677 DCPOMATIC_ASSERT (error < too_much_error);
678 period.from = *_last_audio_time;
/* Emit at most one video frame's worth of silence per pass. */
680 if (period.duration() > one_video_frame()) {
681 period.to = period.from + one_video_frame();
684 _silent.set_position (period.to);
692 /* Emit any audio that is ready */
694 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
695 of our streams, or the position of the _silent.
697 auto pull_to = _playback_length;
698 for (auto i: _pieces) {
699 i->update_pull_to (pull_to);
701 if (!_silent.done() && _silent.position() < pull_to) {
702 pull_to = _silent.position();
705 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
706 auto audio = _audio_merger.pull (pull_to);
707 for (auto i = audio.begin(); i != audio.end(); ++i) {
708 if (_last_audio_time && i->second < *_last_audio_time) {
709 /* This new data comes before the last we emitted (or the last seek); discard it */
710 auto cut = discard_audio (i->first, i->second, *_last_audio_time);
715 } else if (_last_audio_time && i->second > *_last_audio_time) {
716 /* There's a gap between this data and the last we emitted; fill with silence */
717 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
720 emit_audio (i->first, i->second);
/* Flush delayed video frames whose hold-back period has expired. */
725 for (auto const& i: _delay) {
726 do_emit_video(i.first, i.second);
734 /** @return Open subtitles for the frame at the given time, converted to images */
735 optional<PositionImage>
736 Player::open_subtitles_for_frame (DCPTime time) const
738 list<PositionImage> captions;
739 int const vfr = _film->video_frame_rate();
/* Collect active open subtitles overlapping this one-frame period that should
   be burnt in (either per-content setting or the always-burn override). */
743 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
746 /* Bitmap subtitles */
747 for (auto i: j.bitmap) {
752 /* i.image will already have been scaled to fit _video_container_size */
753 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Convert the proportional rectangle position into pixel coordinates. */
759 lrint(_video_container_size.width * i.rectangle.x),
760 lrint(_video_container_size.height * i.rectangle.y)
766 /* String subtitles (rendered to an image) */
767 if (!j.string.empty()) {
768 auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
769 copy (s.begin(), s.end(), back_inserter (captions));
773 if (captions.empty()) {
/* Flatten all captions into a single positioned image. */
777 return merge (captions);
/* Handle a video frame arriving from a piece's decoder: convert its content
   frame number to DCP time, discard early/ignored frames, fill any gap since
   the last emitted frame (repeating the previous frame or using black, with
   special eye-by-eye handling in 3D), then emit the frame (repeated as the
   frame-rate change requires). */
782 Player::video (weak_ptr<Piece> wp, ContentVideo video)
784 auto piece = wp.lock ();
789 if (!piece->use_video()) {
793 auto frc = piece->frame_rate_change();
/* When the content rate is higher than the DCP rate, drop alternate frames. */
794 if (frc.skip && (video.frame % 2) == 1) {
798 /* Time of the first frame we will emit */
799 DCPTime const time = piece->content_video_to_dcp (video.frame);
800 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
802 /* Discard if it's before the content's period or the last accurate seek. We can't discard
803 if it's after the content's period here as in that case we still need to fill any gap between
804 `now' and the end of the content's period.
806 if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
/* Skip frames that are obscured by overlapping later content. */
810 if (piece->ignore_video && piece->ignore_video->contains(time)) {
814 /* Fill gaps that we discover now that we have some video which needs to be emitted.
815 This is where we need to fill to.
817 DCPTime fill_to = min (time, piece->end(_film));
819 if (_last_video_time) {
820 DCPTime fill_from = max (*_last_video_time, piece->position());
822 /* Fill if we have more than half a frame to do */
823 if ((fill_to - fill_from) > one_video_frame() / 2) {
824 auto last = _last_video.find (wp);
825 if (_film->three_d()) {
826 auto fill_to_eyes = video.eyes;
827 if (fill_to_eyes == Eyes::BOTH) {
828 fill_to_eyes = Eyes::LEFT;
830 if (fill_to == piece->end(_film)) {
831 /* Don't fill after the end of the content */
832 fill_to_eyes = Eyes::LEFT;
835 auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
836 if (eyes == Eyes::BOTH) {
/* Walk forward one eye at a time until we reach the target time/eye. */
839 while (j < fill_to || eyes != fill_to_eyes) {
840 if (last != _last_video.end()) {
841 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
842 auto copy = last->second->shallow_copy();
843 copy->set_eyes (eyes);
844 emit_video (copy, j);
846 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
847 emit_video (black_player_video_frame(eyes), j);
/* Time advances only after the right eye has been emitted. */
849 if (eyes == Eyes::RIGHT) {
850 j += one_video_frame();
852 eyes = increment_eyes (eyes);
/* 2D: fill frame-by-frame with the previous frame, or black. */
855 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
856 if (last != _last_video.end()) {
857 emit_video (last->second, j);
859 emit_video (black_player_video_frame(Eyes::BOTH), j);
866 _last_video[wp] = piece->player_video (video, _film, _video_container_size);
/* Repeat the frame as required by the frame-rate change, but never past the
   end of the content. */
869 for (int i = 0; i < frc.repeat; ++i) {
870 if (t < piece->end(_film)) {
871 emit_video (_last_video[wp], t);
873 t += one_video_frame ();
/* Handle an audio block arriving from a piece's decoder: map it onto the DCP
   timeline, trim anything outside the content's period, apply gain, remap to
   the film's channel layout, run the optional audio processor, and push the
   result into the merger (recording the stream's new push-end time). */
879 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
881 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
883 auto piece = wp.lock ();
888 int const rfr = piece->resampled_audio_frame_rate (_film);
890 /* Compute time in the DCP */
891 auto time = piece->resampled_audio_to_dcp (content_audio.frame, _film);
892 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
894 /* And the end of this block in the DCP */
895 auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
897 /* Remove anything that comes before the start or after the end of the content */
898 if (time < piece->position()) {
899 auto cut = discard_audio (content_audio.audio, time, piece->position());
901 /* This audio is entirely discarded */
904 content_audio.audio = cut.first;
906 } else if (time > piece->end(_film)) {
/* Block starts after the content's end: drop it entirely. */
909 } else if (end > piece->end(_film)) {
/* Block straddles the content's end: keep only the in-period frames. */
910 Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
911 if (remaining_frames == 0) {
914 content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
917 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain (copy first so the decoder's buffer is untouched). */
921 if (piece->audio_gain() != 0) {
922 auto gain = make_shared<AudioBuffers>(content_audio.audio);
923 gain->apply_gain (piece->audio_gain());
924 content_audio.audio = gain;
/* Remap the stream's channels onto the film's channel layout. */
929 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
933 if (_audio_processor) {
934 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
939 _audio_merger.push (content_audio.audio, time);
940 piece->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
/* Handle the start of a bitmap subtitle from a text decoder: apply the
   content's offset/scale settings, scale the image to the container size and
   register the subtitle as active from its DCP start time. */
945 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentBitmapText subtitle)
947 auto piece = wp.lock ();
948 auto content = wc.lock ();
949 auto text = wt.lock ();
950 if (!piece || !content || !text) {
954 /* Apply content's subtitle offsets */
955 subtitle.sub.rectangle.x += text->x_offset ();
956 subtitle.sub.rectangle.y += text->y_offset ();
958 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
959 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
960 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
962 /* Apply content's subtitle scale */
963 subtitle.sub.rectangle.width *= text->x_scale ();
964 subtitle.sub.rectangle.height *= text->y_scale ();
967 auto image = subtitle.sub.image;
969 /* We will scale the subtitle up to fit _video_container_size */
970 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
971 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* A zero-sized subtitle cannot be scaled; drop it. */
972 if (width == 0 || height == 0) {
976 dcp::Size scaled_size (width, height);
977 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
978 auto from = piece->content_time_to_dcp(content, subtitle.from());
979 DCPOMATIC_ASSERT (from);
/* Record the subtitle as active from `from' until its matching stop arrives. */
981 _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
/* Handle the start of a string (plain-text) subtitle: apply the content's
   position offsets and scale (expressed as a size change plus an aspect
   adjustment), set the in-time, and register the subtitle as active. */
986 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentStringText subtitle)
988 auto piece = wp.lock ();
989 auto content = wc.lock ();
990 auto text = wt.lock ();
991 if (!piece || !content || !text) {
996 auto const from = piece->content_time_to_dcp(content, subtitle.from());
997 DCPOMATIC_ASSERT (from);
/* Starting after the content's end: ignore. */
999 if (from > piece->end(_film)) {
1003 for (auto s: subtitle.subs) {
1004 s.set_h_position (s.h_position() + text->x_offset ());
1005 s.set_v_position (s.v_position() + text->y_offset ());
1006 float const xs = text->x_scale();
1007 float const ys = text->y_scale();
1008 float size = s.size();
1010 /* Adjust size to express the common part of the scaling;
1011 e.g. if xs = ys = 0.5 we scale size by 2.
1013 if (xs > 1e-5 && ys > 1e-5) {
1014 size *= 1 / min (1 / xs, 1 / ys);
1018 /* Then express aspect ratio changes */
1019 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1020 s.set_aspect_adjust (xs / ys);
/* dcp::Time with editable-unit denominator 1000 (milliseconds). */
1023 s.set_in (dcp::Time(from->seconds(), 1000));
1024 ps.string.push_back (StringText (s, text->outline_width()));
1025 ps.add_fonts (text->fonts ());
1028 _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
/* Handle the end of a subtitle: close off the matching active text at the
   converted DCP time and, for non-burnt subtitles, emit it via the Text
   signal for downstream consumers. */
1033 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentTime to)
1035 auto content = wc.lock ();
1036 auto text = wt.lock ();
/* No matching start was registered; nothing to stop. */
1041 if (!_active_texts[static_cast<int>(text->type())].have(wt)) {
1045 shared_ptr<Piece> piece = wp.lock ();
1050 auto const dcp_to = piece->content_time_to_dcp(content, to);
1051 DCPOMATIC_ASSERT (dcp_to);
/* Ending after the content's end: ignore. */
1053 if (*dcp_to > piece->end(_film)) {
1057 auto from = _active_texts[static_cast<int>(text->type())].add_to(wt, *dcp_to);
1059 bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
/* Burnt-in subtitles are composited onto video instead of being signalled. */
1060 if (text->use() && !always && !text->burn()) {
1061 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
/* Seek the player to the given DCP time.  @param accurate true to seek to the
   exact time (decoders will discard up to it); false allows landing on a
   nearby decodable point.  Flushes all buffered state, seeks every piece and
   resets the gap trackers and last-emitted times. */
1067 Player::seek (DCPTime time, bool accurate)
1069 boost::mutex::scoped_lock lm (_mutex);
1070 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1073 /* We can't seek in this state */
/* Discard any 3D frames waiting to be paired up. */
1078 _shuffler->clear ();
1083 if (_audio_processor) {
1084 _audio_processor->flush ();
1087 _audio_merger.clear ();
1088 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1089 _active_texts[i].clear ();
1092 for (auto i: _pieces) {
1093 i->seek (_film, time, accurate);
/* Accurate: we know exactly where the next emissions start.
   Inaccurate: leave the last-times unset so the next data is accepted as-is. */
1097 _last_video_time = time;
1098 _last_video_eyes = Eyes::LEFT;
1099 _last_audio_time = time;
1101 _last_video_time = optional<DCPTime>();
1102 _last_video_eyes = optional<Eyes>();
1103 _last_audio_time = optional<DCPTime>();
1106 _black.set_position (time);
1107 _silent.set_position (time);
1109 _last_video.clear ();
/* Queue a video frame for emission.  In 2D mode left-eye frames are promoted
   to BOTH and right-eye frames discarded.  Frames are held in a short delay
   queue so subtitles have time to arrive before the video that needs them;
   the oldest frame is flushed once the queue exceeds the delay. */
1114 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1116 if (!_film->three_d()) {
1117 if (pv->eyes() == Eyes::LEFT) {
1118 /* Use left-eye images for both eyes... */
1119 pv->set_eyes (Eyes::BOTH);
1120 } else if (pv->eyes() == Eyes::RIGHT) {
1121 /* ...and discard the right */
1126 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1127 player before the video that requires them.
1129 _delay.push_back (make_pair (pv, time));
/* Video time only advances once the frame for the final eye is queued. */
1131 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1132 _last_video_time = time + one_video_frame();
1134 _last_video_eyes = increment_eyes (pv->eyes());
/* Hold up to 3 frames before actually emitting. */
1136 if (_delay.size() < 3) {
1140 auto to_do = _delay.front();
1142 do_emit_video (to_do.first, to_do.second);
/* Actually emit a (possibly delayed) video frame: expire old active texts,
   burn any open subtitles for this frame into the image, then signal it.
   NOTE(review): the final Video(...) emission line is elided in this extract. */
1147 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Only expire texts once per frame time (i.e. after the last eye). */
1149 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1150 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1151 _active_texts[i].clear_before (time);
1155 auto subtitles = open_subtitles_for_frame (time);
1157 pv->set_text (subtitles.get ());
/* Emit an audio block via the Audio signal and advance _last_audio_time.
   Audio must be contiguous: a gap/overlap of 2+ samples (at the film's audio
   rate) versus the previous emission is logged and then asserted against. */
1165 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1167 /* Log if the assert below is about to fail */
1168 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1169 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1172 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1173 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1174 Audio (data, time, _film->audio_frame_rate());
1175 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering the given period, in blocks of at most half a second
   to bound the size of each AudioBuffers allocation.  An empty period is a
   no-op; an inverted period trips the assert. */
1180 Player::fill_audio (DCPTimePeriod period)
1182 if (period.from == period.to) {
1186 DCPOMATIC_ASSERT (period.from < period.to);
1188 DCPTime t = period.from;
1189 while (t < period.to) {
1190 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1191 Frame const samples = block.frames_round(_film->audio_frame_rate());
1193 auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1194 silence->make_silent ();
1195 emit_audio (silence, t);
/* @return the duration of one video frame at the film's video frame rate. */
1203 Player::one_video_frame () const
1205 return DCPTime::from_frames (1, _film->video_frame_rate ());
1209 pair<shared_ptr<AudioBuffers>, DCPTime>
/* Drop the samples of `audio' (which starts at `time') that fall before
   `discard_to'.  @return the remaining buffer and its new start time, or an
   empty pair if everything was discarded. */
1210 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1212 auto const discard_time = discard_to - time;
1213 auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1214 auto remaining_frames = audio->frames() - discard_frames;
1215 if (remaining_frames <= 0) {
1216 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1218 auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1219 return make_pair(cut, time + discard_time);
/* Set an optional resolution reduction for DCP decoding (none = full size).
   Uses the PENDING / CANCELLED / DONE Change protocol and rebuilds pieces so
   decoders pick up the new reduction. */
1224 Player::set_dcp_decode_reduction (optional<int> reduction)
1226 Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1229 boost::mutex::scoped_lock lm (_mutex);
1231 if (reduction == _dcp_decode_reduction) {
1233 Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1237 _dcp_decode_reduction = reduction;
1238 setup_pieces_unlocked ();
1241 Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1245 shared_ptr<const Playlist>
/* @return the explicit playlist if one was given at construction, otherwise
   the film's own playlist. */
1246 Player::playlist () const
1248 return _playlist ? _playlist : _film->playlist();
1253 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1255 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);