/*
2 Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
*/
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
29 #include "dcp_content.h"
30 #include "dcp_decoder.h"
31 #include "dcpomatic_log.h"
33 #include "decoder_factory.h"
34 #include "ffmpeg_content.h"
36 #include "frame_rate_change.h"
38 #include "image_decoder.h"
41 #include "piece_video.h"
43 #include "player_video.h"
46 #include "raw_image_proxy.h"
47 #include "referenced_reel_asset.h"
48 #include "render_text.h"
50 #include "text_content.h"
51 #include "text_decoder.h"
53 #include "video_decoder.h"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
68 using std::dynamic_pointer_cast;
71 using std::make_shared;
72 using std::make_shared;
78 using std::shared_ptr;
81 using std::unique_ptr;
82 using boost::optional;
83 #if BOOST_VERSION >= 106100
84 using namespace boost::placeholders;
86 using namespace dcpomatic;
/* Identifiers for the Player's own properties, passed out through the Change
   signal.  Values start at 700 — presumably chosen not to clash with Film or
   Content property identifiers; TODO confirm against those enums. */
89 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
90 int const PlayerProperty::PLAYLIST = 701;
91 int const PlayerProperty::FILM_CONTAINER = 702;
92 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
93 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
94 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player for the whole of @p film's own playlist.
   NOTE(review): several initialiser-list and body lines appear to be elided
   from this excerpt; comments describe only what is visible. */
97 Player::Player (shared_ptr<const Film> film)
100 , _tolerant (film->tolerant())
/* The merger runs at the film's audio sample rate. */
101 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player for an explicit @p playlist_ rather than the film's own. */
107 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
109 , _playlist (playlist_)
111 , _tolerant (film->tolerant())
112 , _audio_merger (_film->audio_frame_rate())
/* Shared post-construction setup: subscribe to change signals, size the
   output, set up the audio processor and seek to the start. */
121 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
122 /* The butler must hear about this first, so since we are proxying this through to the butler we must
125 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
126 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
127 set_video_container_size (_film->frame_size ());
/* Fake a DONE change so _audio_processor is created if the film has one. */
129 film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
132 seek (DCPTime (), true);
/* Rebuild the Piece list, taking the Player mutex first. */
137 Player::setup_pieces ()
139 boost::mutex::scoped_lock lm (_mutex);
140 setup_pieces_unlocked ();
/* Predicate: does this content have video which is in use?  Used (with
   have_audio below) to build the Empty trackers in setup_pieces_unlocked. */
145 have_video (shared_ptr<const Content> content)
147 return static_cast<bool>(content->video) && content->video->use();
/* Predicate: does this content have any audio at all? */
152 have_audio (shared_ptr<const Content> content)
154 return static_cast<bool>(content->audio);
/* Rebuild _pieces from the playlist: create a decoder for each usable piece
   of content, wire its signals to the Player, and reset playback state.
   Caller must hold _mutex.
   NOTE(review): this excerpt elides a number of lines (closing braces,
   `continue`s and some conditions); comments describe only visible code. */
159 Player::setup_pieces_unlocked ()
161 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so their decoders can be re-used where possible. */
163 auto old_pieces = _pieces;
166 _shuffler.reset (new Shuffler());
167 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
169 for (auto i: playlist()->content()) {
171 if (!i->paths_valid ()) {
175 if (_ignore_video && _ignore_audio && i->text.empty()) {
176 /* We're only interested in text and this content has none */
/* Re-use an existing decoder for this content if one of the old pieces had one. */
180 shared_ptr<Decoder> old_decoder;
181 for (auto j: old_pieces) {
182 auto decoder = j->decoder_for(i);
184 old_decoder = decoder;
189 auto decoder = decoder_factory (_film, i, _tolerant, old_decoder);
190 DCPOMATIC_ASSERT (decoder);
192 FrameRateChange frc (_film, i);
194 if (decoder->video && _ignore_video) {
195 decoder->video->set_ignore (true);
198 if (decoder->audio && _ignore_audio) {
199 decoder->audio->set_ignore (true);
203 for (auto i: decoder->text) {
204 i->set_ignore (true);
208 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
210 dcp->set_decode_referenced (_play_referenced);
211 if (_play_referenced) {
212 dcp->set_forced_reduction (_dcp_decode_reduction);
216 auto piece = make_shared<Piece>(_film, i, decoder, frc, _fast);
217 _pieces.push_back (piece);
220 if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
221 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
222 piece->Video.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
224 piece->Video.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
229 piece->Audio.connect (bind(&Player::audio, this, weak_ptr<Piece>(piece), _1));
/* Wire up each text decoder's start/stop signals. */
232 auto j = decoder->text.begin();
234 while (j != decoder->text.end()) {
235 (*j)->BitmapStart.connect (
236 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
238 (*j)->PlainStart.connect (
239 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
242 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
248 if (decoder->atmos) {
249 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Where two 2D pieces' video overlaps, ignore the earlier piece's video
   for the overlapping period (later content wins). */
253 for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
254 if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
255 /* Look for content later in the content list with in-use video that overlaps this */
256 for (auto j = std::next(i); j != _pieces.end(); ++j) {
257 if ((*j)->use_video()) {
258 (*i)->set_ignore_video ((*j)->period().overlap((*i)->period()));
/* Trackers for the periods with no video / no audio, so pass() can fill
   gaps with black frames and silence. */
264 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
265 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
/* Reset emission bookkeeping. */
267 _last_video_time = boost::optional<dcpomatic::DCPTime>();
268 _last_video_eyes = Eyes::BOTH;
269 _last_audio_time = boost::optional<dcpomatic::DCPTime>();
/* Convert a ContentTime within @p content to a DCPTime by asking each Piece
   in turn; asks under the mutex.  Returns nothing if the content is not
   currently in any piece (e.g. mid-change). */
274 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
276 boost::mutex::scoped_lock lm (_mutex);
278 for (auto i: _pieces) {
279 auto dcp = i->content_time_to_dcp(content, t);
285 /* We couldn't find this content; perhaps things are being changed over */
/* Handle a change to some piece of content in the playlist.  Crop changes
   are applied to queued (delayed) frames; other changes suspend the player
   (PENDING) and rebuild pieces when DONE.  The change is then re-emitted
   to our own listeners. */
291 Player::playlist_content_change (ChangeType type, int property, bool frequent)
293 if (property == VideoContentProperty::CROP) {
294 if (type == ChangeType::DONE) {
295 auto const vcs = video_container_size();
296 boost::mutex::scoped_lock lm (_mutex);
/* Update the metadata on frames already sitting in the delay queue. */
297 for (auto const& i: _delay) {
298 i.first->reset_metadata (_film, vcs);
302 if (type == ChangeType::PENDING) {
303 /* The player content is probably about to change, so we can't carry on
304 until that has happened and we've rebuilt our pieces. Stop pass()
305 and seek() from working until then.
308 } else if (type == ChangeType::DONE) {
309 /* A change in our content has gone through. Re-build our pieces. */
312 } else if (type == ChangeType::CANCELLED) {
/* Forward the change notification to our own observers. */
317 Change (type, property, frequent);
/* Set the size into which video output will be letterboxed/placed.
   Emits PENDING, then CANCELLED (if unchanged) or DONE; also rebuilds the
   cached black frame at the new size. */
322 Player::set_video_container_size (dcp::Size s)
324 Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
327 boost::mutex::scoped_lock lm (_mutex);
329 if (s == _video_container_size) {
331 Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
335 _video_container_size = s;
/* Pre-render a black frame at the new container size for gap filling. */
337 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
338 _black_image->make_black ();
341 Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a change to the playlist itself and re-emit it as a PLAYLIST
   property change. */
346 Player::playlist_change (ChangeType type)
348 if (type == ChangeType::DONE) {
351 Change (type, PlayerProperty::PLAYLIST, false);
/* Handle a change to a Film property that affects our output, re-emitting
   the relevant PlayerProperty change and updating internal state. */
356 Player::film_change (ChangeType type, Film::Property p)
358 /* Here we should notice Film properties that affect our output, and
359 alert listeners that our output now would be different to how it was
360 last time we were run.
363 if (p == Film::Property::CONTAINER) {
364 Change (type, PlayerProperty::FILM_CONTAINER, false);
365 } else if (p == Film::Property::VIDEO_FRAME_RATE) {
366 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
367 so we need new pieces here.
369 if (type == ChangeType::DONE) {
372 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
373 } else if (p == Film::Property::AUDIO_PROCESSOR) {
374 if (type == ChangeType::DONE && _film->audio_processor ()) {
375 boost::mutex::scoped_lock lm (_mutex);
/* Clone a fresh processor at the film's sample rate. */
376 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
378 } else if (p == Film::Property::AUDIO_CHANNELS) {
379 if (type == ChangeType::DONE) {
380 boost::mutex::scoped_lock lm (_mutex);
/* Channel count changed: pending merged audio is no longer valid. */
381 _audio_merger.clear ();
/* Build a PlayerVideo wrapping the cached black image, at the current
   container size, for the given @p eyes.  Used to fill video gaps.
   NOTE(review): some constructor arguments are elided in this excerpt. */
387 shared_ptr<PlayerVideo>
388 Player::black_player_video_frame (Eyes eyes) const
390 return std::make_shared<PlayerVideo> (
391 std::make_shared<const RawImageProxy>(_black_image),
394 _video_container_size,
395 _video_container_size,
398 PresetColourConversion::all().front().conversion,
400 std::weak_ptr<Content>(),
401 boost::optional<Frame>(),
/* Collect the fonts used by every piece's subtitles, under the mutex.
   Duplicate font IDs are not de-duplicated (see comment below). */
408 Player::get_subtitle_fonts ()
410 boost::mutex::scoped_lock lm (_mutex);
412 vector<FontData> fonts;
413 for (auto i: _pieces) {
414 /* XXX: things may go wrong if there are duplicate font IDs
415 with different font files.
417 auto f = i->fonts ();
418 copy (f.begin(), f.end(), back_inserter(fonts));
425 /** Set this player never to produce any video data */
427 Player::set_ignore_video ()
429 boost::mutex::scoped_lock lm (_mutex);
430 _ignore_video = true;
/* Pieces cache the ignore flags, so they must be rebuilt. */
431 setup_pieces_unlocked ();
/* Set this player never to produce any audio data. */
436 Player::set_ignore_audio ()
438 boost::mutex::scoped_lock lm (_mutex);
439 _ignore_audio = true;
440 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): the line setting the ignore flag is elided in this excerpt. */
445 Player::set_ignore_text ()
447 boost::mutex::scoped_lock lm (_mutex);
449 setup_pieces_unlocked ();
453 /** Set the player to always burn open texts into the image regardless of the content settings */
455 Player::set_always_burn_open_subtitles ()
457 boost::mutex::scoped_lock lm (_mutex);
458 _always_burn_open_subtitles = true;
462 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and the line setting the fast flag
   are elided in this excerpt; presumably this is Player::set_fast(). */
466 boost::mutex::scoped_lock lm (_mutex);
468 setup_pieces_unlocked ();
/* Make the player decode and play content from referenced DCPs (rather than
   skipping it and expecting the reels to be re-used verbatim). */
473 Player::set_play_referenced ()
475 boost::mutex::scoped_lock lm (_mutex);
476 _play_referenced = true;
477 setup_pieces_unlocked ();
/* Trim @p r by the given start/end frame counts and, if anything remains,
   append it to @p a as a ReferencedReelAsset positioned at @p from.
   @param ffr film frame rate, used to convert the asset duration to DCPTime. */
482 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
484 DCPOMATIC_ASSERT (r);
/* Move the entry point forward and shorten the duration to apply the trims. */
485 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
486 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
487 if (r->actual_duration() > 0) {
489 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Gather the reel assets of any DCP content which is being referenced
   (video/audio/subtitle/caption), trimmed to the content's trim settings.
   NOTE(review): some control-flow lines are elided in this excerpt. */
495 list<ReferencedReelAsset>
496 Player::get_reel_assets ()
498 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
500 list<ReferencedReelAsset> a;
502 for (auto i: playlist()->content()) {
/* Only DCP content can be referenced. */
503 auto j = dynamic_pointer_cast<DCPContent> (i);
508 unique_ptr<DCPDecoder> decoder;
510 decoder.reset (new DCPDecoder(_film, j, false, shared_ptr<DCPDecoder>()));
515 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* Trims are expressed in the content's own frame rate... */
516 double const cfr = j->video_frame_rate().get();
517 Frame const trim_start = j->trim_start().frames_round (cfr);
518 Frame const trim_end = j->trim_end().frames_round (cfr);
/* ...but positions in the output use the film's frame rate. */
519 int const ffr = _film->video_frame_rate ();
521 /* position in the asset from the start */
522 int64_t offset_from_start = 0;
523 /* position in the asset from the end */
524 int64_t offset_from_end = 0;
525 for (auto k: decoder->reels()) {
526 /* Assume that main picture duration is the length of the reel */
527 offset_from_end += k->main_picture()->actual_duration();
530 for (auto k: decoder->reels()) {
532 /* Assume that main picture duration is the length of the reel */
533 int64_t const reel_duration = k->main_picture()->actual_duration();
535 /* See doc/design/trim_reels.svg */
536 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
537 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
539 auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
540 if (j->reference_video ()) {
541 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
544 if (j->reference_audio ()) {
545 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
548 if (j->reference_text (TextType::OPEN_SUBTITLE)) {
549 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
552 if (j->reference_text (TextType::CLOSED_CAPTION)) {
553 for (auto l: k->closed_captions()) {
554 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
/* Advance the running offsets to the next reel. */
558 offset_from_start += reel_duration;
559 offset_from_end -= reel_duration;
/* NOTE(review): the signature is elided from this excerpt; this is
   presumably Player::pass() — emit one "chunk" of output: ask the
   furthest-behind decoder (or black/silence filler) to produce data, then
   flush any audio that is now complete and any delayed video. */
570 boost::mutex::scoped_lock lm (_mutex);
573 /* We can't pass in this state */
574 LOG_DEBUG_PLAYER_NC ("Player is suspended");
578 if (_playback_length == DCPTime()) {
579 /* Special; just give one black frame */
580 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
584 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
586 shared_ptr<Piece> earliest_content;
587 optional<DCPTime> earliest_time;
589 for (auto i: _pieces) {
590 auto time = i->decoder_before(earliest_time);
592 earliest_time = *time;
593 earliest_content = i;
606 if (earliest_content) {
/* Black/silence fillers compete with content for "who is furthest behind". */
610 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
611 earliest_time = _black.position ();
615 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
616 earliest_time = _silent.position ();
623 earliest_content->pass();
624 if (!_play_referenced && earliest_content->reference_dcp_audio()) {
625 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
626 to `hide' the fact that no audio was emitted during the referenced DCP (though
627 we need to behave as though it was).
629 _last_audio_time = earliest_content->end ();
/* Fill a video gap with one black frame. */
634 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
635 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
636 _black.set_position (_black.position() + one_video_frame());
/* Fill an audio gap with up to one video frame's worth of silence. */
640 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
641 DCPTimePeriod period (_silent.period_at_position());
642 if (_last_audio_time) {
643 /* Sometimes the thing that happened last finishes fractionally before
644 or after this silence. Bodge the start time of the silence to fix it.
645 I think this is nothing to worry about since we will just add or
646 remove a little silence at the end of some content.
648 int64_t const error = labs(period.from.get() - _last_audio_time->get());
649 /* Let's not worry about less than a frame at 24fps */
650 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
651 if (error >= too_much_error) {
652 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
654 DCPOMATIC_ASSERT (error < too_much_error);
655 period.from = *_last_audio_time;
657 if (period.duration() > one_video_frame()) {
658 period.to = period.from + one_video_frame();
661 _silent.set_position (period.to);
669 /* Emit any audio that is ready */
671 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
672 of our streams, or the position of the _silent.
674 auto pull_to = _playback_length;
675 for (auto i: _pieces) {
676 i->update_pull_to (pull_to);
678 if (!_silent.done() && _silent.position() < pull_to) {
679 pull_to = _silent.position();
682 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
683 auto audio = _audio_merger.pull (pull_to);
684 for (auto i = audio.begin(); i != audio.end(); ++i) {
685 if (_last_audio_time && i->second < *_last_audio_time) {
686 /* This new data comes before the last we emitted (or the last seek); discard it */
687 auto cut = discard_audio (i->first, i->second, *_last_audio_time);
692 } else if (_last_audio_time && i->second > *_last_audio_time) {
693 /* There's a gap between this data and the last we emitted; fill with silence */
694 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
697 emit_audio (i->first, i->second);
/* Flush any video frames whose delay has expired. */
702 for (auto const& i: _delay) {
703 do_emit_video(i.first, i.second);
711 /** @return Open subtitles for the frame at the given time, converted to images */
712 optional<PositionImage>
713 Player::open_subtitles_for_frame (DCPTime time) const
715 list<PositionImage> captions;
716 int const vfr = _film->video_frame_rate();
/* Gather the texts which should be burnt in during this one-frame period. */
720 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
723 /* Bitmap subtitles */
724 for (auto i: j.bitmap) {
729 /* i.image will already have been scaled to fit _video_container_size */
730 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Rectangle coordinates are proportions of the container size. */
736 lrint(_video_container_size.width * i.rectangle.x),
737 lrint(_video_container_size.height * i.rectangle.y)
743 /* String subtitles (rendered to an image) */
744 if (!j.string.empty()) {
745 auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
746 copy (s.begin(), s.end(), back_inserter (captions));
750 if (captions.empty()) {
/* Combine all the caption images into a single positioned image. */
754 return merge (captions);
/* Handle a video frame arriving from a Piece's decoder: discard if out of
   range, fill any gap since the last emitted frame (with repeats of the
   last frame or black), then emit the new frame.
   NOTE(review): several lines (returns, braces, an intermediate time
   variable) are elided in this excerpt. */
759 Player::video (weak_ptr<Piece> wp, PieceVideo video)
761 auto piece = wp.lock ();
766 if (!piece->use_video()) {
770 auto frc = piece->frame_rate_change();
/* When the content rate is higher than the DCP rate, skip odd frames. */
771 if (frc.skip && (video.frame % 2) == 1) {
775 /* Time of the first frame we will emit */
776 DCPTime const time = piece->content_video_to_dcp (video.frame);
777 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
779 /* Discard if it's before the content's period or the last accurate seek. We can't discard
780 if it's after the content's period here as in that case we still need to fill any gap between
781 `now' and the end of the content's period.
783 if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
787 if (piece->ignore_video_at(time)) {
791 /* Fill gaps that we discover now that we have some video which needs to be emitted.
792 This is where we need to fill to.
794 DCPTime fill_to = min (time, piece->end());
796 if (_last_video_time) {
797 DCPTime fill_from = max (*_last_video_time, piece->position());
799 /* Fill if we have more than half a frame to do */
800 if ((fill_to - fill_from) > one_video_frame() / 2) {
801 auto last = _last_video.find (wp);
802 if (_film->three_d()) {
/* In 3D we must fill eye-by-eye, stopping at the correct eye. */
803 auto fill_to_eyes = video.eyes;
804 if (fill_to_eyes == Eyes::BOTH) {
805 fill_to_eyes = Eyes::LEFT;
807 if (fill_to == piece->end()) {
808 /* Don't fill after the end of the content */
809 fill_to_eyes = Eyes::LEFT;
812 auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
813 if (eyes == Eyes::BOTH) {
816 while (j < fill_to || eyes != fill_to_eyes) {
817 if (last != _last_video.end()) {
818 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
/* Re-emit a copy of the last frame with the eye we need. */
819 auto copy = last->second->shallow_copy();
820 copy->set_eyes (eyes);
821 emit_video (copy, j);
823 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
824 emit_video (black_player_video_frame(eyes), j);
826 if (eyes == Eyes::RIGHT) {
827 j += one_video_frame();
829 eyes = increment_eyes (eyes);
/* 2D: fill frame-by-frame with the last frame, or black. */
832 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
833 if (last != _last_video.end()) {
834 emit_video (last->second, j);
836 emit_video (black_player_video_frame(Eyes::BOTH), j);
/* Remember this frame so future gaps can repeat it. */
843 _last_video[wp] = piece->player_video (video, _video_container_size);
/* Emit the frame, repeated as required by the frame-rate change. */
846 for (int i = 0; i < frc.repeat; ++i) {
847 if (t < piece->end()) {
848 emit_video (_last_video[wp], t);
850 t += one_video_frame ();
/* Handle an audio block arriving from a Piece's decoder: trim it to the
   content's period, apply gain/remap/processing, then push it into the
   merger.  NOTE(review): some lines (returns, braces) are elided. */
856 Player::audio (weak_ptr<Piece> wp, PieceAudio piece_audio)
858 DCPOMATIC_ASSERT (piece_audio.audio->frames() > 0);
860 auto piece = wp.lock ();
865 int const rfr = piece->resampled_audio_frame_rate ();
867 /* Compute time in the DCP */
868 auto time = piece->resampled_audio_to_dcp (piece_audio.frame);
869 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", piece_audio.frame, to_string(time));
871 /* And the end of this block in the DCP */
872 auto end = time + DCPTime::from_frames(piece_audio.audio->frames(), rfr);
874 /* Remove anything that comes before the start or after the end of the content */
875 if (time < piece->position()) {
876 auto cut = discard_audio (piece_audio.audio, time, piece->position());
878 /* This audio is entirely discarded */
881 piece_audio.audio = cut.first;
883 } else if (time > piece->end()) {
886 } else if (end > piece->end()) {
/* Block straddles the end of the content: keep only the part inside. */
887 Frame const remaining_frames = DCPTime(piece->end() - time).frames_round(rfr);
888 if (remaining_frames == 0) {
891 piece_audio.audio = make_shared<AudioBuffers>(piece_audio.audio, remaining_frames, 0);
894 DCPOMATIC_ASSERT (piece_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any. */
898 if (piece->audio_gain() != 0) {
899 auto gain = make_shared<AudioBuffers>(piece_audio.audio);
900 gain->apply_gain (piece->audio_gain());
901 piece_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
906 piece_audio.audio = remap (piece_audio.audio, _film->audio_channels(), piece_audio.stream->mapping());
910 if (_audio_processor) {
911 piece_audio.audio = _audio_processor->run (piece_audio.audio, _film->audio_channels ());
/* Queue the audio and record how far this stream has been pushed. */
916 _audio_merger.push (piece_audio.audio, time);
917 piece->set_last_push_end (piece_audio.stream, time + DCPTime::from_frames(piece_audio.audio->frames(), _film->audio_frame_rate()));
/* Handle the start of a bitmap subtitle from a text decoder: apply the
   content's offset/scale settings, scale the image to the container size
   and record it as an active text. */
922 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentBitmapText subtitle)
924 auto piece = wp.lock ();
925 auto content = wc.lock ();
926 auto text = wt.lock ();
927 if (!piece || !content || !text) {
931 /* Apply content's subtitle offsets */
932 subtitle.sub.rectangle.x += text->x_offset ();
933 subtitle.sub.rectangle.y += text->y_offset ();
935 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
936 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
937 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
939 /* Apply content's subtitle scale */
940 subtitle.sub.rectangle.width *= text->x_scale ();
941 subtitle.sub.rectangle.height *= text->y_scale ();
944 auto image = subtitle.sub.image;
946 /* We will scale the subtitle up to fit _video_container_size */
947 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
948 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
949 if (width == 0 || height == 0) {
953 dcp::Size scaled_size (width, height);
954 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
955 auto from = piece->content_time_to_dcp(content, subtitle.from());
956 DCPOMATIC_ASSERT (from);
/* Register the subtitle as active from its start time. */
958 _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
/* Handle the start of a plain (string) subtitle: apply position offsets,
   express the content's x/y scales via size and aspect adjustment, then
   record it as an active text. */
963 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentStringText subtitle)
965 auto piece = wp.lock ();
966 auto content = wc.lock ();
967 auto text = wt.lock ();
968 if (!piece || !content || !text) {
973 auto const from = piece->content_time_to_dcp(content, subtitle.from());
974 DCPOMATIC_ASSERT (from);
/* Ignore subtitles which start after the content has finished. */
976 if (from > piece->end()) {
980 for (auto s: subtitle.subs) {
981 s.set_h_position (s.h_position() + text->x_offset ());
982 s.set_v_position (s.v_position() + text->y_offset ());
983 float const xs = text->x_scale();
984 float const ys = text->y_scale();
985 float size = s.size();
987 /* Adjust size to express the common part of the scaling;
988 e.g. if xs = ys = 0.5 we scale size by 2.
990 if (xs > 1e-5 && ys > 1e-5) {
991 size *= 1 / min (1 / xs, 1 / ys);
995 /* Then express aspect ratio changes */
996 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
997 s.set_aspect_adjust (xs / ys);
/* dcp::Time's second argument is the timecode rate (1000 = milliseconds). */
1000 s.set_in (dcp::Time(from->seconds(), 1000));
1001 ps.string.push_back (StringText (s, text->outline_width()));
1002 ps.add_fonts (text->fonts ());
1005 _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
/* Handle the end of a subtitle: close the active text and, if the subtitle
   is not being burnt in, emit it via the Text signal. */
1010 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentTime to)
1012 auto content = wc.lock ();
1013 auto text = wt.lock ();
/* Nothing to do if we never saw the corresponding start. */
1018 if (!_active_texts[static_cast<int>(text->type())].have(wt)) {
1022 shared_ptr<Piece> piece = wp.lock ();
1027 auto const dcp_to = piece->content_time_to_dcp(content, to);
1028 DCPOMATIC_ASSERT (dcp_to);
1030 if (*dcp_to > piece->end()) {
1034 auto from = _active_texts[static_cast<int>(text->type())].add_to(wt, *dcp_to);
1036 bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1037 if (text->use() && !always && !text->burn()) {
1038 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
/* Seek the player to @p time.  @p accurate true means frame-accurate output
   is required from the new position; if false, decoders may start slightly
   early and the last-emitted bookkeeping is reset to "unknown". */
1044 Player::seek (DCPTime time, bool accurate)
1046 boost::mutex::scoped_lock lm (_mutex);
1047 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1050 /* We can't seek in this state */
/* Throw away buffered 3D frames, pending processed audio and active texts. */
1055 _shuffler->clear ();
1060 if (_audio_processor) {
1061 _audio_processor->flush ();
1064 _audio_merger.clear ();
1065 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1066 _active_texts[i].clear ();
1069 for (auto i: _pieces) {
1070 i->seek (time, accurate);
/* For an accurate seek we know exactly where the next output will be... */
1074 _last_video_time = time;
1075 _last_video_eyes = Eyes::LEFT;
1076 _last_audio_time = time;
/* ...otherwise we don't, so reset the bookkeeping. */
1078 _last_video_time = optional<DCPTime>();
1079 _last_video_eyes = optional<Eyes>();
1080 _last_audio_time = optional<DCPTime>();
1083 _black.set_position (time);
1084 _silent.set_position (time);
1086 _last_video.clear ();
/* Queue a video frame for emission.  In 2D, left-eye frames are promoted to
   BOTH and right-eye frames dropped.  Frames pass through a small delay
   queue so that subtitles for the same time can arrive first. */
1091 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1093 if (!_film->three_d()) {
1094 if (pv->eyes() == Eyes::LEFT) {
1095 /* Use left-eye images for both eyes... */
1096 pv->set_eyes (Eyes::BOTH);
1097 } else if (pv->eyes() == Eyes::RIGHT) {
1098 /* ...and discard the right */
1103 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1104 player before the video that requires them.
1106 _delay.push_back (make_pair (pv, time));
/* Only advance the clock once a full frame (both eyes) has been queued. */
1108 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1109 _last_video_time = time + one_video_frame();
1111 _last_video_eyes = increment_eyes (pv->eyes());
/* Hold up to three frames in the delay queue before actually emitting. */
1113 if (_delay.size() < 3) {
1117 auto to_do = _delay.front();
1119 do_emit_video (to_do.first, to_do.second);
/* Actually emit a (possibly delayed) video frame: expire stale active
   texts, burn in any open subtitles for this time, then signal the frame.
   NOTE(review): the final Video(...) emission line is elided here. */
1124 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1126 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1127 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1128 _active_texts[i].clear_before (time);
1132 auto subtitles = open_subtitles_for_frame (time);
1134 pv->set_text (subtitles.get ());
/* Emit an audio block via the Audio signal and advance _last_audio_time.
   Asserts that audio is emitted contiguously (within half a 48kHz sample). */
1142 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1144 /* Log if the assert below is about to fail */
1145 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1146 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1149 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1150 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1151 Audio (data, time, _film->audio_frame_rate());
1152 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @p period, in blocks of at most half a second. */
1157 Player::fill_audio (DCPTimePeriod period)
1159 if (period.from == period.to) {
1163 DCPOMATIC_ASSERT (period.from < period.to);
1165 DCPTime t = period.from;
1166 while (t < period.to) {
1167 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1168 Frame const samples = block.frames_round(_film->audio_frame_rate());
1170 auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1171 silence->make_silent ();
1172 emit_audio (silence, t);
/* @return the DCPTime duration of one video frame at the film's rate. */
1180 Player::one_video_frame () const
1182 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Cut the part of @p audio which lies before @p discard_to.
   @return the remaining audio and its new start time, or a null buffer if
   everything was discarded. */
1186 pair<shared_ptr<AudioBuffers>, DCPTime>
1187 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1189 auto const discard_time = discard_to - time;
1190 auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1191 auto remaining_frames = audio->frames() - discard_frames;
1192 if (remaining_frames <= 0) {
1193 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1195 auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1196 return make_pair(cut, time + discard_time);
/* Set the resolution reduction used when decoding DCP content (none for
   full resolution).  Emits PENDING, then CANCELLED if unchanged or DONE
   after the pieces are rebuilt. */
1201 Player::set_dcp_decode_reduction (optional<int> reduction)
1203 Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1206 boost::mutex::scoped_lock lm (_mutex);
1208 if (reduction == _dcp_decode_reduction) {
1210 Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1214 _dcp_decode_reduction = reduction;
1215 setup_pieces_unlocked ();
1218 Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* @return the playlist we are playing: the explicit one given at
   construction, if any, otherwise the film's own playlist. */
1222 shared_ptr<const Playlist>
1223 Player::playlist () const
1225 return _playlist ? _playlist : _film->playlist();
/* Forward Atmos data from a decoder via the Atmos signal, converting the
   frame index to DCPTime at the film's video rate. */
1230 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1232 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);