/*
    Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
67 using std::dynamic_pointer_cast;
70 using std::make_shared;
76 using std::shared_ptr;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
84 using namespace dcpomatic;
/* Property codes passed with the Change signal so that observers can tell
   which aspect of the player's output has changed. */
86 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
87 int const PlayerProperty::PLAYLIST = 701;
88 int const PlayerProperty::FILM_CONTAINER = 702;
89 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
90 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
91 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player for the whole of the given film (no explicit playlist;
   playlist() will fall back to the film's own playlist).
   NOTE(review): parts of the initializer list and body are not visible in this
   extract — do not assume this is the complete constructor. */
93 Player::Player (shared_ptr<const Film> film)
96 , _tolerant (film->tolerant())
97 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player for a specific playlist within the given film.
   Connects to Film / Playlist change signals and performs an initial
   accurate seek to time zero.
   NOTE(review): some initializer-list and body lines are missing from this
   extract. */
102 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
104 , _playlist (playlist_)
106 , _tolerant (film->tolerant())
107 , _audio_merger (_film->audio_frame_rate())
115 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
116 /* The butler must hear about this first, so since we are proxying this through to the butler we must
119 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
120 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
/* Start with the video container sized to the film's frame */
121 set_video_container_size (_film->frame_size ());
/* Pick up any audio processor the film already has configured */
123 film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
/* Accurate seek to the start so decoding begins from a clean state */
126 seek (DCPTime (), true);
/* Public wrapper: take the state mutex and rebuild the Piece list. */
135 Player::setup_pieces ()
137 boost::mutex::scoped_lock lm (_mutex);
138 setup_pieces_unlocked ();
143 have_video (shared_ptr<const Content> content)
145 return static_cast<bool>(content->video) && content->video->use();
149 have_audio (shared_ptr<const Content> content)
151 return static_cast<bool>(content->audio);
/* Rebuild the list of Pieces (content + decoder + frame-rate change) from the
   playlist.  Re-uses old decoders where the content is unchanged, wires decoder
   signals through to the Player's emit methods, and resets playback state.
   Caller must hold _mutex.
   NOTE(review): several lines (continue statements, closing braces) are missing
   from this extract. */
155 Player::setup_pieces_unlocked ()
157 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so their decoders can be re-used below */
159 auto old_pieces = _pieces;
163 _shuffler = new Shuffler();
164 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
166 for (auto i: playlist()->content()) {
168 if (!i->paths_valid ()) {
172 if (_ignore_video && _ignore_audio && i->text.empty()) {
173 /* We're only interested in text and this content has none */
/* Re-use an existing decoder for this content if we have one */
177 shared_ptr<Decoder> old_decoder;
178 for (auto j: old_pieces) {
179 if (j->content == i) {
180 old_decoder = j->decoder;
185 auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
186 DCPOMATIC_ASSERT (decoder);
188 FrameRateChange frc (_film, i);
190 if (decoder->video && _ignore_video) {
191 decoder->video->set_ignore (true);
194 if (decoder->audio && _ignore_audio) {
195 decoder->audio->set_ignore (true);
199 for (auto i: decoder->text) {
200 i->set_ignore (true);
/* DCP content may refer to assets rather than decoding them */
204 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
206 dcp->set_decode_referenced (_play_referenced);
207 if (_play_referenced) {
208 dcp->set_forced_reduction (_dcp_decode_reduction);
212 auto piece = make_shared<Piece>(i, decoder, frc);
213 _pieces.push_back (piece);
215 if (decoder->video) {
216 if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
217 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
218 decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
220 decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
224 if (decoder->audio) {
225 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Wire up every text decoder's start/stop signals */
228 auto j = decoder->text.begin();
230 while (j != decoder->text.end()) {
231 (*j)->BitmapStart.connect (
232 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
234 (*j)->PlainStart.connect (
235 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
238 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
244 if (decoder->atmos) {
245 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Record the starting position of every audio stream */
249 _stream_states.clear ();
250 for (auto i: _pieces) {
251 if (i->content->audio) {
252 for (auto j: i->content->audio->streams()) {
253 _stream_states[j] = StreamState (i, i->content->position ());
/* Trackers for the gaps where we must emit black video / silence */
258 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
259 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
261 _last_video_time = DCPTime ();
262 _last_video_eyes = Eyes::BOTH;
263 _last_audio_time = DCPTime ();
/* Respond to a change in some piece of playlist content.  PENDING suspends
   pass()/seek() until the change lands; DONE rebuilds the pieces; CANCELLED
   resumes.  A completed crop change also refreshes metadata on any delayed
   frames so they are rendered with the new crop.
   NOTE(review): the suspend/resume and rebuild statements themselves are not
   visible in this extract. */
267 Player::playlist_content_change (ChangeType type, int property, bool frequent)
269 if (property == VideoContentProperty::CROP) {
270 if (type == ChangeType::DONE) {
271 dcp::Size const vcs = video_container_size();
272 boost::mutex::scoped_lock lm (_mutex);
/* Update the frames sitting in the delay queue with the new crop */
273 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
274 i->first->reset_metadata (_film, vcs);
278 if (type == ChangeType::PENDING) {
279 /* The player content is probably about to change, so we can't carry on
280 until that has happened and we've rebuilt our pieces. Stop pass()
281 and seek() from working until then.
284 } else if (type == ChangeType::DONE) {
285 /* A change in our content has gone through. Re-build our pieces. */
288 } else if (type == ChangeType::CANCELLED) {
/* Forward the change to our own observers */
293 Change (type, property, frequent);
/* Set the size of the "container" into which video will be scaled, emitting
   PENDING/CANCELLED/DONE change notifications and re-making the black frame
   at the new size. */
297 Player::set_video_container_size (dcp::Size s)
299 Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
302 boost::mutex::scoped_lock lm (_mutex);
/* No-op: cancel the pending change notification */
304 if (s == _video_container_size) {
306 Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
310 _video_container_size = s;
/* The black filler frame must match the container size */
312 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
313 _black_image->make_black ();
316 Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Respond to a change in the playlist itself: rebuild pieces when the change
   is complete and forward the notification. */
320 Player::playlist_change (ChangeType type)
322 if (type == ChangeType::DONE) {
325 Change (type, PlayerProperty::PLAYLIST, false);
/* Respond to a change of a Film property that affects the player's output,
   notifying listeners and updating dependent state (pieces, audio processor,
   audio merger). */
329 Player::film_change (ChangeType type, Film::Property p)
331 /* Here we should notice Film properties that affect our output, and
332 alert listeners that our output now would be different to how it was
333 last time we were run.
336 if (p == Film::Property::CONTAINER) {
337 Change (type, PlayerProperty::FILM_CONTAINER, false);
338 } else if (p == Film::Property::VIDEO_FRAME_RATE) {
339 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
340 so we need new pieces here.
342 if (type == ChangeType::DONE) {
345 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
346 } else if (p == Film::Property::AUDIO_PROCESSOR) {
347 if (type == ChangeType::DONE && _film->audio_processor ()) {
/* Re-clone the processor at the film's current audio rate */
348 boost::mutex::scoped_lock lm (_mutex);
349 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
351 } else if (p == Film::Property::AUDIO_CHANNELS) {
352 if (type == ChangeType::DONE) {
/* Channel count changed: buffered audio is no longer valid */
353 boost::mutex::scoped_lock lm (_mutex);
354 _audio_merger.clear ();
/* Make a PlayerVideo containing a black frame, at the current container size,
   for the given eye(s).  Used to fill gaps where there is no video content.
   NOTE(review): several constructor-argument lines are missing from this
   extract. */
359 shared_ptr<PlayerVideo>
360 Player::black_player_video_frame (Eyes eyes) const
362 return std::make_shared<PlayerVideo> (
363 std::make_shared<const RawImageProxy>(_black_image),
366 _video_container_size,
367 _video_container_size,
370 PresetColourConversion::all().front().conversion,
372 std::weak_ptr<Content>(),
373 boost::optional<Frame>(),
/* Convert a DCP time to a frame index within the given piece's content,
   accounting for the content's position, trim and frame-rate change. */
379 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
381 DCPTime s = t - piece->content->position ();
382 s = min (piece->content->length_after_trim(_film), s);
383 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
385 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
386 then convert that ContentTime to frames at the content's rate. However this fails for
387 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
388 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
390 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
392 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content frame index to DCP time. */
396 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
398 /* See comment in dcp_to_content_video */
399 auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
400 return d + piece->content->position();
/* Convert a DCP time to a frame index in the piece's resampled audio
   (i.e. at the film's audio frame rate). */
404 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
406 auto s = t - piece->content->position ();
407 s = min (piece->content->length_after_trim(_film), s);
408 /* See notes in dcp_to_content_video */
409 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert a resampled-audio frame index
   back to DCP time. */
413 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
415 /* See comment in dcp_to_content_video */
416 return DCPTime::from_frames (f, _film->audio_frame_rate())
417 - DCPTime (piece->content->trim_start(), piece->frc)
418 + piece->content->position();
/* Convert a DCP time to a ContentTime within the given piece, clamped to the
   content's trimmed length and never before its trimmed start. */
422 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
424 auto s = t - piece->content->position ();
425 s = min (piece->content->length_after_trim(_film), s);
426 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within the given piece to DCP time, clamped to be
   non-negative. */
430 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
432 return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the font data from every piece's decoder, for subtitle rendering.
   NOTE(review): return statement not visible in this extract; presumably
   returns `fonts`. */
436 Player::get_subtitle_fonts ()
438 boost::mutex::scoped_lock lm (_mutex);
440 vector<FontData> fonts;
441 for (auto i: _pieces) {
442 /* XXX: things may go wrong if there are duplicate font IDs
443 with different font files.
445 auto f = i->decoder->fonts ();
446 copy (f.begin(), f.end(), back_inserter(fonts));
452 /** Set this player never to produce any video data */
454 Player::set_ignore_video ()
456 boost::mutex::scoped_lock lm (_mutex);
457 _ignore_video = true;
/* Pieces depend on the ignore flags, so rebuild them */
458 setup_pieces_unlocked ();
/** Set this player never to produce any audio data */
462 Player::set_ignore_audio ()
464 boost::mutex::scoped_lock lm (_mutex);
465 _ignore_audio = true;
466 setup_pieces_unlocked ();
/** Set this player never to produce any text data.
    NOTE(review): the line setting the ignore-text flag is not visible in this
    extract. */
470 Player::set_ignore_text ()
472 boost::mutex::scoped_lock lm (_mutex);
474 setup_pieces_unlocked ();
477 /** Set the player to always burn open texts into the image regardless of the content settings */
479 Player::set_always_burn_open_subtitles ()
481 boost::mutex::scoped_lock lm (_mutex);
482 _always_burn_open_subtitles = true;
485 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and flag assignment are not visible in
   this extract — presumably Player::set_fast() setting _fast = true. */
489 boost::mutex::scoped_lock lm (_mutex);
491 setup_pieces_unlocked ();
/** Set the player to decode referenced DCP content rather than skipping it */
495 Player::set_play_referenced ()
497 boost::mutex::scoped_lock lm (_mutex);
498 _play_referenced = true;
499 setup_pieces_unlocked ();
/* Apply reel trims to a reel asset and, if anything remains after trimming,
   add it to the list `a` with its period in the DCP.
   @param reel_trim_start trim from the start of this reel, in frames
   @param reel_trim_end   trim from the end of this reel, in frames
   @param from            position of this reel in the DCP
   @param ffr             film video frame rate */
503 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
505 DCPOMATIC_ASSERT (r);
506 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
507 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
508 if (r->actual_duration() > 0) {
510 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Gather the reel assets (picture/sound/subtitle/closed-caption) from any DCP
   content that is marked to be referenced rather than re-encoded, applying the
   content's trims to each reel.
   NOTE(review): some control-flow lines (continue/try-catch/closing braces and
   the final return) are not visible in this extract. */
515 list<ReferencedReelAsset>
516 Player::get_reel_assets ()
518 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
520 list<ReferencedReelAsset> a;
522 for (auto i: playlist()->content()) {
/* Only DCP content can be referenced */
523 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
528 scoped_ptr<DCPDecoder> decoder;
530 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
535 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* Trims are expressed in frames at the content's rate */
536 double const cfr = j->video_frame_rate().get();
537 Frame const trim_start = j->trim_start().frames_round (cfr);
538 Frame const trim_end = j->trim_end().frames_round (cfr);
539 int const ffr = _film->video_frame_rate ();
541 /* position in the asset from the start */
542 int64_t offset_from_start = 0;
543 /* position in the asset from the end */
544 int64_t offset_from_end = 0;
545 for (auto k: decoder->reels()) {
546 /* Assume that main picture duration is the length of the reel */
547 offset_from_end += k->main_picture()->actual_duration();
550 for (auto k: decoder->reels()) {
552 /* Assume that main picture duration is the length of the reel */
553 int64_t const reel_duration = k->main_picture()->actual_duration();
555 /* See doc/design/trim_reels.svg */
556 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
557 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
559 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
560 if (j->reference_video ()) {
561 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
564 if (j->reference_audio ()) {
565 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
568 if (j->reference_text (TextType::OPEN_SUBTITLE)) {
569 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
572 if (j->reference_text (TextType::CLOSED_CAPTION)) {
573 for (auto l: k->closed_captions()) {
574 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
/* Advance to the next reel */
578 offset_from_start += reel_duration;
579 offset_from_end -= reel_duration;
/* Body of Player::pass(): emit the next chunk of data (video, audio, black or
   silence) from whichever source is furthest behind, then flush any audio that
   is definitely complete from the merger and any delayed video.
   NOTE(review): the function signature and several control-flow lines (returns,
   switch on what to emit, closing braces) are not visible in this extract. */
589 boost::mutex::scoped_lock lm (_mutex);
592 /* We can't pass in this state */
593 LOG_DEBUG_PLAYER_NC ("Player is suspended");
597 if (_playback_length == DCPTime()) {
598 /* Special; just give one black frame */
599 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
603 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
605 shared_ptr<Piece> earliest_content;
606 optional<DCPTime> earliest_time;
608 for (auto i: _pieces) {
613 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
614 if (t > i->content->end(_film)) {
618 /* Given two choices at the same time, pick the one with texts so we see it before
621 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
623 earliest_content = i;
637 if (earliest_content) {
/* Black / silent gaps compete with content for "who is furthest behind" */
641 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
642 earliest_time = _black.position ();
646 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
647 earliest_time = _silent.position ();
654 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
655 earliest_content->done = earliest_content->decoder->pass ();
656 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
657 if (dcp && !_play_referenced && dcp->reference_audio()) {
658 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
659 to `hide' the fact that no audio was emitted during the referenced DCP (though
660 we need to behave as though it was).
662 _last_audio_time = dcp->end (_film);
667 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
668 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
669 _black.set_position (_black.position() + one_video_frame());
673 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
674 DCPTimePeriod period (_silent.period_at_position());
675 if (_last_audio_time) {
676 /* Sometimes the thing that happened last finishes fractionally before
677 or after this silence. Bodge the start time of the silence to fix it.
678 I think this is nothing to worry about since we will just add or
679 remove a little silence at the end of some content.
/* NOTE(review): labs() takes long; DCPTime::get() is presumably 64-bit, so on
   LP32/LLP64 platforms llabs()/std::abs would be safer — confirm upstream. */
681 int64_t const error = labs(period.from.get() - _last_audio_time->get());
682 /* Let's not worry about less than a frame at 24fps */
683 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
684 if (error >= too_much_error) {
685 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
687 DCPOMATIC_ASSERT (error < too_much_error);
688 period.from = *_last_audio_time;
/* Emit at most one video frame's worth of silence per pass */
690 if (period.duration() > one_video_frame()) {
691 period.to = period.from + one_video_frame();
694 _silent.set_position (period.to);
702 /* Emit any audio that is ready */
704 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
705 of our streams, or the position of the _silent.
707 DCPTime pull_to = _playback_length;
708 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
709 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
710 pull_to = i->second.last_push_end;
713 if (!_silent.done() && _silent.position() < pull_to) {
714 pull_to = _silent.position();
717 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
718 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
719 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
720 if (_last_audio_time && i->second < *_last_audio_time) {
721 /* This new data comes before the last we emitted (or the last seek); discard it */
722 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
727 } else if (_last_audio_time && i->second > *_last_audio_time) {
728 /* There's a gap between this data and the last we emitted; fill with silence */
729 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
732 emit_audio (i->first, i->second);
/* Flush any delayed video frames */
737 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
738 do_emit_video(i->first, i->second);
745 /** @return Open subtitles for the frame at the given time, converted to images */
746 optional<PositionImage>
747 Player::open_subtitles_for_frame (DCPTime time) const
749 list<PositionImage> captions;
750 int const vfr = _film->video_frame_rate();
/* Collect active open subtitles which should be burnt into this frame */
754 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
757 /* Bitmap subtitles */
758 for (auto i: j.bitmap) {
763 /* i.image will already have been scaled to fit _video_container_size */
764 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Position within the container, scaled from the subtitle's fractional rectangle */
770 lrint (_video_container_size.width * i.rectangle.x),
771 lrint (_video_container_size.height * i.rectangle.y)
777 /* String subtitles (rendered to an image) */
778 if (!j.string.empty ()) {
779 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
780 copy (s.begin(), s.end(), back_inserter (captions));
784 if (captions.empty ()) {
785 return optional<PositionImage> ();
788 return merge (captions);
/* Handle a video frame arriving from a decoder: discard frames outside the
   useful range, fill any gap since the last emitted frame (with repeated last
   frame or black, handling 3D eye interleaving), then build a PlayerVideo and
   emit it (repeated as required by the frame-rate change).
   NOTE(review): several lines (early returns, some PlayerVideo constructor
   arguments, closing braces) are not visible in this extract. */
792 Player::video (weak_ptr<Piece> wp, ContentVideo video)
794 shared_ptr<Piece> piece = wp.lock ();
799 if (!piece->content->video->use()) {
803 FrameRateChange frc (_film, piece->content);
/* When skipping, only every other source frame is emitted */
804 if (frc.skip && (video.frame % 2) == 1) {
808 /* Time of the first frame we will emit */
809 DCPTime const time = content_video_to_dcp (piece, video.frame);
810 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
812 /* Discard if it's before the content's period or the last accurate seek. We can't discard
813 if it's after the content's period here as in that case we still need to fill any gap between
814 `now' and the end of the content's period.
816 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
820 /* Fill gaps that we discover now that we have some video which needs to be emitted.
821 This is where we need to fill to.
823 DCPTime fill_to = min (time, piece->content->end(_film));
825 if (_last_video_time) {
826 DCPTime fill_from = max (*_last_video_time, piece->content->position());
828 /* Fill if we have more than half a frame to do */
829 if ((fill_to - fill_from) > one_video_frame() / 2) {
830 LastVideoMap::const_iterator last = _last_video.find (wp);
831 if (_film->three_d()) {
/* In 3D we must fill eye-by-eye up to the eye of the incoming frame */
832 Eyes fill_to_eyes = video.eyes;
833 if (fill_to_eyes == Eyes::BOTH) {
834 fill_to_eyes = Eyes::LEFT;
836 if (fill_to == piece->content->end(_film)) {
837 /* Don't fill after the end of the content */
838 fill_to_eyes = Eyes::LEFT;
840 DCPTime j = fill_from;
841 Eyes eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
842 if (eyes == Eyes::BOTH) {
845 while (j < fill_to || eyes != fill_to_eyes) {
846 if (last != _last_video.end()) {
847 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
848 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
849 copy->set_eyes (eyes);
850 emit_video (copy, j);
852 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
853 emit_video (black_player_video_frame(eyes), j);
/* Advance time only after emitting the RIGHT eye */
855 if (eyes == Eyes::RIGHT) {
856 j += one_video_frame();
858 eyes = increment_eyes (eyes);
/* 2D: fill frame-by-frame with the last frame or black */
861 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
862 if (last != _last_video.end()) {
863 emit_video (last->second, j);
865 emit_video (black_player_video_frame(Eyes::BOTH), j);
/* Remember this frame so it can be re-used to fill later gaps */
872 _last_video[wp].reset (
875 piece->content->video->crop (),
876 piece->content->video->fade (_film, video.frame),
877 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
878 _video_container_size,
881 piece->content->video->colour_conversion(),
882 piece->content->video->range(),
/* Emit the frame, repeated as required by the frame-rate change */
890 for (int i = 0; i < frc.repeat; ++i) {
891 if (t < piece->content->end(_film)) {
892 emit_video (_last_video[wp], t);
894 t += one_video_frame ();
/* Handle an audio block arriving from a decoder: trim it to the content's
   period, apply gain, remap to the film's channel layout, run any audio
   processor, then push it into the merger and record the stream's position.
   NOTE(review): several lines (early returns, remap guard, closing braces) are
   not visible in this extract. */
899 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
901 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
903 shared_ptr<Piece> piece = wp.lock ();
908 shared_ptr<AudioContent> content = piece->content->audio;
909 DCPOMATIC_ASSERT (content);
911 int const rfr = content->resampled_frame_rate (_film);
913 /* Compute time in the DCP */
914 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
915 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
917 /* And the end of this block in the DCP */
918 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
920 /* Remove anything that comes before the start or after the end of the content */
921 if (time < piece->content->position()) {
922 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
924 /* This audio is entirely discarded */
927 content_audio.audio = cut.first;
929 } else if (time > piece->content->end(_film)) {
932 } else if (end > piece->content->end(_film)) {
/* Truncate the block so it ends exactly at the content's end */
933 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
934 if (remaining_frames == 0) {
937 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
940 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain, copying so we don't modify the decoder's buffer */
944 if (content->gain() != 0) {
945 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
946 gain->apply_gain (content->gain ());
947 content_audio.audio = gain;
/* Map the stream's channels onto the film's channel layout */
952 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
956 if (_audio_processor) {
957 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
962 _audio_merger.push (content_audio.audio, time);
963 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
964 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of a bitmap subtitle from a text decoder: apply the
   content's offsets and scales, scale the image to the container size and
   register it as an active text.
   NOTE(review): some lines (early returns, closing braces) are missing from
   this extract. */
968 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
970 shared_ptr<Piece> piece = wp.lock ();
971 shared_ptr<const TextContent> text = wc.lock ();
972 if (!piece || !text) {
976 /* Apply content's subtitle offsets */
977 subtitle.sub.rectangle.x += text->x_offset ();
978 subtitle.sub.rectangle.y += text->y_offset ();
980 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
981 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
982 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
984 /* Apply content's subtitle scale */
985 subtitle.sub.rectangle.width *= text->x_scale ();
986 subtitle.sub.rectangle.height *= text->y_scale ();
989 shared_ptr<Image> image = subtitle.sub.image;
991 /* We will scale the subtitle up to fit _video_container_size */
992 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
993 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* Degenerate subtitle with no area: nothing to show */
994 if (width == 0 || height == 0) {
998 dcp::Size scaled_size (width, height);
999 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1000 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1002 _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
/* Handle the start of a string (plain-text) subtitle: apply the content's
   position offsets, express its scale via font size and aspect adjustment,
   then register it as an active text.
   NOTE(review): some lines (early returns, closing braces, the PlayerText
   declaration) are missing from this extract. */
1006 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
1008 shared_ptr<Piece> piece = wp.lock ();
1009 shared_ptr<const TextContent> text = wc.lock ();
1010 if (!piece || !text) {
1015 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content has ended */
1017 if (from > piece->content->end(_film)) {
1021 for (auto s: subtitle.subs) {
1022 s.set_h_position (s.h_position() + text->x_offset ());
1023 s.set_v_position (s.v_position() + text->y_offset ());
1024 float const xs = text->x_scale();
1025 float const ys = text->y_scale();
1026 float size = s.size();
1028 /* Adjust size to express the common part of the scaling;
1029 e.g. if xs = ys = 0.5 we scale size by 2.
1031 if (xs > 1e-5 && ys > 1e-5) {
1032 size *= 1 / min (1 / xs, 1 / ys);
1036 /* Then express aspect ratio changes */
1037 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1038 s.set_aspect_adjust (xs / ys);
/* Timecodes use millisecond precision */
1041 s.set_in (dcp::Time(from.seconds(), 1000));
1042 ps.string.push_back (StringText (s, text->outline_width()));
1043 ps.add_fonts (text->fonts ());
1046 _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
/* Handle the end of a subtitle: close the corresponding active text and, if
   the text is to be passed on (not burnt in), emit it via the Text signal.
   NOTE(review): some early-return lines are missing from this extract. */
1050 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1052 shared_ptr<const TextContent> text = wc.lock ();
/* Ignore stops for texts we never saw start */
1057 if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
1061 shared_ptr<Piece> piece = wp.lock ();
1066 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1068 if (dcp_to > piece->content->end(_film)) {
1072 pair<PlayerText, DCPTime> from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
/* Burnt-in texts are composited onto video instead of being emitted */
1074 bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1075 if (text->use() && !always && !text->burn()) {
1076 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to `time'.  If `accurate' is true the next emitted data will
   be exactly at `time'; otherwise it may start slightly earlier (e.g. at a
   keyframe).  Flushes all buffered state and seeks every piece's decoder.
   NOTE(review): some lines (suspended check, i->done assignments, closing
   braces) are missing from this extract. */
1081 Player::seek (DCPTime time, bool accurate)
1083 boost::mutex::scoped_lock lm (_mutex);
1084 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1087 /* We can't seek in this state */
/* Drop any part-assembled 3D frames */
1092 _shuffler->clear ();
1097 if (_audio_processor) {
1098 _audio_processor->flush ();
/* Throw away all pending audio and active texts */
1101 _audio_merger.clear ();
1102 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1103 _active_texts[i].clear ();
1106 for (auto i: _pieces) {
1107 if (time < i->content->position()) {
1108 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1109 we must seek this (following) content accurately, otherwise when we come to the end of the current
1110 content we may not start right at the beginning of the next, causing a gap (if the next content has
1111 been trimmed to a point between keyframes, or something).
1113 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1115 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1116 /* During; seek to position */
1117 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1120 /* After; this piece is done */
/* For an accurate seek the next emit must be exactly at `time' */
1126 _last_video_time = time;
1127 _last_video_eyes = Eyes::LEFT;
1128 _last_audio_time = time;
/* Inaccurate seek: we don't know what will come out first */
1130 _last_video_time = optional<DCPTime>();
1131 _last_video_eyes = optional<Eyes>();
1132 _last_audio_time = optional<DCPTime>();
1135 _black.set_position (time);
1136 _silent.set_position (time);
1138 _last_video.clear ();
/* Queue a video frame for emission, via a small delay buffer which gives
   subtitles time to arrive before the video that needs them.
   NOTE(review): some lines (the `_delay.pop_front()'-style dequeue and closing
   braces) are missing from this extract. */
1142 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1144 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1145 player before the video that requires them.
1147 _delay.push_back (make_pair (pv, time));
/* Advance our video clock once the frame (or its RIGHT eye) is complete */
1149 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1150 _last_video_time = time + one_video_frame();
1152 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to three frames in the delay buffer */
1154 if (_delay.size() < 3) {
1158 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1160 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire finished texts, attach any open
   subtitles to the frame and fire the Video signal.
   NOTE(review): the final signal-emission line is not visible in this extract. */
1164 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1166 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1167 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1168 _active_texts[i].clear_before (time);
1172 auto subtitles = open_subtitles_for_frame (time);
1174 pv->set_text (subtitles.get ());
/* Emit an audio block via the Audio signal and advance _last_audio_time.
   Audio must be contiguous: we assert the block starts within one Time unit
   of where the previous one ended. */
1181 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1183 /* Log if the assert below is about to fail */
/* NOTE(review): labs() takes long; DCPTime::get() is presumably 64-bit, so
   llabs()/std::abs may be safer on LP32/LLP64 platforms — confirm upstream. */
1184 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1185 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1188 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1189 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1190 Audio (data, time, _film->audio_frame_rate());
1191 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover the given period, in blocks of at most half a second. */
1195 Player::fill_audio (DCPTimePeriod period)
1197 if (period.from == period.to) {
1201 DCPOMATIC_ASSERT (period.from < period.to);
1203 DCPTime t = period.from;
1204 while (t < period.to) {
1205 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1206 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* NOTE(review): the `samples > 0' guard and `t += block' advance are not
   visible in this extract. */
1208 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1209 silence->make_silent ();
1210 emit_audio (silence, t);
1217 Player::one_video_frame () const
1219 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of `audio' (which starts at `time') that falls before
   `discard_to'.  @return the remaining audio and its new start time, or a null
   buffer if everything was discarded. */
1222 pair<shared_ptr<AudioBuffers>, DCPTime>
1223 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1225 DCPTime const discard_time = discard_to - time;
1226 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1227 Frame remaining_frames = audio->frames() - discard_frames;
1228 if (remaining_frames <= 0) {
1229 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1231 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1232 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode-resolution reduction to use for DCP content,
   emitting PENDING/CANCELLED/DONE notifications and rebuilding pieces. */
1236 Player::set_dcp_decode_reduction (optional<int> reduction)
1238 Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1241 boost::mutex::scoped_lock lm (_mutex);
/* No-op: cancel the pending change notification */
1243 if (reduction == _dcp_decode_reduction) {
1245 Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1249 _dcp_decode_reduction = reduction;
1250 setup_pieces_unlocked ();
1253 Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime in the given content to DCP time by
   finding the matching piece.
   NOTE(review): the fallback return value for unknown content is not visible
   in this extract. */
1257 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1259 boost::mutex::scoped_lock lm (_mutex);
1261 for (auto i: _pieces) {
1262 if (i->content == content) {
1263 return content_time_to_dcp (i, t);
1267 /* We couldn't find this content; perhaps things are being changed over */
1272 shared_ptr<const Playlist>
1273 Player::playlist () const
1275 return _playlist ? _playlist : _film->playlist();
1280 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1282 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);