2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
/* Bring the boost smart-pointer names used throughout this file into scope
   (this pre-dates the project's move to std:: smart pointers).
   NOTE(review): this extract is missing lines — the embedded numbering skips —
   so e.g. the #endif matching the #if below is not visible here. */
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
/* boost.signals2 stopped providing _1/_2 placeholders globally in 1.61;
   pull them in explicitly on newer boost. */
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
84 using namespace dcpomatic;
/* Property identifiers emitted with the Player's Change signal so observers
   can tell what aspect of the player's output has (or is about to be) changed.
   Values are arbitrary but must be distinct from other Change-emitting classes. */
86 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
87 int const PlayerProperty::PLAYLIST = 701;
88 int const PlayerProperty::FILM_CONTAINER = 702;
89 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
90 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
91 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player for the whole of @param film's own playlist.
   The audio merger is created at the film's audio sample rate so all
   incoming audio can be mixed on a common timebase.
   NOTE(review): initializer-list lines are missing from this extract
   (numbering skips); do not assume this is the full member list. */
93 Player::Player (shared_ptr<const Film> film)
96 , _ignore_video (false)
97 , _ignore_audio (false)
98 , _ignore_text (false)
99 , _always_burn_open_subtitles (false)
101 , _tolerant (film->tolerant())
102 , _play_referenced (false)
103 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player for an explicit @param playlist_ rather than the film's own.
   The fragments below also include what appears to be the shared construct()
   body: signal hookups and initial state.
   NOTE(review): lines are missing from this extract; the function boundary
   between the initializer list and construct() is not visible. */
109 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
111 , _playlist (playlist_)
113 , _ignore_video (false)
114 , _ignore_audio (false)
115 , _ignore_text (false)
116 , _always_burn_open_subtitles (false)
118 , _tolerant (film->tolerant())
119 , _play_referenced (false)
120 , _audio_merger (_film->audio_frame_rate())
/* Re-build pieces when the film or playlist changes. */
129 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
130 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front ensures our handler runs before other observers of the playlist. */
133 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
134 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
135 set_video_container_size (_film->frame_size ());
/* Kick the audio processor setup as if the film property had just changed. */
137 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to zero puts all decoders in a known starting state. */
140 seek (DCPTime (), true);
/* Public entry point: take the state lock and rebuild the piece list. */
149 Player::setup_pieces ()
151 boost::mutex::scoped_lock lm (_mutex);
152 setup_pieces_unlocked ();
/* Predicate: does this content provide (used) video?  Bound into _black below. */
157 have_video (shared_ptr<const Content> content)
159 return static_cast<bool>(content->video) && content->video->use();
/* Predicate: does this content provide audio?  Bound into _silent below. */
163 have_audio (shared_ptr<const Content> content)
165 return static_cast<bool>(content->audio);
/* Rebuild _pieces (content + decoder pairs) from the playlist, wiring each
   decoder's output signals into this Player.  Caller must hold _mutex.
   Old decoders are kept and handed to decoder_factory so that decode state
   can be reused where possible.
   NOTE(review): this extract is missing lines (numbering skips) — several
   closing braces, `continue`s and else-branches are not visible. */
169 Player::setup_pieces_unlocked ()
171 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
173 list<shared_ptr<Piece> > old_pieces = _pieces;
/* Shuffler re-orders 3D L/R frames that arrive out of sequence. */
177 _shuffler = new Shuffler();
178 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
180 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Skip content whose files have gone missing. */
182 if (!i->paths_valid ()) {
186 if (_ignore_video && _ignore_audio && i->text.empty()) {
187 /* We're only interested in text and this content has none */
/* Reuse the previous decoder for this content, if there was one. */
191 shared_ptr<Decoder> old_decoder;
192 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
193 if (j->content == i) {
194 old_decoder = j->decoder;
199 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
200 DCPOMATIC_ASSERT (decoder);
202 FrameRateChange frc (_film, i);
204 if (decoder->video && _ignore_video) {
205 decoder->video->set_ignore (true);
208 if (decoder->audio && _ignore_audio) {
209 decoder->audio->set_ignore (true);
/* presumably guarded by _ignore_text — the condition line is missing here. */
213 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
214 i->set_ignore (true);
218 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
220 dcp->set_decode_referenced (_play_referenced);
221 if (_play_referenced) {
222 dcp->set_forced_reduction (_dcp_decode_reduction);
226 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
227 _pieces.push_back (piece);
229 if (decoder->video) {
230 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
231 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
232 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
/* 2D (or interleaved-3D) video goes straight to Player::video. */
234 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
238 if (decoder->audio) {
239 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
242 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
244 while (j != decoder->text.end()) {
245 (*j)->BitmapStart.connect (
246 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
248 (*j)->PlainStart.connect (
249 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
252 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
258 if (decoder->atmos) {
259 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Track per-audio-stream progress so pass() knows how far it may emit. */
263 _stream_states.clear ();
264 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
265 if (i->content->audio) {
266 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
267 _stream_states[j] = StreamState (i, i->content->position ());
/* _black/_silent are the gaps in the playlist with no video/audio content. */
272 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
273 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
275 _last_video_time = DCPTime ();
276 _last_video_eyes = EYES_BOTH;
277 _last_audio_time = DCPTime ();
/* React to a change in some piece of playlist content.
   @param type      PENDING / DONE / CANCELLED phase of the change.
   @param property  Which content property changed.
   @param frequent  True for rapid-fire changes (e.g. slider drags).
   Crop changes only need the queued frames' metadata refreshed; most other
   changes require a suspend + piece rebuild (lines missing in this extract). */
281 Player::playlist_content_change (ChangeType type, int property, bool frequent)
283 if (property == VideoContentProperty::CROP) {
284 if (type == CHANGE_TYPE_DONE) {
285 dcp::Size const vcs = video_container_size();
286 boost::mutex::scoped_lock lm (_mutex);
/* Update metadata on frames already queued in _delay so they pick up the new crop. */
287 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
288 i->first->reset_metadata (_film, vcs);
292 if (type == CHANGE_TYPE_PENDING) {
293 /* The player content is probably about to change, so we can't carry on
294 until that has happened and we've rebuilt our pieces. Stop pass()
295 and seek() from working until then.
298 } else if (type == CHANGE_TYPE_DONE) {
299 /* A change in our content has gone through. Re-build our pieces. */
302 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Proxy the change notification on to our own observers. */
307 Change (type, property, frequent);
/* Set the size of the 'container' into which all video output is placed.
   Emits PENDING before, then either CANCELLED (no-op if size is unchanged)
   or DONE after; the black frame is re-made at the new size. */
311 Player::set_video_container_size (dcp::Size s)
313 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
316 boost::mutex::scoped_lock lm (_mutex);
318 if (s == _video_container_size) {
/* presumably the lock is released before signalling here — the
   unlock line is missing from this extract. */
320 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
324 _video_container_size = s;
326 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
327 _black_image->make_black ();
330 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist-level change (content added/removed/reordered): rebuild pieces
   when done, then notify observers. */
334 Player::playlist_change (ChangeType type)
336 if (type == CHANGE_TYPE_DONE) {
339 Change (type, PlayerProperty::PLAYLIST, false);
/* React to a change to a Film property that could alter our output.
   @param type  PENDING / DONE / CANCELLED.
   @param p     Film property that changed. */
343 Player::film_change (ChangeType type, Film::Property p)
345 /* Here we should notice Film properties that affect our output, and
346 alert listeners that our output now would be different to how it was
347 last time we were run.
350 if (p == Film::CONTAINER) {
351 Change (type, PlayerProperty::FILM_CONTAINER, false);
352 } else if (p == Film::VIDEO_FRAME_RATE) {
353 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
354 so we need new pieces here.
356 if (type == CHANGE_TYPE_DONE) {
359 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
360 } else if (p == Film::AUDIO_PROCESSOR) {
361 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
362 boost::mutex::scoped_lock lm (_mutex);
/* Clone the processor at our sample rate; the old one (if any) is replaced. */
363 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
365 } else if (p == Film::AUDIO_CHANNELS) {
366 if (type == CHANGE_TYPE_DONE) {
367 boost::mutex::scoped_lock lm (_mutex);
/* Pending mixed audio was built for the old channel count; discard it. */
368 _audio_merger.clear ();
/* Make a PlayerVideo of plain black at the container size, for the given
   @param eyes (2D or one eye of 3D).  Used to fill gaps in video.
   NOTE(review): several constructor-argument lines are missing from this
   extract (numbering skips). */
373 shared_ptr<PlayerVideo>
374 Player::black_player_video_frame (Eyes eyes) const
376 return shared_ptr<PlayerVideo> (
378 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
381 _video_container_size,
382 _video_container_size,
385 PresetColourConversion::all().front().conversion,
/* No originating content / frame for synthetic black. */
387 boost::weak_ptr<Content>(),
388 boost::optional<Frame>(),
/* Convert a DCP timeline time @param t to a video frame index within
   @param piece's content, accounting for position, trim and frame-rate change. */
395 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
397 DCPTime s = t - piece->content->position ();
398 s = min (piece->content->length_after_trim(_film), s);
399 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
401 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
402 then convert that ContentTime to frames at the content's rate. However this fails for
403 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
404 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
406 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
408 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: content video frame @param f -> DCP time. */
412 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
414 /* See comment in dcp_to_content_video */
415 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
416 return d + piece->content->position();
/* Convert DCP time @param t to a resampled-audio frame index within @param piece,
   at the film's audio frame rate. */
420 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
422 DCPTime s = t - piece->content->position ();
423 s = min (piece->content->length_after_trim(_film), s);
424 /* See notes in dcp_to_content_video */
425 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: resampled audio frame @param f -> DCP time. */
429 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
431 /* See comment in dcp_to_content_video */
432 return DCPTime::from_frames (f, _film->audio_frame_rate())
433 - DCPTime (piece->content->trim_start(), piece->frc)
434 + piece->content->position();
/* Convert DCP time @param t to a ContentTime within @param piece (clamped to
   the content's trimmed length, never negative). */
438 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
440 DCPTime s = t - piece->content->position ();
441 s = min (piece->content->length_after_trim(_film), s);
442 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime @param t within @param piece to DCP timeline time. */
446 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
448 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every decoder's subtitles/texts.
   @return all FontData, possibly with duplicates across pieces. */
452 Player::get_subtitle_fonts ()
454 boost::mutex::scoped_lock lm (_mutex);
456 vector<FontData> fonts;
457 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
458 /* XXX: things may go wrong if there are duplicate font IDs
459 with different font files.
461 vector<FontData> f = i->decoder->fonts ();
462 copy (f.begin(), f.end(), back_inserter(fonts));
468 /** Set this player never to produce any video data */
470 Player::set_ignore_video ()
472 boost::mutex::scoped_lock lm (_mutex);
473 _ignore_video = true;
/* Rebuild so new decoders are created with video ignored. */
474 setup_pieces_unlocked ();
/* Set this player never to produce any audio data (mirrors set_ignore_video). */
478 Player::set_ignore_audio ()
480 boost::mutex::scoped_lock lm (_mutex);
481 _ignore_audio = true;
482 setup_pieces_unlocked ();
/* Set this player never to produce any subtitle/text data.
   NOTE(review): the line assigning _ignore_text = true is missing from
   this extract (numbering skips 488 -> 490). */
486 Player::set_ignore_text ()
488 boost::mutex::scoped_lock lm (_mutex);
490 setup_pieces_unlocked ();
493 /** Set the player to always burn open texts into the image regardless of the content settings */
495 Player::set_always_burn_open_subtitles ()
497 boost::mutex::scoped_lock lm (_mutex);
/* No piece rebuild needed: this only affects image composition, not decoding. */
498 _always_burn_open_subtitles = true;
501 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and the `_fast = true` line are
   missing from this extract; presumably this is Player::set_fast(). */
505 boost::mutex::scoped_lock lm (_mutex);
507 setup_pieces_unlocked ();
/* Make the player decode content from referenced DCPs (rather than skipping it). */
511 Player::set_play_referenced ()
513 boost::mutex::scoped_lock lm (_mutex);
514 _play_referenced = true;
/* Rebuild so DCPDecoders get set_decode_referenced(true). */
515 setup_pieces_unlocked ();
/* Helper for get_reel_assets: apply reel-level trims to @param r and append it
   to @param a if anything of it remains.
   @param reel_trim_start/reel_trim_end  Frames to trim within this reel.
   @param from  DCP timeline position of the (untrimmed) reel start.
   @param ffr   Film video frame rate. */
519 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
521 DCPOMATIC_ASSERT (r);
/* Mutates the asset's entry point / duration in place to reflect the trim. */
522 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
523 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
524 if (r->actual_duration() > 0) {
526 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Gather the reel assets of any DCP content that is being 'referenced'
   (i.e. passed through into the output DCP rather than re-encoded),
   with per-reel trims applied.  See doc/design/trim_reels.svg. */
531 list<ReferencedReelAsset>
532 Player::get_reel_assets ()
534 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
536 list<ReferencedReelAsset> a;
538 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Only DCP content can be referenced. */
539 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
544 scoped_ptr<DCPDecoder> decoder;
/* fast=false, tolerant=false, no old decoder: we only need the reel list.
   NOTE(review): the try/catch presumably wrapping this is missing here. */
546 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
551 DCPOMATIC_ASSERT (j->video_frame_rate ());
552 double const cfr = j->video_frame_rate().get();
553 Frame const trim_start = j->trim_start().frames_round (cfr);
554 Frame const trim_end = j->trim_end().frames_round (cfr);
555 int const ffr = _film->video_frame_rate ();
557 /* position in the asset from the start */
558 int64_t offset_from_start = 0;
559 /* position in the asset from the end */
560 int64_t offset_from_end = 0;
/* First pass: total duration so offset_from_end can count down. */
561 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
562 /* Assume that main picture duration is the length of the reel */
563 offset_from_end += k->main_picture()->actual_duration();
566 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
568 /* Assume that main picture duration is the length of the reel */
569 int64_t const reel_duration = k->main_picture()->actual_duration();
571 /* See doc/design/trim_reels.svg */
572 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
573 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
575 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
576 if (j->reference_video ()) {
577 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
580 if (j->reference_audio ()) {
581 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
584 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
585 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
588 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
589 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
590 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
594 offset_from_start += reel_duration;
595 offset_from_end -= reel_duration;
/* One step of playback: find the decoder (or black/silent gap) that is
   furthest behind and make it emit some data, then push any ready audio
   and delayed video out.  Presumably Player::pass() — the signature line
   is missing from this extract, as are various braces/returns. */
605 boost::mutex::scoped_lock lm (_mutex);
608 /* We can't pass in this state */
609 LOG_DEBUG_PLAYER_NC ("Player is suspended");
613 if (_playback_length == DCPTime()) {
614 /* Special; just give one black frame */
615 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
619 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
621 shared_ptr<Piece> earliest_content;
622 optional<DCPTime> earliest_time;
624 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Decoder position clamped to at least the trim start. */
629 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
630 if (t > i->content->end(_film)) {
634 /* Given two choices at the same time, pick the one with texts so we see it before
637 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
639 earliest_content = i;
653 if (earliest_content) {
/* Black/silent gaps also compete with content for "furthest behind". */
657 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
658 earliest_time = _black.position ();
662 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
663 earliest_time = _silent.position ();
670 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
671 earliest_content->done = earliest_content->decoder->pass ();
672 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
673 if (dcp && !_play_referenced && dcp->reference_audio()) {
674 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
675 to `hide' the fact that no audio was emitted during the referenced DCP (though
676 we need to behave as though it was).
678 _last_audio_time = dcp->end (_film);
/* Case: the gap content (black) is furthest behind. */
683 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
684 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
685 _black.set_position (_black.position() + one_video_frame());
/* Case: the silent gap is furthest behind. */
689 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
690 DCPTimePeriod period (_silent.period_at_position());
691 if (_last_audio_time) {
692 /* Sometimes the thing that happened last finishes fractionally before
693 or after this silence. Bodge the start time of the silence to fix it.
694 I think this is nothing to worry about since we will just add or
695 remove a little silence at the end of some content.
697 int64_t const error = labs(period.from.get() - _last_audio_time->get());
698 /* Let's not worry about less than a frame at 24fps */
699 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
700 if (error >= too_much_error) {
701 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
703 DCPOMATIC_ASSERT (error < too_much_error);
704 period.from = *_last_audio_time;
/* Emit silence at most one video frame at a time. */
706 if (period.duration() > one_video_frame()) {
707 period.to = period.from + one_video_frame();
710 _silent.set_position (period.to);
718 /* Emit any audio that is ready */
720 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
721 of our streams, or the position of the _silent.
723 DCPTime pull_to = _playback_length;
724 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
725 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
726 pull_to = i->second.last_push_end;
729 if (!_silent.done() && _silent.position() < pull_to) {
730 pull_to = _silent.position();
733 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
734 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
735 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
736 if (_last_audio_time && i->second < *_last_audio_time) {
737 /* This new data comes before the last we emitted (or the last seek); discard it */
738 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
743 } else if (_last_audio_time && i->second > *_last_audio_time) {
744 /* There's a gap between this data and the last we emitted; fill with silence */
745 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
748 emit_audio (i->first, i->second);
/* Flush the video delay queue (presumably guarded by a "done" condition
   whose line is missing from this extract). */
753 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
754 do_emit_video(i->first, i->second);
761 /** @return Open subtitles for the frame at the given time, converted to images */
762 optional<PositionImage>
763 Player::open_subtitles_for_frame (DCPTime time) const
765 list<PositionImage> captions;
766 int const vfr = _film->video_frame_rate();
/* Iterate over burnt subtitles active during this one-frame period.
   NOTE(review): the surrounding loop/FOREACH line is missing from this extract. */
770 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
773 /* Bitmap subtitles */
774 BOOST_FOREACH (BitmapText i, j.bitmap) {
779 /* i.image will already have been scaled to fit _video_container_size */
/* Rectangle coordinates are fractions of the container size. */
780 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
786 lrint (_video_container_size.width * i.rectangle.x),
787 lrint (_video_container_size.height * i.rectangle.y)
793 /* String subtitles (rendered to an image) */
794 if (!j.string.empty ()) {
795 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
796 copy (s.begin(), s.end(), back_inserter (captions));
800 if (captions.empty ()) {
801 return optional<PositionImage> ();
/* Flatten all caption images into a single positioned image. */
804 return merge (captions);
/* Handler for a decoder's video Data signal.
   @param wp     The Piece the video came from (weak; may have been rebuilt away).
   @param video  The decoded frame plus its content frame index / eyes.
   Fills any gap since the last emitted frame (with repeats of the previous
   frame or black), then emits this frame (repeated per FrameRateChange).
   NOTE(review): numerous lines (braces, returns, ctor arguments) are missing
   from this extract. */
808 Player::video (weak_ptr<Piece> wp, ContentVideo video)
810 shared_ptr<Piece> piece = wp.lock ();
815 if (!piece->content->video->use()) {
819 FrameRateChange frc (_film, piece->content);
/* When skipping (content faster than DCP) drop every other frame. */
820 if (frc.skip && (video.frame % 2) == 1) {
824 /* Time of the first frame we will emit */
825 DCPTime const time = content_video_to_dcp (piece, video.frame);
826 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
828 /* Discard if it's before the content's period or the last accurate seek. We can't discard
829 if it's after the content's period here as in that case we still need to fill any gap between
830 `now' and the end of the content's period.
832 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
836 /* Fill gaps that we discover now that we have some video which needs to be emitted.
837 This is where we need to fill to.
839 DCPTime fill_to = min (time, piece->content->end(_film));
841 if (_last_video_time) {
842 DCPTime fill_from = max (*_last_video_time, piece->content->position());
844 /* Fill if we have more than half a frame to do */
845 if ((fill_to - fill_from) > one_video_frame() / 2) {
846 LastVideoMap::const_iterator last = _last_video.find (wp);
847 if (_film->three_d()) {
848 Eyes fill_to_eyes = video.eyes;
849 if (fill_to_eyes == EYES_BOTH) {
850 fill_to_eyes = EYES_LEFT;
852 if (fill_to == piece->content->end(_film)) {
853 /* Don't fill after the end of the content */
854 fill_to_eyes = EYES_LEFT;
856 DCPTime j = fill_from;
857 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
858 if (eyes == EYES_BOTH) {
/* In 3D, step eye-by-eye; time advances after each RIGHT eye. */
861 while (j < fill_to || eyes != fill_to_eyes) {
862 if (last != _last_video.end()) {
863 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
864 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
865 copy->set_eyes (eyes);
866 emit_video (copy, j);
868 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
869 emit_video (black_player_video_frame(eyes), j);
871 if (eyes == EYES_RIGHT) {
872 j += one_video_frame();
874 eyes = increment_eyes (eyes);
/* 2D fill: repeat last frame (or black) once per video frame. */
877 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
878 if (last != _last_video.end()) {
879 emit_video (last->second, j);
881 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Remember this frame (as a PlayerVideo) so future gaps can repeat it. */
888 _last_video[wp].reset (
891 piece->content->video->crop (),
892 piece->content->video->fade (_film, video.frame),
893 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
894 _video_container_size,
897 piece->content->video->colour_conversion(),
898 piece->content->video->range(),
/* Emit the frame frc.repeat times (slow content repeated up to DCP rate). */
906 for (int i = 0; i < frc.repeat; ++i) {
907 if (t < piece->content->end(_film)) {
908 emit_video (_last_video[wp], t);
910 t += one_video_frame ();
/* Handler for a decoder's audio Data signal.
   @param wp             Originating piece (weak).
   @param stream         The audio stream within that piece's content.
   @param content_audio  Decoded samples plus their content frame index.
   Trims the block to the content's period, applies gain/remap/processing,
   then pushes it into the merger and records how far this stream has got. */
915 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
917 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
919 shared_ptr<Piece> piece = wp.lock ();
924 shared_ptr<AudioContent> content = piece->content->audio;
925 DCPOMATIC_ASSERT (content);
927 int const rfr = content->resampled_frame_rate (_film);
929 /* Compute time in the DCP */
930 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
931 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
933 /* And the end of this block in the DCP */
934 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
936 /* Remove anything that comes before the start or after the end of the content */
937 if (time < piece->content->position()) {
938 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
940 /* This audio is entirely discarded */
943 content_audio.audio = cut.first;
945 } else if (time > piece->content->end(_film)) {
948 } else if (end > piece->content->end(_film)) {
/* Block straddles the end of the content: keep only what fits. */
949 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
950 if (remaining_frames == 0) {
953 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
956 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain: applied to a copy so the decoder's buffer is not mutated. */
960 if (content->gain() != 0) {
961 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
962 gain->apply_gain (content->gain ());
963 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
968 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
972 if (_audio_processor) {
973 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
978 _audio_merger.push (content_audio.audio, time);
979 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
/* pass() uses last_push_end to decide how far audio may safely be emitted. */
980 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a TextDecoder's BitmapStart signal: apply the content's
   offset/scale to the subtitle rectangle, scale the image to the container
   and register it as an active text starting at its DCP time. */
984 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
986 shared_ptr<Piece> piece = wp.lock ();
987 shared_ptr<const TextContent> text = wc.lock ();
988 if (!piece || !text) {
992 /* Apply content's subtitle offsets */
993 subtitle.sub.rectangle.x += text->x_offset ();
994 subtitle.sub.rectangle.y += text->y_offset ();
996 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
997 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
998 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
1000 /* Apply content's subtitle scale */
1001 subtitle.sub.rectangle.width *= text->x_scale ();
1002 subtitle.sub.rectangle.height *= text->y_scale ();
1005 shared_ptr<Image> image = subtitle.sub.image;
1007 /* We will scale the subtitle up to fit _video_container_size */
1008 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1009 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* Degenerate rectangle: nothing to show. */
1010 if (width == 0 || height == 0) {
1014 dcp::Size scaled_size (width, height);
1015 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1016 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1018 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a TextDecoder's PlainStart signal: apply the content's
   position offsets and scaling to each SubtitleString, then register the
   group as an active text starting at its DCP time. */
1022 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
1024 shared_ptr<Piece> piece = wp.lock ();
1025 shared_ptr<const TextContent> text = wc.lock ();
1026 if (!piece || !text) {
1031 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Drop subtitles that start after the content's trimmed end. */
1033 if (from > piece->content->end(_film)) {
1037 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
1038 s.set_h_position (s.h_position() + text->x_offset ());
1039 s.set_v_position (s.v_position() + text->y_offset ());
1040 float const xs = text->x_scale();
1041 float const ys = text->y_scale();
1042 float size = s.size();
1044 /* Adjust size to express the common part of the scaling;
1045 e.g. if xs = ys = 0.5 we scale size by 2.
1047 if (xs > 1e-5 && ys > 1e-5) {
1048 size *= 1 / min (1 / xs, 1 / ys);
1052 /* Then express aspect ratio changes */
1053 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1054 s.set_aspect_adjust (xs / ys);
/* In/out times are set in milliseconds-precision dcp::Time. */
1057 s.set_in (dcp::Time(from.seconds(), 1000));
1058 ps.string.push_back (StringText (s, text->outline_width()));
1059 ps.add_fonts (text->fonts ());
1062 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a TextDecoder's Stop signal: close the active text that
   started earlier and, if it is not being burnt in, emit it via the Text
   signal with its full DCP time period. */
1066 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1068 shared_ptr<const TextContent> text = wc.lock ();
/* Ignore stops for texts we never saw start (e.g. after a seek). */
1073 if (!_active_texts[text->type()].have(wc)) {
1077 shared_ptr<Piece> piece = wp.lock ();
1082 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1084 if (dcp_to > piece->content->end(_film)) {
1088 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Burnt-in texts are composited into video instead of emitted here. */
1090 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1091 if (text->use() && !always && !text->burn()) {
1092 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @param time.
   @param accurate  If true, decoders must land exactly on time (and the
   last-time markers are primed so pre-seek data is discarded); if false a
   nearby earlier position is acceptable and the markers are cleared. */
1097 Player::seek (DCPTime time, bool accurate)
1099 boost::mutex::scoped_lock lm (_mutex);
1100 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1103 /* We can't seek in this state */
/* Discard buffered out-of-order 3D frames from before the seek. */
1108 _shuffler->clear ();
1113 if (_audio_processor) {
1114 _audio_processor->flush ();
1117 _audio_merger.clear ();
1118 for (int i = 0; i < TEXT_COUNT; ++i) {
1119 _active_texts[i].clear ();
1122 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1123 if (time < i->content->position()) {
1124 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1125 we must seek this (following) content accurately, otherwise when we come to the end of the current
1126 content we may not start right at the beginning of the next, causing a gap (if the next content has
1127 been trimmed to a point between keyframes, or something).
1129 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1131 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1132 /* During; seek to position */
1133 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1136 /* After; this piece is done */
/* presumably inside `if (accurate)` — the condition line is missing here. */
1142 _last_video_time = time;
1143 _last_video_eyes = EYES_LEFT;
1144 _last_audio_time = time;
1146 _last_video_time = optional<DCPTime>();
1147 _last_video_eyes = optional<Eyes>();
1148 _last_audio_time = optional<DCPTime>();
1151 _black.set_position (time);
1152 _silent.set_position (time);
1154 _last_video.clear ();
/* Queue a video frame for emission, delaying it slightly so that any
   subtitles for the same time can arrive first; advances the last-video
   time/eyes bookkeeping. */
1158 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1160 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1161 player before the video that requires them.
1163 _delay.push_back (make_pair (pv, time));
/* Time only advances once the frame (or its right eye) is complete. */
1165 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1166 _last_video_time = time + one_video_frame();
1168 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to 3 frames queued; emit the oldest once the queue is full.
   NOTE(review): the pop of the emitted entry is missing from this extract. */
1170 if (_delay.size() < 3) {
1174 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1176 do_emit_video (to_do.first, to_do.second);
/* Actually emit a queued video frame: expire stale texts, burn in any open
   subtitles for this time, and fire the Video signal (signal-firing line is
   missing from this extract). */
1180 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1182 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1183 for (int i = 0; i < TEXT_COUNT; ++i) {
1184 _active_texts[i].clear_before (time);
1188 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
/* presumably guarded by `if (subtitles)` — the condition line is missing here. */
1190 pv->set_text (subtitles.get ());
/* Fire the Audio signal for @param data at @param time and advance
   _last_audio_time.  Audio must be emitted strictly contiguously. */
1197 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1199 /* Log if the assert below is about to fail */
1200 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1201 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1204 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1205 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1206 Audio (data, time, _film->audio_frame_rate());
1207 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover @param period, in blocks of at most 0.5s. */
1211 Player::fill_audio (DCPTimePeriod period)
1213 if (period.from == period.to) {
1217 DCPOMATIC_ASSERT (period.from < period.to);
1219 DCPTime t = period.from;
1220 while (t < period.to) {
1221 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1222 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* presumably guarded by `samples > 0` and followed by `t += block` —
   those lines are missing from this extract. */
1224 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1225 silence->make_silent ();
1226 emit_audio (silence, t);
/* @return the duration of one video frame at the film's video frame rate. */
1233 Player::one_video_frame () const
1235 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of @param audio (starting at @param time) that lies before
   @param discard_to.
   @return the remaining samples and their new start time; a null buffer if
   everything was discarded. */
1238 pair<shared_ptr<AudioBuffers>, DCPTime>
1239 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1241 DCPTime const discard_time = discard_to - time;
1242 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1243 Frame remaining_frames = audio->frames() - discard_frames;
1244 if (remaining_frames <= 0) {
1245 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1247 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1248 return make_pair(cut, time + discard_time);
/* Set a decode-resolution reduction for DCP content (each step halves the
   JPEG2000 decode size); none = full resolution.  Emits PENDING, then
   CANCELLED if unchanged or DONE after rebuilding pieces. */
1252 Player::set_dcp_decode_reduction (optional<int> reduction)
1254 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1257 boost::mutex::scoped_lock lm (_mutex);
1259 if (reduction == _dcp_decode_reduction) {
/* presumably the lock is released before signalling here — the unlock
   line is missing from this extract. */
1261 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1265 _dcp_decode_reduction = reduction;
1266 setup_pieces_unlocked ();
1269 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert ContentTime @param t within @param content to DCP
   time, by finding the piece for that content.
   @return empty if the content is not currently in our piece list. */
1273 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1275 boost::mutex::scoped_lock lm (_mutex);
1277 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1278 if (i->content == content) {
1279 return content_time_to_dcp (i, t);
1283 /* We couldn't find this content; perhaps things are being changed over */
1284 return optional<DCPTime>();
/* @return the playlist we are playing: the explicit one given at
   construction, falling back to the film's own playlist. */
1288 shared_ptr<const Playlist>
1289 Player::playlist () const
1291 return _playlist ? _playlist : _film->playlist();
/* Handler for an AtmosDecoder's Data signal: re-emit via the Atmos signal
   with the frame index converted to DCP time. */
1296 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1298 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);