2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
#include "atmos_decoder.h"
#include "audio_buffers.h"
#include "content_audio.h"
#include "dcp_content.h"
#include "dcpomatic_log.h"
#include "raw_image_proxy.h"
#include "render_text.h"
#include "content_video.h"
#include "player_video.h"
#include "frame_rate_change.h"
#include "audio_processor.h"
#include "referenced_reel_asset.h"
#include "decoder_factory.h"
#include "video_decoder.h"
#include "audio_decoder.h"
#include "text_content.h"
#include "text_decoder.h"
#include "ffmpeg_content.h"
#include "audio_content.h"
#include "dcp_decoder.h"
#include "image_decoder.h"
#include "compose.hpp"
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_closed_caption_asset.h>
#include <boost/foreach.hpp>
#include <cstdlib>
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
84 using namespace dcpomatic;
/* Identifiers passed with the Change signal so that observers can tell
   which aspect of the player's output has changed. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
/** Construct a Player which will play the whole of the given film's own playlist.
 *  NOTE(review): part of the member-initialiser list and the constructor body are
 *  elided from this view of the file.
 */
Player::Player (shared_ptr<const Film> film)
	/* Nothing is ignored by default; ignore flags are set later via the set_ignore_*() methods */
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	/* The merger collects audio at the film's output sample rate */
	, _audio_merger (_film->audio_frame_rate())
/** Construct a Player which plays an explicit playlist rather than the film's own.
 *  @param playlist_ Playlist to play; overrides _film->playlist() (see Player::playlist()).
 *  NOTE(review): parts of the initialiser list and body are elided from this view.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	, _playlist (playlist_)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	/* Rebuild our state whenever the film changes */
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about playlist changes before anybody else, and we proxy
	   the signal through to it, so connect ourselves at the front of the queue. */
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());
	/* Pick up the film's audio processor, if any */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
	/* Accurate seek to the start to put all decoders into a known state */
	seek (DCPTime (), true);
/** Thread-safe wrapper: take the state mutex and rebuild the Piece list. */
Player::setup_pieces ()
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
157 have_video (shared_ptr<const Content> content)
159 return static_cast<bool>(content->video) && content->video->use();
163 have_audio (shared_ptr<const Content> content)
165 return static_cast<bool>(content->audio);
/** Rebuild the list of Pieces (content + decoder + frame-rate-change triples)
 *  from the current playlist.  Caller must hold _mutex.
 *  NOTE(review): several lines (braces, continues, some connects) are elided
 *  from this view of the file.
 */
Player::setup_pieces_unlocked ()
	/* Play either the explicit playlist's length, or the whole film */
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that their decoders can be re-used below */
	list<shared_ptr<Piece> > old_pieces = _pieces;

	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

		/* Skip content whose files have gone missing */
		if (!i->paths_valid ()) {

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */

		/* Re-use the decoder from the previous setup for this content, if there was one,
		   so that its internal state (e.g. seek position) is preserved. */
		shared_ptr<Decoder> old_decoder;
		/* XXX: needs to check vector of Content and use the old decoders, but
		 * this will all be different as we have to coalesce content before
		 * it gets this far. */
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		/* Honour the ignore flags by silencing the corresponding sub-decoders */
		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);

		BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
			i->set_ignore (true);

		/* DCP content may refer to assets in other DCPs rather than decoding them */
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));

		/* Route each text decoder's start/stop signals to our handlers */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));

	/* Track the periods with no video / no audio so that we can fill them with black / silence */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission state to the start of the programme */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
/** Handler for a change to some property of a piece of content in the playlist.
 *  @param type PENDING / DONE / CANCELLED phase of the change.
 *  @param property Property that changed.
 *  @param frequent true if this change is happening rapidly (e.g. from a slider drag).
 *  NOTE(review): some braces and the suspend/resume calls are elided from this view.
 */
Player::playlist_content_change (ChangeType type, int property, bool frequent)
	if (property == VideoContentProperty::CROP) {
		if (type == CHANGE_TYPE_DONE) {
			/* A crop change affects frames already queued in _delay: fix up their metadata */
			dcp::Size const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
				i->first->reset_metadata (_film, vcs);

		if (type == CHANGE_TYPE_PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then. */
		} else if (type == CHANGE_TYPE_DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
		} else if (type == CHANGE_TYPE_CANCELLED) {

	/* Forward the change to our own observers */
	Change (type, property, frequent);
/** Set the size of the "container" into which video will be scaled; emits
 *  PENDING/CANCELLED/DONE Change signals around the update.
 *  NOTE(review): some braces and an early return are elided from this view.
 */
Player::set_video_container_size (dcp::Size s)
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	boost::mutex::scoped_lock lm (_mutex);

	/* No-op: cancel the pending change rather than emitting DONE */
	if (s == _video_container_size) {
		Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	_video_container_size = s;

	/* Pre-build the black frame used to fill video gaps at the new size */
	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
	_black_image->make_black ();

	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/** Handler for a change to the playlist itself: rebuild pieces when the
 *  change is complete, then notify our own observers. */
Player::playlist_change (ChangeType type)
	if (type == CHANGE_TYPE_DONE) {
	Change (type, PlayerProperty::PLAYLIST, false);
/** Handler for a change to some property of the Film.
 *  NOTE(review): some braces / setup_pieces calls are elided from this view.
 */
Player::film_change (ChangeType type, Film::Property p)
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run. */

	if (p == Film::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here. */
		if (type == CHANGE_TYPE_DONE) {
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::AUDIO_PROCESSOR) {
		if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
			/* Make a fresh processor instance at the film's output rate */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
	} else if (p == Film::AUDIO_CHANNELS) {
		if (type == CHANGE_TYPE_DONE) {
			/* Channel count changed: any merged-but-unemitted audio is now invalid */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_merger.clear ();
/** @return a completely black PlayerVideo frame, sized to the video container,
 *  for the given eye(s); used to fill periods where there is no video.
 *  NOTE(review): several constructor arguments are elided from this view.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
	return shared_ptr<PlayerVideo> (
			/* Wrap the pre-built black image (see set_video_container_size) */
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			_video_container_size,
			_video_container_size,
			PresetColourConversion::all().front().conversion,
			/* No originating content for a synthesised frame */
			boost::weak_ptr<Content>(),
			boost::optional<Frame>(),
/** Convert a video frame index within a piece of content to a DCP time.
 *  @param piece Piece containing the content.
 *  @param f Frame index within the content.
 *  @return Time of that frame in the DCP.
 */
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat. */
	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
	return d + piece->position();
/** Convert a frame index in a piece's resampled audio to a DCP time.
 *  @param f Frame index at the film's audio sample rate.
 *  NOTE(review): the final term of the expression (shifting by the piece's
 *  position) is elided from this view.
 */
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
415 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
417 DCPTime s = t - piece->position ();
418 s = min (piece->content->length_after_trim(_film), s);
419 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
423 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
425 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->position());
/** Collect the fonts used by all text content in all pieces.
 *  NOTE(review): the closing braces and `return fonts;` are elided from this view.
 */
list<shared_ptr<Font> >
Player::get_subtitle_fonts ()
	boost::mutex::scoped_lock lm (_mutex);

	list<shared_ptr<Font> > fonts;
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
			/* XXX: things may go wrong if there are duplicate font IDs
			   with different font files. */
			list<shared_ptr<Font> > f = j->fonts ();
			copy (f.begin(), f.end(), back_inserter (fonts));
/** Set this player never to produce any video data */
Player::set_ignore_video ()
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Pieces must be rebuilt so that decoders pick up the new flag */
	setup_pieces_unlocked ();
/** Set this player never to produce any audio data */
Player::set_ignore_audio ()
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Pieces must be rebuilt so that decoders pick up the new flag */
	setup_pieces_unlocked ();
/** Set this player never to produce any text data.
 *  NOTE(review): the line setting the ignore flag is elided from this view.
 */
Player::set_ignore_text ()
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
/** Set the player to always burn open texts into the image regardless of the content settings */
Player::set_always_burn_open_subtitles ()
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
/** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature and the flag assignment for this setter are
   elided from this view; presumably this is the fast-mode setter — confirm. */
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
/** Set the player to decode content referenced by DCPs rather than skipping it */
Player::set_play_referenced ()
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Pieces must be rebuilt so DCP decoders pick up the new flag */
	setup_pieces_unlocked ();
/** Trim a reel asset and, if anything of it remains, add it to a list of referenced assets.
 *  @param a List to add to.
 *  @param r Asset to consider (must be non-null).
 *  @param reel_trim_start Frames to trim from the start of this reel.
 *  @param reel_trim_end Frames to trim from the end of this reel.
 *  @param from DCP time at which this asset starts.
 *  @param ffr Film video frame rate.
 *  NOTE(review): the `a.push_back(...)` wrapping of the ReferencedReelAsset
 *  expression is elided from this view.
 */
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
	DCPOMATIC_ASSERT (r);
	/* Apply the trims to the asset's entry point / duration */
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/** @return reel assets from any DCP content which is marked to refer to
 *  (rather than re-encode) its video/audio/text, with per-reel trims applied.
 *  NOTE(review): several lines (continues, braces, the final return) are
 *  elided from this view of the file.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
		/* Only DCP content can be referenced */
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);

		scoped_ptr<DCPDecoder> decoder;
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims expressed in content frames */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		/* First pass: total duration, so we can count backwards from the end */
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
/* Body of Player::pass(): emit one "chunk" of output (video frame / audio block /
   filler black or silence) from whichever source is farthest behind.
   NOTE(review): the signature, several braces, returns and `switch` scaffolding
   are elided from this view of the file. */
	boost::mutex::scoped_lock lm (_mutex);

		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {

		/* Where this piece's decoder currently is, in DCP time */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->end(_film)) {

		/* Given two choices at the same time, pick the one with texts so we see it before
		   the video frame it relates to. */
		if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
			earliest_content = i;

	if (earliest_content) {

	/* A pending black/silence gap may be even earlier than any content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();

		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was). */
			_last_audio_time = dcp->end (_film);

		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());

		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content. */
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		_silent.set_position (period.to);

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent. */
	DCPTime pull_to = _playback_length;
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		i->update_pull_to (pull_to);
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));

		emit_audio (i->first, i->second);

	/* Flush the video delay queue (done at end of stream) */
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* All texts active during this one-frame period, including those forced on
	   by the always-burn setting */
		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)

		/* Bitmap subtitles */
		BOOST_FOREACH (BitmapText i, j.bitmap) {

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

					/* Convert the normalised rectangle position to pixels */
					lrint (_video_container_size.width * i.rectangle.x),
					lrint (_video_container_size.height * i.rectangle.y)

		/* String subtitles (rendered to an image) */
		if (!j.string.empty ()) {
			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));

	if (captions.empty ()) {
		return optional<PositionImage> ();

	/* Combine all the caption images into one */
	return merge (captions);
/** Handler for video data arriving from a decoder.
 *  @param wp Piece that the data comes from.
 *  @param video The video frame.
 *  Fills any gap since the last emitted frame (with repeats of the previous
 *  frame, or black), then emits this frame, repeating it as the frame-rate
 *  change requires.
 *  NOTE(review): various braces, early returns and some PlayerVideo
 *  constructor arguments are elided from this view of the file.
 */
Player::video (weak_ptr<Piece> wp, ContentVideo video)
	shared_ptr<Piece> piece = wp.lock ();

	if (!piece->content->video->use()) {

	FrameRateChange frc (_film, piece->content);
	/* Skip alternate frames when the content rate is ~double the DCP rate */
	if (frc.skip && (video.frame % 2) == 1) {

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period. */
	if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to. */
	DCPTime fill_to = min (time, piece->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill L/R eyes alternately up to the eye we stop at */
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				if (fill_to == piece->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					/* Only advance time after emitting the right eye */
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					eyes = increment_eyes (eyes);
				/* 2D: fill with repeats of the last frame, or black */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
						emit_video (black_player_video_frame(EYES_BOTH), j);

	/* Build the PlayerVideo for this frame and remember it for gap-filling */
	_last_video[wp].reset (
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
			_video_container_size,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),

	/* Emit the frame, repeated as the frame rate change requires */
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->end(_film)) {
			emit_video (_last_video[wp], t);
		t += one_video_frame ();
/** Handler for audio data arriving from a decoder.
 *  @param wp Piece the data comes from.
 *  @param stream Stream within the content.
 *  @param content_audio The audio data and its frame index.
 *  Trims the block to the piece's period, applies gain / remap / processing,
 *  then pushes it into the merger.
 *  NOTE(review): several braces and early returns are elided from this view.
 */
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Rate at which this content's audio will be played in the DCP */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->position());
			/* This audio is entirely discarded */
		content_audio.audio = cut.first;
	} else if (time > piece->end(_film)) {
		/* Block starts after the content has finished: drop it entirely */
	} else if (end > piece->end(_film)) {
		/* Block overlaps the end of the content: truncate it */
		Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain (copy first so we don't mutate the decoder's buffers) */
	if (content->gain() != 0) {
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;

	/* Remap the stream's channels into the film's channel layout */
	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */
	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());

	/* Push into the merger; pass() pulls it back out when it is complete */
	_audio_merger.push (content_audio.audio, time);
	/* XXX: this almost certainly needs to be more efficient; perhaps pieces fill a map to find
	 * the piece from the stream, then we can call the right piece with no loop. */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		i->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
/** Handler for the start of a bitmap subtitle from a text decoder.
 *  Applies the content's offset/scale settings, scales the image to the
 *  video container and stores it as an active text.
 *  NOTE(review): some braces/returns and the `ps` declaration are elided
 *  from this view of the file.
 */
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	shared_ptr<Image> image = subtitle.sub.image;

	/* We will scale the subtitle up to fit _video_container_size */
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	/* A zero-sized subtitle cannot be scaled or shown */
	if (width == 0 || height == 0) {

	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));

	_active_texts[text->type()].add_from (wc, ps, from);
/** Handler for the start of a plain (string) subtitle from a text decoder.
 *  Applies the content's offset/scale settings to each SubtitleString and
 *  stores the result as an active text.
 *  NOTE(review): some braces/returns and the `ps` declaration are elided
 *  from this view of the file.
 */
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {

	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	/* Ignore subtitles that start after the content has finished */
	if (from > piece->end(_film)) {

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2. */
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);

		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());

	_active_texts[text->type()].add_from (wc, ps, from);
/** Handler for the end of a subtitle from a text decoder.
 *  Closes off the matching active text and, if the subtitle is not to be
 *  burnt in, emits it via the Text signal.
 *  NOTE(review): some braces/returns are elided from this view of the file.
 */
Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
	shared_ptr<const TextContent> text = wc.lock ();

	/* Nothing to stop if we never saw the corresponding start */
	if (!_active_texts[text->type()].have(wc)) {

	shared_ptr<Piece> piece = wp.lock ();

	DCPTime const dcp_to = content_time_to_dcp (piece, to);

	/* Ignore stops which fall after the content has finished */
	if (dcp_to > piece->end(_film)) {

	pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);

	/* Emit as a discrete text unless it is going to be burnt into the image */
	bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (text->use() && !always && !text->burn()) {
		Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/** Seek the player to a DCP time.
 *  @param time Time to seek to.
 *  @param accurate true to seek precisely to `time'; false to allow seeking
 *  to a nearby convenient point (e.g. a keyframe).
 *  NOTE(review): some braces/returns and the if/else around the _last_*
 *  assignments (accurate vs. inaccurate case) are elided from this view.
 */
Player::seek (DCPTime time, bool accurate)
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

		/* We can't seek in this state */

	/* Out-of-order 3D frame state is now stale */
	_shuffler->clear ();

	if (_audio_processor) {
		_audio_processor->flush ();

	/* Drop any merged-but-unemitted audio and any active texts */
	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something). */
			i->decoder->seek (dcp_to_content_time (i, i->position()), true);
		} else if (i->position() <= time && time < i->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			/* After; this piece is done */

	/* Accurate seek: we know exactly where the next output will be */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	/* Inaccurate seek: we don't know where the decoders will resume */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();

	_black.set_position (time);
	_silent.set_position (time);

	_last_video.clear ();
/** Queue a video frame for emission, delaying it slightly so that any
 *  subtitles it needs have time to arrive first.
 *  NOTE(review): a brace, a return and the pop of the delay queue are elided
 *  from this view of the file.
 */
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
	/* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
	   player before the video that requires them. */
	_delay.push_back (make_pair (pv, time));

	/* Advance our idea of time once both eyes (or the single 2D frame) are queued */
	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
		_last_video_time = time + one_video_frame();
	_last_video_eyes = increment_eyes (pv->eyes());

	/* Keep a couple of frames in hand; emit the oldest once the queue is long enough */
	if (_delay.size() < 3) {

	pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
	do_emit_video (to_do.first, to_do.second);
/** Actually emit a video frame: attach any open subtitles for its period and
 *  fire the Video signal.
 *  NOTE(review): some braces and the final signal emission are elided from
 *  this view of the file.
 */
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
	/* Once both eyes of a frame have gone out, texts ending before it are finished with */
	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
		for (int i = 0; i < TEXT_COUNT; ++i) {
			_active_texts[i].clear_before (time);

	/* Burn in any open subtitles for this frame */
	optional<PositionImage> subtitles = open_subtitles_for_frame (time);
		pv->set_text (subtitles.get ());
1178 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1180 /* Log if the assert below is about to fail */
1181 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1182 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1185 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1186 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1187 Audio (data, time, _film->audio_frame_rate());
1188 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/** Emit silence to cover the given period, in blocks of at most 0.5s.
 *  NOTE(review): the loop-advance statement (`t += block;`) and some braces
 *  are elided from this view of the file.
 */
Player::fill_audio (DCPTimePeriod period)
	/* Nothing to do for an empty period */
	if (period.from == period.to) {

	DCPOMATIC_ASSERT (period.from < period.to);

	DCPTime t = period.from;
	while (t < period.to) {
		/* At most half a second of silence per emitted block */
		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
		Frame const samples = block.frames_round(_film->audio_frame_rate());
			shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
			silence->make_silent ();
			emit_audio (silence, t);
1214 Player::one_video_frame () const
1216 return DCPTime::from_frames (1, _film->video_frame_rate ());
1219 pair<shared_ptr<AudioBuffers>, DCPTime>
1220 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1222 DCPTime const discard_time = discard_to - time;
1223 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1224 Frame remaining_frames = audio->frames() - discard_frames;
1225 if (remaining_frames <= 0) {
1226 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1228 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1229 return make_pair(cut, time + discard_time);
/** Set the resolution reduction applied when decoding DCP content; emits
 *  PENDING/CANCELLED/DONE Change signals around the update.
 *  NOTE(review): braces and the early return after the CANCELLED case are
 *  elided from this view of the file.
 */
Player::set_dcp_decode_reduction (optional<int> reduction)
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	boost::mutex::scoped_lock lm (_mutex);

	/* No-op: cancel the pending change rather than emitting DONE */
	if (reduction == _dcp_decode_reduction) {
		Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);

	_dcp_decode_reduction = reduction;
	/* DCP decoders must be rebuilt to pick up the new reduction */
	setup_pieces_unlocked ();

	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1254 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1256 boost::mutex::scoped_lock lm (_mutex);
1258 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1259 if (i->content == content) {
1260 return content_time_to_dcp (i, t);
1264 /* We couldn't find this content; perhaps things are being changed over */
1265 return optional<DCPTime>();
1269 shared_ptr<const Playlist>
1270 Player::playlist () const
1272 return _playlist ? _playlist : _film->playlist();
1277 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1279 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);