2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
84 using namespace dcpomatic;
/* Identifiers for the properties whose changes the Player announces
   through its Change signal (values 700-705, kept distinct from other
   property enums in the project). */
86 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
87 int const PlayerProperty::PLAYLIST = 701;
88 int const PlayerProperty::FILM_CONTAINER = 702;
89 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
90 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
91 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/** Construct a Player that takes its content from @param film's own playlist.
    All the boolean "ignore"/"burn"/"reference" switches default to off, and the
    AudioMerger is sized to the film's audio frame rate.
    NOTE(review): this extraction is missing lines here (e.g. the _film member
    initialiser and the constructor body) -- confirm against the full source. */
93 Player::Player (shared_ptr<const Film> film)
96 , _ignore_video (false)
97 , _ignore_audio (false)
98 , _ignore_text (false)
99 , _always_burn_open_subtitles (false)
101 , _tolerant (film->tolerant())
102 , _play_referenced (false)
103 , _audio_merger (_film->audio_frame_rate())
/** Construct a Player using an explicit @param playlist_ rather than the film's
    own playlist.  The trailing statements wire the Player up: it listens to
    Film and Playlist change signals, sizes the video container, picks up the
    film's audio processor, and seeks to the start.
    NOTE(review): lines are missing from this extraction (constructor body
    braces, comment continuation at original line 130) -- the signal-wiring
    statements presumably live in a shared construct step; confirm against the
    full source. */
109 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
111 , _playlist (playlist_)
113 , _ignore_video (false)
114 , _ignore_audio (false)
115 , _ignore_text (false)
116 , _always_burn_open_subtitles (false)
118 , _tolerant (film->tolerant())
119 , _play_referenced (false)
120 , _audio_merger (_film->audio_frame_rate())
129 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
130 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front ensures the Player's handler runs before other listeners on the
   playlist Change signal. */
133 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
134 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
135 set_video_container_size (_film->frame_size ());
136 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to time 0 so the first pass() starts from a clean state. */
140 seek (DCPTime (), true);
/** Thread-safe wrapper: take the state mutex and rebuild the Piece list. */
149 Player::setup_pieces ()
151 boost::mutex::scoped_lock lm (_mutex);
152 setup_pieces_unlocked ();
/* Predicate used when building the _black Empty: true if this content has
   video and that video is enabled for use.
   NOTE(review): the return type / braces are missing from this extraction. */
157 have_video (shared_ptr<const Content> content)
159 return static_cast<bool>(content->video) && content->video->use();
/* Predicate used when building the _silent Empty: true if this content has
   any audio.  NOTE(review): return type / braces missing from this extraction. */
163 have_audio (shared_ptr<const Content> content)
165 return static_cast<bool>(content->audio);
/** Rebuild _pieces from the current playlist: create (or reuse) a decoder for
    each content item, apply the ignore flags, connect decoder output signals
    to the Player's handlers, and reset the black/silent gap trackers and the
    last-emitted time state.  Caller must hold _mutex.
    NOTE(review): several lines (braces, `continue`s, Text connection calls)
    are missing from this extraction -- confirm against the full source. */
169 Player::setup_pieces_unlocked ()
171 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces around so their decoders can be reused below. */
173 list<shared_ptr<Piece> > old_pieces = _pieces;
177 _shuffler = new Shuffler();
178 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
180 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Skip content whose files have gone missing. */
182 if (!i->paths_valid ()) {
186 if (_ignore_video && _ignore_audio && i->text.empty()) {
187 /* We're only interested in text and this content has none */
/* Reuse the previous decoder for unchanged content, so decode state survives. */
191 shared_ptr<Decoder> old_decoder;
192 /* XXX: needs to check vector of Content and use the old decoders, but
193 * this will all be different as we have to coalesce content before
195 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
196 if (j->content == i) {
197 old_decoder = j->decoder;
203 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
204 DCPOMATIC_ASSERT (decoder);
206 FrameRateChange frc (_film, i);
208 if (decoder->video && _ignore_video) {
209 decoder->video->set_ignore (true);
212 if (decoder->audio && _ignore_audio) {
213 decoder->audio->set_ignore (true);
/* NOTE(review): presumably guarded by _ignore_text in the missing lines. */
217 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
218 i->set_ignore (true);
/* DCP content needs extra configuration for referenced (VF) playback. */
222 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
224 dcp->set_decode_referenced (_play_referenced);
225 if (_play_referenced) {
226 dcp->set_forced_reduction (_dcp_decode_reduction);
230 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
231 _pieces.push_back (piece);
233 if (decoder->video) {
234 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
235 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
236 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
238 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
242 if (decoder->audio) {
243 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Hook each text decoder's start/stop signals up to the subtitle handlers. */
246 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
248 while (j != decoder->text.end()) {
249 (*j)->BitmapStart.connect (
250 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
252 (*j)->PlainStart.connect (
253 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
256 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
262 if (decoder->atmos) {
263 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Track the gaps in the playlist where we must emit black video / silence. */
267 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
268 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
270 _last_video_time = DCPTime ();
271 _last_video_eyes = EYES_BOTH;
272 _last_audio_time = DCPTime ();
/** Handle a ContentChange from the playlist.  A crop change only requires the
    delayed PlayerVideos' metadata to be refreshed; other changes suspend the
    Player while pending and rebuild the pieces when done.  The change is then
    re-emitted to our own listeners.
    NOTE(review): lines are missing from this extraction (the suspend/resume
    calls and setup_pieces() call implied by the comments) -- confirm. */
276 Player::playlist_content_change (ChangeType type, int property, bool frequent)
278 if (property == VideoContentProperty::CROP) {
279 if (type == CHANGE_TYPE_DONE) {
280 dcp::Size const vcs = video_container_size();
281 boost::mutex::scoped_lock lm (_mutex);
/* Update any queued (delayed) frames so they pick up the new crop. */
282 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
283 i->first->reset_metadata (_film, vcs);
287 if (type == CHANGE_TYPE_PENDING) {
288 /* The player content is probably about to change, so we can't carry on
289 until that has happened and we've rebuilt our pieces. Stop pass()
290 and seek() from working until then.
293 } else if (type == CHANGE_TYPE_DONE) {
294 /* A change in our content has gone through. Re-build our pieces. */
297 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Forward the change notification to the Player's own listeners. */
302 Change (type, property, frequent);
/** Set the size into which all emitted video is placed, re-allocating the
    cached black frame to match.  Emits PENDING then DONE around the change,
    or CANCELLED if the size is unchanged. */
306 Player::set_video_container_size (dcp::Size s)
308 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
311 boost::mutex::scoped_lock lm (_mutex);
313 if (s == _video_container_size) {
315 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
319 _video_container_size = s;
/* Pre-render a black frame at the new size for gap filling. */
321 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
322 _black_image->make_black ();
325 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/** Handle a Change from the playlist: rebuild pieces when it completes and
    re-emit as a PLAYLIST property change.
    NOTE(review): the setup_pieces() call inside the if is missing from this
    extraction -- confirm against the full source. */
329 Player::playlist_change (ChangeType type)
331 if (type == CHANGE_TYPE_DONE) {
334 Change (type, PlayerProperty::PLAYLIST, false);
/** Handle a Change from the Film, translating the Film property into the
    corresponding PlayerProperty and refreshing any state that depends on it
    (pieces for frame-rate changes, the audio processor clone, the merger). */
338 Player::film_change (ChangeType type, Film::Property p)
340 /* Here we should notice Film properties that affect our output, and
341 alert listeners that our output now would be different to how it was
342 last time we were run.
345 if (p == Film::CONTAINER) {
346 Change (type, PlayerProperty::FILM_CONTAINER, false);
347 } else if (p == Film::VIDEO_FRAME_RATE) {
348 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
349 so we need new pieces here.
351 if (type == CHANGE_TYPE_DONE) {
354 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
355 } else if (p == Film::AUDIO_PROCESSOR) {
356 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
357 boost::mutex::scoped_lock lm (_mutex);
/* Clone so the Player has its own processor instance at the film's rate. */
358 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
360 } else if (p == Film::AUDIO_CHANNELS) {
361 if (type == CHANGE_TYPE_DONE) {
362 boost::mutex::scoped_lock lm (_mutex);
/* Channel count changed; queued audio is no longer valid. */
363 _audio_merger.clear ();
/** @return A PlayerVideo wrapping the cached black image, sized to the video
    container, for the given @param eyes -- used to fill video gaps.
    NOTE(review): some constructor arguments are missing from this extraction. */
368 shared_ptr<PlayerVideo>
369 Player::black_player_video_frame (Eyes eyes) const
371 return shared_ptr<PlayerVideo> (
373 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
376 _video_container_size,
377 _video_container_size,
380 PresetColourConversion::all().front().conversion,
/* No originating content/frame: this frame is synthesised, not decoded. */
382 boost::weak_ptr<Content>(),
383 boost::optional<Frame>(),
/** Convert a DCP timeline time to a video frame index within @param piece's
    content, clamping to the trimmed extent of the content. */
390 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
392 DCPTime s = t - piece->content->position ();
393 s = min (piece->content->length_after_trim(_film), s);
394 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
396 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
397 then convert that ContentTime to frames at the content's rate. However this fails for
398 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
399 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
401 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
403 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/** Inverse of dcp_to_content_video(): convert content video frame @param f to
    a time on the DCP timeline, allowing for trim and the frame-rate change. */
407 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
409 /* See comment in dcp_to_content_video */
410 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
411 return d + piece->content->position();
/** Convert DCP timeline time @param t to a resampled-audio frame index within
    @param piece's content (frames at the film's audio rate), clamped to the
    trimmed content extent. */
415 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
417 DCPTime s = t - piece->content->position ();
418 s = min (piece->content->length_after_trim(_film), s);
419 /* See notes in dcp_to_content_video */
420 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/** Inverse of dcp_to_resampled_audio(): convert resampled-audio frame @param f
    back to a DCP timeline time. */
424 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
426 /* See comment in dcp_to_content_video */
427 return DCPTime::from_frames (f, _film->audio_frame_rate())
428 - DCPTime (piece->content->trim_start(), piece->frc)
429 + piece->content->position();
/** Convert a DCP timeline time to a ContentTime within @param piece,
    clamped to the trimmed extent of the content. */
433 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
435 DCPTime s = t - piece->content->position ();
436 s = min (piece->content->length_after_trim(_film), s);
437 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/** Inverse of dcp_to_content_time(): convert ContentTime @param t within
    @param piece to a DCP timeline time, never before time zero. */
441 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
443 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** Collect the fonts referenced by every text (subtitle/caption) content in
    the current pieces.  Duplicates are not removed (see XXX below). */
446 list<shared_ptr<Font> >
447 Player::get_subtitle_fonts ()
449 boost::mutex::scoped_lock lm (_mutex);
451 list<shared_ptr<Font> > fonts;
452 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
453 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
454 /* XXX: things may go wrong if there are duplicate font IDs
455 with different font files.
457 list<shared_ptr<Font> > f = j->fonts ();
458 copy (f.begin(), f.end(), back_inserter (fonts));
465 /** Set this player never to produce any video data */
467 Player::set_ignore_video ()
469 boost::mutex::scoped_lock lm (_mutex);
470 _ignore_video = true;
/* Rebuild pieces so decoders are created with the new ignore flag. */
471 setup_pieces_unlocked ();
/** Set this player never to produce any audio data; pieces are rebuilt so
    decoders pick up the flag. */
475 Player::set_ignore_audio ()
477 boost::mutex::scoped_lock lm (_mutex);
478 _ignore_audio = true;
479 setup_pieces_unlocked ();
/** Set this player never to produce any text (subtitle/caption) data.
    NOTE(review): the `_ignore_text = true` assignment is missing from this
    extraction -- confirm against the full source. */
483 Player::set_ignore_text ()
485 boost::mutex::scoped_lock lm (_mutex);
487 setup_pieces_unlocked ();
490 /** Set the player to always burn open texts into the image regardless of the content settings */
492 Player::set_always_burn_open_subtitles ()
494 boost::mutex::scoped_lock lm (_mutex);
/* No piece rebuild needed: the flag is consulted at emit time. */
495 _always_burn_open_subtitles = true;
498 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature (presumably Player::set_fast) and the
   `_fast = true` assignment are missing from this extraction -- confirm. */
502 boost::mutex::scoped_lock lm (_mutex);
504 setup_pieces_unlocked ();
/** Make the player decode and play content that a DCP refers to (VF
    workflows), rather than skipping it; pieces are rebuilt accordingly. */
508 Player::set_play_referenced ()
510 boost::mutex::scoped_lock lm (_mutex);
511 _play_referenced = true;
512 setup_pieces_unlocked ();
/* Helper for get_reel_assets(): apply per-reel trims to @param r (adjusting
   its entry point and duration) and, if anything remains, append it to
   @param a with its period on the DCP timeline computed at @param ffr (the
   film's video frame rate). */
516 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
518 DCPOMATIC_ASSERT (r);
519 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
520 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
521 if (r->actual_duration() > 0) {
523 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/** @return The reel assets of any DCP content that is marked to be referenced
    (picture / sound / subtitle / closed caption), trimmed and positioned on
    the film's timeline.  Used when making a VF that refers to an OV.
    NOTE(review): some lines (continue statements, try/catch around the
    decoder construction) are missing from this extraction -- confirm. */
528 list<ReferencedReelAsset>
529 Player::get_reel_assets ()
531 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
533 list<ReferencedReelAsset> a;
535 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Only DCP content can be referenced. */
536 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
541 scoped_ptr<DCPDecoder> decoder;
543 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
548 DCPOMATIC_ASSERT (j->video_frame_rate ());
549 double const cfr = j->video_frame_rate().get();
550 Frame const trim_start = j->trim_start().frames_round (cfr);
551 Frame const trim_end = j->trim_end().frames_round (cfr);
552 int const ffr = _film->video_frame_rate ();
554 /* position in the asset from the start */
555 int64_t offset_from_start = 0;
556 /* position in the asset from the end */
557 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down. */
558 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
559 /* Assume that main picture duration is the length of the reel */
560 offset_from_end += k->main_picture()->actual_duration();
/* Second pass: emit each referenced asset with per-reel trims applied. */
563 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
565 /* Assume that main picture duration is the length of the reel */
566 int64_t const reel_duration = k->main_picture()->actual_duration();
568 /* See doc/design/trim_reels.svg */
569 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
570 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
572 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
573 if (j->reference_video ()) {
574 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
577 if (j->reference_audio ()) {
578 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
581 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
582 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
585 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
586 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
587 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
591 offset_from_start += reel_duration;
592 offset_from_end -= reel_duration;
/* Player::pass(): emit the next chunk of output -- either the earliest
   piece's data, a black frame for a video gap, or silence for an audio gap --
   then flush ready audio from the merger and any due delayed video frames.
   NOTE(review): this extraction is missing the function signature and a
   number of interior lines (returns, the `which` selection switch, loop
   braces) -- confirm against the full source. */
602 boost::mutex::scoped_lock lm (_mutex);
605 /* We can't pass in this state */
606 LOG_DEBUG_PLAYER_NC ("Player is suspended");
610 if (_playback_length == DCPTime()) {
611 /* Special; just give one black frame */
612 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
616 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
618 shared_ptr<Piece> earliest_content;
619 optional<DCPTime> earliest_time;
621 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Position of this piece on the DCP timeline, skipping pieces past their end. */
626 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
627 if (t > i->content->end(_film)) {
631 /* Given two choices at the same time, pick the one with texts so we see it before
634 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
636 earliest_content = i;
650 if (earliest_content) {
/* A pending black (video gap) or silence (audio gap) may pre-empt the content. */
654 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
655 earliest_time = _black.position ();
659 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
660 earliest_time = _silent.position ();
/* Branch: pass the earliest content's decoder. */
667 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
668 earliest_content->done = earliest_content->decoder->pass ();
669 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
670 if (dcp && !_play_referenced && dcp->reference_audio()) {
671 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
672 to `hide' the fact that no audio was emitted during the referenced DCP (though
673 we need to behave as though it was).
675 _last_audio_time = dcp->end (_film);
/* Branch: fill a video gap with one black frame. */
680 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
681 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
682 _black.set_position (_black.position() + one_video_frame());
/* Branch: fill an audio gap with up to one frame's worth of silence. */
686 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
687 DCPTimePeriod period (_silent.period_at_position());
688 if (_last_audio_time) {
689 /* Sometimes the thing that happened last finishes fractionally before
690 or after this silence. Bodge the start time of the silence to fix it.
691 I think this is nothing to worry about since we will just add or
692 remove a little silence at the end of some content.
694 int64_t const error = labs(period.from.get() - _last_audio_time->get());
695 /* Let's not worry about less than a frame at 24fps */
696 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
697 if (error >= too_much_error) {
698 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
700 DCPOMATIC_ASSERT (error < too_much_error);
701 period.from = *_last_audio_time;
703 if (period.duration() > one_video_frame()) {
704 period.to = period.from + one_video_frame();
707 _silent.set_position (period.to);
715 /* Emit any audio that is ready */
717 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
718 of our streams, or the position of the _silent.
720 DCPTime pull_to = _playback_length;
721 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
722 i->update_pull_to (pull_to);
724 if (!_silent.done() && _silent.position() < pull_to) {
725 pull_to = _silent.position();
728 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
729 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
730 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
731 if (_last_audio_time && i->second < *_last_audio_time) {
732 /* This new data comes before the last we emitted (or the last seek); discard it */
733 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
738 } else if (_last_audio_time && i->second > *_last_audio_time) {
739 /* There's a gap between this data and the last we emitted; fill with silence */
740 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
743 emit_audio (i->first, i->second);
/* Once done, flush all delayed video frames through do_emit_video(). */
748 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
749 do_emit_video(i->first, i->second);
756 /** @return Open subtitles for the frame at the given time, converted to images */
757 optional<PositionImage>
758 Player::open_subtitles_for_frame (DCPTime time) const
760 list<PositionImage> captions;
761 int const vfr = _film->video_frame_rate();
/* Gather all open-subtitle texts active during this one-frame period that
   should be burnt in (content setting or the always-burn override). */
765 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
768 /* Bitmap subtitles */
769 BOOST_FOREACH (BitmapText i, j.bitmap) {
774 /* i.image will already have been scaled to fit _video_container_size */
775 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Subtitle rectangle coordinates are proportional; convert to pixels. */
781 lrint (_video_container_size.width * i.rectangle.x),
782 lrint (_video_container_size.height * i.rectangle.y)
788 /* String subtitles (rendered to an image) */
789 if (!j.string.empty ()) {
790 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
791 copy (s.begin(), s.end(), back_inserter (captions));
795 if (captions.empty ()) {
796 return optional<PositionImage> ();
/* Composite all captions into a single positioned image. */
799 return merge (captions);
/* Handler for decoded video arriving from a piece's decoder.  Maps the
   content frame onto the DCP timeline, discards early frames, fills any gap
   since the last emitted frame (repeating the previous frame or using black,
   with eye-by-eye handling in 3D), wraps the frame in a PlayerVideo, and
   emits it (repeated as required by the frame-rate change).
   NOTE(review): several lines (early returns, PlayerVideo constructor
   arguments, the declaration of `t` used in the final loop) are missing from
   this extraction -- confirm against the full source. */
803 Player::video (weak_ptr<Piece> wp, ContentVideo video)
805 shared_ptr<Piece> piece = wp.lock ();
810 if (!piece->content->video->use()) {
814 FrameRateChange frc (_film, piece->content);
/* When skipping (content faster than DCP), drop every other frame. */
815 if (frc.skip && (video.frame % 2) == 1) {
819 /* Time of the first frame we will emit */
820 DCPTime const time = content_video_to_dcp (piece, video.frame);
821 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
823 /* Discard if it's before the content's period or the last accurate seek. We can't discard
824 if it's after the content's period here as in that case we still need to fill any gap between
825 `now' and the end of the content's period.
827 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
831 /* Fill gaps that we discover now that we have some video which needs to be emitted.
832 This is where we need to fill to.
834 DCPTime fill_to = min (time, piece->content->end(_film));
836 if (_last_video_time) {
837 DCPTime fill_from = max (*_last_video_time, piece->content->position());
839 /* Fill if we have more than half a frame to do */
840 if ((fill_to - fill_from) > one_video_frame() / 2) {
841 LastVideoMap::const_iterator last = _last_video.find (wp);
842 if (_film->three_d()) {
843 Eyes fill_to_eyes = video.eyes;
844 if (fill_to_eyes == EYES_BOTH) {
845 fill_to_eyes = EYES_LEFT;
847 if (fill_to == piece->content->end(_film)) {
848 /* Don't fill after the end of the content */
849 fill_to_eyes = EYES_LEFT;
851 DCPTime j = fill_from;
852 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
853 if (eyes == EYES_BOTH) {
/* Walk eye-by-eye, advancing time after each RIGHT eye. */
856 while (j < fill_to || eyes != fill_to_eyes) {
857 if (last != _last_video.end()) {
858 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
859 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
860 copy->set_eyes (eyes);
861 emit_video (copy, j);
863 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
864 emit_video (black_player_video_frame(eyes), j);
866 if (eyes == EYES_RIGHT) {
867 j += one_video_frame();
869 eyes = increment_eyes (eyes);
/* 2D: fill frame-by-frame with the last frame, or black if none. */
872 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
873 if (last != _last_video.end()) {
874 emit_video (last->second, j);
876 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this decoded frame and remember it for gap fill. */
883 _last_video[wp].reset (
886 piece->content->video->crop (),
887 piece->content->video->fade (_film, video.frame),
888 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
889 _video_container_size,
892 piece->content->video->colour_conversion(),
893 piece->content->video->range(),
/* Emit the frame, repeated as needed, while within the content's period. */
901 for (int i = 0; i < frc.repeat; ++i) {
902 if (t < piece->content->end(_film)) {
903 emit_video (_last_video[wp], t);
905 t += one_video_frame ();
/* Handler for decoded audio arriving from a piece's decoder.  Maps the block
   onto the DCP timeline, trims anything outside the content's period, applies
   gain, remaps channels, runs the optional audio processor, then pushes the
   result into the merger and records each stream's push-end time.
   NOTE(review): some lines (early returns, the remap guard) are missing from
   this extraction -- confirm against the full source. */
910 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
912 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
914 shared_ptr<Piece> piece = wp.lock ();
919 shared_ptr<AudioContent> content = piece->content->audio;
920 DCPOMATIC_ASSERT (content);
922 int const rfr = content->resampled_frame_rate (_film);
924 /* Compute time in the DCP */
925 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
926 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
928 /* And the end of this block in the DCP */
929 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
931 /* Remove anything that comes before the start or after the end of the content */
932 if (time < piece->content->position()) {
933 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
935 /* This audio is entirely discarded */
938 content_audio.audio = cut.first;
940 } else if (time > piece->content->end(_film)) {
943 } else if (end > piece->content->end(_film)) {
/* Block overlaps the end of the content; keep only the leading part. */
944 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
945 if (remaining_frames == 0) {
948 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
951 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain: copy before applying so the decoder's buffer is not modified. */
955 if (content->gain() != 0) {
956 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
957 gain->apply_gain (content->gain ());
958 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
963 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
967 if (_audio_processor) {
968 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
973 _audio_merger.push (content_audio.audio, time);
974 /* XXX: this almost certainly needs to be more efficient; perhaps pieces fill a map to find
975 * the piece from the stream, then we can call the right piece with no loop.
977 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
978 i->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
/* Handler for a bitmap subtitle starting: apply the content's offsets and
   scale to the subtitle rectangle, scale the image to fit the video container
   and register the subtitle as active from its DCP start time.
   NOTE(review): some lines (the PlayerText `ps` declaration, early returns)
   are missing from this extraction -- confirm against the full source. */
983 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
985 shared_ptr<Piece> piece = wp.lock ();
986 shared_ptr<const TextContent> text = wc.lock ();
987 if (!piece || !text) {
991 /* Apply content's subtitle offsets */
992 subtitle.sub.rectangle.x += text->x_offset ();
993 subtitle.sub.rectangle.y += text->y_offset ();
995 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
996 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
997 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
999 /* Apply content's subtitle scale */
1000 subtitle.sub.rectangle.width *= text->x_scale ();
1001 subtitle.sub.rectangle.height *= text->y_scale ();
1004 shared_ptr<Image> image = subtitle.sub.image;
1006 /* We will scale the subtitle up to fit _video_container_size */
1007 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1008 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* A degenerate (zero-area) subtitle cannot be rendered. */
1009 if (width == 0 || height == 0) {
1013 dcp::Size scaled_size (width, height);
1014 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1015 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1017 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a plain (string) subtitle starting: apply the content's
   position offsets, express its x/y scales as a size change plus an aspect
   adjustment, stamp the DCP start time on each string and register the text
   as active.
   NOTE(review): some lines (the PlayerText `ps` declaration, early returns)
   are missing from this extraction -- confirm against the full source. */
1021 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
1023 shared_ptr<Piece> piece = wp.lock ();
1024 shared_ptr<const TextContent> text = wc.lock ();
1025 if (!piece || !text) {
1030 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content has ended. */
1032 if (from > piece->content->end(_film)) {
1036 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
1037 s.set_h_position (s.h_position() + text->x_offset ());
1038 s.set_v_position (s.v_position() + text->y_offset ());
1039 float const xs = text->x_scale();
1040 float const ys = text->y_scale();
1041 float size = s.size();
1043 /* Adjust size to express the common part of the scaling;
1044 e.g. if xs = ys = 0.5 we scale size by 2.
1046 if (xs > 1e-5 && ys > 1e-5) {
1047 size *= 1 / min (1 / xs, 1 / ys);
1051 /* Then express aspect ratio changes */
1052 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1053 s.set_aspect_adjust (xs / ys);
/* dcp::Time at 1000 ticks-per-second editable-unit resolution. */
1056 s.set_in (dcp::Time(from.seconds(), 1000));
1057 ps.string.push_back (StringText (s, text->outline_width()));
1058 ps.add_fonts (text->fonts ());
1061 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a subtitle ending: close the active text at the DCP time
   corresponding to @param to and, if the text is used but not burnt in,
   emit it via the Text signal as a discrete subtitle. */
1065 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1067 shared_ptr<const TextContent> text = wc.lock ();
/* Nothing to stop if we never saw this text start. */
1072 if (!_active_texts[text->type()].have(wc)) {
1076 shared_ptr<Piece> piece = wp.lock ();
1081 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1083 if (dcp_to > piece->content->end(_film)) {
1087 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1089 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1090 if (text->use() && !always && !text->burn()) {
1091 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/** Seek the player to @param time.  @param accurate true to decode exactly to
    the requested time; false permits landing on a nearby keyframe.  Clears all
    queued audio/texts/video state and seeks each piece's decoder.
    NOTE(review): some lines (suspended-state return, emit_video flush of
    _delay, `done` flags on pieces) are missing from this extraction. */
1096 Player::seek (DCPTime time, bool accurate)
1098 boost::mutex::scoped_lock lm (_mutex);
1099 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1102 /* We can't seek in this state */
/* Out-of-order 3D state must not survive a seek. */
1107 _shuffler->clear ();
1112 if (_audio_processor) {
1113 _audio_processor->flush ();
1116 _audio_merger.clear ();
1117 for (int i = 0; i < TEXT_COUNT; ++i) {
1118 _active_texts[i].clear ();
1121 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1122 if (time < i->content->position()) {
1123 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1124 we must seek this (following) content accurately, otherwise when we come to the end of the current
1125 content we may not start right at the beginning of the next, causing a gap (if the next content has
1126 been trimmed to a point between keyframes, or something).
1128 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1130 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1131 /* During; seek to position */
1132 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1135 /* After; this piece is done */
/* Accurate seeks pin the emit times; inaccurate ones leave them open. */
1141 _last_video_time = time;
1142 _last_video_eyes = EYES_LEFT;
1143 _last_audio_time = time;
1145 _last_video_time = optional<DCPTime>();
1146 _last_video_eyes = optional<Eyes>();
1147 _last_audio_time = optional<DCPTime>();
1150 _black.set_position (time);
1151 _silent.set_position (time);
1153 _last_video.clear ();
/* Queue a video frame for emission, advancing the last-video time/eyes
   bookkeeping; frames are buffered briefly (see comment) and released to
   do_emit_video() once the queue exceeds the delay depth. */
1157 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1159 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1160 player before the video that requires them.
1162 _delay.push_back (make_pair (pv, time));
/* Only a BOTH or RIGHT eye frame completes a video frame in time terms. */
1164 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1165 _last_video_time = time + one_video_frame();
1167 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to two frames buffered; emit the oldest beyond that. */
1169 if (_delay.size() < 3) {
1173 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1175 do_emit_video (to_do.first, to_do.second);
/* Actually emit a (possibly delayed) video frame: expire finished texts,
   attach any open subtitles to be burnt in, then fire the Video signal.
   NOTE(review): the final Video(...) emission line is missing from this
   extraction -- confirm against the full source. */
1179 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1181 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1182 for (int i = 0; i < TEXT_COUNT; ++i) {
1183 _active_texts[i].clear_before (time);
1187 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1189 pv->set_text (subtitles.get ());
/* Emit an audio block via the Audio signal and advance _last_audio_time.
   Audio must be contiguous: a gap of 2 or more DCPTime units from the
   previous block trips the assert (after logging a warning at >1). */
1196 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1198 /* Log if the assert below is about to fail */
1199 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1200 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1203 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1204 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1205 Audio (data, time, _film->audio_frame_rate());
1206 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @param period, in blocks of at most half a second
   so no single allocation is large.  A zero-length period is a no-op. */
1210 Player::fill_audio (DCPTimePeriod period)
1212 if (period.from == period.to) {
1216 DCPOMATIC_ASSERT (period.from < period.to);
1218 DCPTime t = period.from;
1219 while (t < period.to) {
1220 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1221 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* NOTE(review): the loop's `t += block` advance is missing from this
   extraction -- confirm against the full source. */
1223 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1224 silence->make_silent ();
1225 emit_audio (silence, t);
/** @return The duration of one video frame at the film's video frame rate. */
1232 Player::one_video_frame () const
1234 return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Drop the part of @param audio (starting at @param time) that falls before
    @param discard_to.  @return the remaining buffer and its new start time;
    a null buffer if everything was discarded. */
1237 pair<shared_ptr<AudioBuffers>, DCPTime>
1238 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1240 DCPTime const discard_time = discard_to - time;
1241 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1242 Frame remaining_frames = audio->frames() - discard_frames;
1243 if (remaining_frames <= 0) {
1244 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1246 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1247 return make_pair(cut, time + discard_time);
/** Set the JPEG2000 decode reduction applied to DCP content (boost::none for
    full resolution).  Emits PENDING/DONE around a piece rebuild, or CANCELLED
    if the value is unchanged. */
1251 Player::set_dcp_decode_reduction (optional<int> reduction)
1253 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1256 boost::mutex::scoped_lock lm (_mutex);
1258 if (reduction == _dcp_decode_reduction) {
1260 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1264 _dcp_decode_reduction = reduction;
1265 setup_pieces_unlocked ();
1268 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/** Public overload: convert ContentTime @param t within @param content to a
    DCP timeline time, or boost::none if the content is not currently one of
    our pieces (e.g. mid-rebuild). */
1272 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1274 boost::mutex::scoped_lock lm (_mutex);
1276 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1277 if (i->content == content) {
1278 return content_time_to_dcp (i, t);
1282 /* We couldn't find this content; perhaps things are being changed over */
1283 return optional<DCPTime>();
/** @return The playlist this player uses: the explicit one given at
    construction if any, otherwise the film's own playlist. */
1287 shared_ptr<const Playlist>
1288 Player::playlist () const
1290 return _playlist ? _playlist : _film->playlist();
/* Handler for Atmos data from a decoder: forward it via the Atmos signal with
   its frame index converted to a DCP timeline time.  The Piece parameter is
   unused here. */
1295 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1297 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);