2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
84 using namespace dcpomatic;
/* Property identifiers passed with the Change signal so that listeners can tell
   which aspect of the Player's output has changed (or is about to change). */
86 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
87 int const PlayerProperty::PLAYLIST = 701;
88 int const PlayerProperty::FILM_CONTAINER = 702;
89 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
90 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
91 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player for the whole of the given film's playlist.
   NOTE(review): the initialiser list appears truncated in this chunk (e.g. the
   _film member initialiser is not visible) -- confirm against the full source. */
93 Player::Player (shared_ptr<const Film> film)
96 , _ignore_video (false)
97 , _ignore_audio (false)
98 , _ignore_text (false)
99 , _always_burn_open_subtitles (false)
101 , _tolerant (film->tolerant())
102 , _play_referenced (false)
103 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player for a specific playlist within the given film.
   NOTE(review): initialiser list and body appear truncated in this chunk. */
109 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
111 , _playlist (playlist_)
113 , _ignore_video (false)
114 , _ignore_audio (false)
115 , _ignore_text (false)
116 , _always_burn_open_subtitles (false)
118 , _tolerant (film->tolerant())
119 , _play_referenced (false)
120 , _audio_merger (_film->audio_frame_rate())
/* Wire up change notifications from the film and the playlist */
129 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
130 /* The butler must hear about this first, so since we are proxying this through to the butler we must
133 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
134 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
135 set_video_container_size (_film->frame_size ());
/* Pick up any audio processor already configured on the film */
137 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to time zero so the first pass() starts from the beginning */
140 seek (DCPTime (), true);
149 Player::setup_pieces ()
151 boost::mutex::scoped_lock lm (_mutex);
152 setup_pieces_unlocked ();
157 have_video (shared_ptr<const Content> content)
159 return static_cast<bool>(content->video) && content->video->use();
163 have_audio (shared_ptr<const Content> content)
165 return static_cast<bool>(content->audio);
/* Rebuild _pieces from the playlist: create (or re-use) a decoder per piece of
   content, hook its Data/Start/Stop signals into the Player, and reset the
   black/silence fillers and emission clocks.  Caller must hold _mutex.
   NOTE(review): several lines (continues, closing braces, the Stop connection
   and if (dcp) guard) are missing from this chunk. */
169 Player::setup_pieces_unlocked ()
171 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so their decoders can be re-used below */
173 list<shared_ptr<Piece> > old_pieces = _pieces;
177 _shuffler = new Shuffler();
178 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
180 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Skip content whose files have gone missing */
182 if (!i->paths_valid ()) {
186 if (_ignore_video && _ignore_audio && i->text.empty()) {
187 /* We're only interested in text and this content has none */
191 shared_ptr<Decoder> old_decoder;
192 /* XXX: needs to check vector of Content and use the old decoders, but
193 * this will all be different as we have to coalesce content before
195 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
196 if (j->content == i) {
197 old_decoder = j->decoder;
203 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
204 DCPOMATIC_ASSERT (decoder);
206 FrameRateChange frc (_film, i);
/* Apply the ignore flags to the sub-decoders */
208 if (decoder->video && _ignore_video) {
209 decoder->video->set_ignore (true);
212 if (decoder->audio && _ignore_audio) {
213 decoder->audio->set_ignore (true);
217 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
218 i->set_ignore (true);
/* Special handling for DCP content which may be referenced rather than decoded */
222 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
224 dcp->set_decode_referenced (_play_referenced);
225 if (_play_referenced) {
226 dcp->set_forced_reduction (_dcp_decode_reduction);
230 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
231 _pieces.push_back (piece);
233 if (decoder->video) {
234 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
235 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
236 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
238 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
242 if (decoder->audio) {
243 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals */
246 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
248 while (j != decoder->text.end()) {
249 (*j)->BitmapStart.connect (
250 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
252 (*j)->PlainStart.connect (
253 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
256 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
262 if (decoder->atmos) {
263 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Trackers for the gaps where black video / silent audio must be generated */
267 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
268 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
/* Reset the emission clocks to the start */
270 _last_video_time = DCPTime ();
271 _last_video_eyes = EYES_BOTH;
272 _last_audio_time = DCPTime ();
/* Called when a property of some content in the playlist changes; suspends
   pass()/seek() while the change is pending and rebuilds pieces when done.
   NOTE(review): the _suspended = true/false and setup_pieces() lines appear to
   be missing from this chunk. */
276 Player::playlist_content_change (ChangeType type, int property, bool frequent)
278 if (property == VideoContentProperty::CROP) {
279 if (type == CHANGE_TYPE_DONE) {
/* Update metadata on any video frames waiting in the delay queue */
280 dcp::Size const vcs = video_container_size();
281 boost::mutex::scoped_lock lm (_mutex);
282 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
283 i->first->reset_metadata (_film, vcs);
287 if (type == CHANGE_TYPE_PENDING) {
288 /* The player content is probably about to change, so we can't carry on
289 until that has happened and we've rebuilt our pieces. Stop pass()
290 and seek() from working until then.
293 } else if (type == CHANGE_TYPE_DONE) {
294 /* A change in our content has gone through. Re-build our pieces. */
297 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Pass the change on to our own listeners */
302 Change (type, property, frequent);
/* Set the size of the "container" into which video will be scaled, emitting
   PENDING/CANCELLED/DONE change notifications around the update.
   NOTE(review): the early-return path for an unchanged size (unlock + return)
   is missing from this chunk. */
306 Player::set_video_container_size (dcp::Size s)
308 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
311 boost::mutex::scoped_lock lm (_mutex);
/* No-op if nothing changed; tell listeners the change was cancelled */
313 if (s == _video_container_size) {
315 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
319 _video_container_size = s;
/* Pre-build a black frame at the new size for gap filling */
321 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
322 _black_image->make_black ();
325 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Called when the playlist itself changes; rebuilds pieces on DONE and
   forwards the notification.  NOTE(review): the setup_pieces() call inside the
   if appears to be missing from this chunk. */
329 Player::playlist_change (ChangeType type)
331 if (type == CHANGE_TYPE_DONE) {
334 Change (type, PlayerProperty::PLAYLIST, false);
/* React to changes of Film properties which affect our output.
   NOTE(review): the setup_pieces() call under VIDEO_FRAME_RATE appears to be
   missing from this chunk. */
338 Player::film_change (ChangeType type, Film::Property p)
340 /* Here we should notice Film properties that affect our output, and
341 alert listeners that our output now would be different to how it was
342 last time we were run.
345 if (p == Film::CONTAINER) {
346 Change (type, PlayerProperty::FILM_CONTAINER, false);
347 } else if (p == Film::VIDEO_FRAME_RATE) {
348 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
349 so we need new pieces here.
351 if (type == CHANGE_TYPE_DONE) {
354 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
355 } else if (p == Film::AUDIO_PROCESSOR) {
/* (Re)create our instance of the film's audio processor */
356 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
357 boost::mutex::scoped_lock lm (_mutex);
358 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
360 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: pending merged audio is no longer valid */
361 if (type == CHANGE_TYPE_DONE) {
362 boost::mutex::scoped_lock lm (_mutex);
363 _audio_merger.clear ();
/* Make a PlayerVideo wrapping our pre-built black image, used to fill gaps
   where there is no video content.  NOTE(review): several PlayerVideo
   constructor arguments (crop, eyes, part, ...) are missing from this chunk. */
368 shared_ptr<PlayerVideo>
369 Player::black_player_video_frame (Eyes eyes) const
371 return shared_ptr<PlayerVideo> (
373 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
376 _video_container_size,
377 _video_container_size,
380 PresetColourConversion::all().front().conversion,
382 boost::weak_ptr<Content>(),
383 boost::optional<Frame>(),
/* Collect the fonts needed by all pieces' subtitles.
   NOTE(review): the final `return fonts;` appears to be missing from this chunk. */
390 list<shared_ptr<Font> >
391 Player::get_subtitle_fonts ()
393 boost::mutex::scoped_lock lm (_mutex);
395 list<shared_ptr<Font> > fonts;
396 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
397 i->add_fonts (fonts);
403 /** Set this player never to produce any video data */
405 Player::set_ignore_video ()
407 boost::mutex::scoped_lock lm (_mutex);
408 _ignore_video = true;
409 setup_pieces_unlocked ();
413 Player::set_ignore_audio ()
415 boost::mutex::scoped_lock lm (_mutex);
416 _ignore_audio = true;
417 setup_pieces_unlocked ();
421 Player::set_ignore_text ()
423 boost::mutex::scoped_lock lm (_mutex);
425 setup_pieces_unlocked ();
428 /** Set the player to always burn open texts into the image regardless of the content settings */
430 Player::set_always_burn_open_subtitles ()
432 boost::mutex::scoped_lock lm (_mutex);
433 _always_burn_open_subtitles = true;
436 /** Sets up the player to be faster, possibly at the expense of quality */
440 boost::mutex::scoped_lock lm (_mutex);
442 setup_pieces_unlocked ();
446 Player::set_play_referenced ()
448 boost::mutex::scoped_lock lm (_mutex);
449 _play_referenced = true;
450 setup_pieces_unlocked ();
/* Trim a reel asset by the given start/end frame counts and, if anything
   remains, record it (with its DCP time extent) in the list `a'.
   NOTE(review): the a.push_back(...) wrapper around the ReferencedReelAsset
   expression appears to be missing from this chunk. */
454 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
456 DCPOMATIC_ASSERT (r);
/* Apply the trims to the asset's entry point and duration */
457 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
458 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
459 if (r->actual_duration() > 0) {
461 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Gather the reel assets from any DCP content which is marked to be referenced
   (rather than re-encoded), trimmed to the content's trim settings.
   NOTE(review): guard clauses (non-DCP skip, decoder try/catch) are missing
   from this chunk. */
466 list<ReferencedReelAsset>
467 Player::get_reel_assets ()
469 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
471 list<ReferencedReelAsset> a;
473 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
474 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
479 scoped_ptr<DCPDecoder> decoder;
481 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
486 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* Trims are expressed in content frames at the content's rate (cfr);
   assets are positioned at the film's rate (ffr) */
487 double const cfr = j->video_frame_rate().get();
488 Frame const trim_start = j->trim_start().frames_round (cfr);
489 Frame const trim_end = j->trim_end().frames_round (cfr);
490 int const ffr = _film->video_frame_rate ();
492 /* position in the asset from the start */
493 int64_t offset_from_start = 0;
494 /* position in the asset from the end */
495 int64_t offset_from_end = 0;
496 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
497 /* Assume that main picture duration is the length of the reel */
498 offset_from_end += k->main_picture()->actual_duration();
501 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
503 /* Assume that main picture duration is the length of the reel */
504 int64_t const reel_duration = k->main_picture()->actual_duration();
506 /* See doc/design/trim_reels.svg */
507 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
508 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
510 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
/* Add each referenced asset type present in this reel */
511 if (j->reference_video ()) {
512 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
515 if (j->reference_audio ()) {
516 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
519 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
520 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
523 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
524 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
525 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
529 offset_from_start += reel_duration;
530 offset_from_end -= reel_duration;
/* NOTE(review): this appears to be the body of Player::pass() -- its signature
   and many interior lines (returns, braces, the done/black/silent dispatch
   switch) are missing from this chunk.  It finds the most-behind decoder or
   gap-filler, makes it emit some data, then flushes ready audio and any
   delayed video. */
540 boost::mutex::scoped_lock lm (_mutex);
543 /* We can't pass in this state */
544 LOG_DEBUG_PLAYER_NC ("Player is suspended");
548 if (_playback_length == DCPTime()) {
549 /* Special; just give one black frame */
550 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
554 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
556 shared_ptr<Piece> earliest_content;
557 optional<DCPTime> earliest_time;
559 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Decoder position clamped to the start of the content's trim */
564 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
565 if (t > i->end(_film)) {
569 /* Given two choices at the same time, pick the one with texts so we see it before
572 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
574 earliest_content = i;
588 if (earliest_content) {
/* The black/silence fillers may be further behind than any content */
592 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
593 earliest_time = _black.position ();
597 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
598 earliest_time = _silent.position ();
605 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
606 earliest_content->done = earliest_content->decoder->pass ();
607 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
608 if (dcp && !_play_referenced && dcp->reference_audio()) {
609 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
610 to `hide' the fact that no audio was emitted during the referenced DCP (though
611 we need to behave as though it was).
613 _last_audio_time = dcp->end (_film);
618 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
619 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
620 _black.set_position (_black.position() + one_video_frame());
624 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
625 DCPTimePeriod period (_silent.period_at_position());
626 if (_last_audio_time) {
627 /* Sometimes the thing that happened last finishes fractionally before
628 or after this silence. Bodge the start time of the silence to fix it.
629 I think this is nothing to worry about since we will just add or
630 remove a little silence at the end of some content.
632 int64_t const error = labs(period.from.get() - _last_audio_time->get());
633 /* Let's not worry about less than a frame at 24fps */
634 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
635 if (error >= too_much_error) {
636 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
638 DCPOMATIC_ASSERT (error < too_much_error);
639 period.from = *_last_audio_time;
/* Emit silence in chunks of at most one video frame */
641 if (period.duration() > one_video_frame()) {
642 period.to = period.from + one_video_frame();
645 _silent.set_position (period.to);
653 /* Emit any audio that is ready */
655 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
656 of our streams, or the position of the _silent.
658 DCPTime pull_to = _playback_length;
659 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
660 i->update_pull_to (pull_to);
662 if (!_silent.done() && _silent.position() < pull_to) {
663 pull_to = _silent.position();
666 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
667 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
668 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
669 if (_last_audio_time && i->second < *_last_audio_time) {
670 /* This new data comes before the last we emitted (or the last seek); discard it */
671 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
676 } else if (_last_audio_time && i->second > *_last_audio_time) {
677 /* There's a gap between this data and the last we emitted; fill with silence */
678 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
681 emit_audio (i->first, i->second);
/* Flush any delayed video now that its subtitles should have arrived */
686 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
687 do_emit_video(i->first, i->second);
694 /** @return Open subtitles for the frame at the given time, converted to images */
695 optional<PositionImage>
696 Player::open_subtitles_for_frame (DCPTime time) const
698 list<PositionImage> captions;
699 int const vfr = _film->video_frame_rate();
/* Gather active open subtitles which should be burnt in over this one frame */
703 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
706 /* Bitmap subtitles */
707 BOOST_FOREACH (BitmapText i, j.bitmap) {
712 /* i.image will already have been scaled to fit _video_container_size */
/* Rectangle is expressed as a proportion of the container */
713 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
719 lrint (_video_container_size.width * i.rectangle.x),
720 lrint (_video_container_size.height * i.rectangle.y)
726 /* String subtitles (rendered to an image) */
727 if (!j.string.empty ()) {
728 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
729 copy (s.begin(), s.end(), back_inserter (captions));
733 if (captions.empty ()) {
734 return optional<PositionImage> ();
/* Combine everything into a single image */
737 return merge (captions);
/* Handler for video data arriving from a piece's decoder: discards
   out-of-range frames, fills any gap since the last emitted frame (with the
   previous frame or black, handling 3D eye alternation), then emits this
   frame (repeated as the frame-rate change requires).
   NOTE(review): several lines (returns, braces, the PlayerVideo constructor
   arguments, the initial DCPTime t) are missing from this chunk. */
741 Player::video (weak_ptr<Piece> wp, ContentVideo video)
743 shared_ptr<Piece> piece = wp.lock ();
748 if (!piece->content->video->use()) {
752 FrameRateChange frc (_film, piece->content);
/* Drop alternate frames when converting down in rate */
753 if (frc.skip && (video.frame % 2) == 1) {
757 /* Time of the first frame we will emit */
758 DCPTime const time = content_video_to_dcp (piece, video.frame);
759 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
761 /* Discard if it's before the content's period or the last accurate seek. We can't discard
762 if it's after the content's period here as in that case we still need to fill any gap between
763 `now' and the end of the content's period.
765 if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
769 /* Fill gaps that we discover now that we have some video which needs to be emitted.
770 This is where we need to fill to.
772 DCPTime fill_to = min (time, piece->end(_film));
774 if (_last_video_time) {
775 DCPTime fill_from = max (*_last_video_time, piece->position());
777 /* Fill if we have more than half a frame to do */
778 if ((fill_to - fill_from) > one_video_frame() / 2) {
779 LastVideoMap::const_iterator last = _last_video.find (wp);
780 if (_film->three_d()) {
/* In 3D we must fill in eye order, stopping at the correct eye */
781 Eyes fill_to_eyes = video.eyes;
782 if (fill_to_eyes == EYES_BOTH) {
783 fill_to_eyes = EYES_LEFT;
785 if (fill_to == piece->end(_film)) {
786 /* Don't fill after the end of the content */
787 fill_to_eyes = EYES_LEFT;
789 DCPTime j = fill_from;
790 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
791 if (eyes == EYES_BOTH) {
794 while (j < fill_to || eyes != fill_to_eyes) {
795 if (last != _last_video.end()) {
796 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
797 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
798 copy->set_eyes (eyes);
799 emit_video (copy, j);
801 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
802 emit_video (black_player_video_frame(eyes), j);
/* Advance time only after emitting the right eye */
804 if (eyes == EYES_RIGHT) {
805 j += one_video_frame();
807 eyes = increment_eyes (eyes);
/* 2D: simply repeat the last frame (or black) per missing frame */
810 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
811 if (last != _last_video.end()) {
812 emit_video (last->second, j);
814 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame, remembering it for gap filling */
821 _last_video[wp].reset (
824 piece->content->video->crop (),
825 piece->content->video->fade (_film, video.frame),
826 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
827 _video_container_size,
830 piece->content->video->colour_conversion(),
831 piece->content->video->range(),
/* Emit the frame, repeated as required by the frame rate change */
839 for (int i = 0; i < frc.repeat; ++i) {
840 if (t < piece->end(_film)) {
841 emit_video (_last_video[wp], t);
843 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder: trims it to the
   content's period, applies gain, remapping and any audio processor, then
   pushes it into the merger.
   NOTE(review): various returns/braces and the `time = cut.second` line after
   the discard are missing from this chunk. */
848 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
850 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
852 shared_ptr<Piece> piece = wp.lock ();
857 shared_ptr<AudioContent> content = piece->content->audio;
858 DCPOMATIC_ASSERT (content);
860 int const rfr = content->resampled_frame_rate (_film);
862 /* Compute time in the DCP */
863 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
864 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
866 /* And the end of this block in the DCP */
867 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
869 /* Remove anything that comes before the start or after the end of the content */
870 if (time < piece->position()) {
871 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->position());
873 /* This audio is entirely discarded */
876 content_audio.audio = cut.first;
878 } else if (time > piece->end(_film)) {
881 } else if (end > piece->end(_film)) {
/* Truncate a block which overhangs the end of the content */
882 Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
883 if (remaining_frames == 0) {
886 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
889 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain, working on a copy so the decoder's buffer is untouched */
893 if (content->gain() != 0) {
894 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
895 gain->apply_gain (content->gain ());
896 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
901 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
905 if (_audio_processor) {
906 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
911 _audio_merger.push (content_audio.audio, time);
912 /* XXX: this almost certainly needs to be more efficient; perhaps pieces fill a map to find
913 * the piece from the stream, then we can call the right piece with no loop.
915 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
916 i->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
/* Handler for a bitmap subtitle arriving from a text decoder: applies the
   content's offsets and scaling, scales the image to the container and adds
   it to the active-texts list.
   NOTE(review): the `PlayerText ps;` declaration and some returns/braces are
   missing from this chunk. */
921 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
923 shared_ptr<Piece> piece = wp.lock ();
924 shared_ptr<const TextContent> text = wc.lock ();
925 if (!piece || !text) {
929 /* Apply content's subtitle offsets */
930 subtitle.sub.rectangle.x += text->x_offset ();
931 subtitle.sub.rectangle.y += text->y_offset ();
933 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
934 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
935 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
937 /* Apply content's subtitle scale */
938 subtitle.sub.rectangle.width *= text->x_scale ();
939 subtitle.sub.rectangle.height *= text->y_scale ();
942 shared_ptr<Image> image = subtitle.sub.image;
944 /* We will scale the subtitle up to fit _video_container_size */
945 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
946 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* Ignore degenerate (zero-sized) subtitles */
947 if (width == 0 || height == 0) {
951 dcp::Size scaled_size (width, height);
952 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
953 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
955 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a string (plain) subtitle arriving from a text decoder: applies
   the content's offsets/scales to each SubtitleString and adds the result to
   the active-texts list.
   NOTE(review): the `PlayerText ps;` declaration and some returns/braces are
   missing from this chunk. */
959 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
961 shared_ptr<Piece> piece = wp.lock ();
962 shared_ptr<const TextContent> text = wc.lock ();
963 if (!piece || !text) {
968 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles which start after the content finishes */
970 if (from > piece->end(_film)) {
974 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
975 s.set_h_position (s.h_position() + text->x_offset ());
976 s.set_v_position (s.v_position() + text->y_offset ());
977 float const xs = text->x_scale();
978 float const ys = text->y_scale();
979 float size = s.size();
981 /* Adjust size to express the common part of the scaling;
982 e.g. if xs = ys = 0.5 we scale size by 2.
984 if (xs > 1e-5 && ys > 1e-5) {
985 size *= 1 / min (1 / xs, 1 / ys);
989 /* Then express aspect ratio changes */
990 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
991 s.set_aspect_adjust (xs / ys);
/* Record the start time (ms precision) on the subtitle itself */
994 s.set_in (dcp::Time(from.seconds(), 1000));
995 ps.string.push_back (StringText (s, text->outline_width()));
996 ps.add_fonts (text->fonts ());
999 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for the end of a subtitle: closes the matching active text and, if
   it is not being burnt in, emits it via the Text signal.
   NOTE(review): the guard clauses for expired weak pointers and the
   out-of-range return are missing from this chunk. */
1003 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1005 shared_ptr<const TextContent> text = wc.lock ();
/* Ignore stops for texts we never saw start */
1010 if (!_active_texts[text->type()].have(wc)) {
1014 shared_ptr<Piece> piece = wp.lock ();
1019 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1021 if (dcp_to > piece->end(_film)) {
1025 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Emit (rather than burn) only if the text is in use and not forced to burn */
1027 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1028 if (text->use() && !always && !text->burn()) {
1029 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to the given DCP time.  If `accurate' is true the next
   emitted data will be exactly at `time'; otherwise it may be a little before.
   Clears all pending state (merger, active texts, delayed video) and seeks
   each piece's decoder as appropriate.
   NOTE(review): the suspended-return, the `i->done` updates and some braces
   are missing from this chunk. */
1034 Player::seek (DCPTime time, bool accurate)
1036 boost::mutex::scoped_lock lm (_mutex);
1037 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1040 /* We can't seek in this state */
1045 _shuffler->clear ();
1050 if (_audio_processor) {
1051 _audio_processor->flush ();
/* Throw away anything queued before the seek */
1054 _audio_merger.clear ();
1055 for (int i = 0; i < TEXT_COUNT; ++i) {
1056 _active_texts[i].clear ();
1059 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1060 if (time < i->position()) {
1061 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1062 we must seek this (following) content accurately, otherwise when we come to the end of the current
1063 content we may not start right at the beginning of the next, causing a gap (if the next content has
1064 been trimmed to a point between keyframes, or something).
1066 i->decoder->seek (dcp_to_content_time (i, i->position()), true);
1068 } else if (i->position() <= time && time < i->end(_film)) {
1069 /* During; seek to position */
1070 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1073 /* After; this piece is done */
/* For an accurate seek the emission clocks restart exactly at `time';
   otherwise they are left unset until data arrives */
1079 _last_video_time = time;
1080 _last_video_eyes = EYES_LEFT;
1081 _last_audio_time = time;
1083 _last_video_time = optional<DCPTime>();
1084 _last_video_eyes = optional<Eyes>();
1085 _last_audio_time = optional<DCPTime>();
1088 _black.set_position (time);
1089 _silent.set_position (time);
/* Forget remembered frames used for gap filling */
1091 _last_video.clear ();
/* Queue a video frame for emission, updating the video clock/eyes state.
   Frames are held in a short delay queue so that subtitles have time to
   arrive before the frame which needs them is emitted.
   NOTE(review): the else branch updating _last_video_eyes for EYES_LEFT and
   the pop_front of the delay queue are missing from this chunk. */
1095 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1097 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1098 player before the video that requires them.
1100 _delay.push_back (make_pair (pv, time));
/* Advance the clock once per complete frame (i.e. after the right eye in 3D) */
1102 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1103 _last_video_time = time + one_video_frame();
1105 _last_video_eyes = increment_eyes (pv->eyes());
/* Only emit once the delay queue is long enough */
1107 if (_delay.size() < 3) {
1111 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1113 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire old active texts, burn in any open
   subtitles for this time, then fire the Video signal.
   NOTE(review): the `if (subtitles)` guard and the final `Video (pv, time);`
   emission are missing from this chunk. */
1117 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1119 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1120 for (int i = 0; i < TEXT_COUNT; ++i) {
1121 _active_texts[i].clear_before (time);
1125 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1127 pv->set_text (subtitles.get ());
1134 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1136 /* Log if the assert below is about to fail */
1137 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1138 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1141 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1142 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1143 Audio (data, time, _film->audio_frame_rate());
1144 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover the given period, in blocks of at most 0.5s.
   NOTE(review): the early `return;` for an empty period and the `t += block;`
   loop increment are missing from this chunk. */
1148 Player::fill_audio (DCPTimePeriod period)
/* Nothing to do for an empty period */
1150 if (period.from == period.to) {
1154 DCPOMATIC_ASSERT (period.from < period.to);
1156 DCPTime t = period.from;
1157 while (t < period.to) {
1158 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1159 Frame const samples = block.frames_round(_film->audio_frame_rate());
1161 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1162 silence->make_silent ();
1163 emit_audio (silence, t);
1170 Player::one_video_frame () const
1172 return DCPTime::from_frames (1, _film->video_frame_rate ());
1175 pair<shared_ptr<AudioBuffers>, DCPTime>
1176 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1178 DCPTime const discard_time = discard_to - time;
1179 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1180 Frame remaining_frames = audio->frames() - discard_frames;
1181 if (remaining_frames <= 0) {
1182 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1184 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1185 return make_pair(cut, time + discard_time);
/* Set a forced JPEG2000 decode reduction for DCP content, with
   PENDING/CANCELLED/DONE change notifications.
   NOTE(review): the unlock + return for the unchanged case appears to be
   missing from this chunk. */
1189 Player::set_dcp_decode_reduction (optional<int> reduction)
1191 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1194 boost::mutex::scoped_lock lm (_mutex);
/* No-op if the value is unchanged */
1196 if (reduction == _dcp_decode_reduction) {
1198 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1202 _dcp_decode_reduction = reduction;
1203 setup_pieces_unlocked ();
1206 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1210 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1212 boost::mutex::scoped_lock lm (_mutex);
1214 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1215 if (i->content == content) {
1216 return content_time_to_dcp (i, t);
1220 /* We couldn't find this content; perhaps things are being changed over */
1221 return optional<DCPTime>();
1225 shared_ptr<const Playlist>
1226 Player::playlist () const
1228 return _playlist ? _playlist : _film->playlist();
1233 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1235 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);