2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 using namespace dcpomatic;
83 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
84 int const PlayerProperty::PLAYLIST = 701;
85 int const PlayerProperty::FILM_CONTAINER = 702;
86 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
87 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
88 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player for @param film using the film's own playlist.
   All the "ignore"/"burn"/"play referenced" flags start off false;
   tolerance of errors is taken from the film.
   NOTE(review): some initialisers (and presumably the body) are missing
   from this view of the file — confirm against the full source. */
90 Player::Player (shared_ptr<const Film> film)
93 	, _ignore_video (false)
94 	, _ignore_audio (false)
95 	, _ignore_text (false)
96 	, _always_burn_open_subtitles (false)
98 	, _tolerant (film->tolerant())
99 	, _play_referenced (false)
100 	, _audio_merger (_film->audio_frame_rate())
/* Construct a Player for @param film playing the explicitly-given
   @param playlist_ rather than the film's own playlist.  Wires up the
   film/playlist change signals, sets the container size from the film
   and performs an initial accurate seek to time zero. */
106 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
108 	, _playlist (playlist_)
110 	, _ignore_video (false)
111 	, _ignore_audio (false)
112 	, _ignore_text (false)
113 	, _always_burn_open_subtitles (false)
115 	, _tolerant (film->tolerant())
116 	, _play_referenced (false)
117 	, _audio_merger (_film->audio_frame_rate())
126 	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
127 	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first in line for the playlist's signals — hence at_front below. */
130 	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
131 	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
132 	set_video_container_size (_film->frame_size ());
	/* Set up the audio processor as if the film's AUDIO_PROCESSOR property had just changed */
134 	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
	/* Accurate seek to the start so the first pass() emits from time zero */
137 	seek (DCPTime (), true);
/* Public wrapper: take the state mutex then rebuild the pieces. */
146 Player::setup_pieces ()
148 	boost::mutex::scoped_lock lm (_mutex);
149 	setup_pieces_unlocked ();
/* Predicate used when building _black: true if @param content has video
   which is enabled for use. */
154 have_video (shared_ptr<const Content> content)
156 	return static_cast<bool>(content->video) && content->video->use();
/* Predicate used when building _silent: true if @param content has any audio. */
160 have_audio (shared_ptr<const Content> content)
162 	return static_cast<bool>(content->audio);
/* Rebuild the list of Pieces (content + decoder + frame-rate change) from the
   playlist, re-using decoders from the old pieces where the content matches.
   Caller must hold _mutex.  Also rebuilds _stream_states, _black and _silent,
   and resets the last-emitted-time bookkeeping.
   NOTE(review): this view of the file has lines missing (closing braces,
   some statements) — comments below describe only the visible code. */
166 Player::setup_pieces_unlocked ()
168 	_playback_length = _playlist ? _playlist->length(_film) : _film->length();
	/* Keep the old pieces so their decoders can be re-used below */
170 	list<shared_ptr<Piece> > old_pieces = _pieces;
174 	_shuffler = new Shuffler();
175 	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
177 	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
		/* Skip content whose files have gone missing */
179 		if (!i->paths_valid ()) {
183 		if (_ignore_video && _ignore_audio && i->text.empty()) {
184 			/* We're only interested in text and this content has none */
		/* Re-use an old decoder for this content if we have one; decoder_factory
		   can take it as a hint (presumably to preserve decode state — confirm). */
188 		shared_ptr<Decoder> old_decoder;
189 		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
190 			if (j->content == i) {
191 				old_decoder = j->decoder;
196 		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
197 		DCPOMATIC_ASSERT (decoder);
199 		FrameRateChange frc (_film, i);
201 		if (decoder->video && _ignore_video) {
202 			decoder->video->set_ignore (true);
205 		if (decoder->audio && _ignore_audio) {
206 			decoder->audio->set_ignore (true);
210 		BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
211 			i->set_ignore (true);
		/* DCP content needs to know whether referenced assets should really be decoded */
215 		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
217 			dcp->set_decode_referenced (_play_referenced);
218 			if (_play_referenced) {
219 				dcp->set_forced_reduction (_dcp_decode_reduction);
223 		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
224 		_pieces.push_back (piece);
226 		if (decoder->video) {
227 			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
228 				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
229 				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
231 				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
235 		if (decoder->audio) {
236 			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		/* Connect start/stop signals for every text (subtitle/caption) decoder */
239 		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
241 		while (j != decoder->text.end()) {
242 			(*j)->BitmapStart.connect (
243 				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
245 			(*j)->PlainStart.connect (
246 				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
249 				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
255 		if (decoder->atmos) {
256 			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
	/* Record, per audio stream, which piece it belongs to and where it starts */
260 	_stream_states.clear ();
261 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
262 		if (i->content->audio) {
263 			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
264 				_stream_states[j] = StreamState (i, i->content->position ());
	/* _black/_silent track the periods with no video/audio content */
269 	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
270 	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
272 	_last_video_time = DCPTime ();
273 	_last_video_eyes = EYES_BOTH;
274 	_last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist.  PENDING suspends the
   player, DONE rebuilds the pieces, CANCELLED resumes; in every case the
   change is forwarded to our own Change signal. */
278 Player::playlist_content_change (ChangeType type, int property, bool frequent)
280 	if (type == CHANGE_TYPE_PENDING) {
281 		/* The player content is probably about to change, so we can't carry on
282 		   until that has happened and we've rebuilt our pieces. Stop pass()
283 		   and seek() from working until then.
286 	} else if (type == CHANGE_TYPE_DONE) {
287 		/* A change in our content has gone through. Re-build our pieces. */
290 	} else if (type == CHANGE_TYPE_CANCELLED) {
294 	Change (type, property, frequent);
/* Set the size of the "container" into which video will be scaled/padded.
   Emits PENDING, then either CANCELLED (no change) or DONE; also rebuilds
   the cached black frame at the new size. */
298 Player::set_video_container_size (dcp::Size s)
300 	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
	/* Scope for the lock: it must be released before the final DONE signal below */
303 		boost::mutex::scoped_lock lm (_mutex);
305 		if (s == _video_container_size) {
307 			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
311 		_video_container_size = s;
313 		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
314 		_black_image->make_black ();
317 	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a structural change to the playlist itself: rebuild pieces when
   the change is complete, and forward the signal to our listeners. */
321 Player::playlist_change (ChangeType type)
323 	if (type == CHANGE_TYPE_DONE) {
326 	Change (type, PlayerProperty::PLAYLIST, false);
/* React to a change in a Film property that affects our output, translating
   it into the corresponding PlayerProperty Change signal and updating any
   dependent state (pieces, audio processor, audio merger). */
330 Player::film_change (ChangeType type, Film::Property p)
332 	/* Here we should notice Film properties that affect our output, and
333 	   alert listeners that our output now would be different to how it was
334 	   last time we were run.
337 	if (p == Film::CONTAINER) {
338 		Change (type, PlayerProperty::FILM_CONTAINER, false);
339 	} else if (p == Film::VIDEO_FRAME_RATE) {
340 		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
341 		   so we need new pieces here.
343 		if (type == CHANGE_TYPE_DONE) {
346 		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
347 	} else if (p == Film::AUDIO_PROCESSOR) {
348 		if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
349 			boost::mutex::scoped_lock lm (_mutex);
			/* Clone so we have our own instance running at the film's audio rate */
350 			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
352 	} else if (p == Film::AUDIO_CHANNELS) {
353 		if (type == CHANGE_TYPE_DONE) {
354 			boost::mutex::scoped_lock lm (_mutex);
			/* Channel count changed: any merged-but-unemitted audio is now invalid */
355 			_audio_merger.clear ();
/* @return a PlayerVideo holding a black frame, at the current container size,
   for the given @param eyes (2D or one eye of a 3D pair).
   NOTE(review): several constructor arguments are missing from this view. */
360 shared_ptr<PlayerVideo>
361 Player::black_player_video_frame (Eyes eyes) const
363 	return shared_ptr<PlayerVideo> (
365 			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
368 			_video_container_size,
369 			_video_container_size,
372 			PresetColourConversion::all().front().conversion,
374 			boost::weak_ptr<Content>(),
375 			boost::optional<Frame>(),
/* Collect the fonts used by every text (subtitle/caption) content in our
   pieces.  May contain duplicates; see the XXX below about duplicate IDs. */
382 list<shared_ptr<Font> >
383 Player::get_subtitle_fonts ()
385 	boost::mutex::scoped_lock lm (_mutex);
387 	list<shared_ptr<Font> > fonts;
388 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
389 		BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
390 			/* XXX: things may go wrong if there are duplicate font IDs
391 			   with different font files.
393 			list<shared_ptr<Font> > f = j->fonts ();
394 			copy (f.begin(), f.end(), back_inserter (fonts));
401 /** Set this player never to produce any video data */
403 Player::set_ignore_video ()
405 	boost::mutex::scoped_lock lm (_mutex);
406 	_ignore_video = true;
	/* Rebuild pieces so decoders pick up the new ignore flag */
407 	setup_pieces_unlocked ();
/* Set this player never to produce any audio data. */
411 Player::set_ignore_audio ()
413 	boost::mutex::scoped_lock lm (_mutex);
414 	_ignore_audio = true;
415 	setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): the line setting the _ignore_text flag is missing from
   this view of the file. */
419 Player::set_ignore_text ()
421 	boost::mutex::scoped_lock lm (_mutex);
423 	setup_pieces_unlocked ();
426 /** Set the player to always burn open texts into the image regardless of the content settings */
428 Player::set_always_burn_open_subtitles ()
430 	boost::mutex::scoped_lock lm (_mutex);
	/* No setup_pieces needed: this flag is consulted at emit time, not decode time */
431 	_always_burn_open_subtitles = true;
434 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function name line and the line setting the _fast flag
   are missing from this view of the file. */
438 	boost::mutex::scoped_lock lm (_mutex);
440 	setup_pieces_unlocked ();
/* Make the player actually decode and play content that a DCP merely
   references (rather than skipping over it). */
444 Player::set_play_referenced ()
446 	boost::mutex::scoped_lock lm (_mutex);
447 	_play_referenced = true;
448 	setup_pieces_unlocked ();
/* Trim reel asset @param r by @param reel_trim_start / @param reel_trim_end
   frames and, if anything remains, append it to @param a as a
   ReferencedReelAsset placed at DCP time @param from at frame rate @param ffr. */
452 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
454 	DCPOMATIC_ASSERT (r);
455 	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
456 	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
457 	if (r->actual_duration() > 0) {
459 			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Build the list of reel assets that are referenced (rather than re-encoded)
   from DCP content in the playlist, with per-reel trims applied.
   See doc/design/trim_reels.svg in the DCP-o-matic source for the trim maths. */
464 list<ReferencedReelAsset>
465 Player::get_reel_assets ()
467 	/* Does not require a lock on _mutex as it's only called from DCPEncoder */
469 	list<ReferencedReelAsset> a;
471 	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
472 		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
477 		scoped_ptr<DCPDecoder> decoder;
479 			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
484 		DCPOMATIC_ASSERT (j->video_frame_rate ());
485 		double const cfr = j->video_frame_rate().get();
486 		Frame const trim_start = j->trim_start().frames_round (cfr);
487 		Frame const trim_end = j->trim_end().frames_round (cfr);
488 		int const ffr = _film->video_frame_rate ();
490 		/* position in the asset from the start */
491 		int64_t offset_from_start = 0;
492 		/* position in the asset from the end */
493 		int64_t offset_from_end = 0;
494 		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
495 			/* Assume that main picture duration is the length of the reel */
496 			offset_from_end += k->main_picture()->actual_duration();
499 		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
501 			/* Assume that main picture duration is the length of the reel */
502 			int64_t const reel_duration = k->main_picture()->actual_duration();
504 			/* See doc/design/trim_reels.svg */
505 			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
506 			Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
508 			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
509 			if (j->reference_video ()) {
510 				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
513 			if (j->reference_audio ()) {
514 				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
517 			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
518 				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
521 			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
522 				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
523 					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
527 			offset_from_start += reel_duration;
528 			offset_from_end -= reel_duration;
/* Body of Player::pass(): make one unit of progress — ask the decoder (or the
   black/silence filler) which is furthest behind to emit some data, then flush
   any audio that is definitely complete and any delayed video.
   NOTE(review): the function signature and a number of statements are missing
   from this view of the file. */
538 	boost::mutex::scoped_lock lm (_mutex);
541 		/* We can't pass in this state */
545 	if (_playback_length == DCPTime()) {
546 		/* Special; just give one black frame */
547 		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
551 	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */
553 	shared_ptr<Piece> earliest_content;
554 	optional<DCPTime> earliest_time;
556 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
561 		DCPTime const t = i->content_time_to_dcp (max(i->decoder->position(), i->content->trim_start()));
562 		if (t > i->end(_film)) {
566 		/* Given two choices at the same time, pick the one with texts so we see it before
569 		if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
571 			earliest_content = i;
585 	if (earliest_content) {
	/* Black/silence fillers can be even further behind than any decoder */
589 	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
590 		earliest_time = _black.position ();
594 	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
595 		earliest_time = _silent.position ();
602 		earliest_content->done = earliest_content->decoder->pass ();
603 		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
604 		if (dcp && !_play_referenced && dcp->reference_audio()) {
605 			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
606 			   to `hide' the fact that no audio was emitted during the referenced DCP (though
607 			   we need to behave as though it was).
609 			_last_audio_time = dcp->end (_film);
614 		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
615 		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
616 		_black.set_position (_black.position() + one_video_frame());
620 		DCPTimePeriod period (_silent.period_at_position());
621 		if (_last_audio_time) {
622 			/* Sometimes the thing that happened last finishes fractionally before
623 			   or after this silence. Bodge the start time of the silence to fix it.
624 			   I think this is nothing to worry about since we will just add or
625 			   remove a little silence at the end of some content.
627 			int64_t const error = labs(period.from.get() - _last_audio_time->get());
628 			/* Let's not worry about less than a frame at 24fps */
629 			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
630 			if (error >= too_much_error) {
631 				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
633 			DCPOMATIC_ASSERT (error < too_much_error);
634 			period.from = *_last_audio_time;
		/* Emit silence in at most one-video-frame chunks */
636 		if (period.duration() > one_video_frame()) {
637 			period.to = period.from + one_video_frame();
640 		_silent.set_position (period.to);
648 	/* Emit any audio that is ready */
650 	/* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
651 	   of our streams, or the position of the _silent.
653 	DCPTime pull_to = _playback_length;
654 	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
655 		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
656 			pull_to = i->second.last_push_end;
659 	if (!_silent.done() && _silent.position() < pull_to) {
660 		pull_to = _silent.position();
663 	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
664 	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
665 		if (_last_audio_time && i->second < *_last_audio_time) {
666 			/* This new data comes before the last we emitted (or the last seek); discard it */
667 			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
672 		} else if (_last_audio_time && i->second > *_last_audio_time) {
673 			/* There's a gap between this data and the last we emitted; fill with silence */
674 			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
677 		emit_audio (i->first, i->second);
	/* Flush any video whose delay period has elapsed */
682 		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
683 			do_emit_video(i->first, i->second);
690 /** @return Open subtitles for the frame at the given time, converted to images */
691 optional<PositionImage>
692 Player::open_subtitles_for_frame (DCPTime time) const
694 	list<PositionImage> captions;
695 	int const vfr = _film->video_frame_rate();
	/* Gather all active open subtitles which should be burnt in during this frame */
699 		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
702 		/* Bitmap subtitles */
703 		BOOST_FOREACH (BitmapText i, j.bitmap) {
708 			/* i.image will already have been scaled to fit _video_container_size */
709 			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
			/* Subtitle rectangles are proportional; convert to pixel positions */
715 					lrint (_video_container_size.width * i.rectangle.x),
716 					lrint (_video_container_size.height * i.rectangle.y)
722 		/* String subtitles (rendered to an image) */
723 		if (!j.string.empty ()) {
724 			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
725 			copy (s.begin(), s.end(), back_inserter (captions));
729 	if (captions.empty ()) {
730 		return optional<PositionImage> ();
733 	return merge (captions);
/* Handle a frame of video arriving from a piece's decoder: work out its DCP
   time, fill any gap since the last emitted video (with repeated frames or
   black, handling 3D eye interleaving), then emit the frame (repeated as the
   frame-rate change requires).
   NOTE(review): some statements are missing from this view of the file. */
737 Player::video (weak_ptr<Piece> wp, ContentVideo video)
739 	shared_ptr<Piece> piece = wp.lock ();
744 	if (!piece->video_use()) {
	/* frc.skip drops every other frame when the content rate is double the DCP rate */
748 	if (piece->frc.skip && (video.frame % 2) == 1) {
752 	/* Time of the first frame we will emit */
753 	DCPTime const time = piece->content_video_to_dcp (video.frame);
755 	/* Discard if it's before the content's period or the last accurate seek. We can't discard
756 	   if it's after the content's period here as in that case we still need to fill any gap between
757 	   `now' and the end of the content's period.
759 	if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
763 	/* Fill gaps that we discover now that we have some video which needs to be emitted.
764 	   This is where we need to fill to.
766 	DCPTime fill_to = min (time, piece->end(_film));
768 	if (_last_video_time) {
769 		DCPTime fill_from = max (*_last_video_time, piece->position());
771 		/* Fill if we have more than half a frame to do */
772 		if ((fill_to - fill_from) > one_video_frame() / 2) {
773 			LastVideoMap::const_iterator last = _last_video.find (wp);
774 			if (_film->three_d()) {
775 				Eyes fill_to_eyes = video.eyes;
776 				if (fill_to_eyes == EYES_BOTH) {
777 					fill_to_eyes = EYES_LEFT;
779 				if (fill_to == piece->end(_film)) {
780 					/* Don't fill after the end of the content */
781 					fill_to_eyes = EYES_LEFT;
783 				DCPTime j = fill_from;
784 				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
785 				if (eyes == EYES_BOTH) {
				/* Step through eye-by-eye, advancing time after each RIGHT eye */
788 				while (j < fill_to || eyes != fill_to_eyes) {
789 					if (last != _last_video.end()) {
790 						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
791 						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
792 						copy->set_eyes (eyes);
793 						emit_video (copy, j);
795 						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
796 						emit_video (black_player_video_frame(eyes), j);
798 					if (eyes == EYES_RIGHT) {
799 						j += one_video_frame();
801 					eyes = increment_eyes (eyes);
			/* 2D: repeat the last frame (or black) once per video frame */
804 				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
805 					if (last != _last_video.end()) {
806 						emit_video (last->second, j);
808 						emit_video (black_player_video_frame(EYES_BOTH), j);
	/* Remember this frame so it can be used for gap-filling later */
815 	_last_video[wp].reset (
819 			piece->video_fade(_film, video.frame),
820 			scale_for_display(piece->video_scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
821 			_video_container_size,
824 			piece->video_colour_conversion(),
825 			piece->video_range(),
	/* Emit the frame, repeated as required by the frame-rate change */
833 	for (int i = 0; i < piece->frc.repeat; ++i) {
834 		if (t < piece->end(_film)) {
835 			emit_video (_last_video[wp], t);
837 		t += one_video_frame ();
/* Handle a block of audio arriving from a piece's decoder: trim anything
   outside the content's period, apply gain, remap to the film's channel
   layout, run the audio processor, then push into the merger.
   NOTE(review): some statements are missing from this view of the file. */
842 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
844 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
846 	shared_ptr<Piece> piece = wp.lock ();
851 	int const rfr = piece->audio_resampled_frame_rate (_film);
853 	/* Compute time in the DCP */
854 	DCPTime time = piece->resampled_audio_to_dcp (_film, content_audio.frame);
855 	/* And the end of this block in the DCP */
856 	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
858 	/* Remove anything that comes before the start or after the end of the content */
859 	if (time < piece->position()) {
860 		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->position());
862 			/* This audio is entirely discarded */
865 		content_audio.audio = cut.first;
867 	} else if (time > piece->end(_film)) {
870 	} else if (end > piece->end(_film)) {
871 		Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
872 		if (remaining_frames == 0) {
875 		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
878 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
	/* Gain: copy first so we don't modify the decoder's buffers */
882 	if (piece->audio_gain() != 0) {
883 		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
884 		gain->apply_gain (piece->audio_gain());
885 		content_audio.audio = gain;
	/* Remap the stream's channels onto the film's channel layout */
890 	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
894 	if (_audio_processor) {
895 		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
900 	_audio_merger.push (content_audio.audio, time);
901 	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
902 	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of a bitmap subtitle/caption from a text decoder: apply
   the content's offsets and scale, scale the image to the container size and
   register it as an active text starting at the corresponding DCP time. */
906 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
908 	shared_ptr<Piece> piece = wp.lock ();
909 	shared_ptr<const TextContent> text = wc.lock ();
910 	if (!piece || !text) {
914 	/* Apply content's subtitle offsets */
915 	subtitle.sub.rectangle.x += text->x_offset ();
916 	subtitle.sub.rectangle.y += text->y_offset ();
918 	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
919 	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
920 	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
922 	/* Apply content's subtitle scale */
923 	subtitle.sub.rectangle.width *= text->x_scale ();
924 	subtitle.sub.rectangle.height *= text->y_scale ();
927 	shared_ptr<Image> image = subtitle.sub.image;
929 	/* We will scale the subtitle up to fit _video_container_size */
930 	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
931 	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
932 	if (width == 0 || height == 0) {
936 	dcp::Size scaled_size (width, height);
937 	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
938 	DCPTime from (piece->content_time_to_dcp(subtitle.from()));
940 	_active_texts[text->type()].add_from (wc, ps, from);
/* Handle the start of a string (plain-text) subtitle/caption: apply the
   content's position offsets, scale and aspect adjustment to each
   SubtitleString, then register the result as an active text. */
944 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
946 	shared_ptr<Piece> piece = wp.lock ();
947 	shared_ptr<const TextContent> text = wc.lock ();
948 	if (!piece || !text) {
953 	DCPTime const from (piece->content_time_to_dcp( subtitle.from()));
955 	if (from > piece->end(_film)) {
959 	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
960 		s.set_h_position (s.h_position() + text->x_offset ());
961 		s.set_v_position (s.v_position() + text->y_offset ());
962 		float const xs = text->x_scale();
963 		float const ys = text->y_scale();
964 		float size = s.size();
966 		/* Adjust size to express the common part of the scaling;
967 		   e.g. if xs = ys = 0.5 we scale size by 2.
969 		if (xs > 1e-5 && ys > 1e-5) {
970 			size *= 1 / min (1 / xs, 1 / ys);
974 		/* Then express aspect ratio changes */
975 		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
976 			s.set_aspect_adjust (xs / ys);
		/* libdcp times are expressed with a 1000-unit editable rate here */
979 		s.set_in (dcp::Time(from.seconds(), 1000));
980 		ps.string.push_back (StringText (s, text->outline_width()));
981 		ps.add_fonts (text->fonts ());
984 	_active_texts[text->type()].add_from (wc, ps, from);
/* Handle the end of a subtitle/caption at content time @param to: close the
   matching active text and, if it is not to be burnt in, emit it via the
   Text signal. */
988 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
990 	shared_ptr<const TextContent> text = wc.lock ();
	/* Nothing to stop if we never saw this text start */
995 	if (!_active_texts[text->type()].have(wc)) {
999 	shared_ptr<Piece> piece = wp.lock ();
1004 	DCPTime const dcp_to = piece->content_time_to_dcp(to);
1006 	if (dcp_to > piece->end(_film)) {
1010 	pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1012 	bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1013 	if (text->use() && !always && !text->burn()) {
1014 		Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to DCP time @param time.  @param accurate true to decode
   exactly from the requested time; false allows starting from a nearby point
   (e.g. a keyframe).  Clears all buffered state, seeks every piece's decoder,
   and resets the last-emitted bookkeeping. */
1019 Player::seek (DCPTime time, bool accurate)
1021 	boost::mutex::scoped_lock lm (_mutex);
1024 		/* We can't seek in this state */
	/* Drop any part-assembled 3D frames */
1029 		_shuffler->clear ();
1034 	if (_audio_processor) {
1035 		_audio_processor->flush ();
1038 	_audio_merger.clear ();
1039 	for (int i = 0; i < TEXT_COUNT; ++i) {
1040 		_active_texts[i].clear ();
1043 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1044 		if (time < i->content->position()) {
1045 			/* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1046 			   we must seek this (following) content accurately, otherwise when we come to the end of the current
1047 			   content we may not start right at the beginning of the next, causing a gap (if the next content has
1048 			   been trimmed to a point between keyframes, or something).
1050 			i->decoder->seek (i->dcp_to_content_time(_film, i->content->position()), true);
1052 		} else if (i->content->position() <= time && time < i->end(_film)) {
1053 			/* During; seek to position */
1054 			i->decoder->seek (i->dcp_to_content_time(_film, time), accurate);
1057 			/* After; this piece is done */
	/* Accurate seeks pin the next emit times; inaccurate ones leave them open */
1063 		_last_video_time = time;
1064 		_last_video_eyes = EYES_LEFT;
1065 		_last_audio_time = time;
1067 		_last_video_time = optional<DCPTime>();
1068 		_last_video_eyes = optional<Eyes>();
1069 		_last_audio_time = optional<DCPTime>();
1072 	_black.set_position (time);
1073 	_silent.set_position (time);
1075 	_last_video.clear ();
/* Queue @param pv for emission at @param time, updating the last-video
   bookkeeping; frames are actually emitted via do_emit_video() once the
   small delay queue has filled. */
1079 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1081 	/* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1082 	   player before the video that requires them.
1084 	_delay.push_back (make_pair (pv, time));
	/* Only advance the clock once both eyes (or a 2D frame) have gone by */
1086 	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1087 		_last_video_time = time + one_video_frame();
1089 	_last_video_eyes = increment_eyes (pv->eyes());
1091 	if (_delay.size() < 3) {
	/* Queue is full enough: emit the oldest entry */
1095 	pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1097 	do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire finished texts, burn in any open
   subtitles for this frame, then fire the Video signal (the signal emission
   itself is missing from this view). */
1101 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1103 	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1104 		for (int i = 0; i < TEXT_COUNT; ++i) {
1105 			_active_texts[i].clear_before (time);
1109 	optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1111 		pv->set_text (subtitles.get ());
/* Emit a block of audio via the Audio signal and advance _last_audio_time.
   Audio must be contiguous: anything more than half a sample (at 48kHz) out
   of sequence is logged and then asserted on. */
1118 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1120 	/* Log if the assert below is about to fail */
1121 	if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1122 		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1125 	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1126 	DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1127 	Audio (data, time, _film->audio_frame_rate());
1128 	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover @param period, in blocks of at most half a second. */
1132 Player::fill_audio (DCPTimePeriod period)
1134 	if (period.from == period.to) {
1138 	DCPOMATIC_ASSERT (period.from < period.to);
1140 	DCPTime t = period.from;
1141 	while (t < period.to) {
1142 		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1143 		Frame const samples = block.frames_round(_film->audio_frame_rate());
		/* samples could be 0 for a very short block; guard presumably exists in the
		   elided lines — TODO confirm against the full source */
1145 			shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1146 			silence->make_silent ();
1147 			emit_audio (silence, t);
/* @return the duration of one video frame at the film's video frame rate. */
1154 Player::one_video_frame () const
1156 	return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of @param audio (which starts at @param time) that falls
   before @param discard_to.  @return the remaining audio and its new start
   time; the buffer pointer is null if everything was discarded. */
1159 pair<shared_ptr<AudioBuffers>, DCPTime>
1160 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1162 	DCPTime const discard_time = discard_to - time;
1163 	Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1164 	Frame remaining_frames = audio->frames() - discard_frames;
1165 	if (remaining_frames <= 0) {
1166 		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1168 	shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1169 	return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode resolution reduction for DCP content (none to
   decode at full resolution).  Emits PENDING then CANCELLED/DONE around the
   change, rebuilding pieces when the value actually changes. */
1173 Player::set_dcp_decode_reduction (optional<int> reduction)
1175 	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
	/* Scope for the lock: released before the DONE signal below */
1178 		boost::mutex::scoped_lock lm (_mutex);
1180 		if (reduction == _dcp_decode_reduction) {
1182 			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1186 		_dcp_decode_reduction = reduction;
1187 		setup_pieces_unlocked ();
1190 	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Convert @param t, a time within @param content, to DCP time; returns
   an empty optional if the content is not (currently) in our pieces. */
1194 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1196 	boost::mutex::scoped_lock lm (_mutex);
1198 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1199 		if (i->content == content) {
1200 			return i->content_time_to_dcp (t);
1204 	/* We couldn't find this content; perhaps things are being changed over */
1205 	return optional<DCPTime>();
/* @return the playlist we are playing: our own override if one was given
   at construction, otherwise the film's. */
1209 shared_ptr<const Playlist>
1210 Player::playlist () const
1212 	return _playlist ? _playlist : _film->playlist();
/* Forward Atmos data from a decoder via the Atmos signal, converting the
   frame index to DCP time at the film's video frame rate. */
1217 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1219 	Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);