/*
    Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
*/
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
75 using std::shared_ptr;
77 using std::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80 #if BOOST_VERSION >= 106100
81 using namespace boost::placeholders;
83 using namespace dcpomatic;
85 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
86 int const PlayerProperty::PLAYLIST = 701;
87 int const PlayerProperty::FILM_CONTAINER = 702;
88 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
89 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
90 int const PlayerProperty::PLAYBACK_LENGTH = 705;
92 Player::Player (shared_ptr<const Film> film)
95 , _ignore_video (false)
96 , _ignore_audio (false)
97 , _ignore_text (false)
98 , _always_burn_open_subtitles (false)
100 , _tolerant (film->tolerant())
101 , _play_referenced (false)
102 , _audio_merger (_film->audio_frame_rate())
108 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
110 , _playlist (playlist_)
112 , _ignore_video (false)
113 , _ignore_audio (false)
114 , _ignore_text (false)
115 , _always_burn_open_subtitles (false)
117 , _tolerant (film->tolerant())
118 , _play_referenced (false)
119 , _audio_merger (_film->audio_frame_rate())
128 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
129 /* The butler must hear about this first, so since we are proxying this through to the butler we must
132 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
133 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
134 set_video_container_size (_film->frame_size ());
136 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
139 seek (DCPTime (), true);
148 Player::setup_pieces ()
150 boost::mutex::scoped_lock lm (_mutex);
151 setup_pieces_unlocked ();
156 have_video (shared_ptr<const Content> content)
158 return static_cast<bool>(content->video) && content->video->use();
162 have_audio (shared_ptr<const Content> content)
164 return static_cast<bool>(content->audio);
168 Player::setup_pieces_unlocked ()
170 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
172 list<shared_ptr<Piece> > old_pieces = _pieces;
176 _shuffler = new Shuffler();
177 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
179 for (auto i: playlist()->content()) {
181 if (!i->paths_valid ()) {
185 if (_ignore_video && _ignore_audio && i->text.empty()) {
186 /* We're only interested in text and this content has none */
190 shared_ptr<Decoder> old_decoder;
191 for (auto j: old_pieces) {
192 if (j->content == i) {
193 old_decoder = j->decoder;
198 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
199 DCPOMATIC_ASSERT (decoder);
201 FrameRateChange frc (_film, i);
203 if (decoder->video && _ignore_video) {
204 decoder->video->set_ignore (true);
207 if (decoder->audio && _ignore_audio) {
208 decoder->audio->set_ignore (true);
212 for (auto i: decoder->text) {
213 i->set_ignore (true);
217 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
219 dcp->set_decode_referenced (_play_referenced);
220 if (_play_referenced) {
221 dcp->set_forced_reduction (_dcp_decode_reduction);
225 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
226 _pieces.push_back (piece);
228 if (decoder->video) {
229 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
230 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
231 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
233 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
237 if (decoder->audio) {
238 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
241 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
243 while (j != decoder->text.end()) {
244 (*j)->BitmapStart.connect (
245 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
247 (*j)->PlainStart.connect (
248 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
251 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
257 if (decoder->atmos) {
258 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
262 _stream_states.clear ();
263 for (auto i: _pieces) {
264 if (i->content->audio) {
265 for (auto j: i->content->audio->streams()) {
266 _stream_states[j] = StreamState (i, i->content->position ());
271 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
272 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
274 _last_video_time = DCPTime ();
275 _last_video_eyes = EYES_BOTH;
276 _last_audio_time = DCPTime ();
280 Player::playlist_content_change (ChangeType type, int property, bool frequent)
282 if (property == VideoContentProperty::CROP) {
283 if (type == CHANGE_TYPE_DONE) {
284 dcp::Size const vcs = video_container_size();
285 boost::mutex::scoped_lock lm (_mutex);
286 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
287 i->first->reset_metadata (_film, vcs);
291 if (type == CHANGE_TYPE_PENDING) {
292 /* The player content is probably about to change, so we can't carry on
293 until that has happened and we've rebuilt our pieces. Stop pass()
294 and seek() from working until then.
297 } else if (type == CHANGE_TYPE_DONE) {
298 /* A change in our content has gone through. Re-build our pieces. */
301 } else if (type == CHANGE_TYPE_CANCELLED) {
306 Change (type, property, frequent);
310 Player::set_video_container_size (dcp::Size s)
312 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
315 boost::mutex::scoped_lock lm (_mutex);
317 if (s == _video_container_size) {
319 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
323 _video_container_size = s;
325 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
326 _black_image->make_black ();
329 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
333 Player::playlist_change (ChangeType type)
335 if (type == CHANGE_TYPE_DONE) {
338 Change (type, PlayerProperty::PLAYLIST, false);
342 Player::film_change (ChangeType type, Film::Property p)
344 /* Here we should notice Film properties that affect our output, and
345 alert listeners that our output now would be different to how it was
346 last time we were run.
349 if (p == Film::CONTAINER) {
350 Change (type, PlayerProperty::FILM_CONTAINER, false);
351 } else if (p == Film::VIDEO_FRAME_RATE) {
352 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
353 so we need new pieces here.
355 if (type == CHANGE_TYPE_DONE) {
358 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
359 } else if (p == Film::AUDIO_PROCESSOR) {
360 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
361 boost::mutex::scoped_lock lm (_mutex);
362 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
364 } else if (p == Film::AUDIO_CHANNELS) {
365 if (type == CHANGE_TYPE_DONE) {
366 boost::mutex::scoped_lock lm (_mutex);
367 _audio_merger.clear ();
372 shared_ptr<PlayerVideo>
373 Player::black_player_video_frame (Eyes eyes) const
375 return shared_ptr<PlayerVideo> (
377 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
380 _video_container_size,
381 _video_container_size,
384 PresetColourConversion::all().front().conversion,
386 std::weak_ptr<Content>(),
387 boost::optional<Frame>(),
394 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
396 DCPTime s = t - piece->content->position ();
397 s = min (piece->content->length_after_trim(_film), s);
398 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
400 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
401 then convert that ContentTime to frames at the content's rate. However this fails for
402 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
403 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
405 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
407 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
411 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
413 /* See comment in dcp_to_content_video */
414 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
415 return d + piece->content->position();
419 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
421 DCPTime s = t - piece->content->position ();
422 s = min (piece->content->length_after_trim(_film), s);
423 /* See notes in dcp_to_content_video */
424 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
428 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
430 /* See comment in dcp_to_content_video */
431 return DCPTime::from_frames (f, _film->audio_frame_rate())
432 - DCPTime (piece->content->trim_start(), piece->frc)
433 + piece->content->position();
437 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
439 DCPTime s = t - piece->content->position ();
440 s = min (piece->content->length_after_trim(_film), s);
441 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
445 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
447 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
451 Player::get_subtitle_fonts ()
453 boost::mutex::scoped_lock lm (_mutex);
455 vector<FontData> fonts;
456 for (auto i: _pieces) {
457 /* XXX: things may go wrong if there are duplicate font IDs
458 with different font files.
460 vector<FontData> f = i->decoder->fonts ();
461 copy (f.begin(), f.end(), back_inserter(fonts));
467 /** Set this player never to produce any video data */
469 Player::set_ignore_video ()
471 boost::mutex::scoped_lock lm (_mutex);
472 _ignore_video = true;
473 setup_pieces_unlocked ();
477 Player::set_ignore_audio ()
479 boost::mutex::scoped_lock lm (_mutex);
480 _ignore_audio = true;
481 setup_pieces_unlocked ();
485 Player::set_ignore_text ()
487 boost::mutex::scoped_lock lm (_mutex);
489 setup_pieces_unlocked ();
492 /** Set the player to always burn open texts into the image regardless of the content settings */
494 Player::set_always_burn_open_subtitles ()
496 boost::mutex::scoped_lock lm (_mutex);
497 _always_burn_open_subtitles = true;
500 /** Sets up the player to be faster, possibly at the expense of quality */
504 boost::mutex::scoped_lock lm (_mutex);
506 setup_pieces_unlocked ();
510 Player::set_play_referenced ()
512 boost::mutex::scoped_lock lm (_mutex);
513 _play_referenced = true;
514 setup_pieces_unlocked ();
518 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
520 DCPOMATIC_ASSERT (r);
521 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
522 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
523 if (r->actual_duration() > 0) {
525 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
530 list<ReferencedReelAsset>
531 Player::get_reel_assets ()
533 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
535 list<ReferencedReelAsset> a;
537 for (auto i: playlist()->content()) {
538 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
543 scoped_ptr<DCPDecoder> decoder;
545 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
550 DCPOMATIC_ASSERT (j->video_frame_rate ());
551 double const cfr = j->video_frame_rate().get();
552 Frame const trim_start = j->trim_start().frames_round (cfr);
553 Frame const trim_end = j->trim_end().frames_round (cfr);
554 int const ffr = _film->video_frame_rate ();
556 /* position in the asset from the start */
557 int64_t offset_from_start = 0;
558 /* position in the asset from the end */
559 int64_t offset_from_end = 0;
560 for (auto k: decoder->reels()) {
561 /* Assume that main picture duration is the length of the reel */
562 offset_from_end += k->main_picture()->actual_duration();
565 for (auto k: decoder->reels()) {
567 /* Assume that main picture duration is the length of the reel */
568 int64_t const reel_duration = k->main_picture()->actual_duration();
570 /* See doc/design/trim_reels.svg */
571 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
572 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
574 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
575 if (j->reference_video ()) {
576 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
579 if (j->reference_audio ()) {
580 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
583 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
584 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
587 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
588 for (auto l: k->closed_captions()) {
589 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
593 offset_from_start += reel_duration;
594 offset_from_end -= reel_duration;
604 boost::mutex::scoped_lock lm (_mutex);
607 /* We can't pass in this state */
608 LOG_DEBUG_PLAYER_NC ("Player is suspended");
612 if (_playback_length == DCPTime()) {
613 /* Special; just give one black frame */
614 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
618 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
620 shared_ptr<Piece> earliest_content;
621 optional<DCPTime> earliest_time;
623 for (auto i: _pieces) {
628 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
629 if (t > i->content->end(_film)) {
633 /* Given two choices at the same time, pick the one with texts so we see it before
636 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
638 earliest_content = i;
652 if (earliest_content) {
656 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
657 earliest_time = _black.position ();
661 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
662 earliest_time = _silent.position ();
669 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
670 earliest_content->done = earliest_content->decoder->pass ();
671 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
672 if (dcp && !_play_referenced && dcp->reference_audio()) {
673 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
674 to `hide' the fact that no audio was emitted during the referenced DCP (though
675 we need to behave as though it was).
677 _last_audio_time = dcp->end (_film);
682 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
683 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
684 _black.set_position (_black.position() + one_video_frame());
688 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
689 DCPTimePeriod period (_silent.period_at_position());
690 if (_last_audio_time) {
691 /* Sometimes the thing that happened last finishes fractionally before
692 or after this silence. Bodge the start time of the silence to fix it.
693 I think this is nothing to worry about since we will just add or
694 remove a little silence at the end of some content.
696 int64_t const error = labs(period.from.get() - _last_audio_time->get());
697 /* Let's not worry about less than a frame at 24fps */
698 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
699 if (error >= too_much_error) {
700 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
702 DCPOMATIC_ASSERT (error < too_much_error);
703 period.from = *_last_audio_time;
705 if (period.duration() > one_video_frame()) {
706 period.to = period.from + one_video_frame();
709 _silent.set_position (period.to);
717 /* Emit any audio that is ready */
719 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
720 of our streams, or the position of the _silent.
722 DCPTime pull_to = _playback_length;
723 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
724 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
725 pull_to = i->second.last_push_end;
728 if (!_silent.done() && _silent.position() < pull_to) {
729 pull_to = _silent.position();
732 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
733 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
734 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
735 if (_last_audio_time && i->second < *_last_audio_time) {
736 /* This new data comes before the last we emitted (or the last seek); discard it */
737 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
742 } else if (_last_audio_time && i->second > *_last_audio_time) {
743 /* There's a gap between this data and the last we emitted; fill with silence */
744 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
747 emit_audio (i->first, i->second);
752 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
753 do_emit_video(i->first, i->second);
760 /** @return Open subtitles for the frame at the given time, converted to images */
761 optional<PositionImage>
762 Player::open_subtitles_for_frame (DCPTime time) const
764 list<PositionImage> captions;
765 int const vfr = _film->video_frame_rate();
769 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
772 /* Bitmap subtitles */
773 for (auto i: j.bitmap) {
778 /* i.image will already have been scaled to fit _video_container_size */
779 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
785 lrint (_video_container_size.width * i.rectangle.x),
786 lrint (_video_container_size.height * i.rectangle.y)
792 /* String subtitles (rendered to an image) */
793 if (!j.string.empty ()) {
794 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
795 copy (s.begin(), s.end(), back_inserter (captions));
799 if (captions.empty ()) {
800 return optional<PositionImage> ();
803 return merge (captions);
807 Player::video (weak_ptr<Piece> wp, ContentVideo video)
809 shared_ptr<Piece> piece = wp.lock ();
814 if (!piece->content->video->use()) {
818 FrameRateChange frc (_film, piece->content);
819 if (frc.skip && (video.frame % 2) == 1) {
823 /* Time of the first frame we will emit */
824 DCPTime const time = content_video_to_dcp (piece, video.frame);
825 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
827 /* Discard if it's before the content's period or the last accurate seek. We can't discard
828 if it's after the content's period here as in that case we still need to fill any gap between
829 `now' and the end of the content's period.
831 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
835 /* Fill gaps that we discover now that we have some video which needs to be emitted.
836 This is where we need to fill to.
838 DCPTime fill_to = min (time, piece->content->end(_film));
840 if (_last_video_time) {
841 DCPTime fill_from = max (*_last_video_time, piece->content->position());
843 /* Fill if we have more than half a frame to do */
844 if ((fill_to - fill_from) > one_video_frame() / 2) {
845 LastVideoMap::const_iterator last = _last_video.find (wp);
846 if (_film->three_d()) {
847 Eyes fill_to_eyes = video.eyes;
848 if (fill_to_eyes == EYES_BOTH) {
849 fill_to_eyes = EYES_LEFT;
851 if (fill_to == piece->content->end(_film)) {
852 /* Don't fill after the end of the content */
853 fill_to_eyes = EYES_LEFT;
855 DCPTime j = fill_from;
856 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
857 if (eyes == EYES_BOTH) {
860 while (j < fill_to || eyes != fill_to_eyes) {
861 if (last != _last_video.end()) {
862 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
863 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
864 copy->set_eyes (eyes);
865 emit_video (copy, j);
867 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
868 emit_video (black_player_video_frame(eyes), j);
870 if (eyes == EYES_RIGHT) {
871 j += one_video_frame();
873 eyes = increment_eyes (eyes);
876 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
877 if (last != _last_video.end()) {
878 emit_video (last->second, j);
880 emit_video (black_player_video_frame(EYES_BOTH), j);
887 _last_video[wp].reset (
890 piece->content->video->crop (),
891 piece->content->video->fade (_film, video.frame),
892 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
893 _video_container_size,
896 piece->content->video->colour_conversion(),
897 piece->content->video->range(),
905 for (int i = 0; i < frc.repeat; ++i) {
906 if (t < piece->content->end(_film)) {
907 emit_video (_last_video[wp], t);
909 t += one_video_frame ();
914 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
916 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
918 shared_ptr<Piece> piece = wp.lock ();
923 shared_ptr<AudioContent> content = piece->content->audio;
924 DCPOMATIC_ASSERT (content);
926 int const rfr = content->resampled_frame_rate (_film);
928 /* Compute time in the DCP */
929 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
930 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
932 /* And the end of this block in the DCP */
933 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
935 /* Remove anything that comes before the start or after the end of the content */
936 if (time < piece->content->position()) {
937 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
939 /* This audio is entirely discarded */
942 content_audio.audio = cut.first;
944 } else if (time > piece->content->end(_film)) {
947 } else if (end > piece->content->end(_film)) {
948 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
949 if (remaining_frames == 0) {
952 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
955 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
959 if (content->gain() != 0) {
960 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
961 gain->apply_gain (content->gain ());
962 content_audio.audio = gain;
967 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
971 if (_audio_processor) {
972 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
977 _audio_merger.push (content_audio.audio, time);
978 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
979 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
983 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
985 shared_ptr<Piece> piece = wp.lock ();
986 shared_ptr<const TextContent> text = wc.lock ();
987 if (!piece || !text) {
991 /* Apply content's subtitle offsets */
992 subtitle.sub.rectangle.x += text->x_offset ();
993 subtitle.sub.rectangle.y += text->y_offset ();
995 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
996 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
997 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
999 /* Apply content's subtitle scale */
1000 subtitle.sub.rectangle.width *= text->x_scale ();
1001 subtitle.sub.rectangle.height *= text->y_scale ();
1004 shared_ptr<Image> image = subtitle.sub.image;
1006 /* We will scale the subtitle up to fit _video_container_size */
1007 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1008 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
1009 if (width == 0 || height == 0) {
1013 dcp::Size scaled_size (width, height);
1014 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1015 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1017 _active_texts[text->type()].add_from (wc, ps, from);
1021 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
1023 shared_ptr<Piece> piece = wp.lock ();
1024 shared_ptr<const TextContent> text = wc.lock ();
1025 if (!piece || !text) {
1030 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
1032 if (from > piece->content->end(_film)) {
1036 for (auto s: subtitle.subs) {
1037 s.set_h_position (s.h_position() + text->x_offset ());
1038 s.set_v_position (s.v_position() + text->y_offset ());
1039 float const xs = text->x_scale();
1040 float const ys = text->y_scale();
1041 float size = s.size();
1043 /* Adjust size to express the common part of the scaling;
1044 e.g. if xs = ys = 0.5 we scale size by 2.
1046 if (xs > 1e-5 && ys > 1e-5) {
1047 size *= 1 / min (1 / xs, 1 / ys);
1051 /* Then express aspect ratio changes */
1052 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1053 s.set_aspect_adjust (xs / ys);
1056 s.set_in (dcp::Time(from.seconds(), 1000));
1057 ps.string.push_back (StringText (s, text->outline_width()));
1058 ps.add_fonts (text->fonts ());
1061 _active_texts[text->type()].add_from (wc, ps, from);
1065 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1067 shared_ptr<const TextContent> text = wc.lock ();
1072 if (!_active_texts[text->type()].have(wc)) {
1076 shared_ptr<Piece> piece = wp.lock ();
1081 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1083 if (dcp_to > piece->content->end(_film)) {
1087 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1089 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1090 if (text->use() && !always && !text->burn()) {
1091 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1096 Player::seek (DCPTime time, bool accurate)
1098 boost::mutex::scoped_lock lm (_mutex);
1099 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1102 /* We can't seek in this state */
1107 _shuffler->clear ();
1112 if (_audio_processor) {
1113 _audio_processor->flush ();
1116 _audio_merger.clear ();
1117 for (int i = 0; i < TEXT_COUNT; ++i) {
1118 _active_texts[i].clear ();
1121 for (auto i: _pieces) {
1122 if (time < i->content->position()) {
1123 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1124 we must seek this (following) content accurately, otherwise when we come to the end of the current
1125 content we may not start right at the beginning of the next, causing a gap (if the next content has
1126 been trimmed to a point between keyframes, or something).
1128 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1130 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1131 /* During; seek to position */
1132 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1135 /* After; this piece is done */
1141 _last_video_time = time;
1142 _last_video_eyes = EYES_LEFT;
1143 _last_audio_time = time;
1145 _last_video_time = optional<DCPTime>();
1146 _last_video_eyes = optional<Eyes>();
1147 _last_audio_time = optional<DCPTime>();
1150 _black.set_position (time);
1151 _silent.set_position (time);
1153 _last_video.clear ();
1157 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1159 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1160 player before the video that requires them.
1162 _delay.push_back (make_pair (pv, time));
1164 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1165 _last_video_time = time + one_video_frame();
1167 _last_video_eyes = increment_eyes (pv->eyes());
1169 if (_delay.size() < 3) {
1173 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1175 do_emit_video (to_do.first, to_do.second);
1179 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1181 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1182 for (int i = 0; i < TEXT_COUNT; ++i) {
1183 _active_texts[i].clear_before (time);
1187 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1189 pv->set_text (subtitles.get ());
1196 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1198 /* Log if the assert below is about to fail */
1199 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1200 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1203 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1204 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1205 Audio (data, time, _film->audio_frame_rate());
1206 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1210 Player::fill_audio (DCPTimePeriod period)
1212 if (period.from == period.to) {
1216 DCPOMATIC_ASSERT (period.from < period.to);
1218 DCPTime t = period.from;
1219 while (t < period.to) {
1220 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1221 Frame const samples = block.frames_round(_film->audio_frame_rate());
1223 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1224 silence->make_silent ();
1225 emit_audio (silence, t);
1232 Player::one_video_frame () const
1234 return DCPTime::from_frames (1, _film->video_frame_rate ());
1237 pair<shared_ptr<AudioBuffers>, DCPTime>
1238 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1240 DCPTime const discard_time = discard_to - time;
1241 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1242 Frame remaining_frames = audio->frames() - discard_frames;
1243 if (remaining_frames <= 0) {
1244 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1246 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1247 return make_pair(cut, time + discard_time);
1251 Player::set_dcp_decode_reduction (optional<int> reduction)
1253 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1256 boost::mutex::scoped_lock lm (_mutex);
1258 if (reduction == _dcp_decode_reduction) {
1260 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1264 _dcp_decode_reduction = reduction;
1265 setup_pieces_unlocked ();
1268 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1272 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1274 boost::mutex::scoped_lock lm (_mutex);
1276 for (auto i: _pieces) {
1277 if (i->content == content) {
1278 return content_time_to_dcp (i, t);
1282 /* We couldn't find this content; perhaps things are being changed over */
1283 return optional<DCPTime>();
1287 shared_ptr<const Playlist>
1288 Player::playlist () const
1290 return _playlist ? _playlist : _film->playlist();
1295 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1297 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);