Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>

This file is part of DCP-o-matic.

DCP-o-matic is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

DCP-o-matic is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
84 using namespace dcpomatic;
86 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
87 int const PlayerProperty::PLAYLIST = 701;
88 int const PlayerProperty::FILM_CONTAINER = 702;
89 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
90 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
91 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* NOTE(review): this region has been mangled in extraction — many lines are
   elided and each surviving line is prefixed with its original line number.
   Code is left byte-for-byte as found; comments describe only what is visible.
   Two constructors: the second takes an explicit Playlist, otherwise both
   appear to initialise the same flags and the audio merger. */
93 Player::Player (shared_ptr<const Film> film)
96 , _ignore_video (false)
97 , _ignore_audio (false)
98 , _ignore_text (false)
99 , _always_burn_open_subtitles (false)
101 , _tolerant (film->tolerant())
102 , _play_referenced (false)
103 , _audio_merger (_film->audio_frame_rate())
/* Second constructor: same set-up but with a caller-supplied playlist. */
109 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
111 , _playlist (playlist_)
113 , _ignore_video (false)
114 , _ignore_audio (false)
115 , _ignore_text (false)
116 , _always_burn_open_subtitles (false)
118 , _tolerant (film->tolerant())
119 , _play_referenced (false)
120 , _audio_merger (_film->audio_frame_rate())
/* Shared construction body (its header is elided here): wire up change
   signals, set the container size, prime the audio processor and seek to
   the start of the DCP. */
129 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
130 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front puts this connection ahead of the others — presumably so the
   butler sees the change first (the comment above is truncated); confirm. */
133 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
134 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
135 set_video_container_size (_film->frame_size ());
137 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to time zero to put all decoders in a known state */
140 seek (DCPTime (), true);
149 Player::setup_pieces ()
151 boost::mutex::scoped_lock lm (_mutex);
152 setup_pieces_unlocked ();
157 have_video (shared_ptr<const Content> content)
159 return static_cast<bool>(content->video) && content->video->use();
163 have_audio (shared_ptr<const Content> content)
165 return static_cast<bool>(content->audio);
/* Rebuild the list of Pieces (content + decoder + frame-rate-change) from the
   playlist.  Caller must hold _mutex.  NOTE(review): lines are elided in this
   chunk and each surviving line carries its original line number; code is
   left byte-for-byte as found. */
169 Player::setup_pieces_unlocked ()
171 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so their decoders can be re-used below */
173 list<shared_ptr<Piece> > old_pieces = _pieces;
177 _shuffler = new Shuffler();
178 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
180 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
182 if (!i->paths_valid ()) {
186 if (_ignore_video && _ignore_audio && i->text.empty()) {
187 /* We're only interested in text and this content has none */
/* Re-use an existing decoder for this content where possible */
191 shared_ptr<Decoder> old_decoder;
192 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
193 if (j->content == i) {
194 old_decoder = j->decoder;
199 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
200 DCPOMATIC_ASSERT (decoder);
202 FrameRateChange frc (_film, i);
204 if (decoder->video && _ignore_video) {
205 decoder->video->set_ignore (true);
208 if (decoder->audio && _ignore_audio) {
209 decoder->audio->set_ignore (true);
213 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
214 i->set_ignore (true);
218 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
220 dcp->set_decode_referenced (_play_referenced);
221 if (_play_referenced) {
222 dcp->set_forced_reduction (_dcp_decode_reduction);
226 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
227 _pieces.push_back (piece);
229 if (decoder->video) {
230 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
231 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
232 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
234 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
238 if (decoder->audio) {
239 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Hook up start/stop signals for every text (subtitle/caption) decoder */
242 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
244 while (j != decoder->text.end()) {
245 (*j)->BitmapStart.connect (
246 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
248 (*j)->PlainStart.connect (
249 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
252 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
258 if (decoder->atmos) {
259 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Record, per audio stream, which piece it belongs to and where it starts */
263 _stream_states.clear ();
264 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
265 if (i->content->audio) {
266 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
267 _stream_states[j] = StreamState (i, i->content->position ());
/* Trackers for the gaps in video/audio which must be filled with black/silence */
272 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
273 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
275 _last_video_time = DCPTime ();
276 _last_video_eyes = EYES_BOTH;
277 _last_audio_time = DCPTime ();
/* React to a change in some content in the playlist.  NOTE(review): lines
   are elided in this chunk (the suspend/resume statements are missing);
   code left byte-for-byte as found. */
281 Player::playlist_content_change (ChangeType type, int property, bool frequent)
283 if (type == CHANGE_TYPE_PENDING) {
284 /* The player content is probably about to change, so we can't carry on
285 until that has happened and we've rebuilt our pieces. Stop pass()
286 and seek() from working until then.
289 } else if (type == CHANGE_TYPE_DONE) {
290 /* A change in our content has gone through.  Re-build our pieces. */
293 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Forward the change to our own observers */
297 Change (type, property, frequent);
/* Set the size of the container into which video is scaled, emitting
   PENDING / CANCELLED / DONE change signals around the update.
   NOTE(review): lines are elided here (the early-return / unlock code is
   missing); code left byte-for-byte as found. */
301 Player::set_video_container_size (dcp::Size s)
303 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
306 boost::mutex::scoped_lock lm (_mutex);
/* No-op if the size is unchanged: announce cancellation instead */
308 if (s == _video_container_size) {
310 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
314 _video_container_size = s;
/* Pre-build a black frame at the new size, used to fill gaps */
316 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
317 _black_image->make_black ();
320 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* React to a change in the playlist itself, forwarding it via Change.
   NOTE(review): lines are elided here (the rebuild call inside the `if' is
   missing); code left byte-for-byte as found. */
324 Player::playlist_change (ChangeType type)
326 if (type == CHANGE_TYPE_DONE) {
329 Change (type, PlayerProperty::PLAYLIST, false);
/* Map Film property changes onto our own Change signal, refreshing internal
   state where needed.  NOTE(review): lines are elided in this chunk; code
   left byte-for-byte as found. */
333 Player::film_change (ChangeType type, Film::Property p)
335 /* Here we should notice Film properties that affect our output, and
336 alert listeners that our output now would be different to how it was
337 last time we were run.
340 if (p == Film::CONTAINER) {
341 Change (type, PlayerProperty::FILM_CONTAINER, false);
342 } else if (p == Film::VIDEO_FRAME_RATE) {
343 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
344 so we need new pieces here.
346 if (type == CHANGE_TYPE_DONE) {
349 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
350 } else if (p == Film::AUDIO_PROCESSOR) {
/* Re-clone the film's audio processor at the film's audio rate */
351 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
352 boost::mutex::scoped_lock lm (_mutex);
353 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
355 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: any merged audio is now invalid */
356 if (type == CHANGE_TYPE_DONE) {
357 boost::mutex::scoped_lock lm (_mutex);
358 _audio_merger.clear ();
/* Make a black PlayerVideo frame for the given eyes, sized to the video
   container.  NOTE(review): several of the PlayerVideo constructor arguments
   are elided in this chunk; code left byte-for-byte as found. */
363 shared_ptr<PlayerVideo>
364 Player::black_player_video_frame (Eyes eyes) const
366 return shared_ptr<PlayerVideo> (
/* Wrap the pre-built black image (see set_video_container_size) */
368 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
371 _video_container_size,
372 _video_container_size,
375 PresetColourConversion::all().front().conversion,
/* No originating content or frame for a synthesised black frame */
377 boost::weak_ptr<Content>(),
378 boost::optional<Frame>(),
385 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
387 DCPTime s = t - piece->content->position ();
388 s = min (piece->content->length_after_trim(_film), s);
389 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
391 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
392 then convert that ContentTime to frames at the content's rate. However this fails for
393 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
394 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
396 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
398 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
402 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
404 /* See comment in dcp_to_content_video */
405 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
406 return d + piece->content->position();
410 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
412 DCPTime s = t - piece->content->position ();
413 s = min (piece->content->length_after_trim(_film), s);
414 /* See notes in dcp_to_content_video */
415 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
419 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
421 /* See comment in dcp_to_content_video */
422 return DCPTime::from_frames (f, _film->audio_frame_rate())
423 - DCPTime (piece->content->trim_start(), piece->frc)
424 + piece->content->position();
428 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
430 DCPTime s = t - piece->content->position ();
431 s = min (piece->content->length_after_trim(_film), s);
432 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
436 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
438 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
441 list<shared_ptr<Font> >
442 Player::get_subtitle_fonts ()
444 boost::mutex::scoped_lock lm (_mutex);
446 list<shared_ptr<Font> > fonts;
447 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
448 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
449 /* XXX: things may go wrong if there are duplicate font IDs
450 with different font files.
452 list<shared_ptr<Font> > f = j->fonts ();
453 copy (f.begin(), f.end(), back_inserter (fonts));
460 /** Set this player never to produce any video data */
462 Player::set_ignore_video ()
464 boost::mutex::scoped_lock lm (_mutex);
465 _ignore_video = true;
466 setup_pieces_unlocked ();
470 Player::set_ignore_audio ()
472 boost::mutex::scoped_lock lm (_mutex);
473 _ignore_audio = true;
474 setup_pieces_unlocked ();
478 Player::set_ignore_text ()
480 boost::mutex::scoped_lock lm (_mutex);
482 setup_pieces_unlocked ();
485 /** Set the player to always burn open texts into the image regardless of the content settings */
487 Player::set_always_burn_open_subtitles ()
489 boost::mutex::scoped_lock lm (_mutex);
490 _always_burn_open_subtitles = true;
493 /** Sets up the player to be faster, possibly at the expense of quality */
497 boost::mutex::scoped_lock lm (_mutex);
499 setup_pieces_unlocked ();
503 Player::set_play_referenced ()
505 boost::mutex::scoped_lock lm (_mutex);
506 _play_referenced = true;
507 setup_pieces_unlocked ();
511 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
513 DCPOMATIC_ASSERT (r);
514 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
515 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
516 if (r->actual_duration() > 0) {
518 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Collect the reel assets from DCP content which is marked to be referenced
   (re-used) rather than re-encoded, with appropriate trims and positions.
   NOTE(review): lines are elided in this chunk (continue statements, braces
   and error handling are missing); code left byte-for-byte as found. */
523 list<ReferencedReelAsset>
524 Player::get_reel_assets ()
526 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
528 list<ReferencedReelAsset> a;
530 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Only DCP content can be referenced */
531 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
536 scoped_ptr<DCPDecoder> decoder;
538 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
543 DCPOMATIC_ASSERT (j->video_frame_rate ());
544 double const cfr = j->video_frame_rate().get();
545 Frame const trim_start = j->trim_start().frames_round (cfr);
546 Frame const trim_end = j->trim_end().frames_round (cfr);
547 int const ffr = _film->video_frame_rate ();
549 /* position in the asset from the start */
550 int64_t offset_from_start = 0;
551 /* position in the asset from the end */
552 int64_t offset_from_end = 0;
/* First pass: sum reel durations so we can also work from the end */
553 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
554 /* Assume that main picture duration is the length of the reel */
555 offset_from_end += k->main_picture()->actual_duration();
558 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
560 /* Assume that main picture duration is the length of the reel */
561 int64_t const reel_duration = k->main_picture()->actual_duration();
563 /* See doc/design/trim_reels.svg */
564 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
565 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
567 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
568 if (j->reference_video ()) {
569 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
572 if (j->reference_audio ()) {
573 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
576 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
577 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
580 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
581 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
582 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
/* Advance to the next reel */
586 offset_from_start += reel_duration;
587 offset_from_end -= reel_duration;
/* One pass of the player: make the most-behind decoder (or black/silence
   gap-filler) emit some data, then flush any audio that is now complete.
   The function's header is elided from this chunk (it appears to return a
   `done' flag — confirm).  NOTE(review): many lines are elided here and each
   surviving line carries its original line number; code left as found. */
597 boost::mutex::scoped_lock lm (_mutex);
600 /* We can't pass in this state */
601 LOG_DEBUG_PLAYER_NC ("Player is suspended");
605 if (_playback_length == DCPTime()) {
606 /* Special; just give one black frame */
607 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
611 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
613 shared_ptr<Piece> earliest_content;
614 optional<DCPTime> earliest_time;
616 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, in DCP time */
621 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
622 if (t > i->content->end(_film)) {
626 /* Given two choices at the same time, pick the one with texts so we see it before
629 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
631 earliest_content = i;
645 if (earliest_content) {
/* A black or silent gap can pre-empt the earliest content if it is further behind */
649 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
650 earliest_time = _black.position ();
654 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
655 earliest_time = _silent.position ();
/* The switch on what to emit is elided here; the branches follow */
662 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
663 earliest_content->done = earliest_content->decoder->pass ();
664 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
665 if (dcp && !_play_referenced && dcp->reference_audio()) {
666 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
667 to `hide' the fact that no audio was emitted during the referenced DCP (though
668 we need to behave as though it was).
670 _last_audio_time = dcp->end (_film);
/* Gap in the video: emit one black frame */
675 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
676 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
677 _black.set_position (_black.position() + one_video_frame());
/* Gap in the audio: emit some silence */
681 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
682 DCPTimePeriod period (_silent.period_at_position());
683 if (_last_audio_time) {
684 /* Sometimes the thing that happened last finishes fractionally before
685 or after this silence. Bodge the start time of the silence to fix it.
686 I think this is nothing to worry about since we will just add or
687 remove a little silence at the end of some content.
689 int64_t const error = labs(period.from.get() - _last_audio_time->get());
690 /* Let's not worry about less than a frame at 24fps */
691 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
692 if (error >= too_much_error) {
693 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
695 DCPOMATIC_ASSERT (error < too_much_error);
696 period.from = *_last_audio_time;
/* Emit at most one video frame's worth of silence per pass */
698 if (period.duration() > one_video_frame()) {
699 period.to = period.from + one_video_frame();
702 _silent.set_position (period.to);
710 /* Emit any audio that is ready */
712 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
713 of our streams, or the position of the _silent.
715 DCPTime pull_to = _playback_length;
716 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
717 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
718 pull_to = i->second.last_push_end;
721 if (!_silent.done() && _silent.position() < pull_to) {
722 pull_to = _silent.position();
725 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
726 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
727 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
728 if (_last_audio_time && i->second < *_last_audio_time) {
729 /* This new data comes before the last we emitted (or the last seek); discard it */
730 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
735 } else if (_last_audio_time && i->second > *_last_audio_time) {
736 /* There's a gap between this data and the last we emitted; fill with silence */
737 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
740 emit_audio (i->first, i->second);
/* Flush anything still queued in the video delay buffer — presumably only
   when the player is done (the surrounding condition is elided); confirm. */
745 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
746 do_emit_video(i->first, i->second);
/* NOTE(review): lines are elided in this chunk (the loop over the burnt
   texts and the PositionImage construction are partially missing); code
   left byte-for-byte as found. */
753 /** @return Open subtitles for the frame at the given time, converted to images */
754 optional<PositionImage>
755 Player::open_subtitles_for_frame (DCPTime time) const
757 list<PositionImage> captions;
758 int const vfr = _film->video_frame_rate();
/* Texts to be burnt into this one-frame period (loop header elided) */
762 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
765 /* Bitmap subtitles */
766 BOOST_FOREACH (BitmapText i, j.bitmap) {
771 /* i.image will already have been scaled to fit _video_container_size */
772 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Position is expressed as a proportion of the container */
778 lrint (_video_container_size.width * i.rectangle.x),
779 lrint (_video_container_size.height * i.rectangle.y)
785 /* String subtitles (rendered to an image) */
786 if (!j.string.empty ()) {
787 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
788 copy (s.begin(), s.end(), back_inserter (captions));
792 if (captions.empty ()) {
793 return optional<PositionImage> ();
/* Combine all the caption images into one */
796 return merge (captions);
/* Handler for video data arriving from a content decoder.  Converts the
   content frame to DCP time, fills any preceding gap with repeated or black
   frames (handling 3D eyes), then emits the frame, repeated as the frame
   rate change requires.  NOTE(review): lines are elided in this chunk and
   each surviving line carries its original line number; code left as found. */
800 Player::video (weak_ptr<Piece> wp, ContentVideo video)
802 shared_ptr<Piece> piece = wp.lock ();
807 if (!piece->content->video->use()) {
811 FrameRateChange frc (_film, piece->content);
/* Skipping rate change: drop every other frame */
812 if (frc.skip && (video.frame % 2) == 1) {
816 /* Time of the first frame we will emit */
817 DCPTime const time = content_video_to_dcp (piece, video.frame);
818 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
820 /* Discard if it's before the content's period or the last accurate seek. We can't discard
821 if it's after the content's period here as in that case we still need to fill any gap between
822 `now' and the end of the content's period.
824 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
828 /* Fill gaps that we discover now that we have some video which needs to be emitted.
829 This is where we need to fill to.
831 DCPTime fill_to = min (time, piece->content->end(_film));
833 if (_last_video_time) {
834 DCPTime fill_from = max (*_last_video_time, piece->content->position());
836 /* Fill if we have more than half a frame to do */
837 if ((fill_to - fill_from) > one_video_frame() / 2) {
838 LastVideoMap::const_iterator last = _last_video.find (wp);
839 if (_film->three_d()) {
840 Eyes fill_to_eyes = video.eyes;
841 if (fill_to_eyes == EYES_BOTH) {
842 fill_to_eyes = EYES_LEFT;
844 if (fill_to == piece->content->end(_film)) {
845 /* Don't fill after the end of the content */
846 fill_to_eyes = EYES_LEFT;
848 DCPTime j = fill_from;
849 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
850 if (eyes == EYES_BOTH) {
/* Emit alternate-eye frames (last-seen or black) up to fill_to */
853 while (j < fill_to || eyes != fill_to_eyes) {
854 if (last != _last_video.end()) {
855 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
856 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
857 copy->set_eyes (eyes);
858 emit_video (copy, j);
860 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
861 emit_video (black_player_video_frame(eyes), j);
/* Only advance time after the right eye of a pair */
863 if (eyes == EYES_RIGHT) {
864 j += one_video_frame();
866 eyes = increment_eyes (eyes);
/* 2D: repeat the last frame (or black) once per video frame */
869 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
870 if (last != _last_video.end()) {
871 emit_video (last->second, j);
873 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame (some constructor arguments elided) */
880 _last_video[wp].reset (
883 piece->content->video->crop (),
884 piece->content->video->fade (_film, video.frame),
885 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
886 _video_container_size,
889 piece->content->video->colour_conversion(),
890 piece->content->video->range(),
/* Emit the frame, repeated as required by the frame rate change */
898 for (int i = 0; i < frc.repeat; ++i) {
899 if (t < piece->content->end(_film)) {
900 emit_video (_last_video[wp], t);
902 t += one_video_frame ();
/* Handler for audio data arriving from a content decoder: trim it to the
   content's DCP period, apply gain, remap channels, run the audio processor
   and push the result into the merger.  NOTE(review): lines are elided in
   this chunk; code left byte-for-byte as found. */
907 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
909 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
911 shared_ptr<Piece> piece = wp.lock ();
916 shared_ptr<AudioContent> content = piece->content->audio;
917 DCPOMATIC_ASSERT (content);
919 int const rfr = content->resampled_frame_rate (_film);
921 /* Compute time in the DCP */
922 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
923 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
925 /* And the end of this block in the DCP */
926 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
928 /* Remove anything that comes before the start or after the end of the content */
929 if (time < piece->content->position()) {
930 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
932 /* This audio is entirely discarded */
935 content_audio.audio = cut.first;
937 } else if (time > piece->content->end(_film)) {
940 } else if (end > piece->content->end(_film)) {
/* Block overlaps the end of the content: keep only the part inside */
941 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
942 if (remaining_frames == 0) {
945 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
948 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain */
952 if (content->gain() != 0) {
953 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
954 gain->apply_gain (content->gain ());
955 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
960 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
/* Process */
964 if (_audio_processor) {
965 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and record how far this stream has got */
970 _audio_merger.push (content_audio.audio, time);
971 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
972 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a bitmap subtitle arriving from a text decoder: apply the
   content's offsets and scales, scale the image to the container size and
   store it in _active_texts.  NOTE(review): lines are elided in this chunk;
   code left byte-for-byte as found. */
976 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
978 shared_ptr<Piece> piece = wp.lock ();
979 shared_ptr<const TextContent> text = wc.lock ();
980 if (!piece || !text) {
984 /* Apply content's subtitle offsets */
985 subtitle.sub.rectangle.x += text->x_offset ();
986 subtitle.sub.rectangle.y += text->y_offset ();
988 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
989 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
990 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
992 /* Apply content's subtitle scale */
993 subtitle.sub.rectangle.width *= text->x_scale ();
994 subtitle.sub.rectangle.height *= text->y_scale ();
997 shared_ptr<Image> image = subtitle.sub.image;
999 /* We will scale the subtitle up to fit _video_container_size */
1000 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1001 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* Degenerate subtitle: nothing to draw */
1002 if (width == 0 || height == 0) {
1006 dcp::Size scaled_size (width, height);
1007 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1008 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1010 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a string subtitle arriving from a text decoder: apply the
   content's offsets, scale and timing, then store it in _active_texts.
   NOTE(review): lines are elided in this chunk; code left as found. */
1014 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
1016 shared_ptr<Piece> piece = wp.lock ();
1017 shared_ptr<const TextContent> text = wc.lock ();
1018 if (!piece || !text) {
1023 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Starts after the end of the content: ignore it */
1025 if (from > piece->content->end(_film)) {
1029 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
1030 s.set_h_position (s.h_position() + text->x_offset ());
1031 s.set_v_position (s.v_position() + text->y_offset ());
1032 float const xs = text->x_scale();
1033 float const ys = text->y_scale();
1034 float size = s.size();
1036 /* Adjust size to express the common part of the scaling;
1037 e.g. if xs = ys = 0.5 we scale size by 2.
1039 if (xs > 1e-5 && ys > 1e-5) {
1040 size *= 1 / min (1 / xs, 1 / ys);
1044 /* Then express aspect ratio changes */
1045 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1046 s.set_aspect_adjust (xs / ys);
/* Subtitle `in' time with millisecond resolution */
1049 s.set_in (dcp::Time(from.seconds(), 1000));
1050 ps.string.push_back (StringText (s, text->outline_width()));
1051 ps.add_fonts (text->fonts ());
1054 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for the end of a subtitle: close the matching active text and, if
   it is not being burnt into the image, emit it via the Text signal.
   NOTE(review): lines are elided in this chunk; code left as found. */
1058 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1060 shared_ptr<const TextContent> text = wc.lock ();
/* A stop without a matching start: ignore */
1065 if (!_active_texts[text->type()].have(wc)) {
1069 shared_ptr<Piece> piece = wp.lock ();
1074 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ends after the end of the content: ignore */
1076 if (dcp_to > piece->content->end(_film)) {
1080 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Burnt-in texts are composited in do_emit_video rather than emitted here */
1082 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1083 if (text->use() && !always && !text->burn()) {
1084 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to a DCP time.
   @param time Time to seek to.
   @param accurate true to seek exactly to `time'; false to seek to some
   nearby convenient point.  NOTE(review): lines are elided in this chunk
   (several branch conditions are missing); code left as found. */
1089 Player::seek (DCPTime time, bool accurate)
1091 boost::mutex::scoped_lock lm (_mutex);
1092 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1095 /* We can't seek in this state */
1100 _shuffler->clear ();
/* Discard any part-processed audio and active texts */
1105 if (_audio_processor) {
1106 _audio_processor->flush ();
1109 _audio_merger.clear ();
1110 for (int i = 0; i < TEXT_COUNT; ++i) {
1111 _active_texts[i].clear ();
1114 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1115 if (time < i->content->position()) {
1116 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1117 we must seek this (following) content accurately, otherwise when we come to the end of the current
1118 content we may not start right at the beginning of the next, causing a gap (if the next content has
1119 been trimmed to a point between keyframes, or something).
1121 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1123 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1124 /* During; seek to position */
1125 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1128 /* After; this piece is done */
/* For accurate seeks we know exactly where emission will resume (the branch
   condition is elided here); otherwise the resume times are left unset. */
1134 _last_video_time = time;
1135 _last_video_eyes = EYES_LEFT;
1136 _last_audio_time = time;
1138 _last_video_time = optional<DCPTime>();
1139 _last_video_eyes = optional<Eyes>();
1140 _last_audio_time = optional<DCPTime>();
/* Restart the gap-fillers from the seek point */
1143 _black.set_position (time);
1144 _silent.set_position (time);
1146 _last_video.clear ();
/* Queue a video frame for emission, delaying it slightly so that any
   subtitles it needs have a chance to arrive first.  NOTE(review): lines
   are elided in this chunk; code left byte-for-byte as found. */
1150 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1152 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1153 player before the video that requires them.
1155 _delay.push_back (make_pair (pv, time));
/* Advance our idea of the last video time/eyes emitted */
1157 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1158 _last_video_time = time + one_video_frame();
1160 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep a short queue; only emit once enough frames are buffered */
1162 if (_delay.size() < 3) {
1166 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1168 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire old active texts, burn in any open
   subtitles for this time.  NOTE(review): the final signal emission line is
   elided from this chunk; code left byte-for-byte as found. */
1172 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Only expire texts once per frame pair (on BOTH or the RIGHT eye) */
1174 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1175 for (int i = 0; i < TEXT_COUNT; ++i) {
1176 _active_texts[i].clear_before (time);
1180 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1182 pv->set_text (subtitles.get ());
/* Emit some audio via the Audio signal and advance _last_audio_time.
   Emitted audio must be contiguous; see the assert below. */
1189 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1191 /* Log if the assert below is about to fail */
1192 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1193 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1196 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1197 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1198 Audio (data, time, _film->audio_frame_rate());
1199 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Fill a period with silence, emitting it in blocks of at most half a
   second.  NOTE(review): a few lines are elided here (e.g. the loop-variable
   advance); code left byte-for-byte as found. */
1203 Player::fill_audio (DCPTimePeriod period)
/* Empty period: nothing to do */
1205 if (period.from == period.to) {
1209 DCPOMATIC_ASSERT (period.from < period.to);
1211 DCPTime t = period.from;
1212 while (t < period.to) {
1213 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1214 Frame const samples = block.frames_round(_film->audio_frame_rate());
1216 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1217 silence->make_silent ();
1218 emit_audio (silence, t);
1225 Player::one_video_frame () const
1227 return DCPTime::from_frames (1, _film->video_frame_rate ());
1230 pair<shared_ptr<AudioBuffers>, DCPTime>
1231 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1233 DCPTime const discard_time = discard_to - time;
1234 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1235 Frame remaining_frames = audio->frames() - discard_frames;
1236 if (remaining_frames <= 0) {
1237 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1239 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1240 return make_pair(cut, time + discard_time);
1244 Player::set_dcp_decode_reduction (optional<int> reduction)
1246 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1249 boost::mutex::scoped_lock lm (_mutex);
1251 if (reduction == _dcp_decode_reduction) {
1253 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1257 _dcp_decode_reduction = reduction;
1258 setup_pieces_unlocked ();
1261 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1265 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1267 boost::mutex::scoped_lock lm (_mutex);
1269 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1270 if (i->content == content) {
1271 return content_time_to_dcp (i, t);
1275 /* We couldn't find this content; perhaps things are being changed over */
1276 return optional<DCPTime>();
1280 shared_ptr<const Playlist>
1281 Player::playlist () const
1283 return _playlist ? _playlist : _film->playlist();
1288 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1290 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);