2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "dcpomatic_log.h"
29 #include "raw_image_proxy.h"
32 #include "render_text.h"
34 #include "content_video.h"
35 #include "player_video.h"
36 #include "frame_rate_change.h"
37 #include "audio_processor.h"
39 #include "referenced_reel_asset.h"
40 #include "decoder_factory.h"
42 #include "video_decoder.h"
43 #include "audio_decoder.h"
44 #include "text_content.h"
45 #include "text_decoder.h"
46 #include "ffmpeg_content.h"
47 #include "audio_content.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
54 #include <dcp/reel_sound_asset.h>
55 #include <dcp/reel_subtitle_asset.h>
56 #include <dcp/reel_picture_asset.h>
57 #include <dcp/reel_closed_caption_asset.h>
58 #include <boost/foreach.hpp>
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80 using namespace dcpomatic;
/* Property identifiers passed with the Change signal so listeners can tell
   which aspect of the player's output has changed. */
82 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
83 int const PlayerProperty::PLAYLIST = 701;
84 int const PlayerProperty::FILM_CONTAINER = 702;
85 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
86 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
87 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player which plays the film's own playlist.
   NOTE(review): this view of the file is elided — parts of the initialiser
   list and the constructor body are not visible here. */
89 Player::Player (shared_ptr<const Film> film)
92 , _ignore_video (false)
93 , _ignore_audio (false)
94 , _ignore_text (false)
95 , _always_burn_open_subtitles (false)
97 , _tolerant (film->tolerant())
98 , _play_referenced (false)
/* The merger works at the film's audio rate throughout */
99 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player which plays an explicit playlist rather than the
   film's own.  NOTE(review): elided view — some initialisers and body
   lines are missing here. */
105 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
107 , _playlist (playlist_)
109 , _ignore_video (false)
110 , _ignore_audio (false)
111 , _ignore_text (false)
112 , _always_burn_open_subtitles (false)
114 , _tolerant (film->tolerant())
115 , _play_referenced (false)
116 , _audio_merger (_film->audio_frame_rate())
125 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
126 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front ensures this Player's handler runs before other slots on the signal */
129 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
130 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
131 set_video_container_size (_film->frame_size ());
/* Kick the audio processor setup as if the film's processor had just changed */
133 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so playback begins in a defined state */
136 seek (DCPTime (), true);
/* Rebuild the Piece list; public entry point which takes the mutex and
   delegates to setup_pieces_unlocked(). */
145 Player::setup_pieces ()
147 boost::mutex::scoped_lock lm (_mutex);
148 setup_pieces_unlocked ();
/* @return true if this content has video and that video is in use. */
153 have_video (shared_ptr<const Content> content)
155 return static_cast<bool>(content->video) && content->video->use();
/* @return true if this content has any audio. */
159 have_audio (shared_ptr<const Content> content)
161 return static_cast<bool>(content->audio);
/* Rebuild _pieces (one Piece per playable content item, each with its
   decoder and frame-rate change), re-wire decoder signals to our handlers,
   and reset the black/silence fillers and emission clocks.
   Caller must hold _mutex.
   NOTE(review): this view of the file is elided — closing braces and some
   statements are not visible between the lines below. */
165 Player::setup_pieces_unlocked ()
167 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so existing decoders can be re-used below */
169 list<shared_ptr<Piece> > old_pieces = _pieces;
173 _shuffler = new Shuffler();
174 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
176 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Skip content whose files have gone missing */
178 if (!i->paths_valid ()) {
182 if (_ignore_video && _ignore_audio && i->text.empty()) {
183 /* We're only interested in text and this content has none */
/* Re-use the decoder from the previous set of pieces if this content had one */
187 shared_ptr<Decoder> old_decoder;
188 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
189 if (j->content == i) {
190 old_decoder = j->decoder;
195 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
196 FrameRateChange frc (_film, i);
199 /* Not something that we can decode; e.g. Atmos content */
203 if (decoder->video && _ignore_video) {
204 decoder->video->set_ignore (true);
207 if (decoder->audio && _ignore_audio) {
208 decoder->audio->set_ignore (true);
212 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
213 i->set_ignore (true);
/* DCP content gets extra configuration for playing referenced assets */
217 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
219 dcp->set_decode_referenced (_play_referenced);
220 if (_play_referenced) {
221 dcp->set_forced_reduction (_dcp_decode_reduction);
225 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
226 _pieces.push_back (piece);
228 if (decoder->video) {
229 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
230 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
231 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
233 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
237 if (decoder->audio) {
238 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Wire up every text decoder's start/stop signals */
241 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
243 while (j != decoder->text.end()) {
244 (*j)->BitmapStart.connect (
245 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
247 (*j)->PlainStart.connect (
248 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
251 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Track per-stream push positions so pass() knows how far audio is complete */
258 _stream_states.clear ();
259 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
260 if (i->content->audio) {
261 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
262 _stream_states[j] = StreamState (i, i->content->position ());
/* Fillers for the periods with no video / no audio */
267 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
268 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
/* Reset the emission clocks to the start */
270 _last_video_time = DCPTime ();
271 _last_video_eyes = EYES_BOTH;
272 _last_audio_time = DCPTime ();
/* Handler for ContentChange on the playlist: suspend on PENDING, rebuild
   pieces on DONE, and always forward the change to our own listeners.
   NOTE(review): elided view — the suspend/resume statements themselves are
   not visible here. */
276 Player::playlist_content_change (ChangeType type, int property, bool frequent)
278 if (type == CHANGE_TYPE_PENDING) {
279 /* The player content is probably about to change, so we can't carry on
280 until that has happened and we've rebuilt our pieces. Stop pass()
281 and seek() from working until then.
284 } else if (type == CHANGE_TYPE_DONE) {
285 /* A change in our content has gone through. Re-build our pieces. */
288 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Forward the change notification to our own observers */
292 Change (type, property, frequent);
/* Set the size of the video container (the area into which video is placed),
   rebuilding the black frame image to match.  Emits PENDING/DONE (or
   CANCELLED if the size is unchanged) around the update. */
296 Player::set_video_container_size (dcp::Size s)
298 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
301 boost::mutex::scoped_lock lm (_mutex);
/* No-op: cancel the pending change rather than emitting DONE */
303 if (s == _video_container_size) {
305 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
309 _video_container_size = s;
/* Pre-render a black frame at the new size for gap filling */
311 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
312 _black_image->make_black ();
315 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handler for Change on the playlist; rebuilds pieces when the change is
   complete and forwards the notification. */
319 Player::playlist_change (ChangeType type)
321 if (type == CHANGE_TYPE_DONE) {
324 Change (type, PlayerProperty::PLAYLIST, false);
/* Handler for Change on the Film: react to film properties which affect
   our output and re-emit them as player property changes. */
328 Player::film_change (ChangeType type, Film::Property p)
330 /* Here we should notice Film properties that affect our output, and
331 alert listeners that our output now would be different to how it was
332 last time we were run.
335 if (p == Film::CONTAINER) {
336 Change (type, PlayerProperty::FILM_CONTAINER, false);
337 } else if (p == Film::VIDEO_FRAME_RATE) {
338 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
339 so we need new pieces here.
341 if (type == CHANGE_TYPE_DONE) {
344 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
345 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor at our audio rate */
346 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
347 boost::mutex::scoped_lock lm (_mutex);
348 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
350 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: any merged-but-unemitted audio is now invalid */
351 if (type == CHANGE_TYPE_DONE) {
352 boost::mutex::scoped_lock lm (_mutex);
353 _audio_merger.clear ();
/* @return a PlayerVideo wrapping the pre-rendered black image, sized to the
   video container, for the given eyes.  Used to fill gaps where there is no
   video.  NOTE(review): elided view — some constructor arguments are not
   visible here. */
358 shared_ptr<PlayerVideo>
359 Player::black_player_video_frame (Eyes eyes) const
361 return shared_ptr<PlayerVideo> (
363 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
366 _video_container_size,
367 _video_container_size,
370 PresetColourConversion::all().front().conversion,
/* No originating content/frame for a synthetic black frame */
372 boost::weak_ptr<Content>(),
373 boost::optional<Frame>(),
/* Convert a DCP time to a frame index within the given piece's video,
   clamping to the piece's trimmed extent. */
380 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
382 DCPTime s = t - piece->content->position ();
383 s = min (piece->content->length_after_trim(_film), s);
384 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
386 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
387 then convert that ContentTime to frames at the content's rate. However this fails for
388 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
389 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
391 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
393 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content video frame index to
   the DCP time at which it is presented. */
397 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
399 /* See comment in dcp_to_content_video */
400 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
401 return d + piece->content->position();
/* Convert a DCP time to a frame count at the film's audio rate within the
   given piece, clamped to the piece's trimmed extent. */
405 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
407 DCPTime s = t - piece->content->position ();
408 s = min (piece->content->length_after_trim(_film), s);
409 /* See notes in dcp_to_content_video */
410 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert a resampled-audio frame index
   to the DCP time at which it is presented. */
414 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
416 /* See comment in dcp_to_content_video */
417 return DCPTime::from_frames (f, _film->audio_frame_rate())
418 - DCPTime (piece->content->trim_start(), piece->frc)
419 + piece->content->position();
/* Convert a DCP time to a ContentTime within the given piece, clamped to
   the piece's trimmed extent. */
423 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
425 DCPTime s = t - piece->content->position ();
426 s = min (piece->content->length_after_trim(_film), s);
427 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within the given piece to DCP time, clamped so it
   is never before time zero. */
431 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
433 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts declared by every text content in every piece.
   Takes _mutex. */
436 list<shared_ptr<Font> >
437 Player::get_subtitle_fonts ()
439 boost::mutex::scoped_lock lm (_mutex);
441 list<shared_ptr<Font> > fonts;
442 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
443 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
444 /* XXX: things may go wrong if there are duplicate font IDs
445 with different font files.
447 list<shared_ptr<Font> > f = j->fonts ();
448 copy (f.begin(), f.end(), back_inserter (fonts));
455 /** Set this player never to produce any video data */
457 Player::set_ignore_video ()
459 boost::mutex::scoped_lock lm (_mutex);
460 _ignore_video = true;
/* Rebuild pieces so decoders pick up the new ignore flag */
461 setup_pieces_unlocked ();
/* Set this player never to produce any audio data. */
465 Player::set_ignore_audio ()
467 boost::mutex::scoped_lock lm (_mutex);
468 _ignore_audio = true;
469 setup_pieces_unlocked ();
/* Set this player never to produce any text data.
   NOTE(review): the line assigning _ignore_text is elided from this view. */
473 Player::set_ignore_text ()
475 boost::mutex::scoped_lock lm (_mutex);
477 setup_pieces_unlocked ();
480 /** Set the player to always burn open texts into the image regardless of the content settings */
482 Player::set_always_burn_open_subtitles ()
484 boost::mutex::scoped_lock lm (_mutex);
485 _always_burn_open_subtitles = true;
488 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and the flag assignment are elided
   from this view; presumably this is set_fast() setting _fast = true. */
492 boost::mutex::scoped_lock lm (_mutex);
494 setup_pieces_unlocked ();
/* Make the player actually decode and play content that a DCP refers to,
   rather than skipping it (see setup_pieces_unlocked). */
498 Player::set_play_referenced ()
500 boost::mutex::scoped_lock lm (_mutex);
501 _play_referenced = true;
502 setup_pieces_unlocked ();
/* Trim a reel asset's entry point and duration by the given frame counts
   and, if anything remains, append it to the list with its DCP period
   (computed at the film frame rate ffr starting at `from'). */
506 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
508 DCPOMATIC_ASSERT (r);
/* Missing entry point is treated as 0 before applying the trim */
509 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
510 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
511 if (r->actual_duration() > 0) {
513 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Gather the reel assets (picture/sound/subtitle/closed-caption) that are
   referenced (rather than re-encoded) from DCP content in the playlist,
   each trimmed and placed at its DCP time.
   NOTE(review): elided view — some statements (e.g. error handling around
   the decoder creation) are not visible here. */
518 list<ReferencedReelAsset>
519 Player::get_reel_assets ()
521 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
523 list<ReferencedReelAsset> a;
525 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Only DCP content can reference reel assets */
526 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
531 scoped_ptr<DCPDecoder> decoder;
533 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
538 DCPOMATIC_ASSERT (j->video_frame_rate ());
539 double const cfr = j->video_frame_rate().get();
540 Frame const trim_start = j->trim_start().frames_round (cfr);
541 Frame const trim_end = j->trim_end().frames_round (cfr);
542 int const ffr = _film->video_frame_rate ();
544 /* position in the asset from the start */
545 int64_t offset_from_start = 0;
546 /* position in the asset from the end */
547 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down */
548 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
549 /* Assume that main picture duration is the length of the reel */
550 offset_from_end += k->main_picture()->actual_duration();
553 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
555 /* Assume that main picture duration is the length of the reel */
556 int64_t const reel_duration = k->main_picture()->actual_duration();
558 /* See doc/design/trim_reels.svg */
559 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
560 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
562 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
563 if (j->reference_video ()) {
564 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
567 if (j->reference_audio ()) {
568 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
571 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
572 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
575 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
576 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
577 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
581 offset_from_start += reel_duration;
582 offset_from_end -= reel_duration;
/* NOTE(review): the function header is elided from this view; from the body
   this is presumably Player::pass(): make the decoder (or black/silence
   filler) which is farthest behind emit some data, then flush any audio
   which is known to be complete. */
592 boost::mutex::scoped_lock lm (_mutex);
595 /* We can't pass in this state */
599 if (_playback_length == DCPTime()) {
600 /* Special; just give one black frame */
601 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
605 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
607 shared_ptr<Piece> earliest_content;
608 optional<DCPTime> earliest_time;
610 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* A piece's current time is its decoder position, never before its trim start */
615 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
616 if (t > i->content->end(_film)) {
620 /* Given two choices at the same time, pick the one with texts so we see it before
623 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
625 earliest_content = i;
639 if (earliest_content) {
/* The black/silence fillers may be further behind than any decoder */
643 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
644 earliest_time = _black.position ();
648 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
649 earliest_time = _silent.position ();
/* Content case: let the farthest-behind decoder do some work */
656 earliest_content->done = earliest_content->decoder->pass ();
657 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
658 if (dcp && !_play_referenced && dcp->reference_audio()) {
659 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
660 to `hide' the fact that no audio was emitted during the referenced DCP (though
661 we need to behave as though it was).
663 _last_audio_time = dcp->end (_film);
/* Black case: emit one black frame and advance the filler */
668 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
669 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
670 _black.set_position (_black.position() + one_video_frame());
/* Silence case: emit up to one video frame's worth of silence */
674 DCPTimePeriod period (_silent.period_at_position());
675 if (_last_audio_time) {
676 /* Sometimes the thing that happened last finishes fractionally before
677 or after this silence. Bodge the start time of the silence to fix it.
678 I think this is nothing to worry about since we will just add or
679 remove a little silence at the end of some content.
681 int64_t const error = labs(period.from.get() - _last_audio_time->get());
682 /* Let's not worry about less than a frame at 24fps */
683 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
684 if (error >= too_much_error) {
685 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
687 DCPOMATIC_ASSERT (error < too_much_error);
688 period.from = *_last_audio_time;
690 if (period.duration() > one_video_frame()) {
691 period.to = period.from + one_video_frame();
694 _silent.set_position (period.to);
702 /* Emit any audio that is ready */
704 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
705 of our streams, or the position of the _silent.
707 DCPTime pull_to = _playback_length;
708 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
709 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
710 pull_to = i->second.last_push_end;
713 if (!_silent.done() && _silent.position() < pull_to) {
714 pull_to = _silent.position();
717 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
718 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
719 if (_last_audio_time && i->second < *_last_audio_time) {
720 /* This new data comes before the last we emitted (or the last seek); discard it */
721 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
726 } else if (_last_audio_time && i->second > *_last_audio_time) {
727 /* There's a gap between this data and the last we emitted; fill with silence */
728 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
731 emit_audio (i->first, i->second);
/* Flush the video delay queue when playback is complete */
736 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
737 do_emit_video(i->first, i->second);
744 /** @return Open subtitles for the frame at the given time, converted to images */
745 optional<PositionImage>
746 Player::open_subtitles_for_frame (DCPTime time) const
748 list<PositionImage> captions;
749 int const vfr = _film->video_frame_rate();
/* Collect active open subtitles over the one-frame period starting at `time' */
753 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
756 /* Bitmap subtitles */
757 BOOST_FOREACH (BitmapText i, j.bitmap) {
762 /* i.image will already have been scaled to fit _video_container_size */
/* Rectangle coordinates are fractions of the container size */
763 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
769 lrint (_video_container_size.width * i.rectangle.x),
770 lrint (_video_container_size.height * i.rectangle.y)
776 /* String subtitles (rendered to an image) */
777 if (!j.string.empty ()) {
778 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
779 copy (s.begin(), s.end(), back_inserter (captions));
783 if (captions.empty ()) {
784 return optional<PositionImage> ();
/* Merge all the caption images into a single positioned image */
787 return merge (captions);
/* Handler for video data arriving from a piece's decoder: compute the DCP
   time of the frame, fill any gap since the last emitted video (repeating
   the last frame or emitting black), then build a PlayerVideo and emit it
   (repeated as required by the frame-rate change).
   NOTE(review): elided view — some statements (early returns, PlayerVideo
   constructor arguments, the declaration of `t') are not visible here. */
791 Player::video (weak_ptr<Piece> wp, ContentVideo video)
793 shared_ptr<Piece> piece = wp.lock ();
798 if (!piece->content->video->use()) {
802 FrameRateChange frc (_film, piece->content);
/* Skip alternate frames when the content rate is ~double the DCP rate */
803 if (frc.skip && (video.frame % 2) == 1) {
807 /* Time of the first frame we will emit */
808 DCPTime const time = content_video_to_dcp (piece, video.frame);
810 /* Discard if it's before the content's period or the last accurate seek. We can't discard
811 if it's after the content's period here as in that case we still need to fill any gap between
812 `now' and the end of the content's period.
814 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
818 /* Fill gaps that we discover now that we have some video which needs to be emitted.
819 This is where we need to fill to.
821 DCPTime fill_to = min (time, piece->content->end(_film));
823 if (_last_video_time) {
824 DCPTime fill_from = max (*_last_video_time, piece->content->position());
826 /* Fill if we have more than half a frame to do */
827 if ((fill_to - fill_from) > one_video_frame() / 2) {
828 LastVideoMap::const_iterator last = _last_video.find (wp);
829 if (_film->three_d()) {
/* In 3D we fill eye-by-eye, alternating L/R, advancing time after each R */
830 Eyes fill_to_eyes = video.eyes;
831 if (fill_to_eyes == EYES_BOTH) {
832 fill_to_eyes = EYES_LEFT;
834 if (fill_to == piece->content->end(_film)) {
835 /* Don't fill after the end of the content */
836 fill_to_eyes = EYES_LEFT;
838 DCPTime j = fill_from;
839 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
840 if (eyes == EYES_BOTH) {
843 while (j < fill_to || eyes != fill_to_eyes) {
844 if (last != _last_video.end()) {
845 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
846 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
847 copy->set_eyes (eyes);
848 emit_video (copy, j);
850 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
851 emit_video (black_player_video_frame(eyes), j);
853 if (eyes == EYES_RIGHT) {
854 j += one_video_frame();
856 eyes = increment_eyes (eyes);
/* 2D fill: repeat the last frame (or black) once per video frame */
859 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
860 if (last != _last_video.end()) {
861 emit_video (last->second, j);
863 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this content frame and remember it for gap filling */
870 _last_video[wp].reset (
873 piece->content->video->crop (),
874 piece->content->video->fade (_film, video.frame),
875 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
876 _video_container_size,
879 piece->content->video->colour_conversion(),
880 piece->content->video->range(),
/* Emit the frame, repeated as the frame-rate change requires */
888 for (int i = 0; i < frc.repeat; ++i) {
889 if (t < piece->content->end(_film)) {
890 emit_video (_last_video[wp], t);
892 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder: trim it to the
   content's period, apply gain, remap to the film's channels, run the audio
   processor if any, then push it into the merger and record how far this
   stream has been pushed.
   NOTE(review): elided view — some statements (early returns, the remap
   guard) are not visible here. */
897 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
899 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
901 shared_ptr<Piece> piece = wp.lock ();
906 shared_ptr<AudioContent> content = piece->content->audio;
907 DCPOMATIC_ASSERT (content);
909 int const rfr = content->resampled_frame_rate (_film);
911 /* Compute time in the DCP */
912 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
913 /* And the end of this block in the DCP */
914 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
916 /* Remove anything that comes before the start or after the end of the content */
917 if (time < piece->content->position()) {
918 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
920 /* This audio is entirely discarded */
923 content_audio.audio = cut.first;
925 } else if (time > piece->content->end(_film)) {
928 } else if (end > piece->content->end(_film)) {
/* Truncate the block so it does not run past the content's end */
929 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
930 if (remaining_frames == 0) {
933 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
936 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain: copy before applying so the decoder's buffer is not modified */
940 if (content->gain() != 0) {
941 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
942 gain->apply_gain (content->gain ());
943 content_audio.audio = gain;
948 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
952 if (_audio_processor) {
953 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
958 _audio_merger.push (content_audio.audio, time);
959 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
960 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a bitmap subtitle starting: apply the content's offsets and
   scale, rasterise it to fit the video container and add it to the active
   texts.  NOTE(review): elided view — the PlayerText declaration and some
   early returns are not visible here. */
964 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
966 shared_ptr<Piece> piece = wp.lock ();
967 shared_ptr<const TextContent> text = wc.lock ();
968 if (!piece || !text) {
972 /* Apply content's subtitle offsets */
973 subtitle.sub.rectangle.x += text->x_offset ();
974 subtitle.sub.rectangle.y += text->y_offset ();
976 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
977 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
978 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
980 /* Apply content's subtitle scale */
981 subtitle.sub.rectangle.width *= text->x_scale ();
982 subtitle.sub.rectangle.height *= text->y_scale ();
985 shared_ptr<Image> image = subtitle.sub.image;
987 /* We will scale the subtitle up to fit _video_container_size */
988 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
989 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* Degenerate rectangle: nothing to show */
990 if (width == 0 || height == 0) {
994 dcp::Size scaled_size (width, height);
995 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
996 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
998 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a plain (string) subtitle starting: apply the content's
   offsets, scale and fonts to each SubtitleString and add the result to the
   active texts.  NOTE(review): elided view — the PlayerText declaration is
   not visible here. */
1002 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
1004 shared_ptr<Piece> piece = wp.lock ();
1005 shared_ptr<const TextContent> text = wc.lock ();
1006 if (!piece || !text) {
1011 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles which start after the content has ended */
1013 if (from > piece->content->end(_film)) {
1017 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
1018 s.set_h_position (s.h_position() + text->x_offset ());
1019 s.set_v_position (s.v_position() + text->y_offset ());
1020 float const xs = text->x_scale();
1021 float const ys = text->y_scale();
1022 float size = s.size();
1024 /* Adjust size to express the common part of the scaling;
1025 e.g. if xs = ys = 0.5 we scale size by 2.
1027 if (xs > 1e-5 && ys > 1e-5) {
1028 size *= 1 / min (1 / xs, 1 / ys);
1032 /* Then express aspect ratio changes */
1033 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1034 s.set_aspect_adjust (xs / ys);
/* dcp::Time here is expressed with a 1000-per-second editable rate */
1037 s.set_in (dcp::Time(from.seconds(), 1000));
1038 ps.string.push_back (StringText (s, text->outline_width()));
1039 ps.add_fonts (text->fonts ());
1042 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a subtitle ending: close the active text at the given time
   and, if it is not being burnt into the image, emit it via the Text
   signal. */
1046 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1048 shared_ptr<const TextContent> text = wc.lock ();
/* Stop without a matching start: nothing to close */
1053 if (!_active_texts[text->type()].have(wc)) {
1057 shared_ptr<Piece> piece = wp.lock ();
1062 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1064 if (dcp_to > piece->content->end(_film)) {
1068 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1070 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
/* Burnt-in subtitles are composited in do_emit_video, not signalled here */
1071 if (text->use() && !always && !text->burn()) {
1072 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to `time'.  If `accurate' is true, emission state is reset
   so output resumes exactly at `time'; otherwise the state is cleared and
   output resumes from whatever the decoders next produce.
   NOTE(review): elided view — the suspended-state check and the if/else
   structure around the _last_* assignments are partly missing here. */
1077 Player::seek (DCPTime time, bool accurate)
1079 boost::mutex::scoped_lock lm (_mutex);
1082 /* We can't seek in this state */
1087 _shuffler->clear ();
1092 if (_audio_processor) {
1093 _audio_processor->flush ();
/* Drop any pending merged audio and active texts from before the seek */
1096 _audio_merger.clear ();
1097 for (int i = 0; i < TEXT_COUNT; ++i) {
1098 _active_texts[i].clear ();
1101 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1102 if (time < i->content->position()) {
1103 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1104 we must seek this (following) content accurately, otherwise when we come to the end of the current
1105 content we may not start right at the beginning of the next, causing a gap (if the next content has
1106 been trimmed to a point between keyframes, or something).
1108 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1110 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1111 /* During; seek to position */
1112 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1115 /* After; this piece is done */
/* Accurate seek: pin the emission clocks to the seek time */
1121 _last_video_time = time;
1122 _last_video_eyes = EYES_LEFT;
1123 _last_audio_time = time;
/* Inaccurate seek: clear the clocks so the next data defines them */
1125 _last_video_time = optional<DCPTime>();
1126 _last_video_eyes = optional<Eyes>();
1127 _last_audio_time = optional<DCPTime>();
1130 _black.set_position (time);
1131 _silent.set_position (time);
1133 _last_video.clear ();
/* Queue a video frame for emission, advancing the video clock; frames are
   held briefly in _delay so subtitles have time to arrive before the frame
   that needs them. */
1137 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1139 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1140 player before the video that requires them.
1142 _delay.push_back (make_pair (pv, time));
/* Only advance the frame clock once both eyes (or a 2D frame) are queued */
1144 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1145 _last_video_time = time + one_video_frame();
1147 _last_video_eyes = increment_eyes (pv->eyes());
/* Hold up to 3 frames before actually emitting the oldest */
1149 if (_delay.size() < 3) {
1153 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1155 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire old active texts, composite any open
   subtitles for this time onto the frame, then signal it.
   NOTE(review): elided view — the Video signal emission itself is not
   visible here. */
1159 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Expire texts only once per frame (after the right eye in 3D) */
1161 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1162 for (int i = 0; i < TEXT_COUNT; ++i) {
1163 _active_texts[i].clear_before (time);
1167 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1169 pv->set_text (subtitles.get ());
/* Emit audio via the Audio signal and advance the audio clock; asserts the
   data follows on (nearly) contiguously from the previous emission. */
1176 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1178 /* Log if the assert below is about to fail */
1179 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1180 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1183 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1184 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1185 Audio (data, time, _film->audio_frame_rate());
1186 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence over the given period, in blocks of at most half a second. */
1190 Player::fill_audio (DCPTimePeriod period)
/* Empty period: nothing to do */
1192 if (period.from == period.to) {
1196 DCPOMATIC_ASSERT (period.from < period.to);
1198 DCPTime t = period.from;
1199 while (t < period.to) {
1200 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1201 Frame const samples = block.frames_round(_film->audio_frame_rate());
1203 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1204 silence->make_silent ();
1205 emit_audio (silence, t);
/* @return the duration of one video frame at the film's frame rate. */
1212 Player::one_video_frame () const
1214 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of `audio' (which starts at `time') which falls before
   `discard_to'.  @return the remaining audio and its new start time, or a
   null buffer if everything was discarded. */
1217 pair<shared_ptr<AudioBuffers>, DCPTime>
1218 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1220 DCPTime const discard_time = discard_to - time;
1221 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1222 Frame remaining_frames = audio->frames() - discard_frames;
1223 if (remaining_frames <= 0) {
1224 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1226 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1227 return make_pair(cut, time + discard_time);
/* Set (or clear, with an empty optional) the resolution reduction used when
   decoding DCP content, rebuilding pieces so the new reduction takes effect.
   Emits PENDING/DONE (or CANCELLED if unchanged). */
1231 Player::set_dcp_decode_reduction (optional<int> reduction)
1233 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1236 boost::mutex::scoped_lock lm (_mutex);
/* No-op: cancel the pending change */
1238 if (reduction == _dcp_decode_reduction) {
1240 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1244 _dcp_decode_reduction = reduction;
1245 setup_pieces_unlocked ();
1248 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime within a given Content to DCP
   time, if the content is currently in our pieces.  Takes _mutex. */
1252 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1254 boost::mutex::scoped_lock lm (_mutex);
1256 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1257 if (i->content == content) {
1258 return content_time_to_dcp (i, t);
1262 /* We couldn't find this content; perhaps things are being changed over */
1263 return optional<DCPTime>();
/* @return the playlist we are playing: the explicit one if set, otherwise
   the film's own. */
1267 shared_ptr<const Playlist>
1268 Player::playlist () const
1270 return _playlist ? _playlist : _film->playlist();