2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
/* Identifiers passed through Player's Change signal so listeners can tell
   which aspect of the player's output has changed.  Values are arbitrary
   but must be distinct.
   NOTE(review): each line below carries a fused original-file line number
   from a bad extraction; code kept byte-identical, comments only added. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for @param film using @param playlist, playing
   @param playback_length of material.  Connects to Film and Playlist
   change signals, sizes the video container from the film and performs
   an initial accurate seek to time zero.
   NOTE(review): extraction artefact — original line numbers are fused
   into each line and some lines (e.g. the `_film (film)` initializer and
   closing braces) are missing; restore from upstream before compiling. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist, DCPTime playback_length)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _tolerant (film->tolerant())
97 , _play_referenced (false)
98 , _audio_merger (_film->audio_frame_rate())
100 , _playback_length (playback_length)
/* Watch the film for changes that affect our output */
102 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
103 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front so our proxying handler runs before other listeners (see comment above) */
106 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
107 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
108 set_video_container_size (_film->frame_size ());
/* Set up the audio processor as if the film's processor property had just changed */
110 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so decoders are positioned correctly */
113 seek (DCPTime (), true);
122 Player::setup_pieces ()
124 boost::mutex::scoped_lock lm (_mutex);
125 setup_pieces_unlocked ();
129 have_video (shared_ptr<const Content> content)
131 return static_cast<bool>(content->video);
135 have_audio (shared_ptr<const Content> content)
137 return static_cast<bool>(content->audio);
/* Rebuild _pieces from the playlist: create (or re-use) a decoder for each
   content item, wire decoder output signals to the Player's handlers, and
   reset the black/silent fillers and emission clocks.  Caller must hold _mutex.
   NOTE(review): extraction artefact — original line numbers are fused into
   each line and many lines (signature, braces, `continue`s, Stop.connect)
   are missing; restore from upstream before compiling. */
141 Player::setup_pieces_unlocked ()
/* Keep the old pieces so their decoders can be re-used where the content matches */
143 list<shared_ptr<Piece> > old_pieces = _pieces;
147 _shuffler = new Shuffler();
148 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
150 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing */
152 if (!i->paths_valid ()) {
156 if (_ignore_video && _ignore_audio && i->text.empty()) {
157 /* We're only interested in text and this content has none */
/* Re-use the old decoder for this content if there was one */
161 shared_ptr<Decoder> old_decoder;
162 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
163 if (j->content == i) {
164 old_decoder = j->decoder;
169 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
170 FrameRateChange frc (_film, i);
173 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set via set_ignore_*() */
177 if (decoder->video && _ignore_video) {
178 decoder->video->set_ignore (true);
181 if (decoder->audio && _ignore_audio) {
182 decoder->audio->set_ignore (true);
186 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
187 i->set_ignore (true);
/* DCP decoders need to know whether to decode their referenced assets */
191 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
193 dcp->set_decode_referenced (_play_referenced);
194 if (_play_referenced) {
195 dcp->set_forced_reduction (_dcp_decode_reduction);
199 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
200 _pieces.push_back (piece);
202 if (decoder->video) {
203 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
204 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
205 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
/* 2D (or full-frame 3D): video can go straight to Player::video */
207 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
211 if (decoder->audio) {
212 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Wire each text decoder's start/stop signals through to our handlers */
215 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
217 while (j != decoder->text.end()) {
218 (*j)->BitmapStart.connect (
219 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
221 (*j)->PlainStart.connect (
222 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* presumably this is the Stop.connect call — confirm against upstream */
225 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Track per-stream audio push positions so pass() knows how far it can pull */
232 _stream_states.clear ();
233 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
234 if (i->content->audio) {
235 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
236 _stream_states[j] = StreamState (i, i->content->position ());
/* Gaps with no video get black; gaps with no audio get silence */
241 _black = Empty (_film, _playlist, bind(&have_video, _1), _playback_length);
242 _silent = Empty (_film, _playlist, bind(&have_audio, _1), _playback_length);
/* Reset the emission clocks to the start */
244 _last_video_time = DCPTime ();
245 _last_video_eyes = EYES_BOTH;
246 _last_audio_time = DCPTime ();
/* Handle a ContentChange signal from the playlist: suspend work while the
   change is pending, rebuild pieces when it is done, then re-emit the
   change to our own listeners.
   NOTE(review): extraction artefact — the statements inside each branch
   (suspend/resume and setup_pieces calls) are missing; restore from
   upstream before compiling. */
250 Player::playlist_content_change (ChangeType type, int property, bool frequent)
252 if (type == CHANGE_TYPE_PENDING) {
253 /* The player content is probably about to change, so we can't carry on
254 until that has happened and we've rebuilt our pieces. Stop pass()
255 and seek() from working until then.
258 } else if (type == CHANGE_TYPE_DONE) {
259 /* A change in our content has gone through. Re-build our pieces. */
262 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Proxy the change through to our own listeners */
266 Change (type, property, frequent);
270 Player::set_video_container_size (dcp::Size s)
272 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
275 boost::mutex::scoped_lock lm (_mutex);
277 if (s == _video_container_size) {
279 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
283 _video_container_size = s;
285 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
286 _black_image->make_black ();
289 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
293 Player::playlist_change (ChangeType type)
295 if (type == CHANGE_TYPE_DONE) {
298 Change (type, PlayerProperty::PLAYLIST, false);
302 Player::film_change (ChangeType type, Film::Property p)
304 /* Here we should notice Film properties that affect our output, and
305 alert listeners that our output now would be different to how it was
306 last time we were run.
309 if (p == Film::CONTAINER) {
310 Change (type, PlayerProperty::FILM_CONTAINER, false);
311 } else if (p == Film::VIDEO_FRAME_RATE) {
312 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
313 so we need new pieces here.
315 if (type == CHANGE_TYPE_DONE) {
318 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
319 } else if (p == Film::AUDIO_PROCESSOR) {
320 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
321 boost::mutex::scoped_lock lm (_mutex);
322 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
324 } else if (p == Film::AUDIO_CHANNELS) {
325 if (type == CHANGE_TYPE_DONE) {
326 boost::mutex::scoped_lock lm (_mutex);
327 _audio_merger.clear ();
332 shared_ptr<PlayerVideo>
333 Player::black_player_video_frame (Eyes eyes) const
335 return shared_ptr<PlayerVideo> (
337 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
340 _video_container_size,
341 _video_container_size,
344 PresetColourConversion::all().front().conversion,
346 boost::weak_ptr<Content>(),
347 boost::optional<Frame>(),
354 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
356 DCPTime s = t - piece->content->position ();
357 s = min (piece->content->length_after_trim(_film), s);
358 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
360 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
361 then convert that ContentTime to frames at the content's rate. However this fails for
362 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
363 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
365 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
367 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
371 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 /* See comment in dcp_to_content_video */
374 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
375 return d + piece->content->position();
379 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
381 DCPTime s = t - piece->content->position ();
382 s = min (piece->content->length_after_trim(_film), s);
383 /* See notes in dcp_to_content_video */
384 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
388 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
390 /* See comment in dcp_to_content_video */
391 return DCPTime::from_frames (f, _film->audio_frame_rate())
392 - DCPTime (piece->content->trim_start(), piece->frc)
393 + piece->content->position();
397 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
399 DCPTime s = t - piece->content->position ();
400 s = min (piece->content->length_after_trim(_film), s);
401 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
405 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
407 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
410 list<shared_ptr<Font> >
411 Player::get_subtitle_fonts ()
413 boost::mutex::scoped_lock lm (_mutex);
415 list<shared_ptr<Font> > fonts;
416 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
417 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
418 /* XXX: things may go wrong if there are duplicate font IDs
419 with different font files.
421 list<shared_ptr<Font> > f = j->fonts ();
422 copy (f.begin(), f.end(), back_inserter (fonts));
429 /** Set this player never to produce any video data */
431 Player::set_ignore_video ()
433 boost::mutex::scoped_lock lm (_mutex);
434 _ignore_video = true;
435 setup_pieces_unlocked ();
439 Player::set_ignore_audio ()
441 boost::mutex::scoped_lock lm (_mutex);
442 _ignore_audio = true;
443 setup_pieces_unlocked ();
447 Player::set_ignore_text ()
449 boost::mutex::scoped_lock lm (_mutex);
451 setup_pieces_unlocked ();
454 /** Set the player to always burn open texts into the image regardless of the content settings */
456 Player::set_always_burn_open_subtitles ()
458 boost::mutex::scoped_lock lm (_mutex);
459 _always_burn_open_subtitles = true;
462 /** Sets up the player to be faster, possibly at the expense of quality */
466 boost::mutex::scoped_lock lm (_mutex);
468 setup_pieces_unlocked ();
472 Player::set_play_referenced ()
474 boost::mutex::scoped_lock lm (_mutex);
475 _play_referenced = true;
476 setup_pieces_unlocked ();
480 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
482 DCPOMATIC_ASSERT (r);
483 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
484 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
485 if (r->actual_duration() > 0) {
487 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Collect the reel assets (picture/sound/subtitle/closed-caption) from any
   DCP content that is set to be referenced rather than re-encoded, trimmed
   to the content's trim settings.
   NOTE(review): extraction artefact — original line numbers are fused into
   each line and several lines (continue on non-DCP content, try/catch
   around the decoder, closing braces, final return) are missing; restore
   from upstream before compiling. */
492 list<ReferencedReelAsset>
493 Player::get_reel_assets ()
495 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
497 list<ReferencedReelAsset> a;
499 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
500 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
505 scoped_ptr<DCPDecoder> decoder;
507 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
512 DCPOMATIC_ASSERT (j->video_frame_rate ());
513 double const cfr = j->video_frame_rate().get();
/* Trims expressed in content frames */
514 Frame const trim_start = j->trim_start().frames_round (cfr);
515 Frame const trim_end = j->trim_end().frames_round (cfr);
516 int const ffr = _film->video_frame_rate ();
518 /* position in the asset from the start */
519 int64_t offset_from_start = 0;
520 /* position in the asset from the end */
521 int64_t offset_from_end = 0;
/* First pass: total duration so offset_from_end can count down */
522 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
523 /* Assume that main picture duration is the length of the reel */
524 offset_from_end += k->main_picture()->actual_duration();
/* Second pass: emit each reel's referenced assets, trimmed */
527 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
529 /* Assume that main picture duration is the length of the reel */
530 int64_t const reel_duration = k->main_picture()->actual_duration();
532 /* See doc/design/trim_reels.svg */
533 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
534 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
536 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
537 if (j->reference_video ()) {
538 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
541 if (j->reference_audio ()) {
542 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
545 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
546 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
549 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
550 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
551 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
555 offset_from_start += reel_duration;
556 offset_from_end -= reel_duration;
/* Player::pass(): make one decoder (or the black/silent filler) emit some
   data, then flush any audio that is definitely complete up to the
   earliest in-progress position.  Returns true when playback is finished —
   TODO confirm return convention against upstream; the signature and many
   branch/brace lines are missing from this extraction (original line
   numbers are fused into each line). */
566 boost::mutex::scoped_lock lm (_mutex);
569 /* We can't pass in this state */
/* Zero-length playback: emit a single black frame and finish */
573 if (_playback_length == DCPTime()) {
574 /* Special; just give one black frame */
575 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
579 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
581 shared_ptr<Piece> earliest_content;
582 optional<DCPTime> earliest_time;
584 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, in DCP time */
589 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
590 if (t > i->content->end(_film)) {
594 /* Given two choices at the same time, pick the one with texts so we see it before
597 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
599 earliest_content = i;
613 if (earliest_content) {
/* Consider the black/silent fillers as candidates too */
617 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
618 earliest_time = _black.position ();
622 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
623 earliest_time = _silent.position ();
/* Dispatch on which source is earliest (switch/case lines missing here) */
630 earliest_content->done = earliest_content->decoder->pass ();
631 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
632 if (dcp && !_play_referenced && dcp->reference_audio()) {
633 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
634 to `hide' the fact that no audio was emitted during the referenced DCP (though
635 we need to behave as though it was).
637 _last_audio_time = dcp->end (_film);
/* BLACK case: emit a black frame and advance the filler */
642 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
643 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: emit up to one video frame's worth of silence */
647 DCPTimePeriod period (_silent.period_at_position());
648 if (_last_audio_time) {
649 /* Sometimes the thing that happened last finishes fractionally before
650 or after this silence. Bodge the start time of the silence to fix it.
651 I think this is nothing to worry about since we will just add or
652 remove a little silence at the end of some content.
654 int64_t const error = labs(period.from.get() - _last_audio_time->get());
655 /* Let's not worry about less than a frame at 24fps */
656 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
657 if (error >= too_much_error) {
658 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
660 DCPOMATIC_ASSERT (error < too_much_error);
661 period.from = *_last_audio_time;
663 if (period.duration() > one_video_frame()) {
664 period.to = period.from + one_video_frame();
/* fill_audio call elided here in this extraction — confirm upstream */
667 _silent.set_position (period.to);
675 /* Emit any audio that is ready */
677 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
678 of our streams, or the position of the _silent.
680 DCPTime pull_to = _playback_length;
681 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
682 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
683 pull_to = i->second.last_push_end;
686 if (!_silent.done() && _silent.position() < pull_to) {
687 pull_to = _silent.position();
690 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
691 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
692 if (_last_audio_time && i->second < *_last_audio_time) {
693 /* This new data comes before the last we emitted (or the last seek); discard it */
694 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
699 } else if (_last_audio_time && i->second > *_last_audio_time) {
700 /* There's a gap between this data and the last we emitted; fill with silence */
701 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
704 emit_audio (i->first, i->second);
/* At the end of playback, flush the video delay queue */
709 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
710 do_emit_video(i->first, i->second);
/* NOTE(review): extraction artefact — original line numbers are fused into
   each line and several lines (signature body braces, the scale() call for
   bitmap subtitles) are missing; restore from upstream before compiling. */
717 /** @return Open subtitles for the frame at the given time, converted to images */
718 optional<PositionImage>
719 Player::open_subtitles_for_frame (DCPTime time) const
721 list<PositionImage> captions;
722 int const vfr = _film->video_frame_rate();
/* All open subtitles active during this one-frame period (burnt-in ones
   included if _always_burn_open_subtitles is set) */
726 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
729 /* Bitmap subtitles */
730 BOOST_FOREACH (BitmapText i, j.bitmap) {
735 /* i.image will already have been scaled to fit _video_container_size */
736 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Position is expressed as a fraction of the container size */
742 lrint (_video_container_size.width * i.rectangle.x),
743 lrint (_video_container_size.height * i.rectangle.y)
749 /* String subtitles (rendered to an image) */
750 if (!j.string.empty ()) {
751 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
752 copy (s.begin(), s.end(), back_inserter (captions));
756 if (captions.empty ()) {
757 return optional<PositionImage> ();
/* Merge all the caption images into one */
760 return merge (captions);
/* Handler for video data arriving from a piece's decoder: convert the
   content frame to DCP time, fill any gap since the last emitted video
   (repeating the last frame or emitting black, handling 3D eye pairs),
   then build a PlayerVideo and emit it (repeated as the FrameRateChange
   requires).
   NOTE(review): extraction artefact — original line numbers are fused into
   each line and many lines (early returns, braces, parts of the
   PlayerVideo construction) are missing; restore from upstream. */
764 Player::video (weak_ptr<Piece> wp, ContentVideo video)
766 shared_ptr<Piece> piece = wp.lock ();
/* Skip every other frame if the DCP rate is half the content rate */
771 FrameRateChange frc (_film, piece->content);
772 if (frc.skip && (video.frame % 2) == 1) {
776 /* Time of the first frame we will emit */
777 DCPTime const time = content_video_to_dcp (piece, video.frame);
779 /* Discard if it's before the content's period or the last accurate seek. We can't discard
780 if it's after the content's period here as in that case we still need to fill any gap between
781 `now' and the end of the content's period.
783 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
787 /* Fill gaps that we discover now that we have some video which needs to be emitted.
788 This is where we need to fill to.
790 DCPTime fill_to = min (time, piece->content->end(_film));
792 if (_last_video_time) {
793 DCPTime fill_from = max (*_last_video_time, piece->content->position());
795 /* Fill if we have more than half a frame to do */
796 if ((fill_to - fill_from) > one_video_frame() / 2) {
797 LastVideoMap::const_iterator last = _last_video.find (wp);
/* 3D: fill eye-by-eye so L/R stay in step */
798 if (_film->three_d()) {
799 Eyes fill_to_eyes = video.eyes;
800 if (fill_to_eyes == EYES_BOTH) {
801 fill_to_eyes = EYES_LEFT;
803 if (fill_to == piece->content->end(_film)) {
804 /* Don't fill after the end of the content */
805 fill_to_eyes = EYES_LEFT;
807 DCPTime j = fill_from;
808 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
809 if (eyes == EYES_BOTH) {
/* Advance one eye at a time until we reach fill_to/fill_to_eyes */
812 while (j < fill_to || eyes != fill_to_eyes) {
813 if (last != _last_video.end()) {
/* Repeat the last frame we have, switched to the eye we need */
814 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
815 copy->set_eyes (eyes);
816 emit_video (copy, j);
/* No previous frame: fill with black */
818 emit_video (black_player_video_frame(eyes), j);
820 if (eyes == EYES_RIGHT) {
821 j += one_video_frame();
823 eyes = increment_eyes (eyes);
/* 2D: fill one whole frame at a time */
826 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
827 if (last != _last_video.end()) {
828 emit_video (last->second, j);
830 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this content frame and remember it for gap-filling */
837 _last_video[wp].reset (
840 piece->content->video->crop (),
841 piece->content->video->fade (_film, video.frame),
842 piece->content->video->scale().size (
843 piece->content->video, _video_container_size, _film->frame_size ()
845 _video_container_size,
848 piece->content->video->colour_conversion(),
849 piece->content->video->range(),
/* Emit the frame, repeated as required by the frame-rate change, but not
   past the end of the content */
857 for (int i = 0; i < frc.repeat; ++i) {
858 if (t < piece->content->end(_film)) {
859 emit_video (_last_video[wp], t);
861 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder: map the content
   frame to DCP time, trim anything outside the content's period, apply
   gain/remap/processor, then push into the merger and record the stream's
   push position.
   NOTE(review): extraction artefact — original line numbers are fused into
   each line and some lines (early returns, braces) are missing; restore
   from upstream before compiling. */
866 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
868 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
870 shared_ptr<Piece> piece = wp.lock ();
875 shared_ptr<AudioContent> content = piece->content->audio;
876 DCPOMATIC_ASSERT (content);
878 int const rfr = content->resampled_frame_rate (_film);
880 /* Compute time in the DCP */
881 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
882 /* And the end of this block in the DCP */
883 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
885 /* Remove anything that comes before the start or after the end of the content */
886 if (time < piece->content->position()) {
887 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
889 /* This audio is entirely discarded */
892 content_audio.audio = cut.first;
894 } else if (time > piece->content->end(_film)) {
/* Whole block after the content's end: drop it */
897 } else if (end > piece->content->end(_film)) {
/* Block straddles the end: keep only the frames up to the end */
898 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
899 if (remaining_frames == 0) {
902 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
905 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain */
909 if (content->gain() != 0) {
910 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
911 gain->apply_gain (content->gain ());
912 content_audio.audio = gain;
/* Remap the stream's channels to the film's channel layout */
917 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
921 if (_audio_processor) {
922 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push to the merger and note how far this stream has got */
927 _audio_merger.push (content_audio.audio, time);
928 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
929 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a bitmap subtitle starting: apply the content's offsets and
   scale to the subtitle rectangle, scale the image to the container, and
   register it with the active-texts tracker.
   NOTE(review): extraction artefact — original line numbers are fused into
   each line and some lines (PlayerText declaration, early returns, braces)
   are missing; restore from upstream before compiling. */
933 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
935 shared_ptr<Piece> piece = wp.lock ();
936 shared_ptr<const TextContent> text = wc.lock ();
937 if (!piece || !text) {
941 /* Apply content's subtitle offsets */
942 subtitle.sub.rectangle.x += text->x_offset ();
943 subtitle.sub.rectangle.y += text->y_offset ();
945 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
946 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
947 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
949 /* Apply content's subtitle scale */
950 subtitle.sub.rectangle.width *= text->x_scale ();
951 subtitle.sub.rectangle.height *= text->y_scale ();
954 shared_ptr<Image> image = subtitle.sub.image;
956 /* We will scale the subtitle up to fit _video_container_size */
957 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
958 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
959 if (width == 0 || height == 0) {
963 dcp::Size scaled_size (width, height);
964 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
965 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
967 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a string (plain) subtitle starting: apply the content's
   offsets, scale and aspect adjustments to each SubtitleString, then
   register the result with the active-texts tracker.
   NOTE(review): extraction artefact — original line numbers are fused into
   each line and some lines (PlayerText declaration, early returns, braces)
   are missing; restore from upstream before compiling. */
971 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
973 shared_ptr<Piece> piece = wp.lock ();
974 shared_ptr<const TextContent> text = wc.lock ();
975 if (!piece || !text) {
980 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Don't start subtitles after the content has finished */
982 if (from > piece->content->end(_film)) {
986 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
987 s.set_h_position (s.h_position() + text->x_offset ());
988 s.set_v_position (s.v_position() + text->y_offset ());
989 float const xs = text->x_scale();
990 float const ys = text->y_scale();
991 float size = s.size();
993 /* Adjust size to express the common part of the scaling;
994 e.g. if xs = ys = 0.5 we scale size by 2.
996 if (xs > 1e-5 && ys > 1e-5) {
997 size *= 1 / min (1 / xs, 1 / ys);
1001 /* Then express aspect ratio changes */
1002 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1003 s.set_aspect_adjust (xs / ys);
/* Subtitle in-time in libdcp's 1000ths-of-a-second representation */
1006 s.set_in (dcp::Time(from.seconds(), 1000));
1007 ps.string.push_back (StringText (s, text->outline_width()));
1008 ps.add_fonts (text->fonts ());
1011 _active_texts[text->type()].add_from (wc, ps, from);
1015 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1017 shared_ptr<const TextContent> text = wc.lock ();
1022 if (!_active_texts[text->type()].have(wc)) {
1026 shared_ptr<Piece> piece = wp.lock ();
1031 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1033 if (dcp_to > piece->content->end(_film)) {
1037 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1039 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1040 if (text->use() && !always && !text->burn()) {
1041 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @param time; @param accurate true to get the exact
   frame at `time' (slower), false to accept the nearest keyframe.  Clears
   all pending audio/text state, seeks every piece's decoder and resets the
   emission clocks and fillers.
   NOTE(review): extraction artefact — original line numbers are fused into
   each line and some lines (suspended check, braces, `i->done` settings,
   the accurate/inaccurate if/else) are missing; restore from upstream. */
1046 Player::seek (DCPTime time, bool accurate)
1048 boost::mutex::scoped_lock lm (_mutex);
1051 /* We can't seek in this state */
/* Drop any partially-assembled 3D eye pairs */
1056 _shuffler->clear ();
1061 if (_audio_processor) {
1062 _audio_processor->flush ();
/* Discard merged-but-unemitted audio and any active texts */
1065 _audio_merger.clear ();
1066 for (int i = 0; i < TEXT_COUNT; ++i) {
1067 _active_texts[i].clear ();
1070 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1071 if (time < i->content->position()) {
1072 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1073 we must seek this (following) content accurately, otherwise when we come to the end of the current
1074 content we may not start right at the beginning of the next, causing a gap (if the next content has
1075 been trimmed to a point between keyframes, or something).
1077 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1079 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1080 /* During; seek to position */
1081 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1084 /* After; this piece is done */
/* Accurate seek: we know exactly where the next emission will be.
   Inaccurate: we don't, so leave the clocks unset. */
1090 _last_video_time = time;
1091 _last_video_eyes = EYES_LEFT;
1092 _last_audio_time = time;
1094 _last_video_time = optional<DCPTime>();
1095 _last_video_eyes = optional<Eyes>();
1096 _last_audio_time = optional<DCPTime>();
1099 _black.set_position (time);
1100 _silent.set_position (time);
/* Forget remembered frames used for gap-filling */
1102 _last_video.clear ();
1106 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1108 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1109 player before the video that requires them.
1111 _delay.push_back (make_pair (pv, time));
1113 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1114 _last_video_time = time + one_video_frame();
1116 _last_video_eyes = increment_eyes (pv->eyes());
1118 if (_delay.size() < 3) {
1122 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1124 do_emit_video (to_do.first, to_do.second);
1128 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1130 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1131 for (int i = 0; i < TEXT_COUNT; ++i) {
1132 _active_texts[i].clear_before (time);
1136 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1138 pv->set_text (subtitles.get ());
1145 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1147 /* Log if the assert below is about to fail */
1148 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1149 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1152 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1153 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1154 Audio (data, time, _film->audio_frame_rate());
1155 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1159 Player::fill_audio (DCPTimePeriod period)
1161 if (period.from == period.to) {
1165 DCPOMATIC_ASSERT (period.from < period.to);
1167 DCPTime t = period.from;
1168 while (t < period.to) {
1169 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1170 Frame const samples = block.frames_round(_film->audio_frame_rate());
1172 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1173 silence->make_silent ();
1174 emit_audio (silence, t);
1181 Player::one_video_frame () const
1183 return DCPTime::from_frames (1, _film->video_frame_rate ());
1186 pair<shared_ptr<AudioBuffers>, DCPTime>
1187 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1189 DCPTime const discard_time = discard_to - time;
1190 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1191 Frame remaining_frames = audio->frames() - discard_frames;
1192 if (remaining_frames <= 0) {
1193 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1195 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1196 return make_pair(cut, time + discard_time);
1200 Player::set_dcp_decode_reduction (optional<int> reduction)
1202 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1205 boost::mutex::scoped_lock lm (_mutex);
1207 if (reduction == _dcp_decode_reduction) {
1209 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1213 _dcp_decode_reduction = reduction;
1214 setup_pieces_unlocked ();
1217 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1221 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1223 boost::mutex::scoped_lock lm (_mutex);
1225 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1226 if (i->content == content) {
1227 return content_time_to_dcp (i, t);
1231 /* We couldn't find this content; perhaps things are being changed over */
1232 return optional<DCPTime>();