2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
/* Identifiers passed as the `property' argument of the Change signal so that
   observers can tell which aspect of the Player's output has changed.
   NOTE(review): this excerpt has lines elided (the embedded original line
   numbers jump), so treat all blocks below as incomplete views of the code. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player which takes its content from @p film's own playlist.
   Ignore flags default to off; referenced DCPs are decoded (not passed
   through) by default.  NOTE(review): initialisers such as _film and the
   constructor body are elided from this excerpt — confirm against upstream. */
88 Player::Player (shared_ptr<const Film> film)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _tolerant (film->tolerant())
97 , _play_referenced (false)
98 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player which takes its content from an explicit @p playlist_
   rather than the film's own.  The trailing statements wire the Player to
   film/playlist change signals and perform the initial setup: note that the
   playlist Change connection is made boost::signals2::at_front so that the
   butler (which this Player proxies for) hears about changes first. */
104 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
106 , _playlist (playlist_)
108 , _ignore_video (false)
109 , _ignore_audio (false)
110 , _ignore_text (false)
111 , _always_burn_open_subtitles (false)
113 , _tolerant (film->tolerant())
114 , _play_referenced (false)
115 , _audio_merger (_film->audio_frame_rate())
124 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
125 /* The butler must hear about this first, so since we are proxying this through to the butler we must
128 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
129 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
130 set_video_container_size (_film->frame_size ());
/* Pick up any audio processor configured on the film */
132 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so decoders begin in a known state */
135 seek (DCPTime (), true);
/* Public entry point: rebuild our Pieces (content/decoder pairs), taking
   the state mutex before delegating to setup_pieces_unlocked(). */
144 Player::setup_pieces ()
146 boost::mutex::scoped_lock lm (_mutex);
147 setup_pieces_unlocked ();
/* Predicate used when building the _black Empty: true if this content has
   video and that video is marked for use. */
152 have_video (shared_ptr<const Content> content)
154 return static_cast<bool>(content->video) && content->video->use();
/* Predicate used when building the _silent Empty: true if this content has
   any audio at all. */
158 have_audio (shared_ptr<const Content> content)
160 return static_cast<bool>(content->audio);
/* Rebuild _pieces from the playlist.  For each piece of content we create
   (or reuse, via old_pieces) a decoder, apply the ignore flags, connect the
   decoder's Data/text signals to our handlers, then recompute the per-stream
   state and the _black/_silent Empty periods.  Caller must hold _mutex.
   NOTE(review): several lines (closing braces, `continue`s) are elided from
   this excerpt. */
164 Player::setup_pieces_unlocked ()
166 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so that decoders can be re-used where possible */
168 list<shared_ptr<Piece> > old_pieces = _pieces;
172 _shuffler = new Shuffler();
173 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
175 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
177 if (!i->paths_valid ()) {
181 if (_ignore_video && _ignore_audio && i->text.empty()) {
182 /* We're only interested in text and this content has none */
/* Re-use the decoder from a previous setup for the same content, if any */
186 shared_ptr<Decoder> old_decoder;
187 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
188 if (j->content == i) {
189 old_decoder = j->decoder;
194 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
195 FrameRateChange frc (_film, i);
198 /* Not something that we can decode; e.g. Atmos content */
202 if (decoder->video && _ignore_video) {
203 decoder->video->set_ignore (true);
206 if (decoder->audio && _ignore_audio) {
207 decoder->audio->set_ignore (true);
211 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
212 i->set_ignore (true);
/* DCP content may need to pass through referenced assets rather than decode them */
216 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
218 dcp->set_decode_referenced (_play_referenced);
219 if (_play_referenced) {
220 dcp->set_forced_reduction (_dcp_decode_reduction);
224 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
225 _pieces.push_back (piece);
227 if (decoder->video) {
228 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
229 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
230 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
232 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
236 if (decoder->audio) {
237 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect subtitle/caption start and stop signals for every text decoder */
240 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
242 while (j != decoder->text.end()) {
243 (*j)->BitmapStart.connect (
244 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
246 (*j)->PlainStart.connect (
247 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
250 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Track, per audio stream, which piece it belongs to and how far it has pushed */
257 _stream_states.clear ();
258 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
259 if (i->content->audio) {
260 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
261 _stream_states[j] = StreamState (i, i->content->position ());
/* Periods with no video get black; periods with no audio get silence */
266 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
267 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
269 _last_video_time = DCPTime ();
270 _last_video_eyes = EYES_BOTH;
271 _last_audio_time = DCPTime ();
/* Handler for ContentChange from the playlist.  PENDING suspends the player,
   DONE rebuilds the pieces, CANCELLED resumes; in every case the change is
   forwarded to our own Change signal. */
275 Player::playlist_content_change (ChangeType type, int property, bool frequent)
277 if (type == CHANGE_TYPE_PENDING) {
278 /* The player content is probably about to change, so we can't carry on
279 until that has happened and we've rebuilt our pieces. Stop pass()
280 and seek() from working until then.
283 } else if (type == CHANGE_TYPE_DONE) {
284 /* A change in our content has gone through. Re-build our pieces. */
287 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Forward the change to whoever is listening to us */
291 Change (type, property, frequent);
/* Set the size of the video `container' into which output frames are placed,
   recreating the cached black frame to match.  Emits PENDING then either
   CANCELLED (no change) or DONE on the VIDEO_CONTAINER_SIZE property. */
295 Player::set_video_container_size (dcp::Size s)
297 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
300 boost::mutex::scoped_lock lm (_mutex);
302 if (s == _video_container_size) {
304 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
308 _video_container_size = s;
/* Re-make the all-black frame at the new size */
310 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
311 _black_image->make_black ();
314 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handler for Change from the playlist: rebuild pieces when a change has
   completed, and forward the event on our own Change signal. */
318 Player::playlist_change (ChangeType type)
320 if (type == CHANGE_TYPE_DONE) {
323 Change (type, PlayerProperty::PLAYLIST, false);
/* Handler for Change from the Film: translate Film properties that affect
   our output into our own Change notifications, and update internal state
   (pieces, audio processor, merger) where needed. */
327 Player::film_change (ChangeType type, Film::Property p)
329 /* Here we should notice Film properties that affect our output, and
330 alert listeners that our output now would be different to how it was
331 last time we were run.
334 if (p == Film::CONTAINER) {
335 Change (type, PlayerProperty::FILM_CONTAINER, false);
336 } else if (p == Film::VIDEO_FRAME_RATE) {
337 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
338 so we need new pieces here.
340 if (type == CHANGE_TYPE_DONE) {
343 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
344 } else if (p == Film::AUDIO_PROCESSOR) {
345 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
346 boost::mutex::scoped_lock lm (_mutex);
/* Clone the processor at our audio rate for our own use */
347 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
349 } else if (p == Film::AUDIO_CHANNELS) {
350 if (type == CHANGE_TYPE_DONE) {
351 boost::mutex::scoped_lock lm (_mutex);
/* Channel count changed, so any merged-but-unemitted audio is stale */
352 _audio_merger.clear ();
/* Make a PlayerVideo containing one all-black frame at the current container
   size, for the given @p eyes.  Uses the cached _black_image.
   NOTE(review): some constructor arguments are elided from this excerpt. */
357 shared_ptr<PlayerVideo>
358 Player::black_player_video_frame (Eyes eyes) const
360 return shared_ptr<PlayerVideo> (
362 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
365 _video_container_size,
366 _video_container_size,
369 PresetColourConversion::all().front().conversion,
371 boost::weak_ptr<Content>(),
372 boost::optional<Frame>(),
/* Convert a DCP time to a frame index within @p piece's video content,
   clamping to the content's trimmed extent. */
379 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
381 DCPTime s = t - piece->content->position ();
382 s = min (piece->content->length_after_trim(_film), s);
383 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
385 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
386 then convert that ContentTime to frames at the content's rate. However this fails for
387 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
388 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
390 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
392 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content video frame index to
   the DCP time at which it will appear. */
396 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
398 /* See comment in dcp_to_content_video */
399 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
400 return d + piece->content->position();
/* Convert a DCP time to a frame index in @p piece's audio, counted at the
   film's (resampled) audio frame rate. */
404 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
406 DCPTime s = t - piece->content->position ();
407 s = min (piece->content->length_after_trim(_film), s);
408 /* See notes in dcp_to_content_video */
409 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert a resampled audio frame index
   within @p piece to a DCP time. */
413 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
415 /* See comment in dcp_to_content_video */
416 return DCPTime::from_frames (f, _film->audio_frame_rate())
417 - DCPTime (piece->content->trim_start(), piece->frc)
418 + piece->content->position();
/* Convert a DCP time to a ContentTime within @p piece, clamped to the
   content's trimmed extent. */
422 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
424 DCPTime s = t - piece->content->position ();
425 s = min (piece->content->length_after_trim(_film), s);
426 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within @p piece to a DCP time (clamped to be
   non-negative). */
430 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
432 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every text (subtitle/caption) content in all
   pieces.  Duplicate font IDs with different files are not resolved here. */
435 list<shared_ptr<Font> >
436 Player::get_subtitle_fonts ()
438 boost::mutex::scoped_lock lm (_mutex);
440 list<shared_ptr<Font> > fonts;
441 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
442 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
443 /* XXX: things may go wrong if there are duplicate font IDs
444 with different font files.
446 list<shared_ptr<Font> > f = j->fonts ();
447 copy (f.begin(), f.end(), back_inserter (fonts));
454 /** Set this player never to produce any video data */
456 Player::set_ignore_video ()
458 boost::mutex::scoped_lock lm (_mutex);
459 _ignore_video = true;
/* Rebuild pieces so that video decoders are told to ignore their output */
460 setup_pieces_unlocked ();
/* Set this player never to produce any audio data; pieces are rebuilt so
   the flag takes effect in the decoders. */
464 Player::set_ignore_audio ()
466 boost::mutex::scoped_lock lm (_mutex);
467 _ignore_audio = true;
468 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): the line setting _ignore_text appears to be elided from
   this excerpt. */
472 Player::set_ignore_text ()
474 boost::mutex::scoped_lock lm (_mutex);
476 setup_pieces_unlocked ();
479 /** Set the player to always burn open texts into the image regardless of the content settings */
481 Player::set_always_burn_open_subtitles ()
483 boost::mutex::scoped_lock lm (_mutex);
484 _always_burn_open_subtitles = true;
487 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature (presumably Player::set_fast) and the
   line setting the flag are elided from this excerpt — confirm upstream. */
491 boost::mutex::scoped_lock lm (_mutex);
493 setup_pieces_unlocked ();
/* Tell the player to decode and play content from referenced DCPs rather
   than leaving it to be passed through; pieces are rebuilt accordingly. */
497 Player::set_play_referenced ()
499 boost::mutex::scoped_lock lm (_mutex);
500 _play_referenced = true;
501 setup_pieces_unlocked ();
/* Helper for get_reel_assets(): trim @p r by the given start/end frame counts
   and, if anything remains, add it to @p a with its DCP period computed at
   the film frame rate @p ffr starting at @p from. */
505 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
507 DCPOMATIC_ASSERT (r);
508 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
509 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
510 if (r->actual_duration() > 0) {
512 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Return the reel assets (picture/sound/subtitle/closed-caption) from any
   DCP content that is marked to be referenced rather than re-encoded,
   trimmed per reel according to the content's trim settings.
   See doc/design/trim_reels.svg in the upstream tree for the geometry. */
517 list<ReferencedReelAsset>
518 Player::get_reel_assets ()
520 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
522 list<ReferencedReelAsset> a;
524 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
525 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
/* A throwaway decoder used only to enumerate the DCP's reels */
530 scoped_ptr<DCPDecoder> decoder;
532 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
537 DCPOMATIC_ASSERT (j->video_frame_rate ());
538 double const cfr = j->video_frame_rate().get();
539 Frame const trim_start = j->trim_start().frames_round (cfr);
540 Frame const trim_end = j->trim_end().frames_round (cfr);
541 int const ffr = _film->video_frame_rate ();
543 /* position in the asset from the start */
544 int64_t offset_from_start = 0;
545 /* position in the asset from the end */
546 int64_t offset_from_end = 0;
547 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
548 /* Assume that main picture duration is the length of the reel */
549 offset_from_end += k->main_picture()->actual_duration();
552 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
554 /* Assume that main picture duration is the length of the reel */
555 int64_t const reel_duration = k->main_picture()->actual_duration();
557 /* See doc/design/trim_reels.svg */
558 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
559 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
561 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
562 if (j->reference_video ()) {
563 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
566 if (j->reference_audio ()) {
567 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
570 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
571 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
574 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
575 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
576 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
580 offset_from_start += reel_duration;
581 offset_from_end -= reel_duration;
/* NOTE(review): the signature is elided from this excerpt; from the body
   this appears to be Player::pass() — confirm upstream.  One `pass' makes
   the decoder (or black/silent filler) that is farthest behind emit some
   data, then pulls any ready audio from the merger and flushes delayed
   video. */
591 boost::mutex::scoped_lock lm (_mutex);
594 /* We can't pass in this state */
598 if (_playback_length == DCPTime()) {
599 /* Special; just give one black frame */
600 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
604 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
606 shared_ptr<Piece> earliest_content;
607 optional<DCPTime> earliest_time;
609 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
614 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
615 if (t > i->content->end(_film)) {
619 /* Given two choices at the same time, pick the one with texts so we see it before
622 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
624 earliest_content = i;
638 if (earliest_content) {
/* Prefer the black/silent fillers if they are behind the earliest decoder */
642 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
643 earliest_time = _black.position ();
647 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
648 earliest_time = _silent.position ();
655 earliest_content->done = earliest_content->decoder->pass ();
656 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
657 if (dcp && !_play_referenced && dcp->reference_audio()) {
658 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
659 to `hide' the fact that no audio was emitted during the referenced DCP (though
660 we need to behave as though it was).
662 _last_audio_time = dcp->end (_film);
/* Emit one black frame and advance the black filler */
667 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
668 _black.set_position (_black.position() + one_video_frame());
672 DCPTimePeriod period (_silent.period_at_position());
673 if (_last_audio_time) {
674 /* Sometimes the thing that happened last finishes fractionally before
675 or after this silence. Bodge the start time of the silence to fix it.
676 I think this is nothing to worry about since we will just add or
677 remove a little silence at the end of some content.
679 int64_t const error = labs(period.from.get() - _last_audio_time->get());
680 /* Let's not worry about less than a frame at 24fps */
681 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
682 if (error >= too_much_error) {
683 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
685 DCPOMATIC_ASSERT (error < too_much_error);
686 period.from = *_last_audio_time;
/* Cap each silence emission at one video frame's worth */
688 if (period.duration() > one_video_frame()) {
689 period.to = period.from + one_video_frame();
692 _silent.set_position (period.to);
700 /* Emit any audio that is ready */
702 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
703 of our streams, or the position of the _silent.
705 DCPTime pull_to = _playback_length;
706 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
707 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
708 pull_to = i->second.last_push_end;
711 if (!_silent.done() && _silent.position() < pull_to) {
712 pull_to = _silent.position();
715 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
716 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
717 if (_last_audio_time && i->second < *_last_audio_time) {
718 /* This new data comes before the last we emitted (or the last seek); discard it */
719 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
724 } else if (_last_audio_time && i->second > *_last_audio_time) {
725 /* There's a gap between this data and the last we emitted; fill with silence */
726 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
729 emit_audio (i->first, i->second);
/* Flush any video held in the delay buffer */
734 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
735 do_emit_video(i->first, i->second);
742 /** @return Open subtitles for the frame at the given time, converted to images */
743 optional<PositionImage>
744 Player::open_subtitles_for_frame (DCPTime time) const
746 list<PositionImage> captions;
747 int const vfr = _film->video_frame_rate();
/* Gather every active open subtitle that should be burnt in over the
   one-frame period starting at `time' */
751 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
754 /* Bitmap subtitles */
755 BOOST_FOREACH (BitmapText i, j.bitmap) {
760 /* i.image will already have been scaled to fit _video_container_size */
761 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
767 lrint (_video_container_size.width * i.rectangle.x),
768 lrint (_video_container_size.height * i.rectangle.y)
774 /* String subtitles (rendered to an image) */
775 if (!j.string.empty ()) {
776 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
777 copy (s.begin(), s.end(), back_inserter (captions));
781 if (captions.empty ()) {
782 return optional<PositionImage> ();
/* Combine all the caption images into one */
785 return merge (captions);
/* Handler for video data arriving from a piece's decoder.  Discards frames
   that are before the content period or the last seek, fills any gap since
   the last emitted frame (with the last frame, or black), then builds a
   PlayerVideo and emits it, repeating for frame-rate-change repeats.
   NOTE(review): some lines (returns, PlayerVideo ctor args) are elided. */
789 Player::video (weak_ptr<Piece> wp, ContentVideo video)
791 shared_ptr<Piece> piece = wp.lock ();
796 FrameRateChange frc (_film, piece->content);
/* When skipping (content faster than DCP) drop every other frame */
797 if (frc.skip && (video.frame % 2) == 1) {
801 /* Time of the first frame we will emit */
802 DCPTime const time = content_video_to_dcp (piece, video.frame);
804 /* Discard if it's before the content's period or the last accurate seek. We can't discard
805 if it's after the content's period here as in that case we still need to fill any gap between
806 `now' and the end of the content's period.
808 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
812 /* Fill gaps that we discover now that we have some video which needs to be emitted.
813 This is where we need to fill to.
815 DCPTime fill_to = min (time, piece->content->end(_film));
817 if (_last_video_time) {
818 DCPTime fill_from = max (*_last_video_time, piece->content->position());
820 /* Fill if we have more than half a frame to do */
821 if ((fill_to - fill_from) > one_video_frame() / 2) {
822 LastVideoMap::const_iterator last = _last_video.find (wp);
823 if (_film->three_d()) {
/* 3D: fill eye-by-eye up to the eye of the incoming frame */
824 Eyes fill_to_eyes = video.eyes;
825 if (fill_to_eyes == EYES_BOTH) {
826 fill_to_eyes = EYES_LEFT;
828 if (fill_to == piece->content->end(_film)) {
829 /* Don't fill after the end of the content */
830 fill_to_eyes = EYES_LEFT;
832 DCPTime j = fill_from;
833 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
834 if (eyes == EYES_BOTH) {
837 while (j < fill_to || eyes != fill_to_eyes) {
838 if (last != _last_video.end()) {
/* Repeat the last frame we sent for this piece, re-tagged per eye */
839 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
840 copy->set_eyes (eyes);
841 emit_video (copy, j);
843 emit_video (black_player_video_frame(eyes), j);
845 if (eyes == EYES_RIGHT) {
846 j += one_video_frame();
848 eyes = increment_eyes (eyes);
851 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
852 if (last != _last_video.end()) {
853 emit_video (last->second, j);
855 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame and remember it for gap-filling */
862 _last_video[wp].reset (
865 piece->content->video->crop (),
866 piece->content->video->fade (_film, video.frame),
867 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
868 _video_container_size,
871 piece->content->video->colour_conversion(),
872 piece->content->video->range(),
/* Emit the frame, repeated as required by the frame rate change */
880 for (int i = 0; i < frc.repeat; ++i) {
881 if (t < piece->content->end(_film)) {
882 emit_video (_last_video[wp], t);
884 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder.  Trims the block
   to the content's period, applies gain, remapping and any audio processor,
   then pushes it into the merger and records how far this stream has pushed. */
889 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
891 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
893 shared_ptr<Piece> piece = wp.lock ();
898 shared_ptr<AudioContent> content = piece->content->audio;
899 DCPOMATIC_ASSERT (content);
901 int const rfr = content->resampled_frame_rate (_film);
903 /* Compute time in the DCP */
904 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
905 /* And the end of this block in the DCP */
906 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
908 /* Remove anything that comes before the start or after the end of the content */
909 if (time < piece->content->position()) {
910 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
912 /* This audio is entirely discarded */
915 content_audio.audio = cut.first;
917 } else if (time > piece->content->end(_film)) {
920 } else if (end > piece->content->end(_film)) {
/* Truncate the block so it finishes exactly at the content's end */
921 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
922 if (remaining_frames == 0) {
925 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
928 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, copying so the decoder's buffer is untouched */
932 if (content->gain() != 0) {
933 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
934 gain->apply_gain (content->gain ());
935 content_audio.audio = gain;
/* Map the stream's channels onto the film's channel layout */
940 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
944 if (_audio_processor) {
945 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
950 _audio_merger.push (content_audio.audio, time);
951 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
952 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a bitmap subtitle/caption starting: apply the content's
   offset/scale settings, scale the image to the container size and add it
   to the active-texts list for its type. */
956 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
958 shared_ptr<Piece> piece = wp.lock ();
959 shared_ptr<const TextContent> text = wc.lock ();
960 if (!piece || !text) {
964 /* Apply content's subtitle offsets */
965 subtitle.sub.rectangle.x += text->x_offset ();
966 subtitle.sub.rectangle.y += text->y_offset ();
968 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
969 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
970 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
972 /* Apply content's subtitle scale */
973 subtitle.sub.rectangle.width *= text->x_scale ();
974 subtitle.sub.rectangle.height *= text->y_scale ();
977 shared_ptr<Image> image = subtitle.sub.image;
979 /* We will scale the subtitle up to fit _video_container_size */
980 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
981 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
982 if (width == 0 || height == 0) {
986 dcp::Size scaled_size (width, height);
987 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
988 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
990 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a plain (string) subtitle/caption starting: apply the content's
   position offsets and scaling (folding the common scale into the font size,
   the rest into aspect adjust), then add to the active-texts list. */
994 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
996 shared_ptr<Piece> piece = wp.lock ();
997 shared_ptr<const TextContent> text = wc.lock ();
998 if (!piece || !text) {
1003 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles starting after the content finishes */
1005 if (from > piece->content->end(_film)) {
1009 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
1010 s.set_h_position (s.h_position() + text->x_offset ());
1011 s.set_v_position (s.v_position() + text->y_offset ());
1012 float const xs = text->x_scale();
1013 float const ys = text->y_scale();
1014 float size = s.size();
1016 /* Adjust size to express the common part of the scaling;
1017 e.g. if xs = ys = 0.5 we scale size by 2.
1019 if (xs > 1e-5 && ys > 1e-5) {
1020 size *= 1 / min (1 / xs, 1 / ys);
1024 /* Then express aspect ratio changes */
1025 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1026 s.set_aspect_adjust (xs / ys);
1029 s.set_in (dcp::Time(from.seconds(), 1000));
1030 ps.string.push_back (StringText (s, text->outline_width()));
1031 ps.add_fonts (text->fonts ());
1034 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a subtitle/caption ending at ContentTime @p to: close the
   matching active text and, unless it is being burnt in, emit it on the
   Text signal with its full DCP period. */
1038 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1040 shared_ptr<const TextContent> text = wc.lock ();
1045 if (!_active_texts[text->type()].have(wc)) {
1049 shared_ptr<Piece> piece = wp.lock ();
1054 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1056 if (dcp_to > piece->content->end(_film)) {
1060 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Only emit as a discrete text if it is not going to be burnt into the image */
1062 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1063 if (text->use() && !always && !text->burn()) {
1064 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to DCP time @p time.  @p accurate true means the first
   data emitted will be exactly at @p time; false allows decoders to start
   from a convenient earlier point (e.g. a keyframe).  Flushes/clears all
   buffered state and seeks every piece's decoder appropriately. */
1069 Player::seek (DCPTime time, bool accurate)
1071 boost::mutex::scoped_lock lm (_mutex);
1074 /* We can't seek in this state */
1079 _shuffler->clear ();
1084 if (_audio_processor) {
1085 _audio_processor->flush ();
1088 _audio_merger.clear ();
1089 for (int i = 0; i < TEXT_COUNT; ++i) {
1090 _active_texts[i].clear ();
1093 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1094 if (time < i->content->position()) {
1095 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1096 we must seek this (following) content accurately, otherwise when we come to the end of the current
1097 content we may not start right at the beginning of the next, causing a gap (if the next content has
1098 been trimmed to a point between keyframes, or something).
1100 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1102 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1103 /* During; seek to position */
1104 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1107 /* After; this piece is done */
/* For an accurate seek the last-emitted markers are pinned to `time';
   for an inaccurate one they are unknown until data arrives */
1113 _last_video_time = time;
1114 _last_video_eyes = EYES_LEFT;
1115 _last_audio_time = time;
1117 _last_video_time = optional<DCPTime>();
1118 _last_video_eyes = optional<Eyes>();
1119 _last_audio_time = optional<DCPTime>();
1122 _black.set_position (time);
1123 _silent.set_position (time);
1125 _last_video.clear ();
/* Queue a video frame for emission, updating the last-video time/eyes
   markers.  Frames sit in a small _delay buffer so that subtitles for a
   frame can arrive before the frame itself is finally emitted. */
1129 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1131 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1132 player before the video that requires them.
1134 _delay.push_back (make_pair (pv, time));
1136 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1137 _last_video_time = time + one_video_frame();
1139 _last_video_eyes = increment_eyes (pv->eyes());
/* Only start emitting once the delay buffer has filled */
1141 if (_delay.size() < 3) {
1145 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1147 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire stale active texts, attach any open
   subtitles that should be burnt into this frame, and fire the signal.
   NOTE(review): the Video signal emission line is elided from this excerpt. */
1151 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1153 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1154 for (int i = 0; i < TEXT_COUNT; ++i) {
1155 _active_texts[i].clear_before (time);
1159 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1161 pv->set_text (subtitles.get ());
/* Emit a block of audio on the Audio signal.  Audio must be contiguous:
   the block must start within one Time unit of where the previous one
   ended (asserted below, with a log message beforehand to aid debugging). */
1168 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1170 /* Log if the assert below is about to fail */
1171 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1172 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1175 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1176 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1177 Audio (data, time, _film->audio_frame_rate());
1178 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover @p period, in blocks of at most half a second. */
1182 Player::fill_audio (DCPTimePeriod period)
1184 if (period.from == period.to) {
1188 DCPOMATIC_ASSERT (period.from < period.to);
1190 DCPTime t = period.from;
1191 while (t < period.to) {
1192 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1193 Frame const samples = block.frames_round(_film->audio_frame_rate());
1195 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1196 silence->make_silent ();
1197 emit_audio (silence, t);
/* @return the DCPTime duration of one video frame at the film's rate. */
1204 Player::one_video_frame () const
1206 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of @p audio (which starts at @p time) that falls before
   @p discard_to.  @return the remaining audio and its new start time; an
   empty pair if everything was discarded. */
1209 pair<shared_ptr<AudioBuffers>, DCPTime>
1210 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1212 DCPTime const discard_time = discard_to - time;
1213 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1214 Frame remaining_frames = audio->frames() - discard_frames;
1215 if (remaining_frames <= 0) {
1216 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1218 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1219 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode reduction used for DCP content, rebuilding pieces
   if it actually changes.  Emits PENDING then CANCELLED (no change) or DONE
   on the DCP_DECODE_REDUCTION property. */
1223 Player::set_dcp_decode_reduction (optional<int> reduction)
1225 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1228 boost::mutex::scoped_lock lm (_mutex);
1230 if (reduction == _dcp_decode_reduction) {
1232 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1236 _dcp_decode_reduction = reduction;
1237 setup_pieces_unlocked ();
1240 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime in @p content to DCP time by
   finding the piece that wraps it.  Returns none if the content is not
   currently in our pieces (e.g. mid-change). */
1244 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1246 boost::mutex::scoped_lock lm (_mutex);
1248 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1249 if (i->content == content) {
1250 return content_time_to_dcp (i, t);
1254 /* We couldn't find this content; perhaps things are being changed over */
1255 return optional<DCPTime>();
/* @return the playlist we are using: the explicit one given at construction
   if any, otherwise the film's own. */
1259 shared_ptr<const Playlist>
1260 Player::playlist () const
1262 return _playlist ? _playlist : _film->playlist()