2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "caption_content.h"
44 #include "caption_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
/* Identifiers passed to the Changed signal so observers can tell which
   aspect of the player's output has changed.  Values are arbitrary but
   must be distinct from content/film property IDs used elsewhere. */
80 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
81 int const PlayerProperty::PLAYLIST = 701;
82 int const PlayerProperty::FILM_CONTAINER = 702;
83 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
84 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for @param film and @param playlist: connect to the
   film/playlist change signals, adopt the film's frame size as the video
   container size, pick up any film-level audio processor, and do an
   initial accurate seek to time zero.
   NOTE(review): this excerpt is missing lines (e.g. initialiser-list
   entries such as _film, and the surrounding braces). */
86 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
88 	, _playlist (playlist)
89 	, _have_valid_pieces (false)
90 	, _ignore_video (false)
91 	, _ignore_subtitle (false)
93 	, _play_referenced (false)
94 	, _audio_merger (_film->audio_frame_rate())
/* Keep the connections so they are disconnected automatically when the
   Player is destroyed. */
97 	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
98 	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
99 	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
100 	set_video_container_size (_film->frame_size ());
/* Force initial setup of the audio processor (if the film has one). */
102 	film_changed (Film::AUDIO_PROCESSOR);
104 	seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate change)
   from the playlist, wiring decoder output signals back into this Player.
   Also resets per-stream state, the black/silent fillers and the
   last-emitted-time bookkeeping, then marks the pieces valid.
   NOTE(review): this excerpt is missing lines (braces, continue/return
   statements and some conditions). */
113 Player::setup_pieces ()
/* A Shuffler re-orders 3D left/right-eye frames which may arrive out of
   sequence from the decoder. */
118 	_shuffler = new Shuffler();
119 	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
121 	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing. */
123 		if (!i->paths_valid ()) {
127 		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
128 		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
131 			/* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set by set_ignore_video / set_ignore_subtitle. */
135 		if (decoder->video && _ignore_video) {
136 			decoder->video->set_ignore (true);
139 		if (decoder->caption && _ignore_subtitle) {
140 			decoder->caption->set_ignore (true);
/* DCP content gets extra configuration for referenced-asset playback. */
143 		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
145 			dcp->set_decode_referenced (_play_referenced);
146 			if (_play_referenced) {
147 				dcp->set_forced_reduction (_dcp_decode_reduction);
151 		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
152 		_pieces.push_back (piece);
154 		if (decoder->video) {
155 			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
156 				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
157 				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
159 				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
163 		if (decoder->audio) {
164 			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
167 		if (decoder->caption) {
168 			decoder->caption->BitmapStart.connect (bind (&Player::bitmap_text_start, this, weak_ptr<Piece> (piece), _1));
169 			decoder->caption->PlainStart.connect (bind (&Player::plain_text_start, this, weak_ptr<Piece> (piece), _1));
170 			decoder->caption->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1, _2));
/* Record, per audio stream, the piece it belongs to and its start position;
   used later to work out how far the audio merger can be pulled. */
174 	_stream_states.clear ();
175 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
176 		if (i->content->audio) {
177 			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
178 				_stream_states[j] = StreamState (i, i->content->position ());
/* _black/_silent describe the periods with no video / no audio content,
   which the player fills with black frames and silence respectively. */
183 	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
184 	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
186 	_last_video_time = DCPTime ();
187 	_last_video_eyes = EYES_BOTH;
188 	_last_audio_time = DCPTime ();
189 	_have_valid_pieces = true;
/* Called when a property of some playlist content changes.  Properties
   that affect which pieces/decoders we need invalidate the pieces and
   re-emit Changed; purely presentational properties only re-emit Changed.
   NOTE(review): this excerpt is missing lines (the early return for an
   expired weak_ptr, the if (...) brackets and closing braces). */
193 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
195 	shared_ptr<Content> c = w.lock ();
/* Properties in this first group change the structure of the playback
   (timing, streams, assets), so the pieces must be rebuilt. */
201 		property == ContentProperty::POSITION ||
202 		property == ContentProperty::LENGTH ||
203 		property == ContentProperty::TRIM_START ||
204 		property == ContentProperty::TRIM_END ||
205 		property == ContentProperty::PATH ||
206 		property == VideoContentProperty::FRAME_TYPE ||
207 		property == VideoContentProperty::COLOUR_CONVERSION ||
208 		property == AudioContentProperty::STREAMS ||
209 		property == DCPContentProperty::NEEDS_ASSETS ||
210 		property == DCPContentProperty::NEEDS_KDM ||
211 		property == CaptionContentProperty::COLOUR ||
212 		property == CaptionContentProperty::EFFECT ||
213 		property == CaptionContentProperty::EFFECT_COLOUR ||
214 		property == FFmpegContentProperty::SUBTITLE_STREAM ||
215 		property == FFmpegContentProperty::FILTERS
218 		_have_valid_pieces = false;
219 		Changed (property, frequent);
/* This second group only changes how existing decoded data is presented,
   so the pieces can be kept. */
222 		property == CaptionContentProperty::LINE_SPACING ||
223 		property == CaptionContentProperty::OUTLINE_WIDTH ||
224 		property == CaptionContentProperty::Y_SCALE ||
225 		property == CaptionContentProperty::FADE_IN ||
226 		property == CaptionContentProperty::FADE_OUT ||
227 		property == ContentProperty::VIDEO_FRAME_RATE ||
228 		property == CaptionContentProperty::USE ||
229 		property == CaptionContentProperty::X_OFFSET ||
230 		property == CaptionContentProperty::Y_OFFSET ||
231 		property == CaptionContentProperty::X_SCALE ||
232 		property == CaptionContentProperty::FONTS ||
233 		property == VideoContentProperty::CROP ||
234 		property == VideoContentProperty::SCALE ||
235 		property == VideoContentProperty::FADE_IN ||
236 		property == VideoContentProperty::FADE_OUT
239 		Changed (property, frequent);
/* Set the size of the "container" into which output video is placed,
   rebuilding the cached black frame to match.  No-op (and no signal)
   if the size is unchanged. */
244 Player::set_video_container_size (dcp::Size s)
246 	if (s == _video_container_size) {
250 	_video_container_size = s;
/* The black frame is emitted wherever there is no video content. */
252 	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
253 	_black_image->make_black ();
255 	Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist structure changed: invalidate the pieces and tell observers. */
259 Player::playlist_changed ()
261 	_have_valid_pieces = false;
262 	Changed (PlayerProperty::PLAYLIST, false);
266 Player::film_changed (Film::Property p)
268 	/* Here we should notice Film properties that affect our output, and
269 	   alert listeners that our output now would be different to how it was
270 	   last time we were run.
273 	if (p == Film::CONTAINER) {
274 		Changed (PlayerProperty::FILM_CONTAINER, false);
275 	} else if (p == Film::VIDEO_FRAME_RATE) {
276 		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
277 		   so we need new pieces here.
279 		_have_valid_pieces = false;
280 		Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
281 	} else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor at our audio rate; NOTE(review): the branch
   clearing _audio_processor when the film has none is not visible in
   this excerpt — confirm against the full source. */
282 		if (_film->audio_processor ()) {
283 			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
285 	} else if (p == Film::AUDIO_CHANNELS) {
286 		_audio_merger.clear ();
/* Convert bitmap captions (image + fractional on-screen rectangle) into
   PositionImages scaled and positioned for _video_container_size.
   NOTE(review): this excerpt is missing most of the body (the scale call,
   the PositionImage construction and the return). */
291 Player::transform_bitmap_captions (list<BitmapCaption> subs) const
293 	list<PositionImage> all;
295 	for (list<BitmapCaption>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
300 		/* We will scale the subtitle up to fit _video_container_size */
/* rectangle values are fractions of the container, so multiply out. */
301 		dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
307 				dcp::YUV_TO_RGB_REC601,
308 				i->image->pixel_format (),
313 				lrint (_video_container_size.width * i->rectangle.x),
314 				lrint (_video_container_size.height * i->rectangle.y)
/* Build a PlayerVideo wrapping the cached black image, used to fill
   periods with no video content.  @param eyes which eye(s) the frame is
   for (EYES_BOTH for 2D). */
323 shared_ptr<PlayerVideo>
324 Player::black_player_video_frame (Eyes eyes) const
326 	return shared_ptr<PlayerVideo> (
328 			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
331 			_video_container_size,
332 			_video_container_size,
335 			PresetColourConversion::all().front().conversion,
/* A black frame belongs to no content and has no source frame index. */
336 			boost::weak_ptr<Content>(),
337 			boost::optional<Frame>()
/* Convert a DCP time @param t to a video frame index within @param piece's
   content, clamping to the piece's trimmed length and accounting for
   skip/repeat via the piece's FrameRateChange. */
343 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
345 	DCPTime s = t - piece->content->position ();
346 	s = min (piece->content->length_after_trim(), s);
347 	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
349 	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
350 	   then convert that ContentTime to frames at the content's rate.  However this fails for
351 	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
352 	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
354 	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
356 	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert content video frame @param f
   in @param piece to its DCP time. */
360 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
362 	/* See comment in dcp_to_content_video */
363 	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
364 	return d + piece->content->position();
/* Convert DCP time @param t to an audio frame index (at the film's audio
   rate) within @param piece, clamped to the piece's trimmed length. */
368 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
370 	DCPTime s = t - piece->content->position ();
371 	s = min (piece->content->length_after_trim(), s);
372 	/* See notes in dcp_to_content_video */
373 	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert resampled audio frame
   @param f of @param piece to its DCP time. */
377 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
379 	/* See comment in dcp_to_content_video */
380 	return DCPTime::from_frames (f, _film->audio_frame_rate())
381 		- DCPTime (piece->content->trim_start(), piece->frc)
382 		+ piece->content->position();
/* Convert DCP time @param t to a ContentTime within @param piece,
   clamped to the piece's trimmed length and never negative. */
386 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
388 	DCPTime s = t - piece->content->position ();
389 	s = min (piece->content->length_after_trim(), s);
390 	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert ContentTime @param t within @param piece to DCP time (never
   negative). */
394 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
396 	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all caption content in the playlist,
   rebuilding the pieces first if necessary.
   NOTE(review): the setup_pieces() call and the return statement are not
   visible in this excerpt. */
399 list<shared_ptr<Font> >
400 Player::get_subtitle_fonts ()
402 	if (!_have_valid_pieces) {
406 	list<shared_ptr<Font> > fonts;
407 	BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
408 		if (p->content->caption) {
409 			/* XXX: things may go wrong if there are duplicate font IDs
410 			   with different font files.
412 			list<shared_ptr<Font> > f = p->content->caption->fonts ();
413 			copy (f.begin(), f.end(), back_inserter (fonts));
420 /** Set this player never to produce any video data */
422 Player::set_ignore_video ()
424 	_ignore_video = true;
/** Set this player never to produce any subtitle/caption data */
428 Player::set_ignore_subtitle ()
430 	_ignore_subtitle = true;
433 /** Set a type of caption that this player should always burn into the image,
434  *  regardless of the content settings.
435  *  @param type type of captions to burn.
438 Player::set_always_burn_captions (CaptionType type)
440 	_always_burn_captions = type;
443 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature is missing from this excerpt;
   presumably this is the "fast" setter, which also sets _fast (used by
   decoder_factory in setup_pieces) — confirm against the full source. */
448 	_have_valid_pieces = false;
/** Tell the player to decode referenced DCP assets rather than skipping
 *  them; requires the pieces to be rebuilt. */
452 Player::set_play_referenced ()
454 	_play_referenced = true;
455 	_have_valid_pieces = false;
/* Gather the reel assets (picture / sound / subtitle) that DCP content in
   the playlist asks to be referenced rather than re-encoded, with each
   asset's entry point and duration adjusted for the content's trims and
   its DCP period computed from the content's position.
   NOTE(review): this excerpt is missing lines (loop braces, the
   try/catch around DCPDecoder construction and the final return). */
458 list<ReferencedReelAsset>
459 Player::get_reel_assets ()
461 	list<ReferencedReelAsset> a;
463 	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference reel assets. */
464 		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
469 		scoped_ptr<DCPDecoder> decoder;
471 			decoder.reset (new DCPDecoder (j, _film->log(), false));
477 		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Convert the content's trims into frames at the content (DCP) rate. */
479 			DCPOMATIC_ASSERT (j->video_frame_rate ());
480 			double const cfr = j->video_frame_rate().get();
481 			Frame const trim_start = j->trim_start().frames_round (cfr);
482 			Frame const trim_end = j->trim_end().frames_round (cfr);
483 			int const ffr = _film->video_frame_rate ();
485 			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
486 			if (j->reference_video ()) {
487 				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
488 				DCPOMATIC_ASSERT (ra);
/* Apply trims by shifting the entry point and shrinking the duration. */
489 				ra->set_entry_point (ra->entry_point() + trim_start);
490 				ra->set_duration (ra->duration() - trim_start - trim_end);
492 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
496 			if (j->reference_audio ()) {
497 				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
498 				DCPOMATIC_ASSERT (ra);
499 				ra->set_entry_point (ra->entry_point() + trim_start);
500 				ra->set_duration (ra->duration() - trim_start - trim_end);
502 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
506 			if (j->reference_subtitle ()) {
507 				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
508 				DCPOMATIC_ASSERT (ra);
509 				ra->set_entry_point (ra->entry_point() + trim_start);
510 				ra->set_duration (ra->duration() - trim_start - trim_end);
512 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
516 			/* Assume that main picture duration is the length of the reel */
517 			offset += k->main_picture()->duration ();
/* NOTE(review): the function signature is not visible in this excerpt —
   from the body this appears to be the player's main "pass" loop: pick
   whichever of {content decoders, black filler, silence filler} is
   furthest behind, advance it by one step, then flush any audio that is
   guaranteed complete.  Many lines (braces, the "which" selector and
   returns) are missing. */
527 	if (!_have_valid_pieces) {
531 	if (_playlist->length() == DCPTime()) {
532 		/* Special case of an empty Film; just give one black frame */
533 		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
537 	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */
539 	shared_ptr<Piece> earliest_content;
540 	optional<DCPTime> earliest_time;
542 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Position of this piece's decoder, clamped to its trim, in DCP time. */
547 		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
548 		if (t > i->content->end()) {
552 		/* Given two choices at the same time, pick the one with a subtitle so we see it before
555 		if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->caption)) {
557 			earliest_content = i;
571 	if (earliest_content) {
/* The black/silent fillers can themselves be the furthest-behind thing. */
575 	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
576 		earliest_time = _black.position ();
580 	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
581 		earliest_time = _silent.position ();
587 		earliest_content->done = earliest_content->decoder->pass ();
590 		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
591 		_black.set_position (_black.position() + one_video_frame());
595 		DCPTimePeriod period (_silent.period_at_position());
596 		if (_last_audio_time) {
597 			/* Sometimes the thing that happened last finishes fractionally before
598 			   this silence.  Bodge the start time of the silence to fix it.  I'm
599 			   not sure if this is the right solution --- maybe the last thing should
600 			   be padded `forward' rather than this thing padding `back'.
602 			period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass. */
604 		if (period.duration() > one_video_frame()) {
605 			period.to = period.from + one_video_frame();
608 		_silent.set_position (period.to);
616 	/* Emit any audio that is ready */
618 	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
619 	   of our streams, or the position of the _silent.
621 	DCPTime pull_to = _film->length ();
622 	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
623 		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
624 			pull_to = i->second.last_push_end;
627 	if (!_silent.done() && _silent.position() < pull_to) {
628 		pull_to = _silent.position();
631 	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
632 	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
633 		if (_last_audio_time && i->second < *_last_audio_time) {
634 			/* This new data comes before the last we emitted (or the last seek); discard it */
635 			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
640 		} else if (_last_audio_time && i->second > *_last_audio_time) {
641 			/* There's a gap between this data and the last we emitted; fill with silence */
642 			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
645 		emit_audio (i->first, i->second);
/* Flush the video delay queue (see emit_video for why it exists). */
650 		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
651 			do_emit_video(i->first, i->second);
/* Build the single merged caption image (if any) to burn into the video
   frame at @param time, combining bitmap captions and rendered text
   captions of every type that is either set to burn or forced via
   set_always_burn_captions. */
658 optional<PositionImage>
659 Player::captions_for_frame (DCPTime time) const
661 	list<PositionImage> captions;
663 	int const vfr = _film->video_frame_rate();
665 	for (int i = 0; i < CAPTION_COUNT; ++i) {
666 		bool const always = _always_burn_captions && *_always_burn_captions == i;
/* Take captions active during this one-frame period. */
669 			_active_captions[i].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), always)
672 			/* Image subtitles */
673 			list<PositionImage> c = transform_bitmap_captions (j.image);
674 			copy (c.begin(), c.end(), back_inserter (captions));
676 			/* Text subtitles (rendered to an image) */
677 			if (!j.text.empty ()) {
678 				list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
679 				copy (s.begin(), s.end(), back_inserter (captions));
684 	if (captions.empty ()) {
685 		return optional<PositionImage> ();
688 	return merge (captions);
/* Handle a decoded video frame from @param wp: discard frames that are
   too early, fill any gap since the last emitted frame (with repeats of
   the previous frame or black, handling 3D eye alternation), then build
   a PlayerVideo and emit it (repeated as required by the frame-rate
   change).  NOTE(review): this excerpt is missing lines (early returns,
   some braces and part of the PlayerVideo construction). */
692 Player::video (weak_ptr<Piece> wp, ContentVideo video)
694 	shared_ptr<Piece> piece = wp.lock ();
699 	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping (content faster than DCP) drop every other frame. */
700 	if (frc.skip && (video.frame % 2) == 1) {
704 	/* Time of the first frame we will emit */
705 	DCPTime const time = content_video_to_dcp (piece, video.frame);
707 	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
708 	   if it's after the content's period here as in that case we still need to fill any gap between
709 	   `now' and the end of the content's period.
711 	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
715 	/* Fill gaps that we discover now that we have some video which needs to be emitted.
716 	   This is where we need to fill to.
718 	DCPTime fill_to = min (time, piece->content->end());
720 	if (_last_video_time) {
721 		DCPTime fill_from = max (*_last_video_time, piece->content->position());
722 		LastVideoMap::const_iterator last = _last_video.find (wp);
723 		if (_film->three_d()) {
724 			DCPTime j = fill_from;
725 			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
726 			if (eyes == EYES_BOTH) {
/* In 3D, fill eye-by-eye until we reach both fill_to and the incoming
   frame's eye, so L/R alternation stays consistent. */
729 			while (j < fill_to || eyes != video.eyes) {
730 				if (last != _last_video.end()) {
731 					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
732 					copy->set_eyes (eyes);
733 					emit_video (copy, j);
735 					emit_video (black_player_video_frame(eyes), j);
/* Only advance time after the right eye; L and R share a frame time. */
737 				if (eyes == EYES_RIGHT) {
738 					j += one_video_frame();
740 				eyes = increment_eyes (eyes);
743 			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
744 				if (last != _last_video.end()) {
745 					emit_video (last->second, j);
747 					emit_video (black_player_video_frame(EYES_BOTH), j);
/* Remember this frame per-piece so future gaps can repeat it. */
753 	_last_video[wp].reset (
756 			piece->content->video->crop (),
757 			piece->content->video->fade (video.frame),
758 			piece->content->video->scale().size (
759 				piece->content->video, _video_container_size, _film->frame_size ()
761 			_video_container_size,
764 			piece->content->video->colour_conversion(),
/* Emit the frame, repeated if the frame-rate change requires it, but
   never beyond the content's end. */
771 	for (int i = 0; i < frc.repeat; ++i) {
772 		if (t < piece->content->end()) {
773 			emit_video (_last_video[wp], t);
775 		t += one_video_frame ();
/* Handle decoded audio from @param stream of @param wp: trim it to the
   content's period, apply gain, remap to the film's channel layout, run
   any audio processor, then push it into the merger and record how far
   this stream has been pushed.
   NOTE(review): this excerpt is missing lines (early returns, braces and
   the time adjustment after a leading discard). */
780 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
782 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
784 	shared_ptr<Piece> piece = wp.lock ();
789 	shared_ptr<AudioContent> content = piece->content->audio;
790 	DCPOMATIC_ASSERT (content);
792 	/* Compute time in the DCP */
793 	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
794 	/* And the end of this block in the DCP */
795 	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
797 	/* Remove anything that comes before the start or after the end of the content */
798 	if (time < piece->content->position()) {
799 		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
801 			/* This audio is entirely discarded */
804 		content_audio.audio = cut.first;
806 	} else if (time > piece->content->end()) {
/* Block straddles the content's end: keep only the frames before it. */
809 	} else if (end > piece->content->end()) {
810 		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
811 		if (remaining_frames == 0) {
814 		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
815 		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
816 		content_audio.audio = cut;
819 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, copying so the decoder's buffer is
   not modified. */
823 	if (content->gain() != 0) {
824 		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
825 		gain->apply_gain (content->gain ());
826 		content_audio.audio = gain;
/* Remap from the stream's channel mapping to the film's channel count. */
831 	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
835 	if (_audio_processor) {
836 		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
841 	_audio_merger.push (content_audio.audio, time);
842 	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
843 	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of a bitmap caption from @param wp: apply the
   content's offset and scale settings to its rectangle, then register it
   as an active caption starting at the corresponding DCP time. */
847 Player::bitmap_text_start (weak_ptr<Piece> wp, ContentBitmapCaption subtitle)
849 	shared_ptr<Piece> piece = wp.lock ();
854 	/* Apply content's subtitle offsets */
855 	subtitle.sub.rectangle.x += piece->content->caption->x_offset ();
856 	subtitle.sub.rectangle.y += piece->content->caption->y_offset ();
858 	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
859 	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((piece->content->caption->x_scale() - 1) / 2);
860 	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((piece->content->caption->y_scale() - 1) / 2);
862 	/* Apply content's subtitle scale */
863 	subtitle.sub.rectangle.width *= piece->content->caption->x_scale ();
864 	subtitle.sub.rectangle.height *= piece->content->caption->y_scale ();
867 	ps.image.push_back (subtitle.sub);
868 	DCPTime from (content_time_to_dcp (piece, subtitle.from()));
870 	_active_captions[subtitle.type()].add_from (wp, ps, from);
/* Handle the start of a text caption from @param wp: apply the content's
   offset/scale settings to each SubtitleString, then register the result
   as an active caption starting at the corresponding DCP time.
   NOTE(review): some lines (early returns, PlayerCaption declaration,
   braces) are missing from this excerpt. */
874 Player::plain_text_start (weak_ptr<Piece> wp, ContentTextCaption subtitle)
876 	shared_ptr<Piece> piece = wp.lock ();
882 	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore captions which start after the content has finished. */
884 	if (from > piece->content->end()) {
888 	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
889 		s.set_h_position (s.h_position() + piece->content->caption->x_offset ());
890 		s.set_v_position (s.v_position() + piece->content->caption->y_offset ());
891 		float const xs = piece->content->caption->x_scale();
892 		float const ys = piece->content->caption->y_scale();
893 		float size = s.size();
895 		/* Adjust size to express the common part of the scaling;
896 		   e.g. if xs = ys = 0.5 we scale size by 2.
898 		if (xs > 1e-5 && ys > 1e-5) {
899 			size *= 1 / min (1 / xs, 1 / ys);
903 		/* Then express aspect ratio changes */
904 		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
905 			s.set_aspect_adjust (xs / ys);
/* dcp::Time here is (seconds, editable-rate); 1000 is the rate used. */
908 		s.set_in (dcp::Time(from.seconds(), 1000));
909 		ps.text.push_back (TextCaption (s, piece->content->caption->outline_width()));
910 		ps.add_fonts (piece->content->caption->fonts ());
913 	_active_captions[subtitle.type()].add_from (wp, ps, from);
/* Handle the end of a caption of @param type from @param wp at content
   time @param to: close the active caption and, if it is not being burnt
   in, emit it via the Caption signal with its full DCP period. */
917 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to, CaptionType type)
919 	if (!_active_captions[type].have (wp)) {
923 	shared_ptr<Piece> piece = wp.lock ();
928 	DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops which fall after the content has finished. */
930 	if (dcp_to > piece->content->end()) {
934 	pair<PlayerCaption, DCPTime> from = _active_captions[type].add_to (wp, dcp_to);
/* Only emit as a discrete caption if it won't be burnt into the image. */
936 	bool const always = _always_burn_captions && *_always_burn_captions == type;
937 	if (piece->content->caption->use() && !always && !piece->content->caption->burn()) {
938 		Caption (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek all decoders to @param time.  @param accurate true to seek
   precisely (subsequent emissions before `time' are discarded), false
   for a fast approximate seek.  Flushes the audio processor and merger
   and clears active captions and the last-video cache.
   NOTE(review): this excerpt is missing lines (the pieces rebuild, the
   Shuffler clear, some branch bodies and braces). */
943 Player::seek (DCPTime time, bool accurate)
945 	if (!_have_valid_pieces) {
955 	if (_audio_processor) {
956 		_audio_processor->flush ();
959 	_audio_merger.clear ();
960 	for (int i = 0; i < CAPTION_COUNT; ++i) {
961 		_active_captions[i].clear ();
964 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
965 		if (time < i->content->position()) {
966 			/* Before; seek to the start of the content */
967 			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
969 		} else if (i->content->position() <= time && time < i->content->end()) {
970 			/* During; seek to position */
971 			i->decoder->seek (dcp_to_content_time (i, time), accurate);
974 			/* After; this piece is done */
/* For an accurate seek we know exactly what times come next; for an
   inaccurate one we cannot, so clear the last-emitted markers. */
980 		_last_video_time = time;
981 		_last_video_eyes = EYES_LEFT;
982 		_last_audio_time = time;
984 		_last_video_time = optional<DCPTime>();
985 		_last_video_eyes = optional<Eyes>();
986 		_last_audio_time = optional<DCPTime>();
989 	_black.set_position (time);
990 	_silent.set_position (time);
992 	_last_video.clear ();
/* Queue video frame @param pv for emission at @param time, updating the
   last-video time/eyes bookkeeping.  Frames are held in a small delay
   queue before do_emit_video so that captions have time to arrive. */
996 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
998 	/* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
999 	   player before the video that requires them.
1001 	_delay.push_back (make_pair (pv, time));
/* Advance the expected next-frame time once both eyes have been seen. */
1003 	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1004 		_last_video_time = time + one_video_frame();
1006 	_last_video_eyes = increment_eyes (pv->eyes());
/* Don't emit anything until the delay queue has filled up. */
1008 	if (_delay.size() < 3) {
1012 	pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1014 	do_emit_video (to_do.first, to_do.second);
/* Actually emit video frame @param pv at @param time: expire captions
   that have finished and burn in any captions active at this time.
   NOTE(review): the final Video signal emission is not visible in this
   excerpt. */
1018 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1020 	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1021 		for (int i = 0; i < CAPTION_COUNT; ++i) {
1022 			_active_captions[i].clear_before (time);
1026 	optional<PositionImage> captions = captions_for_frame (time);
1028 		pv->set_caption (captions.get ());
/* Emit audio @param data at @param time; audio must be emitted strictly
   contiguously, so @param time must equal the previous emission's end.
   NOTE(review): the Audio signal emission is not visible in this
   excerpt. */
1035 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1037 	/* Log if the assert below is about to fail */
1038 	if (_last_audio_time && time != *_last_audio_time) {
1039 		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1042 	/* This audio must follow on from the previous */
1043 	DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1045 	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @param period, in blocks of at most half a
   second.  No-op for an empty period.
   NOTE(review): the loop-advance of t is not visible in this excerpt. */
1049 Player::fill_audio (DCPTimePeriod period)
1051 	if (period.from == period.to) {
1055 	DCPOMATIC_ASSERT (period.from < period.to);
1057 	DCPTime t = period.from;
1058 	while (t < period.to) {
1059 		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1060 		Frame const samples = block.frames_round(_film->audio_frame_rate());
1062 			shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1063 			silence->make_silent ();
1064 			emit_audio (silence, t);
/* The DCP duration of a single video frame at the film's frame rate. */
1071 Player::one_video_frame () const
1073 	return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of @param audio (which starts at @param time) that falls
   before @param discard_to.  Returns the remaining audio and its new
   start time; returns a null buffer if everything is discarded. */
1076 pair<shared_ptr<AudioBuffers>, DCPTime>
1077 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1079 	DCPTime const discard_time = discard_to - time;
1080 	Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1081 	Frame remaining_frames = audio->frames() - discard_frames;
1082 	if (remaining_frames <= 0) {
1083 		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1085 	shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1086 	cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1087 	return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode resolution reduction for DCP content; changing
   it requires new pieces.  No-op if unchanged. */
1091 Player::set_dcp_decode_reduction (optional<int> reduction)
1093 	if (reduction == _dcp_decode_reduction) {
1097 	_dcp_decode_reduction = reduction;
1098 	_have_valid_pieces = false;
1099 	Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Convert ContentTime @param t in @param content to DCP time by finding
   the piece for that content.  Asserts if the content is not in the
   playlist.  NOTE(review): the handling when pieces are invalid (line
   1105's branch body) is not visible in this excerpt. */
1103 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1105 	if (_have_valid_pieces) {
1109 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1110 		if (i->content == content) {
1111 			return content_time_to_dcp (i, t);
/* Say blithely that this content is not in the playlist. */
1115 	DCPOMATIC_ASSERT (false);