2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
/* Convenience macro: route a formatted message to the film's log as a
   general (non-error) entry.  Relies on _film being in scope at the
   call site. */
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
/* This file predates C++11 adoption here and uses the Boost smart-pointer
   and optional types rather than their std:: equivalents. */
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Identifiers passed through the Changed signal so that observers can tell
   which aspect of the player's output has changed.  Values start at 700,
   presumably to avoid clashing with Content/Film property identifiers —
   TODO confirm against the other property enumerations. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for a film/playlist pair.  Wires up change
   notifications from the film and playlist, sizes the output container
   from the film, primes the audio processor state and seeks to the
   start.  NOTE(review): this excerpt is missing lines (e.g. the _film
   initializer and some member initializers) — fused line numbers jump. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
90 , _have_valid_pieces (false)
91 , _ignore_video (false)
92 , _ignore_subtitle (false)
93 , _always_burn_subtitles (false)
95 , _play_referenced (false)
96 , _audio_merger (_film->audio_frame_rate())
/* Keep the connections so they are disconnected when the Player dies. */
99 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
100 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
101 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
102 set_video_container_size (_film->frame_size ());
/* Create the audio processor (if the film has one) via the same path
   used for later changes. */
104 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to time zero to put all decoders in a known state. */
106 seek (DCPTime (), true);
/* (Re)build the list of Pieces — one per playlist content item, each
   pairing the content with a decoder and a frame-rate change — and the
   bookkeeping that depends on them (_stream_states, _black, _silent,
   last-emitted positions).  Sets _have_valid_pieces when done.
   NOTE(review): several lines are missing from this excerpt (braces,
   early `continue`s after invalid paths, etc.). */
115 Player::setup_pieces ()
/* A fresh Shuffler each time; used to re-order 3D L/R eye frames. */
120 _shuffler = new Shuffler();
121 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
123 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Content whose files are missing cannot be decoded. */
125 if (!i->paths_valid ()) {
129 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
130 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
133 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set via set_ignore_video/set_ignore_subtitle. */
137 if (decoder->video && _ignore_video) {
138 decoder->video->set_ignore (true);
141 if (decoder->subtitle && _ignore_subtitle) {
142 decoder->subtitle->set_ignore (true);
/* DCP content needs extra configuration for referenced-asset playback. */
145 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
147 dcp->set_decode_referenced (_play_referenced);
148 if (_play_referenced) {
149 dcp->set_forced_reduction (_dcp_decode_reduction);
153 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
154 _pieces.push_back (piece);
/* Connect decoder output signals to the Player's handlers, passing a
   weak_ptr<Piece> so a dead piece is detected rather than kept alive. */
156 if (decoder->video) {
157 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
158 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
159 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
161 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
165 if (decoder->audio) {
166 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
169 if (decoder->subtitle) {
170 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
171 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
172 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Track, per audio stream, the piece it belongs to and how far we have
   pushed audio for it (used in pass() to decide how much to pull). */
176 _stream_states.clear ();
177 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
178 if (i->content->audio) {
179 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
180 _stream_states[j] = StreamState (i, i->content->position ());
/* Empty describes the gaps in the timeline not covered by video/audio
   content; those gaps are filled with black frames and silence. */
185 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
186 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
188 _last_video_time = DCPTime ();
189 _last_video_eyes = EYES_BOTH;
190 _last_audio_time = DCPTime ();
191 _have_valid_pieces = true;
/* Handle a change to a property of some playlist content.  Two classes
   of property: those that change the structure of playback (first list)
   force the pieces to be rebuilt; those that only change appearance
   (second list) just re-emit Changed.  NOTE(review): the `if (...)`
   wrappers around the two property lists are partly missing from this
   excerpt. */
195 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
197 shared_ptr<Content> c = w.lock ();
/* Properties that invalidate the current Piece list. */
203 property == ContentProperty::POSITION ||
204 property == ContentProperty::LENGTH ||
205 property == ContentProperty::TRIM_START ||
206 property == ContentProperty::TRIM_END ||
207 property == ContentProperty::PATH ||
208 property == VideoContentProperty::FRAME_TYPE ||
209 property == VideoContentProperty::COLOUR_CONVERSION ||
210 property == AudioContentProperty::STREAMS ||
211 property == DCPContentProperty::NEEDS_ASSETS ||
212 property == DCPContentProperty::NEEDS_KDM ||
213 property == SubtitleContentProperty::COLOUR ||
214 property == SubtitleContentProperty::EFFECT ||
215 property == SubtitleContentProperty::EFFECT_COLOUR ||
216 property == FFmpegContentProperty::SUBTITLE_STREAM ||
217 property == FFmpegContentProperty::FILTERS
220 _have_valid_pieces = false;
221 Changed (property, frequent);
/* Properties that only change how output looks; no rebuild needed. */
224 property == SubtitleContentProperty::LINE_SPACING ||
225 property == SubtitleContentProperty::OUTLINE_WIDTH ||
226 property == SubtitleContentProperty::Y_SCALE ||
227 property == SubtitleContentProperty::FADE_IN ||
228 property == SubtitleContentProperty::FADE_OUT ||
229 property == ContentProperty::VIDEO_FRAME_RATE ||
230 property == SubtitleContentProperty::USE ||
231 property == SubtitleContentProperty::X_OFFSET ||
232 property == SubtitleContentProperty::Y_OFFSET ||
233 property == SubtitleContentProperty::X_SCALE ||
234 property == SubtitleContentProperty::FONTS ||
235 property == VideoContentProperty::CROP ||
236 property == VideoContentProperty::SCALE ||
237 property == VideoContentProperty::FADE_IN ||
238 property == VideoContentProperty::FADE_OUT
241 Changed (property, frequent);
/* Set the size of the video "container" that frames are fitted into.
   No-op (no signal) if the size is unchanged; otherwise rebuilds the
   cached black frame at the new size and notifies observers. */
246 Player::set_video_container_size (dcp::Size s)
248 if (s == _video_container_size) {
252 _video_container_size = s;
/* Pre-render a black frame at the container size for gap filling. */
254 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
255 _black_image->make_black ();
257 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* The playlist's content list changed: pieces must be rebuilt and
   observers told. */
261 Player::playlist_changed ()
263 _have_valid_pieces = false;
264 Changed (PlayerProperty::PLAYLIST, false);
/* React to a Film property change, re-emitting as a PlayerProperty
   change where the output would differ, and rebuilding state where
   needed. */
268 Player::film_changed (Film::Property p)
270 /* Here we should notice Film properties that affect our output, and
271 alert listeners that our output now would be different to how it was
272 last time we were run.
275 if (p == Film::CONTAINER) {
276 Changed (PlayerProperty::FILM_CONTAINER, false);
277 } else if (p == Film::VIDEO_FRAME_RATE) {
278 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
279 so we need new pieces here.
281 _have_valid_pieces = false;
282 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
283 } else if (p == Film::AUDIO_PROCESSOR) {
284 if (_film->audio_processor ()) {
/* Clone the processor at the film's sample rate; the no-processor
   branch is not visible in this excerpt. */
285 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
287 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: any merged-but-unemitted audio is stale. */
288 _audio_merger.clear ();
/* Scale and position image subtitles so they can be overlaid on a frame
   of _video_container_size.  Returns one PositionImage per input
   subtitle.  NOTE(review): the scaling call itself and the loop body's
   braces are missing from this excerpt — only fragments of the
   scale/position arguments are visible. */
293 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
295 list<PositionImage> all;
297 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
302 /* We will scale the subtitle up to fit _video_container_size */
/* Subtitle rectangles are fractions of the container, hence the
   multiply by container width/height. */
303 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
309 dcp::YUV_TO_RGB_REC601,
310 i->image->pixel_format (),
315 lrint (_video_container_size.width * i->rectangle.x),
316 lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo wrapping the cached black image, for the given
   eyes (2D or one eye of 3D).  Used to fill timeline gaps.
   NOTE(review): some constructor arguments (crop, fade, part, frame
   rate) are missing from this excerpt. */
325 shared_ptr<PlayerVideo>
326 Player::black_player_video_frame (Eyes eyes) const
328 return shared_ptr<PlayerVideo> (
330 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
333 _video_container_size,
334 _video_container_size,
337 PresetColourConversion::all().front().conversion,
/* No originating content/frame: this frame was synthesised. */
338 boost::weak_ptr<Content>(),
339 boost::optional<Frame>()
/* Convert a DCP timeline time to a frame index within the given piece's
   content, clamped to the content's trimmed extent. */
345 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
347 DCPTime s = t - piece->content->position ();
348 s = min (piece->content->length_after_trim(), s);
349 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
351 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
352 then convert that ContentTime to frames at the content's rate. However this fails for
353 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
354 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
356 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
358 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: map a content frame index to a DCP
   timeline time, accounting for trim and the piece's position. */
362 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
364 /* See comment in dcp_to_content_video */
365 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
366 return d + piece->content->position();
/* Convert a DCP timeline time to a sample index in the piece's audio,
   after resampling to the film's audio rate; clamped to the trimmed
   extent. */
370 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
372 DCPTime s = t - piece->content->position ();
373 s = min (piece->content->length_after_trim(), s);
374 /* See notes in dcp_to_content_video */
375 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: map a resampled-audio sample index
   back to a DCP timeline time. */
379 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
381 /* See comment in dcp_to_content_video */
382 return DCPTime::from_frames (f, _film->audio_frame_rate())
383 - DCPTime (piece->content->trim_start(), piece->frc)
384 + piece->content->position();
/* Convert a DCP timeline time to a ContentTime within the piece,
   clamped to the trimmed extent. */
388 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
390 DCPTime s = t - piece->content->position ();
391 s = min (piece->content->length_after_trim(), s);
392 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within the piece to a DCP timeline time,
   clamped to be non-negative. */
396 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
398 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts of every subtitle-bearing piece of content, after
   making sure the pieces are up to date.  NOTE(review): the
   setup_pieces() call implied by the !_have_valid_pieces check is
   missing from this excerpt. */
401 list<shared_ptr<Font> >
402 Player::get_subtitle_fonts ()
404 if (!_have_valid_pieces) {
408 list<shared_ptr<Font> > fonts;
409 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
410 if (p->content->subtitle) {
411 /* XXX: things may go wrong if there are duplicate font IDs
412 with different font files.
414 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
415 copy (f.begin(), f.end(), back_inserter (fonts));
422 /** Set this player never to produce any video data */
424 Player::set_ignore_video ()
426 _ignore_video = true;
/** Set this player never to produce any subtitle data */
430 Player::set_ignore_subtitle ()
432 _ignore_subtitle = true;
435 /** Set whether or not this player should always burn text subtitles into the image,
436 * regardless of the content settings.
437 * @param burn true to always burn subtitles, false to obey content settings.
440 Player::set_always_burn_subtitles (bool burn)
442 _always_burn_subtitles = burn;
445 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature (presumably Player::set_fast())
   and the flag assignment are missing from this excerpt; only the
   pieces-invalidation line is visible. */
450 _have_valid_pieces = false;
/* Make the player decode and play assets that DCP content merely
   references; requires pieces to be rebuilt. */
454 Player::set_play_referenced ()
456 _play_referenced = true;
457 _have_valid_pieces = false;
/* Collect the reel assets (picture/sound/subtitle) of any DCP content
   whose assets are being referenced rather than re-encoded, with their
   entry points and durations adjusted for the content's trims, and each
   paired with the DCP timeline period it occupies.  NOTE(review):
   several lines are missing from this excerpt (e.g. the declaration of
   `offset`, push_back calls, try/catch around decoder creation). */
460 list<ReferencedReelAsset>
461 Player::get_reel_assets ()
463 list<ReferencedReelAsset> a;
465 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference reel assets. */
466 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
471 scoped_ptr<DCPDecoder> decoder;
473 decoder.reset (new DCPDecoder (j, _film->log(), false));
479 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
481 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* cfr: content frame rate; ffr: film frame rate — trims are counted
   in content frames, periods in film frames. */
482 double const cfr = j->video_frame_rate().get();
483 Frame const trim_start = j->trim_start().frames_round (cfr);
484 Frame const trim_end = j->trim_end().frames_round (cfr);
485 int const ffr = _film->video_frame_rate ();
487 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
488 if (j->reference_video ()) {
489 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
490 DCPOMATIC_ASSERT (ra);
/* Push the asset's entry point/duration in by the trims so only the
   used part of the reel is referenced. */
491 ra->set_entry_point (ra->entry_point() + trim_start);
492 ra->set_duration (ra->duration() - trim_start - trim_end);
494 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
498 if (j->reference_audio ()) {
499 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
500 DCPOMATIC_ASSERT (ra);
501 ra->set_entry_point (ra->entry_point() + trim_start);
502 ra->set_duration (ra->duration() - trim_start - trim_end);
504 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
508 if (j->reference_subtitle ()) {
509 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
510 DCPOMATIC_ASSERT (ra);
511 ra->set_entry_point (ra->entry_point() + trim_start);
512 ra->set_duration (ra->duration() - trim_start - trim_end);
514 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
518 /* Assume that main picture duration is the length of the reel */
519 offset += k->main_picture()->duration ();
/* NOTE(review): this appears to be the body of Player::pass() — the
   signature and a number of interior lines (braces, returns, the
   `done` handling) are missing from this excerpt.  It advances
   whichever of (content decoders, black filler, silence filler) is
   furthest behind, then flushes ready audio and any delayed video. */
529 if (!_have_valid_pieces) {
/* Degenerate case: an empty film still produces one black frame. */
533 if (_playlist->length() == DCPTime()) {
534 /* Special case of an empty Film; just give one black frame */
535 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
539 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
541 shared_ptr<Piece> earliest_content;
542 optional<DCPTime> earliest_time;
544 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
549 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
/* Past the end of this piece: nothing more to take from it. */
550 if (t > i->content->end()) {
554 /* Given two choices at the same time, pick the one with a subtitle so we see it before
557 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
559 earliest_content = i;
/* Decide what to emit next: content, black filler or silence filler,
   whichever has the earliest position. */
573 if (earliest_content) {
577 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
578 earliest_time = _black.position ();
582 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
583 earliest_time = _silent.position ();
589 earliest_content->done = earliest_content->decoder->pass ();
592 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
593 _black.set_position (_black.position() + one_video_frame());
597 DCPTimePeriod period (_silent.period_at_position());
598 if (_last_audio_time) {
599 /* Sometimes the thing that happened last finishes fractionally before
600 this silence. Bodge the start time of the silence to fix it. I'm
601 not sure if this is the right solution --- maybe the last thing should
602 be padded `forward' rather than this thing padding `back'.
604 period.from = min(period.from, *_last_audio_time);
/* Emit silence at most one video frame at a time. */
606 if (period.duration() > one_video_frame()) {
607 period.to = period.from + one_video_frame();
610 _silent.set_position (period.to);
618 /* Emit any audio that is ready */
620 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
621 of our streams, or the position of the _silent.
623 DCPTime pull_to = _film->length ();
624 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
625 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
626 pull_to = i->second.last_push_end;
629 if (!_silent.done() && _silent.position() < pull_to) {
630 pull_to = _silent.position();
633 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
634 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
635 if (_last_audio_time && i->second < *_last_audio_time) {
636 /* This new data comes before the last we emitted (or the last seek); discard it */
637 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
642 } else if (_last_audio_time && i->second > *_last_audio_time) {
643 /* There's a gap between this data and the last we emitted; fill with silence */
644 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
647 emit_audio (i->first, i->second);
/* Flush the video-delay queue (see emit_video). */
652 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
653 do_emit_video(i->first, i->second);
/* Produce the merged image of all subtitles (image subtitles plus
   rendered text subtitles) that should be burnt into the frame at the
   given time, or nothing if there are none. */
660 optional<PositionImage>
661 Player::subtitles_for_frame (DCPTime time) const
663 list<PositionImage> subtitles;
665 int const vfr = _film->video_frame_rate();
/* Consider subtitles active during the one-frame period at `time'. */
667 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
669 /* Image subtitles */
670 list<PositionImage> c = transform_image_subtitles (i.image);
671 copy (c.begin(), c.end(), back_inserter (subtitles));
673 /* Text subtitles (rendered to an image) */
674 if (!i.text.empty ()) {
675 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
676 copy (s.begin(), s.end(), back_inserter (subtitles));
680 if (subtitles.empty ()) {
681 return optional<PositionImage> ();
684 return merge (subtitles);
/* Handler for video data arriving from a piece's decoder.  Converts the
   content frame to DCP time, discards frames that are too early, fills
   any gap since the last emitted frame (repeating the last frame or
   using black, per-eye for 3D), then emits this frame (repeated if the
   frame-rate change requires it).  NOTE(review): several lines are
   missing from this excerpt, including the PlayerVideo construction
   arguments and the declaration of `t`. */
688 Player::video (weak_ptr<Piece> wp, ContentVideo video)
690 shared_ptr<Piece> piece = wp.lock ();
/* Drop every other content frame when the content rate is double the
   DCP rate. */
695 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
696 if (frc.skip && (video.frame % 2) == 1) {
700 /* Time of the first frame we will emit */
701 DCPTime const time = content_video_to_dcp (piece, video.frame);
703 /* Discard if it's before the content's period or the last accurate seek. We can't discard
704 if it's after the content's period here as in that case we still need to fill any gap between
705 `now' and the end of the content's period.
707 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
711 /* Fill gaps that we discover now that we have some video which needs to be emitted.
712 This is where we need to fill to.
714 DCPTime fill_to = min (time, piece->content->end());
716 if (_last_video_time) {
717 DCPTime fill_from = max (*_last_video_time, piece->content->position());
718 LastVideoMap::const_iterator last = _last_video.find (wp);
719 if (_film->three_d()) {
/* 3D: fill eye-by-eye so L/R alternation is preserved across the gap. */
720 DCPTime j = fill_from;
721 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
722 if (eyes == EYES_BOTH) {
725 while (j < fill_to || eyes != video.eyes) {
726 if (last != _last_video.end()) {
/* Repeat the last-seen frame, re-tagged for the eye being filled. */
727 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
728 copy->set_eyes (eyes);
729 emit_video (copy, j);
731 emit_video (black_player_video_frame(eyes), j);
/* Time only advances after the right eye of a pair. */
733 if (eyes == EYES_RIGHT) {
734 j += one_video_frame();
736 eyes = increment_eyes (eyes);
739 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
740 if (last != _last_video.end()) {
741 emit_video (last->second, j);
743 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Remember this frame per-piece so future gaps can repeat it. */
749 _last_video[wp].reset (
752 piece->content->video->crop (),
753 piece->content->video->fade (video.frame),
754 piece->content->video->scale().size (
755 piece->content->video, _video_container_size, _film->frame_size ()
757 _video_container_size,
760 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame-rate change, but
   never beyond the content's end. */
767 for (int i = 0; i < frc.repeat; ++i) {
768 if (t < piece->content->end()) {
769 emit_video (_last_video[wp], t);
771 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder.  Maps the
   block to DCP time, trims anything outside the content's period,
   applies gain / channel remap / the film's audio processor, then
   pushes the result into the merger and records how far this stream
   has been pushed.  NOTE(review): some lines (returns after full
   discard, the remap condition) are missing from this excerpt. */
776 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
778 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
780 shared_ptr<Piece> piece = wp.lock ();
785 shared_ptr<AudioContent> content = piece->content->audio;
786 DCPOMATIC_ASSERT (content);
788 /* Compute time in the DCP */
789 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
790 /* And the end of this block in the DCP */
791 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
793 /* Remove anything that comes before the start or after the end of the content */
794 if (time < piece->content->position()) {
795 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
797 /* This audio is entirely discarded */
800 content_audio.audio = cut.first;
802 } else if (time > piece->content->end()) {
/* Starts after the content ends: nothing to do (return is not visible
   in this excerpt). */
805 } else if (end > piece->content->end()) {
/* Straddles the end: keep only the part inside the content period. */
806 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
807 if (remaining_frames == 0) {
810 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
811 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
812 content_audio.audio = cut;
815 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain */
819 if (content->gain() != 0) {
820 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
821 gain->apply_gain (content->gain ());
822 content_audio.audio = gain;
/* Remap from the stream's channel layout to the film's channel count. */
827 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
/* Process (e.g. upmix) if the film has an audio processor. */
831 if (_audio_processor) {
832 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger; last_push_end lets pass() know how much audio
   is definitely complete for this stream. */
837 _audio_merger.push (content_audio.audio, time);
838 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
839 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of an image subtitle from a piece's decoder:
   apply the content's offset/scale settings and register the subtitle
   as active from the corresponding DCP time.  NOTE(review): the
   declaration of `ps` (PlayerSubtitles) is missing from this excerpt. */
843 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
845 shared_ptr<Piece> piece = wp.lock ();
850 /* Apply content's subtitle offsets */
851 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
852 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
854 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
855 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((piece->content->subtitle->x_scale() - 1) / 2);
856 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((piece->content->subtitle->y_scale() - 1) / 2);
858 /* Apply content's subtitle scale */
859 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
860 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
863 ps.image.push_back (subtitle.sub);
864 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
866 _active_subtitles.add_from (wp, ps, from);
/* Handler for the start of a text subtitle: apply the content's
   position offsets and scaling (size plus aspect adjustment) to each
   SubtitleString and register it as active from the corresponding DCP
   time.  NOTE(review): the declaration of `ps` is missing from this
   excerpt. */
870 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
872 shared_ptr<Piece> piece = wp.lock ();
878 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content ends. */
880 if (from > piece->content->end()) {
884 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
885 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
886 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
887 float const xs = piece->content->subtitle->x_scale();
888 float const ys = piece->content->subtitle->y_scale();
889 float size = s.size();
891 /* Adjust size to express the common part of the scaling;
892 e.g. if xs = ys = 0.5 we scale size by 2.
/* Guard against division by ~zero scales. */
894 if (xs > 1e-5 && ys > 1e-5) {
895 size *= 1 / min (1 / xs, 1 / ys);
899 /* Then express aspect ratio changes */
900 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
901 s.set_aspect_adjust (xs / ys);
904 s.set_in (dcp::Time(from.seconds(), 1000));
905 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
906 ps.add_fonts (piece->content->subtitle->fonts ());
909 _active_subtitles.add_from (wp, ps, from);
/* Handler for the end of a subtitle: close the active subtitle at the
   corresponding DCP time and, if the subtitle is to be emitted rather
   than burnt in, fire the Subtitle signal with its full period. */
913 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
915 if (!_active_subtitles.have (wp)) {
919 shared_ptr<Piece> piece = wp.lock ();
924 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops that fall after the content's end. */
926 if (dcp_to > piece->content->end()) {
930 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
/* Only emit as a separate subtitle stream when it is in use and not
   being burnt into the picture. */
932 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
933 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek the whole player to `time'.  Flushes/clears audio and subtitle
   state, seeks every piece's decoder appropriately, and resets the
   last-emitted positions (exactly for an accurate seek, to "unknown"
   otherwise).  NOTE(review): a few lines are missing from this excerpt,
   including the setup_pieces() call implied by !_have_valid_pieces and
   the accurate/inaccurate branch structure around the position resets. */
938 Player::seek (DCPTime time, bool accurate)
940 if (!_have_valid_pieces) {
950 if (_audio_processor) {
951 _audio_processor->flush ();
954 _audio_merger.clear ();
955 _active_subtitles.clear ();
957 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
958 if (time < i->content->position()) {
959 /* Before; seek to the start of the content */
960 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
962 } else if (i->content->position() <= time && time < i->content->end()) {
963 /* During; seek to position */
964 i->decoder->seek (dcp_to_content_time (i, time), accurate);
967 /* After; this piece is done */
/* Accurate seek: we know exactly where the next video/audio will be. */
973 _last_video_time = time;
974 _last_video_eyes = EYES_LEFT;
975 _last_audio_time = time;
/* Inaccurate seek: positions are unknown until data arrives. */
977 _last_video_time = optional<DCPTime>();
978 _last_video_eyes = optional<Eyes>();
979 _last_audio_time = optional<DCPTime>();
982 _black.set_position (time);
983 _silent.set_position (time);
985 _last_video.clear ();
/* Queue a video frame for emission, updating the last-emitted time/eyes
   bookkeeping.  Frames are held in a short delay queue so that
   subtitles decoded slightly later can still be attached before the
   frame is actually emitted (see do_emit_video).  NOTE(review): the
   pop of the delay queue after taking its front is not visible in this
   excerpt. */
989 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
991 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
992 player before the video that requires them.
994 _delay.push_back (make_pair (pv, time));
/* Time only advances once both eyes of a frame have been emitted. */
996 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
997 _last_video_time = time + one_video_frame();
999 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep the queue filled to (presumably) two frames before emitting. */
1001 if (_delay.size() < 3) {
1005 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1007 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: drop subtitles that have finished before
   this time, attach any subtitles due to be burnt into this frame, and
   (in lines not visible in this excerpt) fire the Video signal. */
1011 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1013 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1014 _active_subtitles.clear_before (time);
1017 optional<PositionImage> subtitles = subtitles_for_frame (time);
1019 pv->set_subtitle (subtitles.get ());
/* Emit a block of audio, asserting that it follows on exactly from the
   previous block, and advance _last_audio_time past it.  The Audio
   signal emission itself is not visible in this excerpt. */
1026 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1028 /* Log if the assert below is about to fail */
1029 if (_last_audio_time && time != *_last_audio_time) {
1030 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1033 /* This audio must follow on from the previous */
1034 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1036 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering `period', in blocks of at most half a second to
   bound the buffer size.  NOTE(review): the advance of `t` at the end
   of the loop is not visible in this excerpt. */
1040 Player::fill_audio (DCPTimePeriod period)
1042 if (period.from == period.to) {
1046 DCPOMATIC_ASSERT (period.from < period.to);
1048 DCPTime t = period.from;
1049 while (t < period.to) {
1050 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1051 Frame const samples = block.frames_round(_film->audio_frame_rate());
1053 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1054 silence->make_silent ();
1055 emit_audio (silence, t);
/* The duration of a single video frame at the film's frame rate. */
1062 Player::one_video_frame () const
1064 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of `audio' (which starts at `time') that falls before
   `discard_to'.  Returns the remaining buffer and its new start time,
   or a null buffer if everything was discarded. */
1067 pair<shared_ptr<AudioBuffers>, DCPTime>
1068 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1070 DCPTime const discard_time = discard_to - time;
1071 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1072 Frame remaining_frames = audio->frames() - discard_frames;
1073 if (remaining_frames <= 0) {
1074 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1076 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1077 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1078 return make_pair(cut, time + discard_time);
/* Set (or clear, with an empty optional) the resolution reduction used
   when decoding DCP content; rebuilds pieces and notifies observers if
   it actually changed. */
1082 Player::set_dcp_decode_reduction (optional<int> reduction)
1084 if (reduction == _dcp_decode_reduction) {
1088 _dcp_decode_reduction = reduction;
1089 _have_valid_pieces = false;
1090 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);