2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
/* Convenience logging macro; expects a `_film` member to be in scope at the call site */
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
/* Bring the boost names used throughout this file into scope */
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Identifiers passed with the Changed signal so listeners can tell which
   aspect of the player's output has changed. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player producing output for @param film from the content in
   @param playlist.  NOTE(review): some initializer-list and body lines are
   not visible in this excerpt. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
90 , _have_valid_pieces (false)
91 , _ignore_video (false)
92 , _ignore_subtitle (false)
93 , _always_burn_subtitles (false)
95 , _play_referenced (false)
96 , _audio_merger (_film->audio_frame_rate())
/* Watch for changes which invalidate our pieces or which listeners should hear about */
99 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
100 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
101 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
102 set_video_container_size (_film->frame_size ());
/* Set up _audio_processor if the film uses one */
104 film_changed (Film::AUDIO_PROCESSOR);
/* Start from the beginning with an accurate seek */
106 seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate change) from the
   playlist, wiring each decoder's signals to the Player's handlers.
   NOTE(review): several lines (braces, early `continue`s) are not visible in
   this excerpt. */
115 Player::setup_pieces ()
/* Shuffler re-orders 3D L/R frames which may arrive out of sequence */
120 _shuffler = new Shuffler();
121 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
123 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
125 if (!i->paths_valid ()) {
129 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
130 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
133 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set via set_ignore_video / set_ignore_subtitle */
137 if (decoder->video && _ignore_video) {
138 decoder->video->set_ignore (true);
141 if (decoder->subtitle && _ignore_subtitle) {
142 decoder->subtitle->set_ignore (true);
/* DCP content may need to decode assets that are otherwise only referenced */
145 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
147 dcp->set_decode_referenced (_play_referenced);
148 if (_play_referenced) {
149 dcp->set_forced_reduction (_dcp_decode_reduction);
153 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
154 _pieces.push_back (piece);
156 if (decoder->video) {
157 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
158 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
159 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
161 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
165 if (decoder->audio) {
166 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
169 if (decoder->subtitle) {
170 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
171 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
172 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Track the last pushed-audio position per stream so pass() knows how far it may pull */
176 _stream_states.clear ();
177 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
178 if (i->content->audio) {
179 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
180 _stream_states[j] = StreamState (i, i->content->position ());
/* Periods not covered by any video/audio content: filled with black / silence */
185 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
186 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
188 _last_video_time = DCPTime ();
189 _last_video_eyes = EYES_BOTH;
190 _last_audio_time = DCPTime ();
191 _have_valid_pieces = true;
/* React to a change in a content property.  The first group of properties
   affects how decoders/pieces are built, so the pieces are invalidated; the
   second group only changes the produced output, so we just signal Changed.
   NOTE(review): the opening `if (` lines of the two conditions are not
   visible in this excerpt. */
195 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
197 shared_ptr<Content> c = w.lock ();
/* Properties that require setup_pieces() to run again */
203 property == ContentProperty::POSITION ||
204 property == ContentProperty::LENGTH ||
205 property == ContentProperty::TRIM_START ||
206 property == ContentProperty::TRIM_END ||
207 property == ContentProperty::PATH ||
208 property == VideoContentProperty::FRAME_TYPE ||
209 property == DCPContentProperty::NEEDS_ASSETS ||
210 property == DCPContentProperty::NEEDS_KDM ||
211 property == SubtitleContentProperty::COLOUR ||
212 property == SubtitleContentProperty::EFFECT ||
213 property == SubtitleContentProperty::EFFECT_COLOUR ||
214 property == FFmpegContentProperty::SUBTITLE_STREAM ||
215 property == FFmpegContentProperty::FILTERS ||
216 property == VideoContentProperty::COLOUR_CONVERSION
219 _have_valid_pieces = false;
220 Changed (property, frequent);
/* Properties that only alter the output; no piece rebuild needed */
223 property == SubtitleContentProperty::LINE_SPACING ||
224 property == SubtitleContentProperty::OUTLINE_WIDTH ||
225 property == SubtitleContentProperty::Y_SCALE ||
226 property == SubtitleContentProperty::FADE_IN ||
227 property == SubtitleContentProperty::FADE_OUT ||
228 property == ContentProperty::VIDEO_FRAME_RATE ||
229 property == SubtitleContentProperty::USE ||
230 property == SubtitleContentProperty::X_OFFSET ||
231 property == SubtitleContentProperty::Y_OFFSET ||
232 property == SubtitleContentProperty::X_SCALE ||
233 property == SubtitleContentProperty::FONTS ||
234 property == VideoContentProperty::CROP ||
235 property == VideoContentProperty::SCALE ||
236 property == VideoContentProperty::FADE_IN ||
237 property == VideoContentProperty::FADE_OUT
240 Changed (property, frequent);
245 Player::set_video_container_size (dcp::Size s)
247 if (s == _video_container_size) {
251 _video_container_size = s;
253 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
254 _black_image->make_black ();
256 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
260 Player::playlist_changed ()
262 _have_valid_pieces = false;
263 Changed (PlayerProperty::PLAYLIST, false);
/* React to a change in a Film property @param p */
267 Player::film_changed (Film::Property p)
269 /* Here we should notice Film properties that affect our output, and
270 alert listeners that our output now would be different to how it was
271 last time we were run.
274 if (p == Film::CONTAINER) {
275 Changed (PlayerProperty::FILM_CONTAINER, false);
276 } else if (p == Film::VIDEO_FRAME_RATE) {
277 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
278 so we need new pieces here.
280 _have_valid_pieces = false;
281 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
282 } else if (p == Film::AUDIO_PROCESSOR) {
283 if (_film->audio_processor ()) {
/* Clone so this Player has its own processor instance at the film's audio rate */
284 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale and position image subtitles so that they fit _video_container_size,
   returning them as PositionImages ready for merging onto a frame.
   NOTE(review): parts of the scaling call are not visible in this excerpt. */
290 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
292 list<PositionImage> all;
294 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
299 /* We will scale the subtitle up to fit _video_container_size */
300 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
302 /* Then we need a corrective translation, consisting of two parts:
304 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
305 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
307 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
308 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
309 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
311 * Combining these two translations gives these expressions.
318 dcp::YUV_TO_RGB_REC601,
319 i->image->pixel_format (),
324 lrint (_video_container_size.width * i->rectangle.x),
325 lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo of the cached black frame for the given @param eyes,
   used to fill periods with no video content.
   NOTE(review): several constructor arguments are not visible in this excerpt. */
334 shared_ptr<PlayerVideo>
335 Player::black_player_video_frame (Eyes eyes) const
337 return shared_ptr<PlayerVideo> (
339 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
342 _video_container_size,
343 _video_container_size,
/* Any conversion will do since the image is black */
346 PresetColourConversion::all().front().conversion,
347 boost::weak_ptr<Content>(),
348 boost::optional<Frame>()
/* Convert a DCP time @param t to a video frame index within @param piece's content */
354 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
356 DCPTime s = t - piece->content->position ();
357 s = min (piece->content->length_after_trim(), s);
358 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
360 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
361 then convert that ContentTime to frames at the content's rate. However this fails for
362 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
363 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
365 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
367 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
371 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 /* See comment in dcp_to_content_video */
374 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
375 return d + piece->content->position();
379 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
381 DCPTime s = t - piece->content->position ();
382 s = min (piece->content->length_after_trim(), s);
383 /* See notes in dcp_to_content_video */
384 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
388 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
390 /* See comment in dcp_to_content_video */
391 return DCPTime::from_frames (f, _film->audio_frame_rate())
392 - DCPTime (piece->content->trim_start(), piece->frc)
393 + piece->content->position();
397 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
399 DCPTime s = t - piece->content->position ();
400 s = min (piece->content->length_after_trim(), s);
401 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
405 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
407 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all subtitle content in the playlist */
410 list<shared_ptr<Font> >
411 Player::get_subtitle_fonts ()
/* Ensure pieces are up to date before querying them */
413 if (!_have_valid_pieces) {
417 list<shared_ptr<Font> > fonts;
418 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
419 if (p->content->subtitle) {
420 /* XXX: things may go wrong if there are duplicate font IDs
421 with different font files.
423 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
424 copy (f.begin(), f.end(), back_inserter (fonts));
431 /** Set this player never to produce any video data */
433 Player::set_ignore_video ()
435 _ignore_video = true;
439 Player::set_ignore_subtitle ()
441 _ignore_subtitle = true;
444 /** Set whether or not this player should always burn text subtitles into the image,
445 * regardless of the content settings.
446 * @param burn true to always burn subtitles, false to obey content settings.
449 Player::set_always_burn_subtitles (bool burn)
451 _always_burn_subtitles = burn;
454 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): this method's signature is not visible in this excerpt; the
   visible line invalidates the pieces so decoders are rebuilt with the new
   setting. */
459 _have_valid_pieces = false;
463 Player::set_play_referenced ()
465 _play_referenced = true;
466 _have_valid_pieces = false;
/* Gather the reel assets (picture/sound/subtitle) which are referenced from
   existing DCPs rather than re-encoded, with the DCP time period each covers.
   NOTE(review): some lines (offset initialisation, error handling around the
   decoder construction) are not visible in this excerpt. */
469 list<ReferencedReelAsset>
470 Player::get_reel_assets ()
472 list<ReferencedReelAsset> a;
474 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
475 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
480 scoped_ptr<DCPDecoder> decoder;
482 decoder.reset (new DCPDecoder (j, _film->log(), false));
488 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* cfr = content frame rate, ffr = film frame rate; trims are in content frames */
490 DCPOMATIC_ASSERT (j->video_frame_rate ());
491 double const cfr = j->video_frame_rate().get();
492 Frame const trim_start = j->trim_start().frames_round (cfr);
493 Frame const trim_end = j->trim_end().frames_round (cfr);
494 int const ffr = _film->video_frame_rate ();
496 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
497 if (j->reference_video ()) {
498 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
499 DCPOMATIC_ASSERT (ra);
/* Apply the content's trims to the asset's entry point and duration */
500 ra->set_entry_point (ra->entry_point() + trim_start);
501 ra->set_duration (ra->duration() - trim_start - trim_end);
503 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
507 if (j->reference_audio ()) {
508 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
509 DCPOMATIC_ASSERT (ra);
510 ra->set_entry_point (ra->entry_point() + trim_start);
511 ra->set_duration (ra->duration() - trim_start - trim_end);
513 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
517 if (j->reference_subtitle ()) {
518 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
519 DCPOMATIC_ASSERT (ra);
520 ra->set_entry_point (ra->entry_point() + trim_start);
521 ra->set_duration (ra->duration() - trim_start - trim_end);
523 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
527 /* Assume that main picture duration is the length of the reel */
528 offset += k->main_picture()->duration ();
/* NOTE(review): the signature of this method is not visible in this excerpt;
   from its body this is the Player's main pump: it picks whichever source
   (content decoder, black filler or silence filler) is furthest behind,
   makes it emit some data, then flushes ready audio and delayed video. */
538 if (!_have_valid_pieces) {
542 if (_playlist->length() == DCPTime()) {
543 /* Special case of an empty Film; just give one black frame */
544 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
548 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
550 shared_ptr<Piece> earliest_content;
551 optional<DCPTime> earliest_time;
553 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
558 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
559 if (t > i->content->end()) {
563 /* Given two choices at the same time, pick the one with a subtitle so we see it before
566 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
568 earliest_content = i;
582 if (earliest_content) {
/* Black / silence may be even further behind than the earliest content */
586 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
587 earliest_time = _black.position ();
591 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
592 earliest_time = _silent.position ();
598 earliest_content->done = earliest_content->decoder->pass ();
/* Emit a black frame to fill a gap with no video */
601 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
602 _black.set_position (_black.position() + one_video_frame());
606 DCPTimePeriod period (_silent.period_at_position());
607 if (_last_audio_time) {
608 /* Sometimes the thing that happened last finishes fractionally before
609 this silence. Bodge the start time of the silence to fix it. I'm
610 not sure if this is the right solution --- maybe the last thing should
611 be padded `forward' rather than this thing padding `back'.
613 period.from = min(period.from, *_last_audio_time);
/* Emit silence in at most one-video-frame chunks */
615 if (period.duration() > one_video_frame()) {
616 period.to = period.from + one_video_frame();
619 _silent.set_position (period.to);
627 /* Emit any audio that is ready */
629 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
630 of our streams, or the position of the _silent.
632 DCPTime pull_to = _film->length ();
633 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
634 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
635 pull_to = i->second.last_push_end;
638 if (!_silent.done() && _silent.position() < pull_to) {
639 pull_to = _silent.position();
642 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
643 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
644 if (_last_audio_time && i->second < *_last_audio_time) {
645 /* This new data comes before the last we emitted (or the last seek); discard it */
646 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
651 } else if (_last_audio_time && i->second > *_last_audio_time) {
652 /* There's a gap between this data and the last we emitted; fill with silence */
653 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
656 emit_audio (i->first, i->second);
/* Flush any video still held back in the delay queue */
661 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
662 do_emit_video(i->first, i->second);
/* Collect and merge all subtitles (image, and text rendered to images) which
   should be burnt into the frame at @param time; empty if there are none. */
669 optional<PositionImage>
670 Player::subtitles_for_frame (DCPTime time) const
672 list<PositionImage> subtitles;
674 int const vfr = _film->video_frame_rate();
676 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
678 /* Image subtitles */
679 list<PositionImage> c = transform_image_subtitles (i.image);
680 copy (c.begin(), c.end(), back_inserter (subtitles));
682 /* Text subtitles (rendered to an image) */
683 if (!i.text.empty ()) {
684 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
685 copy (s.begin(), s.end(), back_inserter (subtitles));
689 if (subtitles.empty ()) {
690 return optional<PositionImage> ();
693 return merge (subtitles);
/* Handler for video data arriving from a piece's decoder: convert the content
   frame to DCP time, fill any gap since the last emitted video (repeating the
   previous frame or using black), then emit this frame (twice if repeating).
   NOTE(review): several lines (early returns, some constructor arguments) are
   not visible in this excerpt. */
697 Player::video (weak_ptr<Piece> wp, ContentVideo video)
699 shared_ptr<Piece> piece = wp.lock ();
/* With a skip FrameRateChange only every other content frame is used */
704 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
705 if (frc.skip && (video.frame % 2) == 1) {
709 /* Time of the first frame we will emit */
710 DCPTime const time = content_video_to_dcp (piece, video.frame);
712 /* Discard if it's before the content's period or the last accurate seek. We can't discard
713 if it's after the content's period here as in that case we still need to fill any gap between
714 `now' and the end of the content's period.
716 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
720 /* Fill gaps that we discover now that we have some video which needs to be emitted.
721 This is where we need to fill to.
723 DCPTime fill_to = min (time, piece->content->end());
725 if (_last_video_time) {
726 DCPTime fill_from = max (*_last_video_time, piece->content->position());
727 LastVideoMap::const_iterator last = _last_video.find (wp);
728 if (_film->three_d()) {
/* 3D: fill eye-by-eye, starting from the next expected eye */
729 DCPTime j = fill_from;
730 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
731 if (eyes == EYES_BOTH) {
734 while (j < fill_to || eyes != video.eyes) {
735 if (last != _last_video.end()) {
/* Repeat the last frame we have, re-tagged with the eye we need */
736 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
737 copy->set_eyes (eyes);
738 emit_video (copy, j);
740 emit_video (black_player_video_frame(eyes), j);
742 if (eyes == EYES_RIGHT) {
743 j += one_video_frame();
745 eyes = increment_eyes (eyes);
/* 2D: one frame per time step, repeating the last frame or black */
748 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
749 if (last != _last_video.end()) {
750 emit_video (last->second, j);
752 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame, remembering it for gap-filling */
758 _last_video[wp].reset (
761 piece->content->video->crop (),
762 piece->content->video->fade (video.frame),
763 piece->content->video->scale().size (
764 piece->content->video, _video_container_size, _film->frame_size ()
766 _video_container_size,
769 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the FrameRateChange */
776 for (int i = 0; i < frc.repeat; ++i) {
777 if (t < piece->content->end()) {
778 emit_video (_last_video[wp], t);
780 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder: trim it to the
   content's period, apply gain / remap / processor, then push it into the
   audio merger at its DCP time.
   NOTE(review): some early returns and braces are not visible in this excerpt. */
785 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
787 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
789 shared_ptr<Piece> piece = wp.lock ();
794 shared_ptr<AudioContent> content = piece->content->audio;
795 DCPOMATIC_ASSERT (content);
797 /* Compute time in the DCP */
798 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
799 /* And the end of this block in the DCP */
800 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
802 /* Remove anything that comes before the start or after the end of the content */
803 if (time < piece->content->position()) {
804 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
806 /* This audio is entirely discarded */
809 content_audio.audio = cut.first;
811 } else if (time > piece->content->end()) {
814 } else if (end > piece->content->end()) {
/* Block straddles the end of the content: keep only the part inside */
815 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
816 if (remaining_frames == 0) {
819 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
820 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
821 content_audio.audio = cut;
824 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
828 if (content->gain() != 0) {
829 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
830 gain->apply_gain (content->gain ());
831 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
836 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
840 if (_audio_processor) {
841 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push to the merger and note how far this stream has got */
846 _audio_merger.push (content_audio.audio, time);
847 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
848 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of an image subtitle from a piece's decoder: apply
   the content's offset/scale settings and register it as active.
   NOTE(review): the declaration of `ps` is not visible in this excerpt. */
852 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
854 shared_ptr<Piece> piece = wp.lock ();
859 /* Apply content's subtitle offsets */
860 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
861 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
863 /* Apply content's subtitle scale */
864 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
865 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
867 /* Apply a corrective translation to keep the subtitle centred after that scale */
868 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
869 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
872 ps.image.push_back (subtitle.sub);
873 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
875 _active_subtitles.add_from (wp, ps, from);
/* Handler for the start of a text subtitle from a piece's decoder: apply the
   content's position/scale settings to each SubtitleString and register the
   result as active.
   NOTE(review): the declaration of `ps` is not visible in this excerpt. */
879 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
881 shared_ptr<Piece> piece = wp.lock ();
887 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
889 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
890 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
891 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
892 float const xs = piece->content->subtitle->x_scale();
893 float const ys = piece->content->subtitle->y_scale();
894 float size = s.size();
896 /* Adjust size to express the common part of the scaling;
897 e.g. if xs = ys = 0.5 we scale size by 2.
899 if (xs > 1e-5 && ys > 1e-5) {
900 size *= 1 / min (1 / xs, 1 / ys);
904 /* Then express aspect ratio changes */
905 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
906 s.set_aspect_adjust (xs / ys);
/* Subtitle in-time is in dcp::Time with a 1000 time-code rate */
909 s.set_in (dcp::Time(from.seconds(), 1000));
910 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
911 ps.add_fonts (piece->content->subtitle->fonts ());
914 _active_subtitles.add_from (wp, ps, from);
/* Handler for the end of a subtitle: close the active subtitle for @param wp
   and, if it is not being burnt in, emit it via the Subtitle signal. */
918 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
920 if (!_active_subtitles.have (wp)) {
924 shared_ptr<Piece> piece = wp.lock ();
929 DCPTime const dcp_to = content_time_to_dcp (piece, to);
931 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
/* Only pass the subtitle on separately if it is in use and not burnt in */
933 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
934 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @param time; @param accurate requests frame-accurate
   output from the decoders.  Clears all pending state and seeks each piece.
   NOTE(review): some lines (braces, the non-accurate branch) are not visible
   in this excerpt. */
939 Player::seek (DCPTime time, bool accurate)
941 if (!_have_valid_pieces) {
951 if (_audio_processor) {
952 _audio_processor->flush ();
/* Discard any pending audio and active subtitles */
955 _audio_merger.clear ();
956 _active_subtitles.clear ();
958 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
959 if (time < i->content->position()) {
960 /* Before; seek to 0 */
961 i->decoder->seek (ContentTime(), accurate);
963 } else if (i->content->position() <= time && time < i->content->end()) {
964 /* During; seek to position */
965 i->decoder->seek (dcp_to_content_time (i, time), accurate);
968 /* After; this piece is done */
/* For an accurate seek we know exactly where the next output will be */
974 _last_video_time = time;
975 _last_video_eyes = EYES_LEFT;
976 _last_audio_time = time;
978 _last_video_time = optional<DCPTime>();
979 _last_video_eyes = optional<Eyes>();
980 _last_audio_time = optional<DCPTime>();
983 _black.set_position (time);
984 _silent.set_position (time);
/* Forget remembered frames so gaps are not filled with pre-seek video */
986 _last_video.clear ();
/* Queue a video frame for emission, delaying it slightly so that subtitles
   which arrive just after it can still be attached. */
990 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
992 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
993 player before the video that requires them.
995 _delay.push_back (make_pair (pv, time));
/* Only advance the video clock once both eyes (or a 2D frame) have been seen */
997 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
998 _last_video_time = time + one_video_frame();
1000 _last_video_eyes = increment_eyes (pv->eyes());
/* Hold back until the delay queue has a few entries */
1002 if (_delay.size() < 3) {
1006 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1008 do_emit_video (to_do.first, to_do.second);
/* Actually emit a delayed video frame: attach any subtitles due at its time.
   NOTE(review): the final emission (Video signal) line is not visible in this
   excerpt. */
1012 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Expire subtitles only once per frame-time (i.e. not between L and R eyes) */
1014 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1015 _active_subtitles.clear_before (time);
1018 optional<PositionImage> subtitles = subtitles_for_frame (time);
1020 pv->set_subtitle (subtitles.get ());
/* Emit a block of audio at @param time, which must follow on exactly from the
   previously emitted audio.  NOTE(review): the emission (Audio signal) line is
   not visible in this excerpt. */
1027 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1029 /* Log if the assert below is about to fail */
1030 if (_last_audio_time && time != *_last_audio_time) {
1031 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1034 /* This audio must follow on from the previous */
1035 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
/* Advance the audio clock by the length of this block */
1037 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @param period, in blocks of at most half a second.
   NOTE(review): the loop increment and some braces are not visible in this
   excerpt. */
1041 Player::fill_audio (DCPTimePeriod period)
1043 if (period.from == period.to) {
1047 DCPOMATIC_ASSERT (period.from < period.to);
1049 DCPTime t = period.from;
1050 while (t < period.to) {
1051 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1052 Frame const samples = block.frames_round(_film->audio_frame_rate());
1054 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1055 silence->make_silent ();
1056 emit_audio (silence, t);
1063 Player::one_video_frame () const
1065 return DCPTime::from_frames (1, _film->video_frame_rate ());
1068 pair<shared_ptr<AudioBuffers>, DCPTime>
1069 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1071 DCPTime const discard_time = discard_to - time;
1072 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1073 Frame remaining_frames = audio->frames() - discard_frames;
1074 if (remaining_frames <= 0) {
1075 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1077 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1078 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1079 return make_pair(cut, time + discard_time);
1083 Player::set_dcp_decode_reduction (optional<int> reduction)
1085 if (reduction == _dcp_decode_reduction) {
1089 _dcp_decode_reduction = reduction;
1090 _have_valid_pieces = false;
1091 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);