/*
    Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
/* Convenience macro: write a composed message to the film's log as a general entry. */
#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Property identifiers passed with the Changed signal so observers can tell
   which aspect of the player's output has altered. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for a film/playlist pair.  Wires up change signals from
   the film and playlist, sets the video container size from the film, primes
   the audio processor and seeks accurately to time zero.
   NOTE(review): the extraction has dropped some lines here (e.g. the leading
   `_film (film)` initialiser and braces); comments describe only visible code. */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
, _playlist (playlist)
, _have_valid_pieces (false)
, _ignore_video (false)
, _ignore_subtitle (false)
, _always_burn_subtitles (false)
, _play_referenced (false)
, _audio_merger (_film->audio_frame_rate())
/* Re-run our setup when the film or playlist changes */
_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
set_video_container_size (_film->frame_size ());
/* Force initial creation of the audio processor, if the film needs one */
film_changed (Film::AUDIO_PROCESSOR);
/* Start from the beginning with an accurate seek */
seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate-change) from the
   playlist, connect decoder output signals to our handlers, and reset playback
   state (stream states, black/silence fillers, last-emitted times).
   NOTE(review): several lines (braces, `continue`s, the old-shuffler delete)
   were lost in extraction; comments describe only the visible code. */
Player::setup_pieces ()
_shuffler = new Shuffler();
_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing */
if (!i->paths_valid ()) {
shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
/* Not something that we can decode; e.g. Atmos content */
if (decoder->video && _ignore_video) {
decoder->video->set_ignore (true);
if (decoder->subtitle && _ignore_subtitle) {
decoder->subtitle->set_ignore (true);
/* DCP content gets extra configuration for referenced (VF) workflows */
shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
dcp->set_decode_referenced (_play_referenced);
if (_play_referenced) {
dcp->set_forced_reduction (_dcp_decode_reduction);
shared_ptr<Piece> piece (new Piece (i, decoder, frc));
_pieces.push_back (piece);
if (decoder->video) {
if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
if (decoder->audio) {
decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
if (decoder->subtitle) {
decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Record, per audio stream, which piece it belongs to and where its pushed audio ends */
_stream_states.clear ();
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
if (i->content->audio) {
BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
_stream_states[j] = StreamState (i, i->content->position ());
/* Trackers for the gaps in video/audio that we must fill with black/silence */
_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
_last_video_time = DCPTime ();
_last_video_eyes = EYES_BOTH;
_last_audio_time = DCPTime ();
_have_valid_pieces = true;
/* Called when a property of some piece of content changes.  Properties fall in
   two groups: those that require the pieces to be rebuilt (position, trims,
   paths, subtitle appearance that is baked into decode, etc.) and those that
   only require listeners to be told (offsets, scales, fades, crop...).
   NOTE(review): the surrounding `if (...)` wrappers were lost in extraction;
   the two condition lists below are the bodies of those two tests. */
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
shared_ptr<Content> c = w.lock ();
/* First group: changes that invalidate our Pieces */
property == ContentProperty::POSITION ||
property == ContentProperty::LENGTH ||
property == ContentProperty::TRIM_START ||
property == ContentProperty::TRIM_END ||
property == ContentProperty::PATH ||
property == VideoContentProperty::FRAME_TYPE ||
property == DCPContentProperty::NEEDS_ASSETS ||
property == DCPContentProperty::NEEDS_KDM ||
property == SubtitleContentProperty::COLOUR ||
property == SubtitleContentProperty::EFFECT ||
property == SubtitleContentProperty::EFFECT_COLOUR ||
property == FFmpegContentProperty::SUBTITLE_STREAM ||
property == FFmpegContentProperty::FILTERS ||
property == VideoContentProperty::COLOUR_CONVERSION
_have_valid_pieces = false;
Changed (property, frequent);
/* Second group: changes that only alter the output, so just notify */
property == SubtitleContentProperty::LINE_SPACING ||
property == SubtitleContentProperty::OUTLINE_WIDTH ||
property == SubtitleContentProperty::Y_SCALE ||
property == SubtitleContentProperty::FADE_IN ||
property == SubtitleContentProperty::FADE_OUT ||
property == ContentProperty::VIDEO_FRAME_RATE ||
property == SubtitleContentProperty::USE ||
property == SubtitleContentProperty::X_OFFSET ||
property == SubtitleContentProperty::Y_OFFSET ||
property == SubtitleContentProperty::X_SCALE ||
property == SubtitleContentProperty::FONTS ||
property == VideoContentProperty::CROP ||
property == VideoContentProperty::SCALE ||
property == VideoContentProperty::FADE_IN ||
property == VideoContentProperty::FADE_OUT
Changed (property, frequent);
245 Player::set_video_container_size (dcp::Size s)
247 if (s == _video_container_size) {
251 _video_container_size = s;
253 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
254 _black_image->make_black ();
256 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
260 Player::playlist_changed ()
262 _have_valid_pieces = false;
263 Changed (PlayerProperty::PLAYLIST, false);
267 Player::film_changed (Film::Property p)
269 /* Here we should notice Film properties that affect our output, and
270 alert listeners that our output now would be different to how it was
271 last time we were run.
274 if (p == Film::CONTAINER) {
275 Changed (PlayerProperty::FILM_CONTAINER, false);
276 } else if (p == Film::VIDEO_FRAME_RATE) {
277 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
278 so we need new pieces here.
280 _have_valid_pieces = false;
281 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
282 } else if (p == Film::AUDIO_PROCESSOR) {
283 if (_film->audio_processor ()) {
284 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
286 } else if (p == Film::AUDIO_CHANNELS) {
287 _audio_merger.clear ();
/* Scale and position each image subtitle so that it can be burnt into a frame
   of size _video_container_size, returning the resulting PositionImages.
   NOTE(review): the extraction has dropped several lines here (the scale call
   whose arguments start at the YUV_TO_RGB line, and the push_back of the
   result); comments describe only the visible code. */
Player::transform_image_subtitles (list<ImageSubtitle> subs) const
list<PositionImage> all;
for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
/* We will scale the subtitle up to fit _video_container_size */
dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
/* Then we need a corrective translation, consisting of two parts:
* 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
* rect.x * _video_container_size.width and rect.y * _video_container_size.height.
* 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
* (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
* (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
* Combining these two translations gives these expressions.
*/
/* Arguments to the (elided) image scale/position call follow */
dcp::YUV_TO_RGB_REC601,
i->image->pixel_format (),
/* Position of the subtitle within the container, in pixels */
lrint (_video_container_size.width * i->rectangle.x),
lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo wrapping the cached black frame, filling the whole
   container, with a preset colour conversion and no originating content.
   NOTE(review): some constructor arguments (crop, eyes, part) were lost in
   extraction; the visible ones are annotated below. */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
return shared_ptr<PlayerVideo> (
/* The pre-made black image, wrapped as a proxy */
shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
/* Inner and outer sizes are both the container: no scaling or padding */
_video_container_size,
_video_container_size,
PresetColourConversion::all().front().conversion,
/* No originating content or frame index for synthetic black */
boost::weak_ptr<Content>(),
boost::optional<Frame>()
356 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
358 DCPTime s = t - piece->content->position ();
359 s = min (piece->content->length_after_trim(), s);
360 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
362 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
363 then convert that ContentTime to frames at the content's rate. However this fails for
364 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
365 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
367 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
369 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
373 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
375 /* See comment in dcp_to_content_video */
376 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
377 return d + piece->content->position();
381 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
383 DCPTime s = t - piece->content->position ();
384 s = min (piece->content->length_after_trim(), s);
385 /* See notes in dcp_to_content_video */
386 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
390 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
392 /* See comment in dcp_to_content_video */
393 return DCPTime::from_frames (f, _film->audio_frame_rate())
394 - DCPTime (piece->content->trim_start(), piece->frc)
395 + piece->content->position();
399 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
401 DCPTime s = t - piece->content->position ();
402 s = min (piece->content->length_after_trim(), s);
403 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
407 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
409 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
412 list<shared_ptr<Font> >
413 Player::get_subtitle_fonts ()
415 if (!_have_valid_pieces) {
419 list<shared_ptr<Font> > fonts;
420 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
421 if (p->content->subtitle) {
422 /* XXX: things may go wrong if there are duplicate font IDs
423 with different font files.
425 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
426 copy (f.begin(), f.end(), back_inserter (fonts));
433 /** Set this player never to produce any video data */
435 Player::set_ignore_video ()
437 _ignore_video = true;
441 Player::set_ignore_subtitle ()
443 _ignore_subtitle = true;
446 /** Set whether or not this player should always burn text subtitles into the image,
447 * regardless of the content settings.
448 * @param burn true to always burn subtitles, false to obey content settings.
451 Player::set_always_burn_subtitles (bool burn)
453 _always_burn_subtitles = burn;
456 /** Sets up the player to be faster, possibly at the expense of quality */
461 _have_valid_pieces = false;
465 Player::set_play_referenced ()
467 _play_referenced = true;
468 _have_valid_pieces = false;
/* Build the list of reel assets (picture/sound/subtitle) that this film
   references from other DCPs, with their periods in this film's timeline.
   NOTE(review): the extraction has dropped lines here (the try/catch around
   DCPDecoder creation, the `offset` accumulator's declaration, the
   a.push_back wrappers and the final `return a;`); comments describe only
   the visible code. */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
list<ReferencedReelAsset> a;
BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
scoped_ptr<DCPDecoder> decoder;
decoder.reset (new DCPDecoder (j, _film->log(), false));
BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims, expressed in frames at the content's video rate */
DCPOMATIC_ASSERT (j->video_frame_rate ());
double const cfr = j->video_frame_rate().get();
Frame const trim_start = j->trim_start().frames_round (cfr);
Frame const trim_end = j->trim_end().frames_round (cfr);
int const ffr = _film->video_frame_rate ();
/* Start of this reel within the film, allowing for earlier reels (offset) */
DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
if (j->reference_video ()) {
shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
DCPOMATIC_ASSERT (ra);
/* Apply the content's trims to the asset's entry point / duration */
ra->set_entry_point (ra->entry_point() + trim_start);
ra->set_duration (ra->duration() - trim_start - trim_end);
ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
if (j->reference_audio ()) {
shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
DCPOMATIC_ASSERT (ra);
ra->set_entry_point (ra->entry_point() + trim_start);
ra->set_duration (ra->duration() - trim_start - trim_end);
ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
if (j->reference_subtitle ()) {
shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
DCPOMATIC_ASSERT (ra);
ra->set_entry_point (ra->entry_point() + trim_start);
ra->set_duration (ra->duration() - trim_start - trim_end);
ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
/* Assume that main picture duration is the length of the reel */
offset += k->main_picture()->duration ();
/* One "pass" of the player: find the thing (decoder, black filler or silence
   filler) that is farthest behind and make it emit some data, then flush any
   audio that is known to be complete, and any delayed video.
   NOTE(review): the function signature (presumably `bool Player::pass ()`)
   and a number of interior lines were lost in extraction; comments describe
   only the visible code. */
if (!_have_valid_pieces) {
if (_playlist->length() == DCPTime()) {
/* Special case of an empty Film; just give one black frame */
emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
/* Find the decoder or empty which is farthest behind where we are and make it emit some data */
shared_ptr<Piece> earliest_content;
optional<DCPTime> earliest_time;
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/** decoder position may need to be trimmed like the
content (but the decoder does not know it yet);
check for that and fake it here if necessary.
*/
DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
if (t > i->content->end()) {
/* Given two choices at the same time, pick the one with a subtitle so we see it before
the video. */
if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
earliest_content = i;
if (earliest_content) {
/* Black and silence fillers may be even further behind than the content */
if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
earliest_time = _black.position ();
if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
earliest_time = _silent.position ();
/* Ask the chosen decoder for more data */
earliest_content->done = earliest_content->decoder->pass ();
/* Emit one black frame and advance the black filler */
emit_video (black_player_video_frame(EYES_BOTH), _black.position());
_black.set_position (_black.position() + one_video_frame());
/* Emit up to one video frame's worth of silence */
DCPTimePeriod period (_silent.period_at_position());
if (_last_audio_time) {
/* Sometimes the thing that happened last finishes fractionally before
this silence.  Bodge the start time of the silence to fix it.  I'm
not sure if this is the right solution --- maybe the last thing should
be padded `forward' rather than this thing padding `back'.
*/
period.from = min(period.from, *_last_audio_time);
if (period.duration() > one_video_frame()) {
period.to = period.from + one_video_frame();
_silent.set_position (period.to);
/* Emit any audio that is ready */
/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
of our streams, or the position of the _silent.
*/
DCPTime pull_to = _film->length ();
for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
if (!i->second.piece->done && i->second.last_push_end < pull_to) {
pull_to = i->second.last_push_end;
if (!_silent.done() && _silent.position() < pull_to) {
pull_to = _silent.position();
list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
if (_last_audio_time && i->second < *_last_audio_time) {
/* This new data comes before the last we emitted (or the last seek); discard it */
pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
} else if (_last_audio_time && i->second > *_last_audio_time) {
/* There's a gap between this data and the last we emitted; fill with silence */
fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
emit_audio (i->first, i->second);
/* Flush the video-delay queue */
for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
do_emit_video(i->first, i->second);
675 optional<PositionImage>
676 Player::subtitles_for_frame (DCPTime time) const
678 list<PositionImage> subtitles;
680 int const vfr = _film->video_frame_rate();
682 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
684 /* Image subtitles */
685 list<PositionImage> c = transform_image_subtitles (i.image);
686 copy (c.begin(), c.end(), back_inserter (subtitles));
688 /* Text subtitles (rendered to an image) */
689 if (!i.text.empty ()) {
690 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
691 copy (s.begin(), s.end(), back_inserter (subtitles));
695 if (subtitles.empty ()) {
696 return optional<PositionImage> ();
699 return merge (subtitles);
/* Handler for video data emitted by a piece's decoder.  Discards frames that
   are out of range, fills any gap since the last emitted frame with repeated
   or black frames (handling 3D eye interleaving), then wraps the new frame in
   a PlayerVideo and emits it (repeating it if the frame-rate change demands).
   NOTE(review): several lines (early returns, parts of the PlayerVideo
   constructor call, the DCPTime t declaration before the repeat loop) were
   lost in extraction; comments describe only the visible code. */
Player::video (weak_ptr<Piece> wp, ContentVideo video)
shared_ptr<Piece> piece = wp.lock ();
/* When skipping (content rate > DCP rate) drop every other frame */
FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
if (frc.skip && (video.frame % 2) == 1) {
/* Time of the first frame we will emit */
DCPTime const time = content_video_to_dcp (piece, video.frame);
/* Discard if it's before the content's period or the last accurate seek.  We can't discard
if it's after the content's period here as in that case we still need to fill any gap between
`now' and the end of the content's period.
*/
if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
/* Fill gaps that we discover now that we have some video which needs to be emitted.
This is where we need to fill to.
*/
DCPTime fill_to = min (time, piece->content->end());
if (_last_video_time) {
DCPTime fill_from = max (*_last_video_time, piece->content->position());
LastVideoMap::const_iterator last = _last_video.find (wp);
if (_film->three_d()) {
/* 3D: fill eye-by-eye, advancing time only after the RIGHT eye */
DCPTime j = fill_from;
Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
if (eyes == EYES_BOTH) {
while (j < fill_to || eyes != video.eyes) {
if (last != _last_video.end()) {
/* Repeat the last frame we have, with the required eye */
shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
copy->set_eyes (eyes);
emit_video (copy, j);
emit_video (black_player_video_frame(eyes), j);
if (eyes == EYES_RIGHT) {
j += one_video_frame();
eyes = increment_eyes (eyes);
/* 2D: repeat the last frame (or black) one frame at a time */
for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
if (last != _last_video.end()) {
emit_video (last->second, j);
emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame and remember it for gap-filling */
_last_video[wp].reset (
piece->content->video->crop (),
piece->content->video->fade (video.frame),
piece->content->video->scale().size (
piece->content->video, _video_container_size, _film->frame_size ()
_video_container_size,
piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame-rate change */
for (int i = 0; i < frc.repeat; ++i) {
if (t < piece->content->end()) {
emit_video (_last_video[wp], t);
t += one_video_frame ();
/* Handler for audio data emitted by a piece's decoder.  Clips the block to
   the content's period, applies gain, remaps channels to the film's layout,
   runs any audio processor, then pushes the result into the merger and
   records how far this stream has been pushed.
   NOTE(review): early-return lines after the discard cases were lost in
   extraction; comments describe only the visible code. */
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
shared_ptr<Piece> piece = wp.lock ();
shared_ptr<AudioContent> content = piece->content->audio;
DCPOMATIC_ASSERT (content);
/* Compute time in the DCP */
DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
/* And the end of this block in the DCP */
DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
/* This audio is entirely discarded */
content_audio.audio = cut.first;
} else if (time > piece->content->end()) {
} else if (end > piece->content->end()) {
/* Trim the block so that it ends exactly at the content's end */
Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
if (remaining_frames == 0) {
shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
content_audio.audio = cut;
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain */
if (content->gain() != 0) {
shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
gain->apply_gain (content->gain ());
content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
/* Process */
if (_audio_processor) {
content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and note how far this stream now extends */
_audio_merger.push (content_audio.audio, time);
DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of an image subtitle from a piece's decoder: applies
   the content's offsets and scales to the subtitle rectangle and records it
   as active from the corresponding DCP time.
   NOTE(review): the declaration of `ps` (a PlayerSubtitles) was lost in
   extraction; comments describe only the visible code. */
Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
shared_ptr<Piece> piece = wp.lock ();
/* Apply content's subtitle offsets */
subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
/* Apply content's subtitle scale */
subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
/* Apply a corrective translation to keep the subtitle centred after that scale */
subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
ps.image.push_back (subtitle.sub);
DCPTime from (content_time_to_dcp (piece, subtitle.from()));
_active_subtitles.add_from (wp, ps, from);
/* Handler for the start of a text subtitle from a piece's decoder: applies
   the content's position offsets and scale (folding common scale into the
   font size, remainder into aspect adjust) and records the subtitle as
   active from the corresponding DCP time.
   NOTE(review): the `ps` declaration and some braces were lost in
   extraction; comments describe only the visible code. */
Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
shared_ptr<Piece> piece = wp.lock ();
DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content has finished */
if (from > piece->content->end()) {
BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
/* Content position offsets */
s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
float const xs = piece->content->subtitle->x_scale();
float const ys = piece->content->subtitle->y_scale();
float size = s.size();
/* Adjust size to express the common part of the scaling;
e.g. if xs = ys = 0.5 we scale size by 2.
*/
if (xs > 1e-5 && ys > 1e-5) {
size *= 1 / min (1 / xs, 1 / ys);
/* Then express aspect ratio changes */
if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
s.set_aspect_adjust (xs / ys);
s.set_in (dcp::Time(from.seconds(), 1000));
ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
ps.add_fonts (piece->content->subtitle->fonts ());
_active_subtitles.add_from (wp, ps, from);
/* Handler for the end of a subtitle: closes the active subtitle for this
   piece at the corresponding DCP time and, if the subtitle is not being
   burnt in, emits it via the Subtitle signal.
   NOTE(review): early-return lines inside the guards were lost in
   extraction; comments describe only the visible code. */
Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
if (!_active_subtitles.have (wp)) {
shared_ptr<Piece> piece = wp.lock ();
DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops that fall after the content has finished */
if (dcp_to > piece->content->end()) {
pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
/* Only emit as a separate subtitle if it is in use and not burnt in */
if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to a DCP time.  Flushes/clears audio state, seeks every
   piece's decoder appropriately, then resets the last-emitted trackers and
   the black/silence fillers.
   NOTE(review): several lines (setup_pieces() call, `i->done = ...`
   assignments, the `if (accurate)` wrapper) were lost in extraction;
   comments describe only the visible code. */
Player::seek (DCPTime time, bool accurate)
if (!_have_valid_pieces) {
if (_audio_processor) {
_audio_processor->flush ();
_audio_merger.clear ();
_active_subtitles.clear ();
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
if (time < i->content->position()) {
/* Before; seek to the start of the content */
i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
} else if (i->content->position() <= time && time < i->content->end()) {
/* During; seek to position */
i->decoder->seek (dcp_to_content_time (i, time), accurate);
/* After; this piece is done */
/* For an accurate seek the next emissions must start exactly at `time';
   otherwise leave them unset */
_last_video_time = time;
_last_video_eyes = EYES_LEFT;
_last_audio_time = time;
_last_video_time = optional<DCPTime>();
_last_video_eyes = optional<Eyes>();
_last_audio_time = optional<DCPTime>();
_black.set_position (time);
_silent.set_position (time);
_last_video.clear ();
/* Queue a video frame for emission.  Frames are held in a short delay queue
   so that subtitles decoded slightly later can still be attached before the
   frame is finally emitted by do_emit_video().
   NOTE(review): some lines (the `if` around the eyes update, _delay.pop_front)
   were lost in extraction; comments describe only the visible code. */
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
player before the video that requires them.
*/
_delay.push_back (make_pair (pv, time));
/* Advance the last-video trackers: time moves on once both eyes are done */
if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
_last_video_time = time + one_video_frame();
_last_video_eyes = increment_eyes (pv->eyes());
/* Only emit once the queue is long enough to provide the wiggle room */
if (_delay.size() < 3) {
pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire subtitles that have finished, attach
   any subtitles to be burnt into this frame, then (in the elided code)
   emit the Video signal.
   NOTE(review): the `if (subtitles)` wrapper and the Video() emission were
   lost in extraction; comments describe only the visible code. */
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Only clear out old subtitles when a whole frame (both eyes) is done */
if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
_active_subtitles.clear_before (time);
optional<PositionImage> subtitles = subtitles_for_frame (time);
pv->set_subtitle (subtitles.get ());
/* Emit a block of audio and advance _last_audio_time past it.
   NOTE(review): the Audio() signal emission line was lost in extraction;
   comments describe only the visible code. */
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
/* Log if the assert below is about to fail */
if (_last_audio_time && time != *_last_audio_time) {
_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
/* This audio must follow on from the previous, but I'll remove this check for the 2.12.x release */
// DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1055 Player::fill_audio (DCPTimePeriod period)
1057 if (period.from == period.to) {
1061 DCPOMATIC_ASSERT (period.from < period.to);
1063 DCPTime t = period.from;
1064 while (t < period.to) {
1065 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1066 Frame const samples = block.frames_round(_film->audio_frame_rate());
1068 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1069 silence->make_silent ();
1070 emit_audio (silence, t);
1077 Player::one_video_frame () const
1079 return DCPTime::from_frames (1, _film->video_frame_rate ());
1082 pair<shared_ptr<AudioBuffers>, DCPTime>
1083 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1085 DCPTime const discard_time = discard_to - time;
1086 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1087 Frame remaining_frames = audio->frames() - discard_frames;
1088 if (remaining_frames <= 0) {
1089 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1091 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1092 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1093 return make_pair(cut, time + discard_time);
1097 Player::set_dcp_decode_reduction (optional<int> reduction)
1099 if (reduction == _dcp_decode_reduction) {
1103 _dcp_decode_reduction = reduction;
1104 _have_valid_pieces = false;
1105 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);