2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
/* Construct a Player over a Film/Playlist pair: wire change signals from the
   film and playlist to our invalidation handlers, size the video container
   from the film, pick up the film's audio processor and do an initial
   accurate seek to time zero.
   NOTE(review): this numbered paste is missing source lines (e.g. the _film
   initialiser and braces) -- treat as a fragment, do not compile as-is. */
80 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
82 , _playlist (playlist)
83 , _have_valid_pieces (false)
84 , _ignore_video (false)
85 , _ignore_subtitle (false)
86 , _always_burn_subtitles (false)
88 , _play_referenced (false)
/* Merger is created at the film's audio sample rate so pushed blocks line up */
89 , _audio_merger (_film->audio_frame_rate())
/* Any relevant change on the film/playlist marks our piece list stale */
91 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
92 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
93 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
94 set_video_container_size (_film->frame_size ());
/* Force initial creation of the audio processor, if the film has one */
96 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so the first pass() emits from time 0 */
98 seek (DCPTime (), true);
/* Rebuild _pieces from the playlist: one decoder per valid content item,
   with its emission signals bound to our handlers; then reset per-stream
   audio state, the black/silence gap trackers and the last-emitted times.
   NOTE(review): lines are missing from this paste (braces, `continue`s). */
102 Player::setup_pieces ()
106 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files have gone missing */
108 if (!i->paths_valid ()) {
112 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
113 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
116 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set by set_ignore_video()/set_ignore_subtitle() */
120 if (decoder->video && _ignore_video) {
121 decoder->video->set_ignore ();
124 if (decoder->subtitle && _ignore_subtitle) {
125 decoder->subtitle->set_ignore ();
128 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
/* NOTE(review): the inner `if (_play_referenced)` is redundant inside
   `if (dcp && _play_referenced)` -- looks like a merge artefact; upstream
   likely gates only the outer or only the inner test.  Confirm. */
129 if (dcp && _play_referenced) {
130 if (_play_referenced) {
131 dcp->set_decode_referenced ();
133 dcp->set_forced_reduction (_dcp_decode_reduction);
136 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
137 _pieces.push_back (piece);
/* Bind decoder outputs to our handlers; weak_ptr so a dropped Piece
   does not keep itself alive through the signal connection */
139 if (decoder->video) {
140 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
143 if (decoder->audio) {
144 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
147 if (decoder->subtitle) {
148 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
149 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
150 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Track, per audio stream, how far we have pushed into the merger */
154 _stream_states.clear ();
155 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
156 if (i->content->audio) {
157 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
158 _stream_states[j] = StreamState (i, i->content->position ());
/* Empty objects describe the periods with no video / no audio, which
   pass() fills with black frames / silence respectively */
163 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
164 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
166 _last_video_time = DCPTime ();
167 _last_audio_time = DCPTime ();
168 _have_valid_pieces = true;
/* Handle a property change on one piece of playlist content.  Structural
   properties (first list) invalidate our pieces so they are rebuilt on the
   next pass/seek; presentation-only properties (second list) do not need a
   rebuild -- presumably they just notify listeners that output changed
   (the emitting lines are missing from this paste; confirm upstream). */
172 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
174 shared_ptr<Content> c = w.lock ();
/* Properties that change timing, decoding or piece structure */
180 property == ContentProperty::POSITION ||
181 property == ContentProperty::LENGTH ||
182 property == ContentProperty::TRIM_START ||
183 property == ContentProperty::TRIM_END ||
184 property == ContentProperty::PATH ||
185 property == VideoContentProperty::FRAME_TYPE ||
186 property == DCPContentProperty::NEEDS_ASSETS ||
187 property == DCPContentProperty::NEEDS_KDM ||
188 property == SubtitleContentProperty::COLOUR ||
189 property == SubtitleContentProperty::OUTLINE ||
190 property == SubtitleContentProperty::SHADOW ||
191 property == SubtitleContentProperty::EFFECT_COLOUR ||
192 property == FFmpegContentProperty::SUBTITLE_STREAM ||
193 property == FFmpegContentProperty::FILTERS ||
194 property == VideoContentProperty::COLOUR_CONVERSION
197 _have_valid_pieces = false;
/* Properties that only affect how existing decoded data is presented */
201 property == SubtitleContentProperty::LINE_SPACING ||
202 property == SubtitleContentProperty::OUTLINE_WIDTH ||
203 property == SubtitleContentProperty::Y_SCALE ||
204 property == SubtitleContentProperty::FADE_IN ||
205 property == SubtitleContentProperty::FADE_OUT ||
206 property == ContentProperty::VIDEO_FRAME_RATE ||
207 property == SubtitleContentProperty::USE ||
208 property == SubtitleContentProperty::X_OFFSET ||
209 property == SubtitleContentProperty::Y_OFFSET ||
210 property == SubtitleContentProperty::X_SCALE ||
211 property == SubtitleContentProperty::FONTS ||
212 property == VideoContentProperty::CROP ||
213 property == VideoContentProperty::SCALE ||
214 property == VideoContentProperty::FADE_IN ||
215 property == VideoContentProperty::FADE_OUT
/* Set the size of the "container" into which output video is placed,
   rebuilding the cached black frame at the new size.  No-op if unchanged. */
223 Player::set_video_container_size (dcp::Size s)
225 if (s == _video_container_size) {
229 _video_container_size = s;
/* Black filler frame reused whenever there is a gap in the video */
231 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
232 _black_image->make_black ();
/* The playlist's content list changed wholesale: pieces must be rebuilt. */
238 Player::playlist_changed ()
240 _have_valid_pieces = false;
/* React to a Film property change that affects our output. */
245 Player::film_changed (Film::Property p)
247 /* Here we should notice Film properties that affect our output, and
248 alert listeners that our output now would be different to how it was
249 last time we were run.
/* Container change: handling lines missing from this paste */
252 if (p == Film::CONTAINER) {
254 } else if (p == Film::VIDEO_FRAME_RATE) {
255 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
256 so we need new pieces here.
258 _have_valid_pieces = false;
260 } else if (p == Film::AUDIO_PROCESSOR) {
/* (Re-)clone the film's audio processor at the film's sample rate */
261 if (_film->audio_processor ()) {
262 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale and position image (bitmap) subtitles into _video_container_size,
   returning PositionImages ready for compositing onto the frame.
   NOTE(review): the Image::scale call and return are missing from this
   paste; only the geometry computation is visible. */
268 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
270 list<PositionImage> all;
272 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
277 /* We will scale the subtitle up to fit _video_container_size */
/* rectangle holds fractions of the container, so multiply out to pixels */
278 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
280 /* Then we need a corrective translation, consisting of two parts:
282 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
283 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
285 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
286 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
287 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
289 * Combining these two translations gives these expressions.
296 dcp::YUV_TO_RGB_REC601,
297 i->image->pixel_format (),
/* Pixel position of the subtitle within the container */
302 lrint (_video_container_size.width * i->rectangle.x),
303 lrint (_video_container_size.height * i->rectangle.y)
/** @return A new PlayerVideo wrapping the cached black image, sized to the
 *  container, with a default (preset) colour conversion.  Used to fill
 *  gaps where there is no video content. */
312 shared_ptr<PlayerVideo>
313 Player::black_player_video_frame () const
315 return shared_ptr<PlayerVideo> (
317 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
320 _video_container_size,
321 _video_container_size,
324 PresetColourConversion::all().front().conversion
/* Convert a DCP time to a video frame index within the given piece's
   content, accounting for position, trim and the skip/repeat factor. */
330 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
332 DCPTime s = t - piece->content->position ();
/* Clamp into [0, length_after_trim] and add back the trimmed-off start */
333 s = min (piece->content->length_after_trim(), s);
334 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
336 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
337 then convert that ContentTime to frames at the content's rate. However this fails for
338 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
339 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
341 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
343 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: content video frame -> DCP time. */
347 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
349 /* See comment in dcp_to_content_video */
350 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
351 return d + piece->content->position();
/* Convert a DCP time to a frame count at the film's (resampled) audio rate
   within the given piece, clamped to the piece's trimmed extent. */
355 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
357 DCPTime s = t - piece->content->position ();
358 s = min (piece->content->length_after_trim(), s);
359 /* See notes in dcp_to_content_video */
360 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: resampled audio frame -> DCP time. */
364 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
366 /* See comment in dcp_to_content_video */
367 return DCPTime::from_frames (f, _film->audio_frame_rate())
368 - DCPTime (piece->content->trim_start(), piece->frc)
369 + piece->content->position();
/* Convert a DCP time to a ContentTime within the piece (clamped to the
   trimmed extent, offset by trim_start). */
373 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
375 DCPTime s = t - piece->content->position ();
376 s = min (piece->content->length_after_trim(), s);
377 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_time: ContentTime in a piece -> DCP time,
   never negative. */
381 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
383 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** Collect the fonts used by all subtitle content, rebuilding pieces first
 *  if they are stale.  Duplicate font IDs are not de-duplicated (see XXX). */
386 list<shared_ptr<Font> >
387 Player::get_subtitle_fonts ()
389 if (!_have_valid_pieces) {
393 list<shared_ptr<Font> > fonts;
394 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
395 if (p->content->subtitle) {
396 /* XXX: things may go wrong if there are duplicate font IDs
397 with different font files.
399 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
400 copy (f.begin(), f.end(), back_inserter (fonts));
407 /** Set this player never to produce any video data */
409 Player::set_ignore_video ()
411 _ignore_video = true;
/** Set this player never to produce any subtitle data */
415 Player::set_ignore_subtitle ()
417 _ignore_subtitle = true;
420 /** Set whether or not this player should always burn text subtitles into the image,
421 * regardless of the content settings.
422 * @param burn true to always burn subtitles, false to obey content settings.
425 Player::set_always_burn_subtitles (bool burn)
427 _always_burn_subtitles = burn;
430 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature line is missing from this paste; presumably
   this also sets a _fast flag before invalidating pieces -- confirm. */
435 _have_valid_pieces = false;
/** Make this player decode content that a DCP refers to ("referenced"
 *  reels), rather than skipping it; pieces must be rebuilt. */
439 Player::set_play_referenced ()
441 _play_referenced = true;
442 _have_valid_pieces = false;
/** Build the list of reel assets (picture/sound/subtitle) that DCP content
 *  in the playlist refers to rather than re-encodes, with their DCP time
 *  extents, applying the content's trims to each asset's entry point and
 *  duration.  NOTE(review): lines are missing from this paste (loop bodies,
 *  push_backs, the `offset` declaration). */
445 list<ReferencedReelAsset>
446 Player::get_reel_assets ()
448 list<ReferencedReelAsset> a;
450 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference reels */
451 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
456 scoped_ptr<DCPDecoder> decoder;
458 decoder.reset (new DCPDecoder (j, _film->log(), false));
464 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
466 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* Trims are expressed in content frames at the content's rate (cfr);
   asset extents in the output use the film's rate (ffr) */
467 double const cfr = j->video_frame_rate().get();
468 Frame const trim_start = j->trim_start().frames_round (cfr);
469 Frame const trim_end = j->trim_end().frames_round (cfr);
470 int const ffr = _film->video_frame_rate ();
472 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
473 if (j->reference_video ()) {
474 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
475 DCPOMATIC_ASSERT (ra);
/* NOTE(review): this mutates the decoder's reel asset in place; if the
   asset object is shared with other users of the decoder/reel, repeated
   calls would compound the trim -- confirm ownership upstream. */
476 ra->set_entry_point (ra->entry_point() + trim_start);
477 ra->set_duration (ra->duration() - trim_start - trim_end);
479 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
483 if (j->reference_audio ()) {
484 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
485 DCPOMATIC_ASSERT (ra);
486 ra->set_entry_point (ra->entry_point() + trim_start);
487 ra->set_duration (ra->duration() - trim_start - trim_end);
489 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
493 if (j->reference_subtitle ()) {
494 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
495 DCPOMATIC_ASSERT (ra);
496 ra->set_entry_point (ra->entry_point() + trim_start);
497 ra->set_duration (ra->duration() - trim_start - trim_end);
499 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
503 /* Assume that main picture duration is the length of the reel */
504 offset += k->main_picture()->duration ();
/* Fragment of Player::pass() (the signature line is missing from this
   paste).  One "tick" of playback: find whichever decoder -- or the black/
   silence filler -- is furthest behind, make it emit some data, then pull
   any ready audio out of the merger and emit it. */
514 if (!_have_valid_pieces) {
518 if (_playlist->length() == DCPTime()) {
519 /* Special case of an empty Film; just give one black frame */
520 emit_video (black_player_video_frame(), DCPTime());
524 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
526 shared_ptr<Piece> earliest_content;
527 optional<DCPTime> earliest_time;
529 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
534 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
/* Decoder has run past its content's end: nothing more wanted from it */
535 if (t > i->content->end()) {
538 /* Given two choices at the same time, pick the one with a subtitle so we see it before
541 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
543 earliest_content = i;
557 if (earliest_content) {
/* A black/silent gap that starts before any content wins the race */
561 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
562 earliest_time = _black.position ();
566 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
567 earliest_time = _silent.position ();
/* CONTENT case: let the winning decoder emit via our connected handlers */
573 earliest_content->done = earliest_content->decoder->pass ();
/* BLACK case: emit one black frame and advance */
576 emit_video (black_player_video_frame(), _black.position());
577 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: fill at most one video frame's worth of silence */
581 DCPTimePeriod period (_silent.period_at_position());
582 if (period.duration() > one_video_frame()) {
583 period.to = period.from + one_video_frame();
586 _silent.set_position (period.to);
594 /* Emit any audio that is ready */
/* Only pull up to the earliest point every live stream has pushed to,
   so the merger never emits data that a lagging stream could still change */
596 DCPTime pull_to = _film->length ();
597 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
598 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
599 pull_to = i->second.last_push_end;
603 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
604 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
605 if (_last_audio_time && i->second < *_last_audio_time) {
606 /* This new data comes before the last we emitted (or the last seek); discard it */
607 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
612 } else if (_last_audio_time && i->second > *_last_audio_time) {
613 /* There's a gap between this data and the last we emitted; fill with silence */
614 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
617 emit_audio (i->first, i->second);
/** @return All subtitles (image ones transformed, text ones rendered) that
 *  should be burnt into the frame at @p time, merged into one image; or
 *  nothing if there are none. */
623 optional<PositionImage>
624 Player::subtitles_for_frame (DCPTime time) const
626 list<PositionImage> subtitles;
628 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
630 /* Image subtitles */
631 list<PositionImage> c = transform_image_subtitles (i.image);
632 copy (c.begin(), c.end(), back_inserter (subtitles));
634 /* Text subtitles (rendered to an image) */
635 if (!i.text.empty ()) {
636 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
637 copy (s.begin(), s.end(), back_inserter (subtitles));
641 if (subtitles.empty ()) {
642 return optional<PositionImage> ();
645 return merge (subtitles);
/* Handler for a decoder's video Data signal: convert the content frame to
   DCP time, drop frames outside the content period or before the last
   emission, fill any gap since the last emitted frame, then emit this frame
   (repeated as required by the frame-rate change).
   NOTE(review): lines are missing from this paste (early returns, the
   PlayerVideo constructor arguments around L687-698, `t` initialisation). */
649 Player::video (weak_ptr<Piece> wp, ContentVideo video)
651 shared_ptr<Piece> piece = wp.lock ();
656 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping (content faster than DCP), drop every other frame */
657 if (frc.skip && (video.frame % 2) == 1) {
661 /* Time of the first frame we will emit */
662 DCPTime const time = content_video_to_dcp (piece, video.frame);
664 /* Discard if it's outside the content's period or if it's before the last accurate seek */
666 time < piece->content->position() ||
667 time >= piece->content->end() ||
668 (_last_video_time && time < *_last_video_time)) {
672 /* Fill gaps that we discover now that we have some video which needs to be emitted */
674 if (_last_video_time) {
675 /* XXX: this may not work for 3D */
676 DCPTime fill_from = max (*_last_video_time, piece->content->position());
677 for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
/* Prefer repeating this piece's last frame over a black frame */
678 LastVideoMap::const_iterator k = _last_video.find (wp);
679 if (k != _last_video.end ()) {
680 emit_video (k->second, j);
682 emit_video (black_player_video_frame(), j);
/* Build the PlayerVideo for this frame: crop/fade/scale per the content's
   video settings, into the container size */
687 _last_video[wp].reset (
690 piece->content->video->crop (),
691 piece->content->video->fade (video.frame),
692 piece->content->video->scale().size (
693 piece->content->video, _video_container_size, _film->frame_size ()
695 _video_container_size,
698 piece->content->video->colour_conversion ()
/* Emit once, or repeatedly when the DCP rate is a multiple of the content's */
703 for (int i = 0; i < frc.repeat; ++i) {
704 emit_video (_last_video[wp], t);
705 t += one_video_frame ();
/* Handler for a decoder's audio Data signal: place the block in DCP time,
   trim anything outside the content's period, apply gain, remap to the
   film's channel layout, run the audio processor if any, and push into the
   merger (recording how far this stream has pushed).
   NOTE(review): lines are missing from this paste (returns, braces). */
710 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
712 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
714 shared_ptr<Piece> piece = wp.lock ();
719 shared_ptr<AudioContent> content = piece->content->audio;
720 DCPOMATIC_ASSERT (content);
722 /* Compute time in the DCP */
723 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
724 /* And the end of this block in the DCP */
725 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
727 /* Remove anything that comes before the start or after the end of the content */
728 if (time < piece->content->position()) {
729 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
731 /* This audio is entirely discarded */
734 content_audio.audio = cut.first;
736 } else if (time > piece->content->end()) {
/* Starts after the end: drop it (line missing from paste) */
739 } else if (end > piece->content->end()) {
/* Overlaps the end: keep only the part inside the content */
740 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
741 if (remaining_frames == 0) {
744 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
745 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
746 content_audio.audio = cut;
749 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if non-zero */
753 if (content->gain() != 0) {
754 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
755 gain->apply_gain (content->gain ());
756 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
761 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
765 if (_audio_processor) {
766 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and record this stream's high-water mark, which
   pass() uses to decide how much merged audio is safe to emit */
771 _audio_merger.push (content_audio.audio, time);
772 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
773 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a decoder's ImageStart signal: apply the content's subtitle
   offset/scale settings to the bitmap's rectangle and register it as an
   active subtitle starting at the corresponding DCP time. */
777 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
779 shared_ptr<Piece> piece = wp.lock ();
784 /* Apply content's subtitle offsets */
785 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
786 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
788 /* Apply content's subtitle scale */
789 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
790 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
792 /* Apply a corrective translation to keep the subtitle centred after that scale */
793 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
794 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
797 ps.image.push_back (subtitle.sub);
798 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
800 _active_subtitles.add_from (wp, ps, from);
/* Handler for a decoder's TextStart signal: apply the content's position
   offsets and x/y scale to each SubtitleString (common scale folded into
   the font size, residual expressed as aspect adjust) and register the
   result as an active subtitle. */
804 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
806 shared_ptr<Piece> piece = wp.lock ();
812 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
814 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
815 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
816 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
817 float const xs = piece->content->subtitle->x_scale();
818 float const ys = piece->content->subtitle->y_scale();
819 float size = s.size();
821 /* Adjust size to express the common part of the scaling;
822 e.g. if xs = ys = 0.5 we scale size by 2.
/* Guard against division by (near-)zero scales */
824 if (xs > 1e-5 && ys > 1e-5) {
825 size *= 1 / min (1 / xs, 1 / ys);
829 /* Then express aspect ratio changes */
830 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
831 s.set_aspect_adjust (xs / ys);
/* In-time in dcp::Time units (editable at 1000 units/second) */
834 s.set_in (dcp::Time(from.seconds(), 1000));
835 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
836 ps.add_fonts (piece->content->subtitle->fonts ());
839 _active_subtitles.add_from (wp, ps, from);
/* Handler for a decoder's Stop signal: close the active subtitle for this
   piece at the given time and, if the subtitle is being used but NOT burnt
   in, emit it to listeners via the Subtitle signal. */
843 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
845 if (!_active_subtitles.have (wp)) {
849 shared_ptr<Piece> piece = wp.lock ();
854 DCPTime const dcp_to = content_time_to_dcp (piece, to);
856 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
858 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
859 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek all decoders to @p time.  @p accurate true to get frames at exactly
   the requested time (accurate seeks also reset _last_video/_last_audio_time
   so pre-seek data is discarded; the else-branch lines are partially missing
   from this paste). */
864 Player::seek (DCPTime time, bool accurate)
866 if (!_have_valid_pieces) {
/* Drop any state accumulated before the seek */
870 if (_audio_processor) {
871 _audio_processor->flush ();
874 _audio_merger.clear ();
875 _active_subtitles.clear ();
877 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
878 if (time < i->content->position()) {
879 /* Before; seek to 0 */
880 i->decoder->seek (ContentTime(), accurate);
882 } else if (i->content->position() <= time && time < i->content->end()) {
883 /* During; seek to position */
884 i->decoder->seek (dcp_to_content_time (i, time), accurate);
887 /* After; this piece is done */
/* Accurate seek: anything before `time` must not be emitted */
893 _last_video_time = time;
894 _last_audio_time = time;
896 _last_video_time = optional<DCPTime>();
897 _last_audio_time = optional<DCPTime>();
/* Restart the black/silence gap trackers from the seek point */
900 _black.set_position (time);
901 _silent.set_position (time);
903 _last_video.clear ();
/* Burn any due subtitles into the frame and emit it (the Video signal line
   is missing from this paste); advance _last_video_time and expire old
   subtitles once the frame is complete (both eyes, for 3D). */
907 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
909 optional<PositionImage> subtitles = subtitles_for_frame (time);
911 pv->set_subtitle (subtitles.get ());
916 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
917 _last_video_time = time + one_video_frame();
918 _active_subtitles.clear_before (time);
/* Emit an audio block (the Audio signal line is missing from this paste)
   and advance _last_audio_time past it. */
923 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
926 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @p period, in blocks of at most half a second so
   no single allocation is huge.  Empty periods are a no-op. */
930 Player::fill_audio (DCPTimePeriod period)
932 if (period.from == period.to) {
936 DCPOMATIC_ASSERT (period.from < period.to);
938 DCPTime t = period.from;
939 while (t < period.to) {
940 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
941 Frame const samples = block.frames_round(_film->audio_frame_rate());
943 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
944 silence->make_silent ();
945 emit_audio (silence, t);
/** @return The duration of one video frame at the film's frame rate. */
952 Player::one_video_frame () const
954 return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Discard the part of @p audio (starting at @p time) that lies before
 *  @p discard_to.
 *  @return The remaining audio and its new start time; a null buffer if
 *  everything was discarded. */
957 pair<shared_ptr<AudioBuffers>, DCPTime>
958 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
960 DCPTime const discard_time = discard_to - time;
961 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
962 Frame remaining_frames = audio->frames() - discard_frames;
963 if (remaining_frames <= 0) {
964 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
966 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
967 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
968 return make_pair(cut, time + discard_time);
/* Set the resolution reduction applied when decoding DCP content (e.g. for
   faster preview); a change invalidates pieces so decoders are rebuilt. */
972 Player::set_dcp_decode_reduction (optional<int> reduction)
974 if (reduction == _dcp_decode_reduction) {
978 _dcp_decode_reduction = reduction;
979 _have_valid_pieces = false;