2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
/* Constructor: store the film/playlist, initialise flags, connect change
   signals, set the video container size from the film, apply the film's
   audio processor and do an initial accurate seek to time zero.
   NOTE(review): this chunk is missing interleaved lines (gaps in the embedded
   numbering, e.g. the `_film (film)` initialiser line); comments describe only
   what is visible. */
80 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
82 , _playlist (playlist)
83 , _have_valid_pieces (false)
84 , _ignore_video (false)
85 , _ignore_subtitle (false)
86 , _always_burn_subtitles (false)
88 , _play_referenced (false)
89 , _audio_merger (_film->audio_frame_rate())
/* Rebuild pieces / re-evaluate output when the film or playlist changes */
91 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
92 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
93 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
94 set_video_container_size (_film->frame_size ());
/* Pick up the film's audio processor (if any) via the normal change path */
96 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so decoders are positioned */
98 seek (DCPTime (), true);
/* Build the list of Pieces (content + decoder + frame-rate change) from the
   playlist, wire decoder signals to our handlers, and reset playback state.
   NOTE(review): interior lines are missing from this fragment (see embedded
   numbering gaps); comments cover only the visible statements. */
102 Player::setup_pieces ()
106 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing/invalid */
108 if (!i->paths_valid ()) {
112 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
113 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
116 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore-video / ignore-subtitle flags set before setup */
120 if (decoder->video && _ignore_video) {
121 decoder->video->set_ignore ();
124 if (decoder->subtitle && _ignore_subtitle) {
125 decoder->subtitle->set_ignore ();
128 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
/* NOTE(review): the inner `if (_play_referenced)` is redundant — the outer
   condition already requires _play_referenced to be true. */
129 if (dcp && _play_referenced) {
130 if (_play_referenced) {
131 dcp->set_decode_referenced ();
133 dcp->set_forced_reduction (_dcp_decode_reduction);
136 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
137 _pieces.push_back (piece);
/* Route decoder output into Player::video / Player::audio / subtitle handlers,
   holding the piece by weak_ptr so a rebuild does not keep it alive. */
139 if (decoder->video) {
140 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
143 if (decoder->audio) {
144 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
147 if (decoder->subtitle) {
148 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
149 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
150 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Record per-stream push state, starting at each content's position */
154 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
155 if (i->content->audio) {
156 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
157 _stream_states[j] = StreamState (i, i->content->position ());
/* Track the gaps with no video (_black) and no audio (_silent) across the film */
162 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
163 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
165 _last_video_time = DCPTime ();
166 _last_audio_time = DCPTime ();
167 _have_valid_pieces = true;
/* React to a change of one Content property.  Properties in the first group
   invalidate the pieces (decoders must be rebuilt); those in the second group
   only change the rendered output.  Fragment: the branch bodies' surrounding
   lines are missing from this view. */
171 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
173 shared_ptr<Content> c = w.lock ();
/* Group 1: structural changes -> _have_valid_pieces = false */
179 property == ContentProperty::POSITION ||
180 property == ContentProperty::LENGTH ||
181 property == ContentProperty::TRIM_START ||
182 property == ContentProperty::TRIM_END ||
183 property == ContentProperty::PATH ||
184 property == VideoContentProperty::FRAME_TYPE ||
185 property == DCPContentProperty::NEEDS_ASSETS ||
186 property == DCPContentProperty::NEEDS_KDM ||
187 property == SubtitleContentProperty::COLOUR ||
188 property == SubtitleContentProperty::OUTLINE ||
189 property == SubtitleContentProperty::SHADOW ||
190 property == SubtitleContentProperty::EFFECT_COLOUR ||
191 property == FFmpegContentProperty::SUBTITLE_STREAM ||
192 property == VideoContentProperty::COLOUR_CONVERSION
195 _have_valid_pieces = false;
/* Group 2: presentation-only changes (no piece rebuild needed) */
199 property == SubtitleContentProperty::LINE_SPACING ||
200 property == SubtitleContentProperty::OUTLINE_WIDTH ||
201 property == SubtitleContentProperty::Y_SCALE ||
202 property == SubtitleContentProperty::FADE_IN ||
203 property == SubtitleContentProperty::FADE_OUT ||
204 property == ContentProperty::VIDEO_FRAME_RATE ||
205 property == SubtitleContentProperty::USE ||
206 property == SubtitleContentProperty::X_OFFSET ||
207 property == SubtitleContentProperty::Y_OFFSET ||
208 property == SubtitleContentProperty::X_SCALE ||
209 property == SubtitleContentProperty::FONTS ||
210 property == VideoContentProperty::CROP ||
211 property == VideoContentProperty::SCALE ||
212 property == VideoContentProperty::FADE_IN ||
213 property == VideoContentProperty::FADE_OUT
/* Set the size of the "container" into which video is placed, rebuilding the
   cached black frame to match.  No-op if the size is unchanged. */
221 Player::set_video_container_size (dcp::Size s)
223 if (s == _video_container_size) {
227 _video_container_size = s;
/* Re-create the black filler frame at the new size */
229 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
230 _black_image->make_black ();
/* The playlist itself changed (content added/removed): pieces must be rebuilt. */
236 Player::playlist_changed ()
238 _have_valid_pieces = false;
/* React to a Film property change that affects our output. */
243 Player::film_changed (Film::Property p)
245 /* Here we should notice Film properties that affect our output, and
246 alert listeners that our output now would be different to how it was
247 last time we were run.
250 if (p == Film::CONTAINER) {
252 } else if (p == Film::VIDEO_FRAME_RATE) {
253 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
254 so we need new pieces here.
256 _have_valid_pieces = false;
258 } else if (p == Film::AUDIO_PROCESSOR) {
259 if (_film->audio_processor ()) {
/* Clone the processor at the film's audio rate for our own use */
260 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale/position image subtitles into _video_container_size coordinates,
   returning PositionImages ready for compositing.  Fragment: the image
   scaling call around the visible arguments is partly missing. */
266 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
268 list<PositionImage> all;
270 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
275 /* We will scale the subtitle up to fit _video_container_size */
276 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
278 /* Then we need a corrective translation, consisting of two parts:
280 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
281 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
283 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
284 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
285 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
287 * Combining these two translations gives these expressions.
294 dcp::YUV_TO_RGB_REC601,
295 i->image->pixel_format (),
/* Position in container pixels, rounded to nearest integer */
300 lrint (_video_container_size.width * i->rectangle.x),
301 lrint (_video_container_size.height * i->rectangle.y)
/* Build a PlayerVideo wrapping the cached black image, used as filler where
   there is no video.  Fragment: several constructor arguments are missing
   from this view. */
310 shared_ptr<PlayerVideo>
311 Player::black_player_video_frame () const
313 return shared_ptr<PlayerVideo> (
315 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
318 _video_container_size,
319 _video_container_size,
/* Any preset conversion will do for an all-black frame */
322 PresetColourConversion::all().front().conversion
/* Convert a DCP time to a frame index in the given piece's content video,
   accounting for position, trim and the piece's frame-rate change. */
328 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
330 DCPTime s = t - piece->content->position ();
331 s = min (piece->content->length_after_trim(), s);
332 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
334 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
335 then convert that ContentTime to frames at the content's rate. However this fails for
336 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
337 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
339 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
341 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: map a content video frame index to DCP time. */
345 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
347 /* See comment in dcp_to_content_video */
348 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
/* Clamp so we never return a time before the DCP's start */
349 return max (DCPTime (), d + piece->content->position ());
/* Convert a DCP time to a frame count at the film's audio rate for this piece. */
353 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
355 DCPTime s = t - piece->content->position ();
356 s = min (piece->content->length_after_trim(), s);
357 /* See notes in dcp_to_content_video */
358 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: map a resampled-audio frame count to DCP time. */
362 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
364 /* See comment in dcp_to_content_video */
365 return DCPTime::from_frames (f, _film->audio_frame_rate())
366 - DCPTime (piece->content->trim_start(), piece->frc)
367 + piece->content->position();
/* Convert a DCP time to a ContentTime within the piece (clamped to >= 0). */
371 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
373 DCPTime s = t - piece->content->position ();
374 s = min (piece->content->length_after_trim(), s);
375 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within the piece to a DCP time (clamped to >= 0). */
379 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
381 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every piece of subtitle content, rebuilding
   pieces first if they are stale. */
384 list<shared_ptr<Font> >
385 Player::get_subtitle_fonts ()
387 if (!_have_valid_pieces) {
391 list<shared_ptr<Font> > fonts;
392 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
393 if (p->content->subtitle) {
394 /* XXX: things may go wrong if there are duplicate font IDs
395 with different font files.
397 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
398 copy (f.begin(), f.end(), back_inserter (fonts));
405 /** Set this player never to produce any video data */
407 Player::set_ignore_video ()
409 _ignore_video = true;
/** Set this player never to produce any subtitle data */
413 Player::set_ignore_subtitle ()
415 _ignore_subtitle = true;
418 /** Set whether or not this player should always burn text subtitles into the image,
419 * regardless of the content settings.
420 * @param burn true to always burn subtitles, false to obey content settings.
423 Player::set_always_burn_subtitles (bool burn)
425 _always_burn_subtitles = burn;
/* NOTE(review): the embedded numbering jumps from 425 to 432 here — this
   `_have_valid_pieces = false;` may belong to a different setter whose
   signature is missing from this fragment; verify against the full file. */
432 _have_valid_pieces = false;
/* Make the player decode content that is normally referenced (not re-encoded)
   from an existing DCP; pieces must be rebuilt to pick this up. */
436 Player::set_play_referenced ()
438 _play_referenced = true;
439 _have_valid_pieces = false;
/* Gather reel assets (picture/sound/subtitle) that are referenced from
   existing DCP content rather than re-encoded, applying the content's trim
   to each asset's entry point and duration.  Fragment: some lines (e.g. the
   `a.push_back` wrappers and loop closers) are missing from this view. */
442 list<ReferencedReelAsset>
443 Player::get_reel_assets ()
445 list<ReferencedReelAsset> a;
447 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
448 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
453 scoped_ptr<DCPDecoder> decoder;
455 decoder.reset (new DCPDecoder (j, _film->log()));
461 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trim amounts expressed in frames at the content's own rate (cfr);
   output times expressed at the film's rate (ffr). */
463 DCPOMATIC_ASSERT (j->video_frame_rate ());
464 double const cfr = j->video_frame_rate().get();
465 Frame const trim_start = j->trim_start().frames_round (cfr);
466 Frame const trim_end = j->trim_end().frames_round (cfr);
467 int const ffr = _film->video_frame_rate ();
469 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
470 if (j->reference_video ()) {
471 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
472 DCPOMATIC_ASSERT (ra);
473 ra->set_entry_point (ra->entry_point() + trim_start);
474 ra->set_duration (ra->duration() - trim_start - trim_end);
476 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
480 if (j->reference_audio ()) {
481 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
482 DCPOMATIC_ASSERT (ra);
483 ra->set_entry_point (ra->entry_point() + trim_start);
484 ra->set_duration (ra->duration() - trim_start - trim_end);
486 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
490 if (j->reference_subtitle ()) {
491 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
492 DCPOMATIC_ASSERT (ra);
493 ra->set_entry_point (ra->entry_point() + trim_start);
494 ra->set_duration (ra->duration() - trim_start - trim_end);
496 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
500 /* Assume that main picture duration is the length of the reel */
501 offset += k->main_picture()->duration ();
/* Body fragment of Player::pass() — the function signature and several
   interior lines are missing from this view.  Visible logic: find the
   decoder (or black/silent gap) farthest behind, make it emit one chunk of
   data, then pull any ready audio out of the merger. */
511 if (!_have_valid_pieces) {
515 if (_playlist->length() == DCPTime()) {
516 /* Special case of an empty Film; just give one black frame */
517 emit_video (black_player_video_frame(), DCPTime());
521 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
523 shared_ptr<Piece> earliest_content;
524 optional<DCPTime> earliest_time;
526 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
528 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
529 /* Given two choices at the same time, pick the one with a subtitle so we see it before
532 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
534 earliest_content = i;
548 if (earliest_content) {
/* A black (no-video) or silent (no-audio) gap may be even further behind */
552 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
553 earliest_time = _black.position ();
557 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
558 earliest_time = _silent.position ();
564 earliest_content->done = earliest_content->decoder->pass ();
/* Fill a video gap with one black frame */
567 emit_video (black_player_video_frame(), _black.position());
568 _black.set_position (_black.position() + one_video_frame());
/* Fill an audio gap, at most one video frame's worth at a time */
572 DCPTimePeriod period (_silent.period_at_position());
573 if (period.duration() > one_video_frame()) {
574 period.to = period.from + one_video_frame();
577 _silent.set_position (period.to);
585 /* Emit any audio that is ready */
/* Only pull up to the earliest point every live stream has pushed to,
   so streams stay in sync in the merger. */
587 DCPTime pull_to = _film->length ();
588 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
589 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
590 pull_to = i->second.last_push_end;
594 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
595 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
596 if (_last_audio_time && i->second < *_last_audio_time) {
597 /* There has been an accurate seek and we have received some audio before the seek time;
600 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
607 emit_audio (i->first, i->second);
/* Compose all subtitles (image + rendered text) that should be burnt into
   the frame at `time`, or nothing if there are none. */
613 optional<PositionImage>
614 Player::subtitles_for_frame (DCPTime time) const
616 list<PositionImage> subtitles;
618 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
620 /* Image subtitles */
621 list<PositionImage> c = transform_image_subtitles (i.image);
622 copy (c.begin(), c.end(), back_inserter (subtitles));
624 /* Text subtitles (rendered to an image) */
625 if (!i.text.empty ()) {
626 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
627 copy (s.begin(), s.end(), back_inserter (subtitles));
631 if (subtitles.empty ()) {
632 return optional<PositionImage> ();
/* Flatten all subtitle images into a single positioned image */
635 return merge (subtitles);
/* Handler for video data arriving from a piece's decoder: convert to DCP
   time, discard out-of-range frames, fill any gap since the last emitted
   frame, then build and emit the PlayerVideo (repeating for pulldown).
   Fragment: some lines (e.g. the PlayerVideo constructor opening and the
   initial value of `t`) are missing from this view. */
639 Player::video (weak_ptr<Piece> wp, ContentVideo video)
641 shared_ptr<Piece> piece = wp.lock ();
/* With a skip FrameRateChange, drop every other source frame */
646 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
647 if (frc.skip && (video.frame % 2) == 1) {
651 /* Time of the first frame we will emit */
652 DCPTime const time = content_video_to_dcp (piece, video.frame);
654 /* Discard if it's outside the content's period or if it's before the last accurate seek */
656 time < piece->content->position() ||
657 time >= piece->content->end() ||
658 (_last_video_time && time < *_last_video_time)) {
662 /* Fill gaps that we discover now that we have some video which needs to be emitted */
664 if (_last_video_time) {
665 /* XXX: this may not work for 3D */
666 DCPTime fill_from = max (*_last_video_time, piece->content->position());
667 for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
/* Repeat this piece's last frame if we have one, else black */
668 LastVideoMap::const_iterator k = _last_video.find (wp);
669 if (k != _last_video.end ()) {
670 emit_video (k->second, j);
672 emit_video (black_player_video_frame(), j);
/* Build the PlayerVideo for this frame with crop/fade/scale/conversion */
677 _last_video[wp].reset (
680 piece->content->video->crop (),
681 piece->content->video->fade (video.frame),
682 piece->content->video->scale().size (
683 piece->content->video, _video_container_size, _film->frame_size ()
685 _video_container_size,
688 piece->content->video->colour_conversion ()
/* Emit once per repeat (e.g. 2x for 24->48 style repeats) */
693 for (int i = 0; i < frc.repeat; ++i) {
694 emit_video (_last_video[wp], t);
695 t += one_video_frame ();
/* Handler for audio arriving from a piece's decoder: compute DCP time,
   trim to the content's period, apply gain/remap/processor, then push
   into the merger and record how far this stream has pushed. */
700 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
702 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
704 shared_ptr<Piece> piece = wp.lock ();
709 shared_ptr<AudioContent> content = piece->content->audio;
710 DCPOMATIC_ASSERT (content);
712 /* Compute time in the DCP */
713 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
714 /* And the end of this block in the DCP */
715 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
717 /* Remove anything that comes before the start or after the end of the content */
718 if (time < piece->content->position()) {
719 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
721 /* This audio is entirely discarded */
724 content_audio.audio = cut.first;
726 } else if (time > piece->content->end()) {
729 } else if (end > piece->content->end()) {
/* Block overlaps the content's end: keep only the frames before the end */
730 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
731 if (remaining_frames == 0) {
734 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
735 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
736 content_audio.audio = cut;
739 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
743 if (content->gain() != 0) {
744 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
745 gain->apply_gain (content->gain ());
746 content_audio.audio = gain;
/* Remap the stream's channels onto the film's channel layout */
751 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
755 if (_audio_processor) {
756 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and note how far this stream has now pushed */
761 _audio_merger.push (content_audio.audio, time);
762 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
763 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of an image subtitle: apply the content's offset
   and scale settings and register it as active from its DCP start time.
   Fragment: the PlayerSubtitles declaration for `ps` is missing here. */
767 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
769 shared_ptr<Piece> piece = wp.lock ();
774 /* Apply content's subtitle offsets */
775 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
776 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
778 /* Apply content's subtitle scale */
779 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
780 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
782 /* Apply a corrective translation to keep the subtitle centred after that scale */
783 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
784 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
787 ps.image.push_back (subtitle.sub);
788 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
790 _active_subtitles.add_from (wp, ps, from);
/* Handler for the start of a text subtitle: apply position offsets and the
   X/Y scale (as a size change plus an aspect adjustment) to each
   SubtitleString, then register the set as active from its DCP start time. */
794 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
796 shared_ptr<Piece> piece = wp.lock ();
802 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
804 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
805 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
806 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
807 float const xs = piece->content->subtitle->x_scale();
808 float const ys = piece->content->subtitle->y_scale();
809 float size = s.size();
811 /* Adjust size to express the common part of the scaling;
812 e.g. if xs = ys = 0.5 we scale size by 2.
814 if (xs > 1e-5 && ys > 1e-5) {
815 size *= 1 / min (1 / xs, 1 / ys);
819 /* Then express aspect ratio changes */
820 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
821 s.set_aspect_adjust (xs / ys);
/* Subtitle in-time in dcp::Time (editable units of 1/1000s) */
824 s.set_in (dcp::Time(from.seconds(), 1000));
825 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
826 ps.add_fonts (piece->content->subtitle->fonts ());
829 _active_subtitles.add_from (wp, ps, from);
/* Handler for the end of a subtitle: close the active subtitle for this
   piece at the given content time, and emit it via the Subtitle signal if
   it is not being burnt into the image. */
833 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
835 if (!_active_subtitles.have (wp)) {
839 shared_ptr<Piece> piece = wp.lock ();
844 DCPTime const dcp_to = content_time_to_dcp (piece, to);
846 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
848 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
849 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to `time`.  `accurate` true means exact-frame positioning
   (decoders may decode-and-discard up to the target); false is a fast seek.
   Fragment: the `if (accurate)` line around L419 is missing from this view. */
854 Player::seek (DCPTime time, bool accurate)
856 if (!_have_valid_pieces) {
/* Reset any stateful audio processing */
860 if (_audio_processor) {
861 _audio_processor->flush ();
864 _audio_merger.clear ();
865 _active_subtitles.clear ();
867 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
868 if (time < i->content->position()) {
869 /* Before; seek to 0 */
870 i->decoder->seek (ContentTime(), accurate);
872 } else if (i->content->position() <= time && time < i->content->end()) {
873 /* During; seek to position */
874 i->decoder->seek (dcp_to_content_time (i, time), accurate);
877 /* After; this piece is done */
/* For an accurate seek, remember the seek time so earlier data is discarded;
   otherwise forget the last emitted times. */
883 _last_video_time = time;
884 _last_audio_time = time;
886 _last_video_time = optional<DCPTime>();
887 _last_audio_time = optional<DCPTime>();
890 _black.set_position (time);
891 _silent.set_position (time);
893 _last_video.clear ();
/* Burn any active subtitles into the frame and emit it; advance the last
   video time (only once per stereo pair) and drop expired subtitles. */
897 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
899 optional<PositionImage> subtitles = subtitles_for_frame (time);
901 pv->set_subtitle (subtitles.get ());
/* For 3D, only advance time after the right eye (or a 2D frame) */
906 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
907 _last_video_time = time + one_video_frame();
908 _active_subtitles.clear_before (time);
/* Emit an audio block and advance the last audio time past it. */
913 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
916 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering `period`, in blocks of at most half a second. */
920 Player::fill_audio (DCPTimePeriod period)
922 if (period.from == period.to) {
926 DCPOMATIC_ASSERT (period.from < period.to);
928 DCPTime t = period.from;
929 while (t < period.to) {
930 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
931 Frame const samples = block.frames_round(_film->audio_frame_rate());
933 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
934 silence->make_silent ();
935 emit_audio (silence, t);
/* Duration of a single video frame at the film's frame rate. */
942 Player::one_video_frame () const
944 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of `audio` (starting at `time`) that lies before
   `discard_to`; return the remainder and its new start time.  Returns a
   null buffer if everything is discarded. */
947 pair<shared_ptr<AudioBuffers>, DCPTime>
948 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
950 DCPTime const discard_time = discard_to - time;
951 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
952 Frame remaining_frames = audio->frames() - discard_frames;
953 if (remaining_frames <= 0) {
954 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
956 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
957 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
958 return make_pair(cut, time + discard_time);
/* Set an optional resolution reduction for decoding referenced DCPs;
   pieces must be rebuilt when it changes. */
962 Player::set_dcp_decode_reduction (optional<int> reduction)
964 if (reduction == _dcp_decode_reduction) {
968 _dcp_decode_reduction = reduction;
969 _have_valid_pieces = false;