+ /* Fill from the last audio or seek time */
+ optional<DCPTime> audio_fill_from = _last_audio_time;
+
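+ /* The fill target must be shifted by the content's audio delay, since that delay
+ is applied to the content's audio when it arrives (see Player::audio below).
+ */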
+ DCPTime audio_fill_towards = fill_towards;
+ if (earliest && earliest->content->audio) {
+ audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
+ }
+
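+ /* Fill towards the target, but by no more than one video frame's worth per pass,
+ and not in periods where we know there is no audio (_no_audio).
+ */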
+ if (audio_fill_from && audio_fill_from < audio_fill_towards) {
+ DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
+ if (period.duration() > one_video_frame()) {
+ period.to = period.from + one_video_frame();
+ }
+ list<DCPTimePeriod> p = subtract(period, _no_audio);
+ if (!p.empty ()) {
+ fill_audio (p.front());
+ filled = true;
+ }
+ }
+
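+ /* Let the earliest piece's decoder do some work */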
+ if (earliest) {
+ earliest->done = earliest->decoder->pass ();
+ }
+
+ /* Emit any audio that is ready */
+
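+ /* We can only pull audio up to the earliest point that every unfinished stream
+ has pushed to, otherwise the merger may not yet have everything for that time.
+ */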
+ DCPTime pull_to = _playlist->length ();
+ for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
+ if (!i->second.piece->done && i->second.last_push_end < pull_to) {
+ pull_to = i->second.last_push_end;
+ }
+ }
+
+ list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
+ for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
+ if (_last_audio_time && i->second < *_last_audio_time) {
+ /* There has been an accurate seek and we have received some audio before the seek time;
+ discard it.
+ */
+ pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
+ if (!cut.first) {
+ continue;
+ }
+ *i = cut;
+ }
+
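+ /* Fill any gap between the last audio we emitted and the start of this block */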
+ if (_last_audio_time) {
+ fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
+ }
+
+ emit_audio (i->first, i->second);
+ }
+
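+ /* We have finished only if there was nothing to decode and no gap to fill */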
+ return !earliest && !filled;
+}
+
+optional<PositionImage>
+Player::subtitles_for_frame (DCPTime time) const
+{
+ list<PositionImage> subtitles;
+
+ BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
+
+ /* Image subtitles */
+ list<PositionImage> c = transform_image_subtitles (i.image);
+ copy (c.begin(), c.end(), back_inserter (subtitles));
+
+ /* Text subtitles (rendered to an image) */
+ if (!i.text.empty ()) {
+ list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
+ copy (s.begin(), s.end(), back_inserter (subtitles));
+ }
+ }
+
+ if (subtitles.empty ()) {
+ return optional<PositionImage> ();
+ }
+
+ return merge (subtitles);
+}
+
+void
+Player::video (weak_ptr<Piece> wp, ContentVideo video)
+{
+ shared_ptr<Piece> piece = wp.lock ();
+ if (!piece) {
+ return;
+ }
+
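+ /* Drop alternate frames if the frame rate change requires it (e.g. content at
+ twice the DCP's frame rate).
+ */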
+ FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
+ if (frc.skip && (video.frame % 2) == 1) {
+ return;
+ }
+
+ /* Time and period of the frame we will emit */
+ DCPTime const time = content_video_to_dcp (piece, video.frame);
+ DCPTimePeriod const period (time, time + one_video_frame());
+
+ /* Discard if it's outside the content's period or if it's before the last accurate seek */
+ if (
+ time < piece->content->position() ||
+ time >= piece->content->end() ||
+ (_last_video_time && time < *_last_video_time)) {
+ return;
+ }
+
+ /* Now that we have some video to emit, fill any gap between the last video time and this frame */
+
+ optional<DCPTime> fill_to = _last_video_time;
+
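+ /* For each gap (outside the periods in _no_video), repeat the last frame we had
+ from this piece, or black if we have none.
+ */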
+ if (fill_to) {
+ /* XXX: this may not work for 3D */
+ BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*fill_to, time), _no_video)) {
+ for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
+ LastVideoMap::const_iterator k = _last_video.find (wp);
+ if (k != _last_video.end ()) {
+ emit_video (k->second, j);
+ } else {
+ emit_video (black_player_video_frame(), j);
+ }
+ }
+ }
+ }
+
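+ /* Remember this frame so that it can be repeated to fill any later gaps */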
+ _last_video[wp].reset (
+ new PlayerVideo (
+ video.image,
+ piece->content->video->crop (),
+ piece->content->video->fade (video.frame),
+ piece->content->video->scale().size (
+ piece->content->video, _video_container_size, _film->frame_size ()
+ ),
+ _video_container_size,
+ video.eyes,
+ video.part,
+ piece->content->video->colour_conversion ()
+ )
+ );
+
+ emit_video (_last_video[wp], time);
+}
+
+/** Do our common processing on some audio */
+void
+Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
+{
+ DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
+
+ /* Gain */
+
+ if (content->gain() != 0) {
+ shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
+ gain->apply_gain (content->gain ());
+ content_audio.audio = gain;
+ }
+
+ /* Remap */
+
+ content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
+
+ /* Process */
+
+ if (_audio_processor) {
+ content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
+ }
+
+ /* Push */
+
+ _audio_merger.push (content_audio.audio, time);
+ DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
+ _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
+}
+
+void
+Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
+{
+ DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
+
+ shared_ptr<Piece> piece = wp.lock ();
+ if (!piece) {
+ return;
+ }
+
+ shared_ptr<AudioContent> content = piece->content->audio;
+ DCPOMATIC_ASSERT (content);
+
+ /* Compute time in the DCP */
+ DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
+ /* And the end of this block in the DCP */
+ DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
+
+ /* Remove anything that comes before the start or after the end of the content */
+ if (time < piece->content->position()) {
+ pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
+ if (!cut.first) {
+ /* This audio is entirely discarded */
+ return;
+ }
+ content_audio.audio = cut.first;
+ time = cut.second;
+ } else if (time > piece->content->end()) {
+ /* Discard it all */
+ return;
+ } else if (end > piece->content->end()) {
+ Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
+ if (remaining_frames == 0) {
+ return;
+ }
+ shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
+ cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
+ content_audio.audio = cut;
+ }
+
+ audio_transform (content, stream, content_audio, time);
+}
+
+void
+Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
+{
+ shared_ptr<Piece> piece = wp.lock ();
+ if (!piece) {
+ return;
+ }
+
+ /* Apply content's subtitle offsets */
+ subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
+ subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
+
+ /* Apply content's subtitle scale */
+ subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
+ subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
+
+ /* Apply a corrective translation to keep the subtitle centred after that scale */
+ subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
+ subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
+
+ PlayerSubtitles ps;
+ ps.image.push_back (subtitle.sub);
+ DCPTime from (content_time_to_dcp (piece, subtitle.from()));
+
+ _active_subtitles.add_from (wp, ps, from);
+}
+
+void
+Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
+{
+ shared_ptr<Piece> piece = wp.lock ();
+ if (!piece) {
+ return;
+ }
+
+ PlayerSubtitles ps;
+ DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
+
+ BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
+ s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
+ s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
+ float const xs = piece->content->subtitle->x_scale();
+ float const ys = piece->content->subtitle->y_scale();
+ float size = s.size();
+
+ /* Adjust size to express the common part of the scaling;
+ e.g. if xs = ys = 0.5 we scale size by 0.5. The expression below is
+ equivalent to size *= max (xs, ys).
+ */
+ if (xs > 1e-5 && ys > 1e-5) {
+ size *= 1 / min (1 / xs, 1 / ys);
+ }
+ s.set_size (size);
+
+ /* Then express aspect ratio changes */
+ if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
+ s.set_aspect_adjust (xs / ys);
+ }
+
+ s.set_in (dcp::Time(from.seconds(), 1000));
+ ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
+ ps.add_fonts (piece->content->subtitle->fonts ());
+ }
+
+ _active_subtitles.add_from (wp, ps, from);
+}
+
+void
+Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
+{
+ if (!_active_subtitles.have (wp)) {
+ return;
+ }
+
+ shared_ptr<Piece> piece = wp.lock ();
+ if (!piece) {
+ return;
+ }
+
+ DCPTime const dcp_to = content_time_to_dcp (piece, to);
+
+ pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
+
+ if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
+ Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
+ }
+}
+
+void
+Player::seek (DCPTime time, bool accurate)
+{
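+ /* Clear out any pending audio and subtitle state before the seek */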
+ if (_audio_processor) {
+ _audio_processor->flush ();
+ }
+
+ _audio_merger.clear ();
+ _active_subtitles.clear ();
+
+ BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+ if (time < i->content->position()) {
+ /* Before; seek to 0 */
+ i->decoder->seek (ContentTime(), accurate);
+ i->done = false;
+ } else if (i->content->position() <= time && time < i->content->end()) {
+ /* During; seek to position */
+ i->decoder->seek (dcp_to_content_time (i, time), accurate);
+ i->done = false;
+ } else {
+ /* After; this piece is done */
+ i->done = true;
+ }
+ }
+
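+ /* For an accurate seek, note the seek time so that anything arriving from before
+ it can be discarded; an inaccurate seek just takes whatever arrives first.
+ */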
+ if (accurate) {
+ _last_video_time = time;
+ _last_audio_time = time;
+ } else {
+ _last_video_time = optional<DCPTime>();
+ _last_audio_time = optional<DCPTime>();
+ }
+}
+
+void
+Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
+{
+ optional<PositionImage> subtitles = subtitles_for_frame (time);
+ if (subtitles) {
+ pv->set_subtitle (subtitles.get ());
+ }
+
+ Video (pv, time);
+
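+ /* Only advance our time once the frame is complete (both eyes, or a 2D frame),
+ and drop subtitles which finished before it.
+ */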
+ if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
+ _last_video_time = time + one_video_frame();
+ _active_subtitles.clear_before (time);
+ }
+}
+
+void
+Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
+{
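+ /* Pass the audio on and note the time at which this block will end */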
+ Audio (data, time);
+ _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
+}
+
+void
+Player::fill_audio (DCPTimePeriod period)
+{
+ if (period.from == period.to) {
+ return;
+ }
+
+ DCPOMATIC_ASSERT (period.from < period.to);
+
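+ /* Emit silence in blocks of no more than half a second, skipping periods where
+ we know there is no audio.
+ */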
+ BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
+ DCPTime t = i.from;
+ while (t < i.to) {
+ DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
+ Frame const samples = block.frames_round(_film->audio_frame_rate());
+ if (samples) {
+ shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
+ silence->make_silent ();
+ emit_audio (silence, t);
+ }
+ t += block;
+ }
+ }
+}
+
+DCPTime
+Player::one_video_frame () const
+{
+ return DCPTime::from_frames (1, _film->video_frame_rate ());
+}
+
+pair<shared_ptr<AudioBuffers>, DCPTime>
+Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
+{
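+ /* Work out how many frames to discard to get from `time' up to `discard_to' */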
+ DCPTime const discard_time = discard_to - time;
+ Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
+ Frame remaining_frames = audio->frames() - discard_frames;
+ if (remaining_frames <= 0) {
+ return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
+ }
+ shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
+ cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
+ return make_pair(cut, time + discard_time);