/*
- Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
#include "content_subtitle.h"
#include "dcp_decoder.h"
#include "image_decoder.h"
+#include "resampler.h"
+#include "compose.hpp"
#include <dcp/reel.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
using boost::optional;
using boost::scoped_ptr;
-static bool
-has_video (Content* c)
-{
- return static_cast<bool>(c->video);
-}
-
-static bool
-has_audio (Content* c)
-{
- return static_cast<bool>(c->audio);
-}
-
-static bool
-has_subtitle (Content* c)
-{
- return static_cast<bool>(c->subtitle);
-}
-
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
: _film (film)
, _playlist (playlist)
, _always_burn_subtitles (false)
, _fast (false)
, _play_referenced (false)
- , _audio_merger (_film->audio_channels(), _film->audio_frame_rate())
+ , _audio_merger (_film->audio_frame_rate())
{
_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
set_video_container_size (_film->frame_size ());
film_changed (Film::AUDIO_PROCESSOR);
+
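+ /* Seek to the start of the film so that the first pass() emits from time zero */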
+ seek (DCPTime (), true);
}
void
decoder->audio->set_ignore ();
}
- if (decoder->audio && _fast) {
- decoder->audio->set_fast ();
- }
-
shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
if (dcp && _play_referenced) {
dcp->set_decode_referenced ();
}
if (decoder->subtitle) {
- decoder->subtitle->ImageData.connect (bind (&Player::image_subtitle, this, weak_ptr<Piece> (piece), _1));
- decoder->subtitle->TextData.connect (bind (&Player::text_subtitle, this, weak_ptr<Piece> (piece), _1));
+ decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
+ decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
+ decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
+ }
+ }
+
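+ /* Set up a state for each audio stream so that we can keep track of how far
+ each one has been pushed into the merger.
+ */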
+ BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+ if (i->content->audio) {
+ BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
+ _stream_states[j] = StreamState (i, i->content->position ());
+ }
+ }
+ }
+
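+ /* Note the periods covered by referenced (i.e. not re-encoded) DCP video and
+ audio, so that we do not try to fill them with black or silence.
+ */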
+ if (!_play_referenced) {
+ BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+ shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
+ if (dc) {
+ if (dc->reference_video()) {
+ _no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
+ }
+ if (dc->reference_audio()) {
+ _no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
+ }
+ }
}
}
+ _last_video_time = DCPTime ();
+ _last_audio_time = DCPTime ();
_have_valid_pieces = true;
}
void
Player::set_video_container_size (dcp::Size s)
{
+ if (s == _video_container_size) {
+ return;
+ }
+
_video_container_size = s;
_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
_black_image->make_black ();
+
+ Changed (false);
}
void
}
shared_ptr<PlayerVideo>
-Player::black_player_video_frame (DCPTime time) const
+Player::black_player_video_frame () const
{
return shared_ptr<PlayerVideo> (
new PlayerVideo (
shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
- time,
Crop (),
optional<double> (),
_video_container_size,
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
/* See comment in dcp_to_content_video */
- DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start (), piece->frc);
+ DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
return max (DCPTime (), d + piece->content->position ());
}
ContentTime
-Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
+Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
DCPTime s = t - piece->content->position ();
s = min (piece->content->length_after_trim(), s);
}
DCPTime
-Player::content_subtitle_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
+Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
_ignore_video = true;
}
-/** Set this player never to produce any audio data */
-void
-Player::set_ignore_audio ()
-{
- _ignore_audio = true;
-}
-
/** Set whether or not this player should always burn text subtitles into the image,
* regardless of the content settings.
* @param burn true to always burn subtitles, false to obey content settings.
return a;
}
-list<shared_ptr<Piece> >
-Player::overlaps (DCPTime from, DCPTime to, boost::function<bool (Content *)> valid)
+bool
+Player::pass ()
{
if (!_have_valid_pieces) {
setup_pieces ();
}
- list<shared_ptr<Piece> > overlaps;
+ shared_ptr<Piece> earliest;
+ DCPTime earliest_content;
+
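+ /* Find the piece whose next output is earliest in DCP time; that is the next
+ thing which should be decoded.
+ */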
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- if (valid (i->content.get ()) && i->content->position() < to && i->content->end() > from) {
- overlaps.push_back (i);
+ if (!i->done) {
+ DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
+ if (!earliest || t < earliest_content) {
+ earliest_content = t;
+ earliest = i;
+ }
+ }
+ }
+
+ if (earliest) {
+ earliest->done = earliest->decoder->pass ();
+ if (earliest->done && earliest->content->audio) {
+ /* Flush the Player audio system for this piece */
+ BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
+ audio_flush (earliest, i);
+ }
+ }
+ }
+
+ /* Fill towards the next thing that might happen (or the end of the playlist). This is to fill gaps between content,
+ NOT to fill gaps within content (the latter is done in ::video())
+ */
+ DCPTime fill_towards = earliest ? earliest_content : _playlist->length();
+
+ /* Work out where to fill video from */
+ optional<DCPTime> video_fill_from;
+ if (_last_video_time) {
+ /* Fill from the last video or seek time */
+ video_fill_from = _last_video_time;
+ }
+
+ bool filled = false;
+ /* Emit some black if we would be emitting before the earliest piece of content. This is so that we act like
+ a phantom Piece which emits black in the gaps between content (we only emit if we would be the earliest thing).
+ */
+ if (video_fill_from && (!earliest || *video_fill_from < earliest_content) && (fill_towards - *video_fill_from) >= one_video_frame()) {
+ list<DCPTimePeriod> p = subtract(DCPTimePeriod(*video_fill_from, *video_fill_from + one_video_frame()), _no_video);
+ if (!p.empty ()) {
+ emit_video (black_player_video_frame(), p.front().from);
+ filled = true;
+ }
+ } else if (_playlist->length() == DCPTime()) {
+ /* Special case of an empty Film; just give one black frame */
+ emit_video (black_player_video_frame(), DCPTime());
+ filled = true;
+ }
+
+ optional<DCPTime> audio_fill_from;
+ if (_last_audio_time) {
+ /* Fill from the last audio or seek time */
+ audio_fill_from = _last_audio_time;
+ }
+
+ if (audio_fill_from && *audio_fill_from < fill_towards) {
+ DCPTimePeriod period (*audio_fill_from, fill_towards);
+ if (period.duration() > one_video_frame()) {
+ period.to = period.from + one_video_frame();
+ }
+ list<DCPTimePeriod> p = subtract(period, _no_audio);
+ if (!p.empty ()) {
+ fill_audio (period);
+ }
+ filled = true;
+ }
+
+ /* Emit any audio that is ready */
+
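+ /* We can only pull audio out of the merger up to the earliest point that every
+ live stream has pushed to; after that a stream may still have something to add.
+ */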
+ DCPTime pull_to = _playlist->length ();
+ for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
+ if (!i->second.piece->done && i->second.last_push_end < pull_to) {
+ pull_to = i->second.last_push_end;
+ }
+ }
+
+ list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
+ for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
+ if (_last_audio_time && i->second < *_last_audio_time) {
+ /* There has been an accurate seek and we have received some audio before the seek time;
+ discard it.
+ */
+ pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
+ if (!cut.first) {
+ continue;
+ }
+ *i = cut;
}
+
+ if (_last_audio_time) {
+ fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
+ }
+
+ emit_audio (i->first, i->second);
}
- return overlaps;
+ return !earliest && !filled;
}
-bool
-Player::pass ()
+optional<PositionImage>
+Player::subtitles_for_frame (DCPTime time) const
{
- if (!_have_valid_pieces) {
- setup_pieces ();
- }
+ list<PositionImage> subtitles;
- shared_ptr<Piece> earliest;
- DCPTime earliest_position;
- BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
- if (t < earliest_position) {
- earliest_position = t;
- earliest = i;
+ BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
+
+ /* Image subtitles */
+ list<PositionImage> c = transform_image_subtitles (i.image);
+ copy (c.begin(), c.end(), back_inserter (subtitles));
+
+ /* Text subtitles (rendered to an image) */
+ if (!i.text.empty ()) {
+ list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
+ copy (s.begin(), s.end(), back_inserter (subtitles));
}
}
- earliest->decoder->pass ();
+ if (subtitles.empty ()) {
+ return optional<PositionImage> ();
+ }
- /* XXX: collect audio and maybe emit some */
+ return merge (subtitles);
}
void
return;
}
- /* XXX: get subs to burn in and burn them in */
+ FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
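+ /* With this frame rate change every other frame is skipped, so drop the odd-numbered ones */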
+ if (frc.skip && (video.frame % 2) == 1) {
+ return;
+ }
+
+ /* Time and period of the frame we will emit */
+ DCPTime const time = content_video_to_dcp (piece, video.frame);
+ DCPTimePeriod const period (time, time + one_video_frame());
+ /* Discard if it's outside the content's period or if it's before the last accurate seek */
+ if (
+ time < piece->content->position() ||
+ time >= piece->content->end() ||
+ (_last_video_time && time < *_last_video_time)) {
+ return;
+ }
- /* Fill gaps */
+ /* Fill gaps that we discover now that we have some video which needs to be emitted */
- DCPTime const time = content_video_to_dcp (piece, video.frame.index());
+ optional<DCPTime> fill_to;
+ if (_last_video_time) {
+ fill_to = _last_video_time;
+ }
- for (DCPTime i = _last_video_time; i < time; i += DCPTime::from_frames (1, _film->video_frame_rate())) {
- if (_playlist->video_content_at(i) && _last_video) {
- Video (_last_video->clone (i));
- } else {
- Video (black_player_video_frame (i));
+ if (fill_to) {
+ /* XXX: this may not work for 3D */
+ BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*fill_to, time), _no_video)) {
+ for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
+ LastVideoMap::const_iterator k = _last_video.find (wp);
+ if (k != _last_video.end ()) {
+ emit_video (k->second, j);
+ } else {
+ emit_video (black_player_video_frame(), j);
+ }
+ }
}
}
- _last_video.reset (
+ _last_video[wp].reset (
new PlayerVideo (
video.image,
- time,
piece->content->video->crop (),
- piece->content->video->fade (video.frame.index()),
+ piece->content->video->fade (video.frame),
piece->content->video->scale().size (
piece->content->video, _video_container_size, _film->frame_size ()
),
_video_container_size,
- video.frame.eyes(),
+ video.eyes,
video.part,
piece->content->video->colour_conversion ()
)
);
- _last_video_time = time;
-
- Video (_last_video);
+ emit_video (_last_video[wp], time);
}
void
-Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
+Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
{
- shared_ptr<Piece> piece = wp.lock ();
- if (!piece) {
+ shared_ptr<AudioContent> content = piece->content->audio;
+ DCPOMATIC_ASSERT (content);
+
+ shared_ptr<Resampler> r = resampler (content, stream, false);
+ if (!r) {
return;
}
- shared_ptr<AudioContent> content = piece->content->audio;
- DCPOMATIC_ASSERT (content);
+ pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
+ if (ro.first->frames() == 0) {
+ return;
+ }
+
+ ContentAudio content_audio;
+ content_audio.audio = ro.first;
+ content_audio.frame = ro.second;
- shared_ptr<AudioBuffers> audio = content_audio.audio;
+ /* Compute time in the DCP */
+ DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
+
+ audio_transform (content, stream, content_audio, time);
+}
+
+/** Do our common processing on some audio */
+void
+Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
+{
+ DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain */
+
if (content->gain() != 0) {
- shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
+ shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
gain->apply_gain (content->gain ());
- audio = gain;
+ content_audio.audio = gain;
}
- /* XXX: end-trimming used to be checked here */
+ /* Remap */
- /* Compute time in the DCP */
- DCPTime const time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000);
-
- /* Remap channels */
- shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
+ shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
dcp_mapped->make_silent ();
AudioMapping map = stream->mapping ();
for (int j = 0; j < dcp_mapped->channels(); ++j) {
if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
dcp_mapped->accumulate_channel (
- audio.get(),
+ content_audio.audio.get(),
i,
static_cast<dcp::Channel> (j),
map.get (i, static_cast<dcp::Channel> (j))
}
}
- audio = dcp_mapped;
+ content_audio.audio = dcp_mapped;
+
+ /* Process */
if (_audio_processor) {
- audio = _audio_processor->run (audio, _film->audio_channels ());
+ content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
+ }
+
+ /* Pad any gap which may be caused by audio delay */
+
+ if (_last_audio_time) {
+ fill_audio (DCPTimePeriod (*_last_audio_time, time));
}
- _audio_merger.push (audio, time);
+ /* Push */
+
+ _audio_merger.push (content_audio.audio, time);
+ DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
+ _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
void
-Player::image_subtitle (weak_ptr<Piece> piece, ContentImageSubtitle subtitle)
+Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
- /* XXX: Store for video to see */
+ DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
+
+ shared_ptr<Piece> piece = wp.lock ();
+ if (!piece) {
+ return;
+ }
+
+ shared_ptr<AudioContent> content = piece->content->audio;
+ DCPOMATIC_ASSERT (content);
+
+ /* Resample */
+ if (stream->frame_rate() != content->resampled_frame_rate()) {
+ shared_ptr<Resampler> r = resampler (content, stream, true);
+ pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
+ if (ro.first->frames() == 0) {
+ return;
+ }
+ content_audio.audio = ro.first;
+ content_audio.frame = ro.second;
+ }
+
+ /* Compute time in the DCP */
+ DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
+ /* And the end of this block in the DCP */
+ DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
+
+ /* Remove anything that comes before the start or after the end of the content */
+ if (time < piece->content->position()) {
+ pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
+ if (!cut.first) {
+ /* This audio is entirely discarded */
+ return;
+ }
+ content_audio.audio = cut.first;
+ time = cut.second;
+ } else if (time > piece->content->end()) {
+ /* Discard it all */
+ return;
+ } else if (end > piece->content->end()) {
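+ /* This block overlaps the end of the content; keep only the part before the end */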
+ Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
+ if (remaining_frames == 0) {
+ return;
+ }
+ shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
+ cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
+ content_audio.audio = cut;
+ }
+
+ audio_transform (content, stream, content_audio, time);
}
void
-Player::text_subtitle (weak_ptr<Piece> piece, ContentTextSubtitle subtitle)
+Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
{
- /* XXX: Store for video to see, or emit */
+ shared_ptr<Piece> piece = wp.lock ();
+ if (!piece) {
+ return;
+ }
+
+ /* Apply content's subtitle offsets */
+ subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
+ subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
+
+ /* Apply content's subtitle scale */
+ subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
+ subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
+
+ /* Apply a corrective translation to keep the subtitle centred after that scale */
+ subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
+ subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
+
+ PlayerSubtitles ps;
+ ps.image.push_back (subtitle.sub);
+ DCPTime from (content_time_to_dcp (piece, subtitle.from()));
+
+ _active_subtitles.add_from (wp, ps, from);
+}
+
+void
+Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
+{
+ shared_ptr<Piece> piece = wp.lock ();
+ if (!piece) {
+ return;
+ }
+
+ PlayerSubtitles ps;
+ DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
+
+ BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
+ s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
+ s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
+ float const xs = piece->content->subtitle->x_scale();
+ float const ys = piece->content->subtitle->y_scale();
+ float size = s.size();
+
+ /* Adjust size to express the common part of the scaling; the expression below is
+ equivalent to size *= max (xs, ys), so e.g. if xs = ys = 0.5 we scale size by 0.5.
+ */
+ if (xs > 1e-5 && ys > 1e-5) {
+ size *= 1 / min (1 / xs, 1 / ys);
+ }
+ s.set_size (size);
+
+ /* Then express aspect ratio changes */
+ if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
+ s.set_aspect_adjust (xs / ys);
+ }
+
+ s.set_in (dcp::Time(from.seconds(), 1000));
+ ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
+ ps.add_fonts (piece->content->subtitle->fonts ());
+ }
+
+ _active_subtitles.add_from (wp, ps, from);
+}
+
+void
+Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
+{
+ if (!_active_subtitles.have (wp)) {
+ return;
+ }
+
+ shared_ptr<Piece> piece = wp.lock ();
+ if (!piece) {
+ return;
+ }
+
+ DCPTime const dcp_to = content_time_to_dcp (piece, to);
+
+ pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
+
+ if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
+ Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
+ }
}
void
Player::seek (DCPTime time, bool accurate)
{
- /* XXX: seek decoders */
+ if (_audio_processor) {
+ _audio_processor->flush ();
+ }
+
+ for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
+ i->second->flush ();
+ i->second->reset ();
+ }
+
+ _audio_merger.clear ();
+ _active_subtitles.clear ();
+
+ BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+ if (time < i->content->position()) {
+ /* Before; seek to 0 */
+ i->decoder->seek (ContentTime(), accurate);
+ i->done = false;
+ } else if (i->content->position() <= time && time < i->content->end()) {
+ /* During; seek to position */
+ i->decoder->seek (dcp_to_content_time (i, time), accurate);
+ i->done = false;
+ } else {
+ /* After; this piece is done */
+ i->done = true;
+ }
+ }
if (accurate) {
- _last_video_time = time - DCPTime::from_frames (1, _film->video_frame_rate ());
+ _last_video_time = time;
+ _last_audio_time = time;
+ } else {
+ _last_video_time = optional<DCPTime>();
+ _last_audio_time = optional<DCPTime>();
+ }
+}
+
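+/** Get a resampler for a given piece of audio content and stream.
+ *  @param create if true, create a resampler when none exists already; if false,
+ *  return an empty pointer when there is no existing resampler.
+ */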
+shared_ptr<Resampler>
+Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
+{
+ ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
+ if (i != _resamplers.end ()) {
+ return i->second;
+ }
+
+ if (!create) {
+ return shared_ptr<Resampler> ();
+ }
+
+ LOG_GENERAL (
+ "Creating new resampler from %1 to %2 with %3 channels",
+ stream->frame_rate(),
+ content->resampled_frame_rate(),
+ stream->channels()
+ );
+
+ shared_ptr<Resampler> r (
+ new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
+ );
+
+ _resamplers[make_pair(content, stream)] = r;
+ return r;
+}
+
+void
+Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
+{
+ optional<PositionImage> subtitles = subtitles_for_frame (time);
+ if (subtitles) {
+ pv->set_subtitle (subtitles.get ());
+ }
+
+ Video (pv, time);
+
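+ /* Only advance the video clock (and expire old subtitles) once a frame is
+ complete, i.e. after the right eye in 3D.
+ */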
+ if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
+ _last_video_time = time + one_video_frame();
+ _active_subtitles.clear_before (time);
+ }
+}
+
+void
+Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
+{
+ Audio (data, time);
+ _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
+}
+
+void
+Player::fill_audio (DCPTimePeriod period)
+{
+ BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
+ DCPTime t = i.from;
+ while (t < i.to) {
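+ /* Emit silence in blocks of at most 0.5s so that a long gap does not need one huge buffer */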
+ DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
+ Frame const samples = block.frames_round(_film->audio_frame_rate());
+ if (samples) {
+ shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
+ silence->make_silent ();
+ emit_audio (silence, t);
+ }
+ t += block;
+ }
+ }
+}
+
+DCPTime
+Player::one_video_frame () const
+{
+ return DCPTime::from_frames (1, _film->video_frame_rate ());
+}
+
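+/** Discard the part of some audio that comes before a given time.
+ *  @return the remaining audio and the DCP time at which it now starts; the first
+ *  part of the pair is empty if everything was discarded.
+ */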
+pair<shared_ptr<AudioBuffers>, DCPTime>
+Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
+{
+ DCPTime const discard_time = discard_to - time;
+ Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
+ Frame remaining_frames = audio->frames() - discard_frames;
+ if (remaining_frames <= 0) {
+ return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
}
+ shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
+ cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
+ return make_pair(cut, time + discard_time);
}