}
-bool
-have_video (shared_ptr<const Content> content)
-{
- return static_cast<bool>(content->video) && content->video->use();
-}
-
-
-bool
-have_audio (shared_ptr<const Content> content)
-{
- return static_cast<bool>(content->audio);
-}
-
-
void
Player::setup_pieces_unlocked ()
{
auto old_pieces = _pieces;
_pieces.clear ();
- _shuffler.reset (new Shuffler());
- _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+ auto playlist_content = playlist()->content();
+ bool const have_threed = std::any_of(
+ playlist_content.begin(),
+ playlist_content.end(),
+ [](shared_ptr<const Content> c) {
+ return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
+ });
+
+
+ if (have_threed) {
+ _shuffler.reset(new Shuffler());
+ _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+ }
for (auto i: playlist()->content()) {
_pieces.push_back (piece);
if (decoder->video) {
- if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
+ if (have_threed) {
/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
} else {
}
}
+ auto ignore_overlap = [](shared_ptr<VideoContent> v) {
+ return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
+ };
+
for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
- if (auto video = (*i)->content->video) {
- if (video->use() && video->frame_type() != VideoFrameType::THREE_D_LEFT && video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
- /* Look for content later in the content list with in-use video that overlaps this */
- auto period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
- auto j = i;
- ++j;
- for (; j != _pieces.end(); ++j) {
- if ((*j)->content->video && (*j)->content->video->use()) {
- (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
- }
+ if (ignore_overlap((*i)->content->video)) {
+ /* Look for content later in the content list with in-use video that overlaps this */
+ auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
+ for (auto j = std::next(i); j != _pieces.end(); ++j) {
+ if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
+ (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
}
}
}
}
- _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
- _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
+ _black = EmptyVideo (_film, playlist(), _playback_length);
+ _silent = EmptyAudio (_film, playlist(), _playback_length);
_next_video_time = boost::none;
_next_video_eyes = Eyes::BOTH;
break;
}
case BLACK:
+ {
LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
- emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
+ auto period = _black.period_at_position();
+ emit_video (black_player_video_frame(period.second), _black.position());
_black.set_position (_black.position() + one_video_frame());
break;
+ }
case SILENT:
{
LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
}
if (done) {
- _shuffler->flush ();
+ if (_shuffler) {
+ _shuffler->flush ();
+ }
for (auto const& i: _delay) {
do_emit_video(i.first, i.second);
}
-
- /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
- * However, if we have L and R video files, and one is shorter than the other,
- * the fill code in ::video mostly takes care of filling in the gaps.
- * However, since it fills at the point when it knows there is more video coming
- * at time t (so it should fill any gap up to t) it can't do anything right at the
- * end. This is particularly bad news if the last frame emitted is a LEFT
- * eye, as the MXF writer will complain about the 3D sequence being wrong.
- * Here's a hack to workaround that particular case.
- */
- if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
- do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
- }
}
return done;
void
-Player::video (weak_ptr<Piece> wp, ContentVideo video)
+Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
if (_suspended) {
return;
}
- auto piece = wp.lock ();
+ auto piece = weak_piece.lock ();
if (!piece) {
return;
}
/* Time of the first frame we will emit */
DCPTime const time = content_video_to_dcp (piece, video.frame);
- LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+ if (video.eyes == Eyes::BOTH) {
+ LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+ } else {
+ LOG_DEBUG_PLAYER("Received video frame %1 %2 at %3", (video.eyes == Eyes::LEFT ? "L" : "R"), video.frame, to_string(time));
+ }
/* Discard if it's before the content's period or the last accurate seek. We can't discard
if it's after the content's period here as in that case we still need to fill any gap between
/* Fill if we have more than half a frame to do */
if ((fill_to - fill_from) > one_video_frame() / 2) {
- auto last = _last_video.find (wp);
+ auto last = _last_video.find (weak_piece);
if (_film->three_d()) {
auto fill_to_eyes = video.eyes;
if (fill_to_eyes == Eyes::BOTH) {
auto const content_video = piece->content->video;
- _last_video[wp] = std::make_shared<PlayerVideo>(
+ _last_video[weak_piece] = std::make_shared<PlayerVideo>(
video.image,
content_video->actual_crop(),
content_video->fade (_film, video.frame),
DCPTime t = time;
for (int i = 0; i < frc.repeat; ++i) {
if (t < piece->content->end(_film)) {
- emit_video (_last_video[wp], t);
+ emit_video (_last_video[weak_piece], t);
}
t += one_video_frame ();
}
void
-Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
+Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
if (_suspended) {
return;
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
- auto piece = wp.lock ();
+ auto piece = weak_piece.lock ();
if (!piece) {
return;
}
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
- /* Gain */
-
- if (content->gain() != 0) {
- auto gain = make_shared<AudioBuffers>(content_audio.audio);
- gain->apply_gain (content->gain());
- content_audio.audio = gain;
+ /* Gain and fade */
+
+ auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
+ if (content->gain() != 0 || !fade_coeffs.empty()) {
+ auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
+ if (!fade_coeffs.empty()) {
+ /* Apply both fade and gain */
+ DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
+ auto const channels = gain_buffers->channels();
+ auto const frames = fade_coeffs.size();
+ auto data = gain_buffers->data();
+ auto const gain = db_to_linear (content->gain());
+ for (auto channel = 0; channel < channels; ++channel) {
+ for (auto frame = 0U; frame < frames; ++frame) {
+ data[channel][frame] *= gain * fade_coeffs[frame];
+ }
+ }
+ } else {
+ /* Just apply gain */
+ gain_buffers->apply_gain (content->gain());
+ }
+ content_audio.audio = gain_buffers;
}
/* Remap */
void
-Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
+Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
{
if (_suspended) {
return;
}
- auto piece = wp.lock ();
- auto text = wc.lock ();
- if (!piece || !text) {
+ auto piece = weak_piece.lock ();
+ auto content = weak_content.lock ();
+ if (!piece || !content) {
return;
}
- /* Apply content's subtitle offsets */
- subtitle.sub.rectangle.x += text->x_offset ();
- subtitle.sub.rectangle.y += text->y_offset ();
+ PlayerText ps;
+ for (auto& sub: subtitle.subs)
+ {
+ /* Apply content's subtitle offsets */
+ sub.rectangle.x += content->x_offset ();
+ sub.rectangle.y += content->y_offset ();
- /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
- subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
- subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
+ /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
+ sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
+ sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
- /* Apply content's subtitle scale */
- subtitle.sub.rectangle.width *= text->x_scale ();
- subtitle.sub.rectangle.height *= text->y_scale ();
+ /* Apply content's subtitle scale */
+ sub.rectangle.width *= content->x_scale ();
+ sub.rectangle.height *= content->y_scale ();
- PlayerText ps;
- auto image = subtitle.sub.image;
+ auto image = sub.image;
- /* We will scale the subtitle up to fit _video_container_size */
- int const width = subtitle.sub.rectangle.width * _video_container_size.width;
- int const height = subtitle.sub.rectangle.height * _video_container_size.height;
- if (width == 0 || height == 0) {
- return;
- }
+ /* We will scale the subtitle up to fit _video_container_size */
+ int const width = sub.rectangle.width * _video_container_size.width;
+ int const height = sub.rectangle.height * _video_container_size.height;
+ if (width == 0 || height == 0) {
+ return;
+ }
- dcp::Size scaled_size (width, height);
- ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), subtitle.sub.rectangle));
- DCPTime from (content_time_to_dcp (piece, subtitle.from()));
+ dcp::Size scaled_size (width, height);
+ ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
+ }
- _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
+ DCPTime from(content_time_to_dcp(piece, subtitle.from()));
+ _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}
void
-Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
+Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
{
if (_suspended) {
return;
}
- auto piece = wp.lock ();
- auto text = wc.lock ();
- if (!piece || !text) {
+ auto piece = weak_piece.lock ();
+ auto content = weak_content.lock ();
+ if (!piece || !content) {
return;
}
}
for (auto s: subtitle.subs) {
- s.set_h_position (s.h_position() + text->x_offset ());
- s.set_v_position (s.v_position() + text->y_offset ());
- float const xs = text->x_scale();
- float const ys = text->y_scale();
+ s.set_h_position (s.h_position() + content->x_offset());
+ s.set_v_position (s.v_position() + content->y_offset());
+ float const xs = content->x_scale();
+ float const ys = content->y_scale();
float size = s.size();
/* Adjust size to express the common part of the scaling;
}
s.set_in (dcp::Time(from.seconds(), 1000));
- ps.string.push_back (StringText (s, text->outline_width()));
- ps.add_fonts (text->fonts ());
+ ps.string.push_back (StringText (s, content->outline_width()));
+ ps.add_fonts (content->fonts ());
}
- _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
+ _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}
void
-Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
+Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
{
if (_suspended) {
return;
}
- auto text = wc.lock ();
- if (!text) {
+ auto content = weak_content.lock ();
+ if (!content) {
return;
}
- if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
+ if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
return;
}
- shared_ptr<Piece> piece = wp.lock ();
+ auto piece = weak_piece.lock ();
if (!piece) {
return;
}
return;
}
- auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
+ auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
- bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
- if (text->use() && !always && !text->burn()) {
- Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
+ bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
+ if (content->use() && !always && !content->burn()) {
+ Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
}
}
pv->set_text (subtitles.get ());
}
+ LOG_DEBUG_PLAYER("Player --> Video %1 %2", to_string(time), static_cast<int>(pv->eyes()));
Video (pv, time);
}
}
+/** Find the piece representing @p content and convert a DCP timeline time
+ *  to a time within that piece's content, via the piece-taking overload of
+ *  dcp_to_content_time().  Takes _mutex to guard the iteration over _pieces.
+ *  @param content Content to look for in the current piece list.
+ *  @param t Time on the DCP timeline.
+ *  @return Corresponding ContentTime, or an empty optional if @p content has
+ *  no piece (e.g. while the piece list is being rebuilt).
+ */
+optional<ContentTime>
+Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
+{
+	boost::mutex::scoped_lock lm (_mutex);
+
+	for (auto i: _pieces) {
+		if (i->content == content) {
+			/* Delegate to the overload that knows this piece's timing */
+			return dcp_to_content_time (i, t);
+		}
+	}
+
+	/* We couldn't find this content; perhaps things are being changed over */
+	return {};
+}
+
+
shared_ptr<const Playlist>
Player::playlist () const
{