#include "playlist.h"
#include "ratio.h"
#include "raw_image_proxy.h"
-#include "referenced_reel_asset.h"
#include "render_text.h"
#include "shuffler.h"
#include "text_content.h"
using std::list;
using std::make_pair;
using std::make_shared;
-using std::make_shared;
using std::max;
using std::min;
-using std::min;
using std::pair;
using std::shared_ptr;
using std::vector;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
+int const PlayerProperty::IGNORE_VIDEO = 706;
+int const PlayerProperty::IGNORE_AUDIO = 707;
+int const PlayerProperty::IGNORE_TEXT = 708;
+int const PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES = 709;
+int const PlayerProperty::PLAY_REFERENCED = 710;
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
, _fast(false)
, _tolerant (film->tolerant())
, _play_referenced(false)
- , _audio_merger (_film->audio_frame_rate())
+ , _audio_merger(film->audio_frame_rate())
, _subtitle_alignment (subtitle_alignment)
{
construct ();
, _fast(false)
, _tolerant (film->tolerant())
, _play_referenced(false)
- , _audio_merger (_film->audio_frame_rate())
+ , _audio_merger(film->audio_frame_rate())
{
construct ();
}
void
Player::construct ()
{
- _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
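+ /* _film is only a weak_ptr, so it must be locked (and checked) before each use */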
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
+ _film_changed_connection = film->Change.connect(bind(&Player::film_change, this, _1, _2));
/* The butler must hear about this first, so, since we are proxying this through to the butler, we must
be connected first.
*/
_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
- set_video_container_size (_film->frame_size ());
+ set_video_container_size(film->frame_size());
film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
{
boost::mutex::scoped_lock lm (_mutex);
- _playback_length = _playlist ? _playlist->length(_film) : _film->length();
-
auto old_pieces = _pieces;
_pieces.clear ();
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
+ _playback_length = _playlist ? _playlist->length(film) : film->length();
+
auto playlist_content = playlist()->content();
bool const have_threed = std::any_of(
playlist_content.begin(),
}
}
- auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
+ auto decoder = decoder_factory(film, i, _fast, _tolerant, old_decoder);
DCPOMATIC_ASSERT (decoder);
- FrameRateChange frc (_film, i);
+ FrameRateChange frc(film, i);
if (decoder->video && _ignore_video) {
decoder->video->set_ignore (true);
for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
if (ignore_overlap((*i)->content->video)) {
/* Look for content later in the content list with in-use video that overlaps this */
- auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
+ auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(film));
for (auto j = std::next(i); j != _pieces.end(); ++j) {
if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
- (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
+ (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(film)).overlap(period);
}
}
}
}
- _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
- _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
+ _black = Empty(film, playlist(), bind(&have_video, _1), _playback_length);
+ _silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);
_next_video_time = boost::none;
- _next_video_eyes = Eyes::BOTH;
_next_audio_time = boost::none;
}
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
if (property == VideoContentProperty::CROP) {
if (type == ChangeType::DONE) {
- auto const vcs = video_container_size();
boost::mutex::scoped_lock lm (_mutex);
for (auto const& i: _delay) {
- i.first->reset_metadata (_film, vcs);
+ i.first->reset_metadata(film, _video_container_size);
}
}
} else {
void
Player::set_video_container_size (dcp::Size s)
{
- Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
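+ /* Sends PENDING now and DONE when it goes out of scope; abort() makes that CANCELLED instead */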
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::VIDEO_CONTAINER_SIZE);
if (s == _video_container_size) {
- Change(ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+ cc.abort();
return;
}
_video_container_size = s;
{
- boost::mutex::scoped_lock lm (_mutex);
+ boost::mutex::scoped_lock lm(_black_image_mutex);
_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
_black_image->make_black ();
}
-
- Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
last time we were run.
*/
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
if (p == Film::Property::CONTAINER) {
Change (type, PlayerProperty::FILM_CONTAINER, false);
} else if (p == Film::Property::VIDEO_FRAME_RATE) {
}
Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
} else if (p == Film::Property::AUDIO_PROCESSOR) {
- if (type == ChangeType::DONE && _film->audio_processor ()) {
+ if (type == ChangeType::DONE && film->audio_processor ()) {
boost::mutex::scoped_lock lm (_mutex);
- _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
+ _audio_processor = film->audio_processor()->clone(film->audio_frame_rate());
}
} else if (p == Film::Property::AUDIO_CHANNELS) {
if (type == ChangeType::DONE) {
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
+ boost::mutex::scoped_lock lm(_black_image_mutex);
+
return std::make_shared<PlayerVideo> (
- std::make_shared<const RawImageProxy>(_black_image),
+ make_shared<const RawImageProxy>(_black_image),
Crop(),
optional<double>(),
_video_container_size,
PresetColourConversion::all().front().conversion,
VideoRange::FULL,
std::weak_ptr<Content>(),
- boost::optional<Frame>(),
+ boost::optional<dcpomatic::ContentTime>(),
false
);
}
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
auto s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(_film), s);
+ s = min (piece->content->length_after_trim(film), s);
s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
auto s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(_film), s);
+ s = min (piece->content->length_after_trim(film), s);
/* See notes in dcp_to_content_video */
- return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
+ return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(film->audio_frame_rate());
}
DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
/* See comment in dcp_to_content_video */
- return DCPTime::from_frames (f, _film->audio_frame_rate())
+ return DCPTime::from_frames(f, film->audio_frame_rate())
- DCPTime (piece->content->trim_start(), piece->frc)
+ piece->content->position();
}
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
auto s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(_film), s);
+ s = min (piece->content->length_after_trim(film), s);
return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
void
Player::set_ignore_video ()
{
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_VIDEO);
_ignore_video = true;
setup_pieces();
}
void
Player::set_ignore_audio ()
{
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_AUDIO);
_ignore_audio = true;
setup_pieces();
}
void
Player::set_ignore_text ()
{
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_TEXT);
_ignore_text = true;
setup_pieces();
}
void
Player::set_always_burn_open_subtitles ()
{
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES);
_always_burn_open_subtitles = true;
}
void
Player::set_play_referenced ()
{
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::PLAY_REFERENCED);
_play_referenced = true;
setup_pieces();
}
-static void
-maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
-{
- DCPOMATIC_ASSERT (r);
- r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
- r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
- if (r->actual_duration() > 0) {
- a.push_back (
- ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
- );
- }
-}
-
-
-list<ReferencedReelAsset>
-Player::get_reel_assets ()
-{
- /* Does not require a lock on _mutex as it's only called from DCPEncoder */
-
- list<ReferencedReelAsset> reel_assets;
-
- for (auto content: playlist()->content()) {
- auto dcp = dynamic_pointer_cast<DCPContent>(content);
- if (!dcp) {
- continue;
- }
-
- if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
- continue;
- }
-
- scoped_ptr<DCPDecoder> decoder;
- try {
- decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
- } catch (...) {
- return reel_assets;
- }
-
- auto const frame_rate = _film->video_frame_rate();
- DCPOMATIC_ASSERT (dcp->video_frame_rate());
- /* We should only be referencing if the DCP rate is the same as the film rate */
- DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);
-
- Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
- Frame const trim_end = dcp->trim_end().frames_round(frame_rate);
-
- /* position in the asset from the start */
- int64_t offset_from_start = 0;
- /* position i the asset from the end */
- int64_t offset_from_end = 0;
- for (auto reel: decoder->reels()) {
- /* Assume that main picture duration is the length of the reel */
- offset_from_end += reel->main_picture()->actual_duration();
- }
-
- for (auto reel: decoder->reels()) {
-
- /* Assume that main picture duration is the length of the reel */
- int64_t const reel_duration = reel->main_picture()->actual_duration();
-
- /* See doc/design/trim_reels.svg */
- Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
- Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
-
- auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
- if (dcp->reference_video()) {
- maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
- }
-
- if (dcp->reference_audio()) {
- maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
- }
-
- if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
- maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
- }
-
- if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
- for (auto caption: reel->closed_captions()) {
- maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
- }
- }
-
- offset_from_start += reel_duration;
- offset_from_end -= reel_duration;
- }
- }
-
- return reel_assets;
-}
-
-
bool
Player::pass ()
{
return false;
}
- if (_playback_length == DCPTime()) {
+ auto film = _film.lock();
+ if (!film) {
+ /* Without the Film we can do nothing; one_video_frame() below needs it */
+ return false;
+ }
+
+ if (_playback_length.load() == DCPTime()) {
/* Special; just give one black frame */
- emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
+ use_video(black_player_video_frame(Eyes::BOTH), DCPTime(), one_video_frame());
return true;
}
}
auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
- if (t > i->content->end(_film)) {
+ if (t > i->content->end(film)) {
i->done = true;
} else {
LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
earliest_content->done = earliest_content->decoder->pass ();
auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
- if (dcp && !_play_referenced && dcp->reference_audio()) {
- /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
- to `hide' the fact that no audio was emitted during the referenced DCP (though
- we need to behave as though it was).
- */
- _next_audio_time = dcp->end (_film);
+ if (dcp && !_play_referenced) {
+ if (dcp->reference_video()) {
+ _next_video_time = dcp->end(film);
+ }
+ if (dcp->reference_audio()) {
+ /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
+ to `hide' the fact that no audio was emitted during the referenced DCP (though
+ we need to behave as though it was).
+ */
+ _next_audio_time = dcp->end(film);
+ }
}
break;
}
case BLACK:
LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
- emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
+ use_video(black_player_video_frame(Eyes::BOTH), _black.position(), _black.period_at_position().to);
_black.set_position (_black.position() + one_video_frame());
break;
case SILENT:
/* Let's not worry about less than a frame at 24fps */
int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
if (error >= too_much_error) {
- _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
+ film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
}
DCPOMATIC_ASSERT (error < too_much_error);
period.from = *_next_audio_time;
}
}
- auto pull_to = _playback_length;
+ auto pull_to = _playback_length.load();
for (auto const& i: alive_stream_states) {
if (!i.second.piece->done && i.second.last_push_end < pull_to) {
pull_to = i.second.last_push_end;
}
if (done) {
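+ /* Flush any video we are still holding back, right up to the end of the film */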
+ emit_video_until(film->length());
+
if (_shuffler) {
_shuffler->flush ();
}
for (auto const& i: _delay) {
- do_emit_video(i.first, i.second);
- }
-
- /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
- * However, if we have L and R video files, and one is shorter than the other,
- * the fill code in ::video mostly takes care of filling in the gaps.
- * However, since it fills at the point when it knows there is more video coming
- * at time t (so it should fill any gap up to t) it can't do anything right at the
- * end. This is particularly bad news if the last frame emitted is a LEFT
- * eye, as the MXF writer will complain about the 3D sequence being wrong.
- * Here's a hack to workaround that particular case.
- */
- if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
- do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
+ emit_video(i.first, i.second);
}
}
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
+ auto film = _film.lock();
+ if (!film) {
+ return {};
+ }
+
list<PositionImage> captions;
- int const vfr = _film->video_frame_rate();
+ int const vfr = film->video_frame_rate();
for (
auto j:
- _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
+ _active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
) {
/* Bitmap subtitles */
}
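+/** Emit video from _last_video, via the short _delay queue, up to (but not including)
+ * `time': pair left- and right-eye frames where we can, and fill with black where no
+ * sufficiently recent video is available.
+ */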
+void
+Player::emit_video_until(DCPTime time)
+{
+ auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
+ /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
+ player before the video that requires them.
+ */
+ _delay.push_back(make_pair(pv, time));
+
+ if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+ _next_video_time = time + one_video_frame();
+ }
+
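+ /* Hold up to three frames in _delay before letting anything out of the far end */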
+ if (_delay.size() < 3) {
+ return;
+ }
+
+ auto to_do = _delay.front();
+ _delay.pop_front();
+ emit_video(to_do.first, to_do.second);
+ };
+
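+ /* Only re-use a stored frame if we are within two video frames of its time; for
+ bigger gaps we fill with black.
+ */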
+ auto const age_threshold = one_video_frame() * 2;
+
+ while (_next_video_time.get_value_or({}) < time) {
+ auto left = _last_video[Eyes::LEFT];
+ auto right = _last_video[Eyes::RIGHT];
+ auto both = _last_video[Eyes::BOTH];
+
+ auto const next = _next_video_time.get_value_or({});
+
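+ /* Prefer a matched left/right pair which is fresh and at least as new as any
+ BOTH-eyes frame; otherwise fall back to a fresh BOTH-eyes frame, then to black.
+ */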
+ if (
+ left.first &&
+ right.first &&
+ (!both.first || (left.second >= both.second && right.second >= both.second)) &&
+ (left.second - next) < age_threshold &&
+ (right.second - next) < age_threshold
+ ) {
+ frame(left.first, next);
+ frame(right.first, next);
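+ LOG_DEBUG_PLAYER("L and R pair selected for DCP %1", to_string(next));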
+ } else if (both.first && (both.second - next) < age_threshold) {
+ frame(both.first, next);
+ LOG_DEBUG_PLAYER("Content %1 selected for DCP %2 (age %3)", to_string(both.second), to_string(next), to_string(both.second - next));
+ } else {
+ frame(black_player_video_frame(Eyes::BOTH), next);
+ LOG_DEBUG_PLAYER("Black selected for DCP %1", to_string(next));
+ }
+ }
+}
+
+
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
return;
}
- FrameRateChange frc (_film, piece->content);
- if (frc.skip && (video.frame % 2) == 1) {
+ auto film = _film.lock();
+ if (!film) {
return;
}
- /* Time of the first frame we will emit */
- DCPTime const time = content_video_to_dcp (piece, video.frame);
- LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+ auto const three_d = film->three_d();
- /* Discard if it's before the content's period or the last accurate seek. We can't discard
- if it's after the content's period here as in that case we still need to fill any gap between
- `now' and the end of the content's period.
- */
- if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
+ if (!three_d) {
+ if (video.eyes == Eyes::LEFT) {
+ /* Use left-eye images for both eyes... */
+ video.eyes = Eyes::BOTH;
+ } else if (video.eyes == Eyes::RIGHT) {
+ /* ...and discard the right */
+ return;
+ }
+ }
+
+ /* Time of the frame we just received within the DCP */
+ auto const time = content_time_to_dcp(piece, video.time);
+ LOG_DEBUG_PLAYER("Received video frame %1 %2 eyes %3", to_string(video.time), to_string(time), static_cast<int>(video.eyes));
+
+ if (time < piece->content->position()) {
return;
}
return;
}
- /* Fill gaps that we discover now that we have some video which needs to be emitted.
- This is where we need to fill to.
- */
- DCPTime fill_to = min (time, piece->content->end(_film));
-
- if (_next_video_time) {
- DCPTime fill_from = max (*_next_video_time, piece->content->position());
-
- /* Fill if we have more than half a frame to do */
- if ((fill_to - fill_from) > one_video_frame() / 2) {
- auto last = _last_video.find (weak_piece);
- if (_film->three_d()) {
- auto fill_to_eyes = video.eyes;
- if (fill_to_eyes == Eyes::BOTH) {
- fill_to_eyes = Eyes::LEFT;
- }
- if (fill_to == piece->content->end(_film)) {
- /* Don't fill after the end of the content */
- fill_to_eyes = Eyes::LEFT;
- }
- auto j = fill_from;
- auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
- if (eyes == Eyes::BOTH) {
- eyes = Eyes::LEFT;
- }
- while (j < fill_to || eyes != fill_to_eyes) {
- if (last != _last_video.end()) {
- LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
- auto copy = last->second->shallow_copy();
- copy->set_eyes (eyes);
- emit_video (copy, j);
- } else {
- LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
- emit_video (black_player_video_frame(eyes), j);
- }
- if (eyes == Eyes::RIGHT) {
- j += one_video_frame();
- }
- eyes = increment_eyes (eyes);
- }
- } else {
- for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
- if (last != _last_video.end()) {
- emit_video (last->second, j);
- } else {
- emit_video (black_player_video_frame(Eyes::BOTH), j);
- }
- }
- }
- }
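+ /* First video since construction or a non-accurate seek: start the video clock here */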
+ if (!_next_video_time) {
+ _next_video_time = time.round(film->video_frame_rate());
}
auto const content_video = piece->content->video;
-
- _last_video[weak_piece] = std::make_shared<PlayerVideo>(
- video.image,
- content_video->actual_crop(),
- content_video->fade (_film, video.frame),
- scale_for_display(
- content_video->scaled_size(_film->frame_size()),
+ use_video(
+ std::make_shared<PlayerVideo>(
+ video.image,
+ content_video->actual_crop(),
+ content_video->fade(film, video.time),
+ scale_for_display(
+ content_video->scaled_size(film->frame_size()),
+ _video_container_size,
+ film->frame_size(),
+ content_video->pixel_quanta()
+ ),
_video_container_size,
- _film->frame_size(),
- content_video->pixel_quanta()
+ video.eyes,
+ video.part,
+ content_video->colour_conversion(),
+ content_video->range(),
+ piece->content,
+ video.time,
+ false
),
- _video_container_size,
- video.eyes,
- video.part,
- content_video->colour_conversion(),
- content_video->range(),
- piece->content,
- video.frame,
- false
- );
+ time,
+ piece->content->end(film)
+ );
+}
- DCPTime t = time;
- for (int i = 0; i < frc.repeat; ++i) {
- if (t < piece->content->end(_film)) {
- emit_video (_last_video[weak_piece], t);
- }
- t += one_video_frame ();
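+/** Remember a frame against its eye type, then emit any video which can now safely
+ * be sent out: up to half a frame past `time', but no further than `end' (the end
+ * of the frame's content).
+ */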
+void
+Player::use_video(shared_ptr<PlayerVideo> pv, DCPTime time, DCPTime end)
+{
+ _last_video[pv->eyes()] = { pv, time };
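+ /* A LEFT frame may still be awaiting its RIGHT partner, so it cannot advance the clock */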
+ if (pv->eyes() != Eyes::LEFT) {
+ emit_video_until(std::min(time + one_video_frame() / 2, end));
}
}
return;
}
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
auto content = piece->content->audio;
DCPOMATIC_ASSERT (content);
- int const rfr = content->resampled_frame_rate (_film);
+ int const rfr = content->resampled_frame_rate(film);
/* Compute time in the DCP */
auto time = resampled_audio_to_dcp (piece, content_audio.frame);
}
content_audio.audio = cut.first;
time = cut.second;
- } else if (time > piece->content->end(_film)) {
+ } else if (time > piece->content->end(film)) {
/* Discard it all */
return;
- } else if (end > piece->content->end(_film)) {
- Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
+ } else if (end > piece->content->end(film)) {
+ Frame const remaining_frames = DCPTime(piece->content->end(film) - time).frames_round(rfr);
if (remaining_frames == 0) {
return;
}
/* Remap */
- content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
+ content_audio.audio = remap(content_audio.audio, film->audio_channels(), stream->mapping());
/* Process */
if (_audio_processor) {
- content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
+ content_audio.audio = _audio_processor->run(content_audio.audio, film->audio_channels());
}
/* Push */
_audio_merger.push (content_audio.audio, time);
DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
- _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
+ _stream_states[stream].last_push_end = time + DCPTime::from_frames(content_audio.audio->frames(), film->audio_frame_rate());
}
}
DCPTime from(content_time_to_dcp(piece, subtitle.from()));
- _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
+ _active_texts[content->type()].add_from(weak_content, ps, from);
}
auto piece = weak_piece.lock ();
auto content = weak_content.lock ();
- if (!piece || !content) {
+ auto film = _film.lock();
+ if (!piece || !content || !film) {
return;
}
PlayerText ps;
DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
- if (from > piece->content->end(_film)) {
+ if (from > piece->content->end(film)) {
return;
}
ps.string.push_back (s);
}
- _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
+ _active_texts[content->type()].add_from(weak_content, ps, from);
}
return;
}
- if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
+ if (!_active_texts[content->type()].have(weak_content)) {
return;
}
auto piece = weak_piece.lock ();
- if (!piece) {
+ auto film = _film.lock();
+ if (!piece || !film) {
return;
}
DCPTime const dcp_to = content_time_to_dcp (piece, to);
- if (dcp_to > piece->content->end(_film)) {
+ if (dcp_to > piece->content->end(film)) {
return;
}
- auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
+ auto from = _active_texts[content->type()].add_to(weak_content, dcp_to);
bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
if (content->use() && !always && !content->burn()) {
return;
}
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
if (_shuffler) {
_shuffler->clear ();
}
}
_audio_merger.clear ();
- for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
- _active_texts[i].clear ();
- }
+ std::for_each(_active_texts.begin(), _active_texts.end(), [](ActiveText& a) { a.clear(); });
for (auto i: _pieces) {
if (time < i->content->position()) {
*/
i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
i->done = false;
- } else if (i->content->position() <= time && time < i->content->end(_film)) {
+ } else if (i->content->position() <= time && time < i->content->end(film)) {
/* During; seek to position */
i->decoder->seek (dcp_to_content_time (i, time), accurate);
i->done = false;
if (accurate) {
_next_video_time = time;
- _next_video_eyes = Eyes::LEFT;
_next_audio_time = time;
} else {
_next_video_time = boost::none;
- _next_video_eyes = boost::none;
_next_audio_time = boost::none;
}
_black.set_position (time);
_silent.set_position (time);
- _last_video.clear ();
+ _last_video[Eyes::LEFT] = {};
+ _last_video[Eyes::RIGHT] = {};
+ _last_video[Eyes::BOTH] = {};
}
void
-Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
+Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time)
{
- if (!_film->three_d()) {
- if (pv->eyes() == Eyes::LEFT) {
- /* Use left-eye images for both eyes... */
- pv->set_eyes (Eyes::BOTH);
- } else if (pv->eyes() == Eyes::RIGHT) {
- /* ...and discard the right */
- return;
- }
- }
-
- /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
- player before the video that requires them.
- */
- _delay.push_back (make_pair (pv, time));
-
if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
- _next_video_time = time + one_video_frame();
- }
- _next_video_eyes = increment_eyes (pv->eyes());
-
- if (_delay.size() < 3) {
- return;
- }
-
- auto to_do = _delay.front();
- _delay.pop_front();
- do_emit_video (to_do.first, to_do.second);
-}
-
-
-void
-Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
-{
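+ /* Now that this frame is complete we can forget subtitles which finished before it */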
- if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
- for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
- _active_texts[i].clear_before (time);
- }
+ std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
}
auto subtitles = open_subtitles_for_frame (time);
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
/* Log if the assert below is about to fail */
if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
- _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
+ film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
}
/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
- Audio (data, time, _film->audio_frame_rate());
- _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
+ Audio(data, time, film->audio_frame_rate());
+ _next_audio_time = time + DCPTime::from_frames(data->frames(), film->audio_frame_rate());
}
void
Player::fill_audio (DCPTimePeriod period)
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
if (period.from == period.to) {
return;
}
DCPTime t = period.from;
while (t < period.to) {
DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
- Frame const samples = block.frames_round(_film->audio_frame_rate());
+ Frame const samples = block.frames_round(film->audio_frame_rate());
if (samples) {
- auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
+ auto silence = make_shared<AudioBuffers>(film->audio_channels(), samples);
silence->make_silent ();
emit_audio (silence, t);
}
DCPTime
Player::one_video_frame () const
{
- return DCPTime::from_frames (1, _film->video_frame_rate ());
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
+ return DCPTime::from_frames(1, film->video_frame_rate());
}
pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
auto const discard_time = discard_to - time;
- auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
+ auto const discard_frames = discard_time.frames_round(film->audio_frame_rate());
auto remaining_frames = audio->frames() - discard_frames;
if (remaining_frames <= 0) {
return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
- Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::DCP_DECODE_REDUCTION);
if (reduction == _dcp_decode_reduction.load()) {
- Change(ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
+ cc.abort();
return;
}
_dcp_decode_reduction = reduction;
setup_pieces();
-
- Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
optional<DCPTime>
-Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
+Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
{
boost::mutex::scoped_lock lm (_mutex);
optional<ContentTime>
-Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
+Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
{
boost::mutex::scoped_lock lm (_mutex);
shared_ptr<const Playlist>
Player::playlist () const
{
- return _playlist ? _playlist : _film->playlist();
+ auto film = _film.lock();
+ if (!film) {
+ return {};
+ }
+
+ return _playlist ? _playlist : film->playlist();
}
return;
}
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
auto piece = weak_piece.lock ();
DCPOMATIC_ASSERT (piece);
- auto const vfr = _film->video_frame_rate();
+ auto const vfr = film->video_frame_rate();
DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
- if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
+ if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(film))) {
return;
}
Atmos (data.data, dcp_time, data.metadata);
}
+
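+/* Called by our ChangeSignaller to emit the Change signal */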
+void
+Player::signal_change(ChangeType type, int property)
+{
+ Change(type, property, false);
+}
+