connect();
set_video_container_size(film->frame_size());
- film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
+ film_change(ChangeType::DONE, FilmProperty::AUDIO_PROCESSOR);
setup_pieces ();
seek (DCPTime (), true);
, _silent(std::move(other._silent))
, _active_texts(std::move(other._active_texts))
, _audio_processor(std::move(other._audio_processor))
+ , _disable_audio_processor(other._disable_audio_processor)
, _playback_length(other._playback_length.load())
, _subtitle_alignment(other._subtitle_alignment)
{
_silent = std::move(other._silent);
_active_texts = std::move(other._active_texts);
_audio_processor = std::move(other._audio_processor);
+ _disable_audio_processor = other._disable_audio_processor;
_playback_length = other._playback_length.load();
_subtitle_alignment = other._subtitle_alignment;
+/** @return true if @p content can be played and its has_mapped_audio() test
+ *  passes — presumably meaning it has audio that is mapped to at least one
+ *  output channel; confirm against the Content API.
+ */
bool
have_audio (shared_ptr<const Content> content)
{
-	return static_cast<bool>(content->audio) && content->can_be_played();
+	return content->has_mapped_audio() && content->can_be_played();
}
_stream_states.clear ();
for (auto i: _pieces) {
- if (i->content->audio) {
+ if (i->content->has_mapped_audio()) {
for (auto j: i->content->audio->streams()) {
_stream_states[j] = StreamState(i);
}
void
-Player::film_change (ChangeType type, Film::Property p)
+Player::film_change(ChangeType type, FilmProperty p)
{
/* Here we should notice Film properties that affect our output, and
alert listeners that our output now would be different to how it was
return;
}
- if (p == Film::Property::CONTAINER) {
+ if (p == FilmProperty::CONTAINER) {
Change (type, PlayerProperty::FILM_CONTAINER, false);
- } else if (p == Film::Property::VIDEO_FRAME_RATE) {
+ } else if (p == FilmProperty::VIDEO_FRAME_RATE) {
/* Pieces contain a FrameRateChange which contains the DCP frame rate,
so we need new pieces here.
*/
setup_pieces ();
}
Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
- } else if (p == Film::Property::AUDIO_PROCESSOR) {
+ } else if (p == FilmProperty::AUDIO_PROCESSOR) {
if (type == ChangeType::DONE && film->audio_processor ()) {
boost::mutex::scoped_lock lm (_mutex);
_audio_processor = film->audio_processor()->clone(film->audio_frame_rate());
}
- } else if (p == Film::Property::AUDIO_CHANNELS) {
+ } else if (p == FilmProperty::AUDIO_CHANNELS) {
if (type == ChangeType::DONE) {
boost::mutex::scoped_lock lm (_mutex);
_audio_merger.clear ();
}
+/** Find the piece that should be passed next, i.e. the one whose next decoded
+ *  data would have the earliest DCP time.
+ *
+ *  Side-effect: any piece whose position is past the end of its content is
+ *  marked done here (the Piece is held by shared_ptr, which is why this is
+ *  possible from a const method).
+ *
+ *  @return the chosen piece and the DCP time of its next data; both are
+ *  null/unset if every piece is done.
+ */
+pair<shared_ptr<Piece>, optional<DCPTime>>
+Player::earliest_piece_and_time() const
+{
+	auto film = _film.lock();
+	DCPOMATIC_ASSERT(film);
+
+	shared_ptr<Piece> earliest_content;
+	optional<DCPTime> earliest_time;
+
+	for (auto const& piece: _pieces) {
+		if (piece->done) {
+			continue;
+		}
+
+		/* Position of the piece's next data; clamped to trim_start so trimmed-off
+		   material does not count.
+		*/
+		auto const t = content_time_to_dcp(piece, max(piece->decoder->position(), piece->content->trim_start()));
+		if (t > piece->content->end(film)) {
+			piece->done = true;
+		} else {
+
+			/* Given two choices at the same time, pick the one with texts so we see it before
+			   the video.
+			*/
+			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !piece->decoder->text.empty())) {
+				earliest_time = t;
+				earliest_content = piece;
+			}
+		}
+	}
+
+	return { earliest_content, earliest_time };
+}
+
+
bool
Player::pass ()
{
shared_ptr<Piece> earliest_content;
optional<DCPTime> earliest_time;
-
- for (auto i: _pieces) {
- if (i->done) {
- continue;
- }
-
- auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
- if (t > i->content->end(film)) {
- i->done = true;
- } else {
-
- /* Given two choices at the same time, pick the one with texts so we see it before
- the video.
- */
- if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
- earliest_time = t;
- earliest_content = i;
- }
- }
- }
+ std::tie(earliest_content, earliest_time) = earliest_piece_and_time();
bool done = false;
[](state_pair const& a, state_pair const& b) { return a.second.last_push_end.get() < b.second.last_push_end.get(); }
);
+ std::map<AudioStreamPtr, StreamState> alive_stream_states;
+
if (latest_last_push_end != have_pushed.end()) {
LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end.get()));
- }
- /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
- std::map<AudioStreamPtr, StreamState> alive_stream_states;
- for (auto const& i: _stream_states) {
- if (!i.second.last_push_end || (latest_last_push_end->second.last_push_end.get() - i.second.last_push_end.get()) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
- alive_stream_states.insert(i);
- } else {
- LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
+ /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
+ for (auto const& i: _stream_states) {
+ if (!i.second.last_push_end || (latest_last_push_end->second.last_push_end.get() - i.second.last_push_end.get()) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
+ alive_stream_states.insert(i);
+ } else {
+ LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
+ }
}
}
/* Bitmap subtitles */
for (auto i: j.bitmap) {
- if (!i.image) {
+ if (!i.image || i.image->size().width == 0 || i.image->size().height == 0) {
continue;
}
/* String subtitles (rendered to an image) */
if (!j.string.empty()) {
auto s = render_text(j.string, _video_container_size, time, vfr);
- copy (s.begin(), s.end(), back_inserter (captions));
+ copy_if(s.begin(), s.end(), back_inserter(captions), [](PositionImage const& image) {
+ return image.image->size().width && image.image->size().height;
+ });
+
}
}
auto const content_video = piece->content->video;
+ auto scaled_size = content_video->scaled_size(film->frame_size());
+ DCPOMATIC_ASSERT(scaled_size);
+
for (auto eyes: eyes_to_emit) {
_last_video[weak_piece] = std::make_shared<PlayerVideo>(
video.image,
content_video->actual_crop(),
content_video->fade(film, video.frame),
scale_for_display(
- content_video->scaled_size(film->frame_size()),
+ *scaled_size,
_video_container_size,
film->frame_size(),
content_video->pixel_quanta()
/* Process */
- if (_audio_processor) {
+ if (_audio_processor && !_disable_audio_processor) {
content_audio.audio = _audio_processor->run(content_audio.audio, film->audio_channels());
}
Change(type, property, false);
}
+
+/** Stop any configured audio processor from being applied to audio that is
+ *  emitted from now on (the emission path checks
+ *  _audio_processor && !_disable_audio_processor).
+ *
+ *  Must be called from the same thread that calls ::pass(): the flag is read
+ *  there without taking _mutex, at least in the code visible here.
+ */
+void
+Player::set_disable_audio_processor()
+{
+	_disable_audio_processor = true;
+}
+
+
+/** @return the number of video frames of the film that have been fully passed,
+ *  judged by the DCP time of the earliest piece that is not yet done.
+ *  Returns 0 when every piece is done (no earliest time remains).
+ *
+ *  NOTE(review): reads _pieces (via earliest_piece_and_time()) without taking
+ *  _mutex — confirm this is only called from the ::pass() thread.
+ */
+Frame
+Player::frames_done() const
+{
+	auto film = _film.lock();
+	DCPOMATIC_ASSERT(film);
+
+	shared_ptr<Piece> earliest_content;
+	optional<DCPTime> earliest_time;
+	std::tie(earliest_content, earliest_time) = earliest_piece_and_time();
+
+	return earliest_time.get_value_or({}).frames_round(film->video_frame_rate());
+}
+
+
+/** @return approximate proportion (0 .. 1) of the film that has been passed,
+ *  judged by the DCP time of the earliest piece that is not yet done.
+ *  Returns 0 for a zero-length film.
+ *
+ *  NOTE(review): reads _pieces (via earliest_piece_and_time()) without taking
+ *  _mutex — confirm this is only called from the ::pass() thread.
+ */
+float
+Player::progress() const
+{
+	auto film = _film.lock();
+	DCPOMATIC_ASSERT(film);
+
+	shared_ptr<Piece> earliest_content;
+	optional<DCPTime> earliest_time;
+	std::tie(earliest_content, earliest_time) = earliest_piece_and_time();
+
+	/* Guard against a zero-length film: the unchecked division would yield NaN */
+	auto const length = film->length().get();
+	if (length == 0) {
+		return 0;
+	}
+
+	return static_cast<float>(earliest_time.get_value_or({}).get()) / length;
+}