_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
}
- for (auto i: playlist()->content()) {
+ for (auto content: playlist()->content()) {
- if (!i->paths_valid ()) {
+ if (!content->paths_valid()) {
continue;
}
- if (_ignore_video && _ignore_audio && i->text.empty()) {
+ if (_ignore_video && _ignore_audio && content->text.empty()) {
/* We're only interested in text and this content has none */
continue;
}
shared_ptr<Decoder> old_decoder;
for (auto j: old_pieces) {
- if (j->content == i) {
+ if (j->content == content) {
old_decoder = j->decoder;
break;
}
}
- auto decoder = decoder_factory(film, i, _fast, _tolerant, old_decoder);
+ auto decoder = decoder_factory(film, content, _fast, _tolerant, old_decoder);
DCPOMATIC_ASSERT (decoder);
- FrameRateChange frc(film, i);
+ FrameRateChange frc(film, content);
if (decoder->video && _ignore_video) {
decoder->video->set_ignore (true);
}
}
- auto piece = make_shared<Piece>(i, decoder, frc);
+ auto piece = make_shared<Piece>(content, decoder, frc);
_pieces.push_back (piece);
if (decoder->video) {
for (auto i: _pieces) {
if (i->content->audio) {
for (auto j: i->content->audio->streams()) {
- _stream_states[j] = StreamState (i, i->content->position ());
+ _stream_states[j] = StreamState(i);
}
}
}
return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
};
- for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
- if (ignore_overlap((*i)->content->video)) {
+ for (auto piece = _pieces.begin(); piece != _pieces.end(); ++piece) {
+ if (ignore_overlap((*piece)->content->video)) {
/* Look for content later in the content list with in-use video that overlaps this */
- auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(film));
- for (auto j = std::next(i); j != _pieces.end(); ++j) {
- if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
- (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(film)).overlap(period);
+ auto const period = (*piece)->content->period(film);
+ for (auto later_piece = std::next(piece); later_piece != _pieces.end(); ++later_piece) {
+ if (ignore_overlap((*later_piece)->content->video)) {
+ if (auto overlap = (*later_piece)->content->period(film).overlap(period)) {
+ (*piece)->ignore_video.push_back(*overlap);
+ }
+ }
+ }
+ }
+ }
+
+ for (auto piece = _pieces.begin(); piece != _pieces.end(); ++piece) {
+ if ((*piece)->content->atmos) {
+ /* Look for content later in the content list with ATMOS that overlaps this */
+ auto const period = (*piece)->content->period(film);
+ for (auto later_piece = std::next(piece); later_piece != _pieces.end(); ++later_piece) {
+ if ((*later_piece)->content->atmos) {
+ if (auto overlap = (*later_piece)->content->period(film).overlap(period)) {
+ (*piece)->ignore_atmos.push_back(*overlap);
+ }
}
}
}
}
case BLACK:
LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
- emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
+ if (film->three_d()) {
+ emit_video(black_player_video_frame(Eyes::LEFT), _black.position());
+ emit_video(black_player_video_frame(Eyes::RIGHT), _black.position());
+ } else {
+ emit_video(black_player_video_frame(Eyes::BOTH), _black.position());
+ }
_black.set_position (_black.position() + one_video_frame());
break;
case SILENT:
using state_pair = std::pair<AudioStreamPtr, StreamState>;
+ /* Find streams that have pushed */
+ std::vector<state_pair> have_pushed;
+ std::copy_if(_stream_states.begin(), _stream_states.end(), std::back_inserter(have_pushed), [](state_pair const& a) { return static_cast<bool>(a.second.last_push_end); });
+
/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
auto latest_last_push_end = std::max_element(
- _stream_states.begin(),
- _stream_states.end(),
- [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
+ have_pushed.begin(),
+ have_pushed.end(),
+ [](state_pair const& a, state_pair const& b) { return a.second.last_push_end.get() < b.second.last_push_end.get(); }
);
- if (latest_last_push_end != _stream_states.end()) {
- LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
+ if (latest_last_push_end != have_pushed.end()) {
+ LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end.get()));
}
/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
std::map<AudioStreamPtr, StreamState> alive_stream_states;
for (auto const& i: _stream_states) {
- if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
+ if (!i.second.last_push_end || (latest_last_push_end->second.last_push_end.get() - i.second.last_push_end.get()) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
alive_stream_states.insert(i);
} else {
LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
auto pull_to = _playback_length.load();
for (auto const& i: alive_stream_states) {
- if (!i.second.piece->done && i.second.last_push_end < pull_to) {
- pull_to = i.second.last_push_end;
+ auto position = i.second.last_push_end.get_value_or(i.second.piece->content->position());
+ if (!i.second.piece->done && position < pull_to) {
+ pull_to = position;
}
}
if (!_silent.done() && _silent.position() < pull_to) {
}
+static
+Eyes
+increment_eyes (Eyes e)
+{
+ if (e == Eyes::LEFT) {
+ return Eyes::RIGHT;
+ }
+
+ return Eyes::LEFT;
+}
+
+
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
return;
}
+ vector<Eyes> eyes_to_emit;
+
+ if (!film->three_d()) {
+ if (video.eyes == Eyes::RIGHT) {
+ /* 2D film, 3D content: discard right */
+ return;
+ } else if (video.eyes == Eyes::LEFT) {
+ /* 2D film, 3D content: emit left as "both" */
+ video.eyes = Eyes::BOTH;
+ eyes_to_emit = { Eyes::BOTH };
+ }
+ } else {
+ if (video.eyes == Eyes::BOTH) {
+ /* 3D film, 2D content: emit "both" for left and right */
+ eyes_to_emit = { Eyes::LEFT, Eyes::RIGHT };
+ }
+ }
+
+ if (eyes_to_emit.empty()) {
+ eyes_to_emit = { video.eyes };
+ }
+
/* Time of the first frame we will emit */
DCPTime const time = content_video_to_dcp (piece, video.frame);
LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
return;
}
- if (piece->ignore_video && piece->ignore_video->contains(time)) {
+ auto ignore_video = std::find_if(
+ piece->ignore_video.begin(),
+ piece->ignore_video.end(),
+ [time](DCPTimePeriod period) { return period.contains(time); }
+ );
+ if (ignore_video != piece->ignore_video.end()) {
return;
}
if ((fill_to - fill_from) > one_video_frame() / 2) {
auto last = _last_video.find (weak_piece);
if (film->three_d()) {
- auto fill_to_eyes = video.eyes;
+ auto fill_to_eyes = eyes_to_emit[0];
if (fill_to_eyes == Eyes::BOTH) {
fill_to_eyes = Eyes::LEFT;
}
auto const content_video = piece->content->video;
- _last_video[weak_piece] = std::make_shared<PlayerVideo>(
- video.image,
- content_video->actual_crop(),
- content_video->fade(film, video.frame),
- scale_for_display(
- content_video->scaled_size(film->frame_size()),
+ for (auto eyes: eyes_to_emit) {
+ _last_video[weak_piece] = std::make_shared<PlayerVideo>(
+ video.image,
+ content_video->actual_crop(),
+ content_video->fade(film, video.frame),
+ scale_for_display(
+ content_video->scaled_size(film->frame_size()),
+ _video_container_size,
+ film->frame_size(),
+ content_video->pixel_quanta()
+ ),
_video_container_size,
- film->frame_size(),
- content_video->pixel_quanta()
- ),
- _video_container_size,
- video.eyes,
- video.part,
- content_video->colour_conversion(),
- content_video->range(),
- piece->content,
- video.frame,
- false
- );
-
- DCPTime t = time;
- for (int i = 0; i < frc.repeat; ++i) {
- if (t < piece->content->end(film)) {
- emit_video (_last_video[weak_piece], t);
+ eyes,
+ video.part,
+ content_video->colour_conversion(),
+ content_video->range(),
+ piece->content,
+ video.frame,
+ false
+ );
+
+ DCPTime t = time;
+ for (int i = 0; i < frc.repeat; ++i) {
+ if (t < piece->content->end(film)) {
+ emit_video (_last_video[weak_piece], t);
+ }
+ t += one_video_frame ();
}
- t += one_video_frame ();
}
}
/* Compute time in the DCP */
auto time = resampled_audio_to_dcp (piece, content_audio.frame);
- LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
/* And the end of this block in the DCP */
auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
+ LOG_DEBUG_PLAYER("Received audio frame %1 covering %2 to %3 (%4)", content_audio.frame, to_string(time), to_string(end), piece->content->path(0).filename());
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
_silent.set_position (time);
_last_video.clear ();
+
+ for (auto& state: _stream_states) {
+ state.second.last_push_end = boost::none;
+ }
}
auto film = _film.lock();
DCPOMATIC_ASSERT(film);
- if (!film->three_d()) {
- if (pv->eyes() == Eyes::LEFT) {
- /* Use left-eye images for both eyes... */
- pv->set_eyes (Eyes::BOTH);
- } else if (pv->eyes() == Eyes::RIGHT) {
- /* ...and discard the right */
- return;
- }
- }
-
/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
player before the video that requires them.
*/
return;
}
+ auto ignore_atmos = std::find_if(
+ piece->ignore_atmos.begin(),
+ piece->ignore_atmos.end(),
+ [dcp_time](DCPTimePeriod period) { return period.contains(dcp_time); }
+ );
+ if (ignore_atmos != piece->ignore_atmos.end()) {
+ return;
+ }
+
Atmos (data.data, dcp_time, data.metadata);
}