using boost::dynamic_pointer_cast;
using boost::optional;
using boost::scoped_ptr;
+#if BOOST_VERSION >= 106100
+using namespace boost::placeholders;
+#endif
using namespace dcpomatic;
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
}
shared_ptr<Decoder> old_decoder;
+ /* XXX: needs to check vector of Content and use the old decoders, but
+ * this will all be different as we have to coalesce content before
+ * this happens.
BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
if (j->content == i) {
old_decoder = j->decoder;
break;
}
}
+ */
shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
DCPOMATIC_ASSERT (decoder);
}
}
- _stream_states.clear ();
- BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- if (i->content->audio) {
- BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
- _stream_states[j] = StreamState (i, i->content->position ());
- }
- }
- }
-
_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
- if (type == CHANGE_TYPE_PENDING) {
- /* The player content is probably about to change, so we can't carry on
- until that has happened and we've rebuilt our pieces. Stop pass()
- and seek() from working until then.
- */
- ++_suspended;
- } else if (type == CHANGE_TYPE_DONE) {
- /* A change in our content has gone through. Re-build our pieces. */
- setup_pieces ();
- --_suspended;
- } else if (type == CHANGE_TYPE_CANCELLED) {
- --_suspended;
+ if (property == VideoContentProperty::CROP) {
+ if (type == CHANGE_TYPE_DONE) {
+ dcp::Size const vcs = video_container_size();
+ boost::mutex::scoped_lock lm (_mutex);
+ for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
+ i->first->reset_metadata (_film, vcs);
+ }
+ }
+ } else {
+ if (type == CHANGE_TYPE_PENDING) {
+ /* The player content is probably about to change, so we can't carry on
+ until that has happened and we've rebuilt our pieces. Stop pass()
+ and seek() from working until then.
+ */
+ ++_suspended;
+ } else if (type == CHANGE_TYPE_DONE) {
+ /* A change in our content has gone through. Re-build our pieces. */
+ setup_pieces ();
+ --_suspended;
+ } else if (type == CHANGE_TYPE_CANCELLED) {
+ --_suspended;
+ }
}
Change (type, property, frequent);
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
- DCPTime s = t - piece->content->position ();
+ DCPTime s = t - piece->position ();
s = min (piece->content->length_after_trim(_film), s);
s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
{
/* See comment in dcp_to_content_video */
DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
- return d + piece->content->position();
+ return d + piece->position();
}
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
- DCPTime s = t - piece->content->position ();
+ DCPTime s = t - piece->position ();
s = min (piece->content->length_after_trim(_film), s);
/* See notes in dcp_to_content_video */
return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* See comment in dcp_to_content_video */
return DCPTime::from_frames (f, _film->audio_frame_rate())
- DCPTime (piece->content->trim_start(), piece->frc)
- + piece->content->position();
+ + piece->position();
}
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Convert a time on the DCP timeline to a time within this piece's content.
	 * Offset into the piece (piece->position() is its start on the DCP timeline) ...
	 */
	DCPTime s = t - piece->position ();
	/* ... clamped so we never run past the trimmed length of the content ... */
	s = min (piece->content->length_after_trim(_film), s);
	/* ... then shift by the start trim and clamp at zero, so the result is a
	 * valid (non-negative) ContentTime.
	 */
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	/* Inverse of dcp_to_content_time(): map a time within this piece's content
	 * back onto the DCP timeline.  Remove the start trim, convert via the piece's
	 * frame-rate change (frc) and add the piece's position; clamp at zero so a
	 * time inside the trimmed-off region does not produce a negative DCPTime.
	 */
	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->position());
}
list<shared_ptr<Font> >
if (_suspended) {
/* We can't pass in this state */
+ LOG_DEBUG_PLAYER_NC ("Player is suspended");
return false;
}
switch (which) {
case CONTENT:
{
+ LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
earliest_content->done = earliest_content->decoder->pass ();
shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
if (dcp && !_play_referenced && dcp->reference_audio()) {
break;
case SILENT:
{
+ LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
DCPTimePeriod period (_silent.period_at_position());
if (_last_audio_time) {
/* Sometimes the thing that happened last finishes fractionally before
of our streams, or the position of the _silent.
*/
DCPTime pull_to = _playback_length;
- for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
- if (!i->second.piece->done && i->second.last_push_end < pull_to) {
- pull_to = i->second.last_push_end;
- }
+ BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+ i->update_pull_to (pull_to);
}
if (!_silent.done() && _silent.position() < pull_to) {
pull_to = _silent.position();
}
+ LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
if (_last_audio_time && i->second < *_last_audio_time) {
/* Time of the first frame we will emit */
DCPTime const time = content_video_to_dcp (piece, video.frame);
+ LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
/* Discard if it's before the content's period or the last accurate seek. We can't discard
if it's after the content's period here as in that case we still need to fill any gap between
`now' and the end of the content's period.
*/
- if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
+ if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
return;
}
DCPTime fill_to = min (time, piece->content->end(_film));
if (_last_video_time) {
- DCPTime fill_from = max (*_last_video_time, piece->content->position());
+ DCPTime fill_from = max (*_last_video_time, piece->position());
/* Fill if we have more than half a frame to do */
if ((fill_to - fill_from) > one_video_frame() / 2) {
/* Compute time in the DCP */
DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
+ LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
+
/* And the end of this block in the DCP */
DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
/* Remove anything that comes before the start or after the end of the content */
- if (time < piece->content->position()) {
- pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
+ if (time < piece->position()) {
+ pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->position());
if (!cut.first) {
/* This audio is entirely discarded */
return;
/* Push */
_audio_merger.push (content_audio.audio, time);
- DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
- _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
+ /* XXX: this almost certainly needs to be more efficient; perhaps pieces fill a map to find
+ * the piece from the stream, then we can call the right piece with no loop.
+ */
+ BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+ i->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
+ }
}
void
Player::seek (DCPTime time, bool accurate)
{
boost::mutex::scoped_lock lm (_mutex);
+ LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
if (_suspended) {
/* We can't seek in this state */
}
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- if (time < i->content->position()) {
+ if (time < i->position()) {
/* Before; seek to the start of the content. Even if this request is for an inaccurate seek
we must seek this (following) content accurately, otherwise when we come to the end of the current
content we may not start right at the beginning of the next, causing a gap (if the next content has
been trimmed to a point between keyframes, or something).
*/
- i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
+ i->decoder->seek (dcp_to_content_time (i, i->position()), true);
i->done = false;
- } else if (i->content->position() <= time && time < i->content->end(_film)) {
+ } else if (i->position() <= time && time < i->content->end(_film)) {
/* During; seek to position */
i->decoder->seek (dcp_to_content_time (i, time), accurate);
i->done = false;