_positions[stream] += data->frames();
}
+/** @return Time just after the last thing that was emitted from a given stream */
+ContentTime
+AudioDecoder::stream_position (AudioStreamPtr stream) const
+{
+ map<AudioStreamPtr, Frame>::const_iterator i = _positions.find (stream);
+ DCPOMATIC_ASSERT (i != _positions.end ());
+ return ContentTime::from_frames (i->second, _content->resampled_frame_rate());
+}
+
ContentTime
AudioDecoder::position () const
{
optional<ContentTime> p;
for (map<AudioStreamPtr, Frame>::const_iterator i = _positions.begin(); i != _positions.end(); ++i) {
- ContentTime const ct = ContentTime::from_frames (i->second, _content->resampled_frame_rate());
+ ContentTime const ct = stream_position (i->first);
if (!p || ct < *p) {
p = ct;
}
void seek ();
void flush ();
+ ContentTime stream_position (AudioStreamPtr stream) const;
+
boost::signals2::signal<void (AudioStreamPtr, ContentAudio)> Data;
private:
note
);
} else {
- xyz = dcp::xyz_to_xyz (image->data()[0], image->size(), image->stride()[0]);
+ xyz.reset (new dcp::OpenJPEGImage (image->data()[0], image->size(), image->stride()[0]));
}
return xyz;
if (c->video) {
video.reset (new VideoDecoder (this, c, log));
_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate());
+ /* It doesn't matter what size or pixel format this is, it just needs to be black */
+ _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
+ _black_image->make_black ();
} else {
_pts_offset = ContentTime ();
}
if (audio) {
decode_audio_packet ();
+ }
+
+ /* Make sure all streams are the same length and round up to the next video frame */
+
+ FrameRateChange const frc = _ffmpeg_content->film()->active_frame_rate_change(_ffmpeg_content->position());
+ ContentTime full_length (_ffmpeg_content->full_length(), frc);
+ full_length = full_length.ceil (frc.source);
+ if (video) {
+ double const vfr = _ffmpeg_content->video_frame_rate().get();
+ Frame const f = full_length.frames_round (vfr);
+ Frame v = video->position().frames_round (vfr);
+ while (v < f) {
+ video->emit (shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
+ ++v;
+ }
+ }
+
+	if (audio) {
+		BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
+			/* Pad this stream with silence up to full_length, in chunks of up to 0.1s */
+			ContentTime a = audio->stream_position (i);
+			while (a < full_length) {
+				ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
+				shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
+				silence->make_silent ();
+				audio->emit (i, silence, a);
+				a += to_do;
+			}
+		}
+	}
+
+ if (audio) {
audio->flush ();
}
}
class VideoFilterGraph;
class FFmpegAudioStream;
class AudioBuffers;
+class Image;
struct ffmpeg_pts_offset_test;
/** @class FFmpegDecoder
ContentTime _pts_offset;
boost::optional<ContentTime> _current_subtitle_to;
bool _have_current_subtitle;
+
+ boost::shared_ptr<Image> _black_image;
};
/* See if the header has duration information in it */
_need_video_length = _format_context->duration == AV_NOPTS_VALUE;
if (!_need_video_length) {
- _video_length = (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get ();
+ _video_length = llrint ((double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get());
}
}
setup_pieces ();
}
- shared_ptr<Piece> earliest;
- DCPTime earliest_content;
-
- BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- if (!i->done) {
- DCPTime const t = content_time_to_dcp (i, i->decoder->position());
- if (!earliest || t < earliest_content) {
- earliest_content = t;
- earliest = i;
- }
- }
- }
-
- /* Fill towards the next thing that might happen (or the end of the playlist). This is to fill gaps between content,
- NOT to fill gaps within content (the latter is done in ::video())
-
- XXX: can't we just look at content position/end and fill based on that?
- */
- DCPTime fill_towards = earliest ? earliest_content : _playlist->length().ceil(_film->video_frame_rate());
-
bool filled = false;
- /* Fill some black if we would emit before the earliest piece of content. This is so we act like a phantom
- Piece which emits black in spaces (we only emit if we are the earliest thing)
- */
- if (_last_video_time && (!earliest || *_last_video_time < earliest_content) && ((fill_towards - *_last_video_time)) >= one_video_frame()) {
- list<DCPTimePeriod> p = subtract(DCPTimePeriod(*_last_video_time, *_last_video_time + one_video_frame()), _no_video);
- if (!p.empty ()) {
- emit_video (black_player_video_frame(), p.front().from);
- filled = true;
- }
+
+ if (_last_video_time && !_playlist->video_content_at(*_last_video_time) && *_last_video_time < _playlist->length()) {
+ /* _last_video_time is the time just after the last video we emitted, and there is no video content
+ at this time so we need to emit some black.
+ */
+ emit_video (black_player_video_frame(), *_last_video_time);
+ filled = true;
} else if (_playlist->length() == DCPTime()) {
/* Special case of an empty Film; just give one black frame */
emit_video (black_player_video_frame(), DCPTime());
filled = true;
}
- optional<DCPTime> audio_fill_from;
- if (_last_audio_time) {
- /* Fill from the last audio or seek time */
- audio_fill_from = _last_audio_time;
+ if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time) && *_last_audio_time < _playlist->length()) {
+ /* _last_audio_time is the time just after the last audio we emitted. There is no audio here
+ so we need to emit some silence.
+ */
+ shared_ptr<Content> next = _playlist->next_audio_content(*_last_audio_time);
+ DCPTimePeriod period (*_last_audio_time, next ? next->position() : _playlist->length());
+ if (period.duration() > one_video_frame()) {
+ period = DCPTimePeriod (*_last_audio_time, *_last_audio_time + one_video_frame());
+ }
+ fill_audio (period);
+ filled = true;
}
- DCPTime audio_fill_towards = fill_towards;
- if (earliest && earliest->content->audio) {
- audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
- }
+ /* Now pass() the decoder which is farthest behind where we are */
- if (audio_fill_from && audio_fill_from < audio_fill_towards && ((audio_fill_towards - *audio_fill_from) >= one_video_frame())) {
- DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
- if (period.duration() > one_video_frame()) {
- period.to = period.from + one_video_frame();
- }
- list<DCPTimePeriod> p = subtract(period, _no_audio);
- if (!p.empty ()) {
- fill_audio (p.front());
- filled = true;
+ shared_ptr<Piece> earliest;
+ DCPTime earliest_content;
+
+ BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+ if (!i->done) {
+ DCPTime const t = content_time_to_dcp (i, i->decoder->position());
+ if (!earliest || t < earliest_content) {
+ earliest_content = t;
+ earliest = i;
+ }
}
}
- if (earliest) {
+ if (!filled && earliest) {
earliest->done = earliest->decoder->pass ();
}
DCPTime const time = content_video_to_dcp (piece, video.frame);
DCPTimePeriod const period (time, time + one_video_frame());
- /* Discard if it's outside the content's period or if it's before the last accurate seek */
- if (
- time < piece->content->position() ||
- time >= piece->content->end() ||
- (_last_video_time && time < *_last_video_time)) {
- return;
- }
-
/* Fill gaps that we discover now that we have some video which needs to be emitted */
optional<DCPTime> fill_to;
}
}
+ /* Discard if it's outside the content's period or if it's before the last accurate seek */
+ if (
+ time < piece->content->position() ||
+ time >= piece->content->end() ||
+ (_last_video_time && time < *_last_video_time)) {
+ return;
+ }
+
_last_video[wp].reset (
new PlayerVideo (
video.image,
pv->set_subtitle (subtitles.get ());
	}
	Video (pv, time);
if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
#include "ffmpeg_decoder.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
+#include "audio_content.h"
#include "content_factory.h"
#include "dcp_content.h"
#include "job.h"
Playlist::audio_content_at (DCPTime time) const
{
BOOST_FOREACH (shared_ptr<Content> i, _content) {
- if (i->audio && i->position() <= time && time < i->end()) {
+ if (!i->audio) {
+ continue;
+ }
+ DCPTime end = i->end ();
+ if (i->audio->delay() < 0) {
+ end += DCPTime::from_seconds (i->audio->delay() / 1000.0);
+ }
+ if (i->position() <= time && time < end) {
return true;
}
}
return false;
}
+shared_ptr<Content>
+Playlist::next_audio_content (DCPTime time) const
+{
+ shared_ptr<Content> next;
+ DCPTime next_position;
+ BOOST_FOREACH (shared_ptr<Content> i, _content) {
+ if (!i->audio) {
+ continue;
+ }
+ if (i->position() >= time && (!next || i->position() < next_position)) {
+ next = i;
+ next_position = i->position();
+ }
+ }
+
+ return next;
+}
+
pair<double, double>
Playlist::speed_up_range (int dcp_video_frame_rate) const
{
ContentList content () const;
bool video_content_at (DCPTime time) const;
bool audio_content_at (DCPTime time) const;
+ boost::shared_ptr<Content> next_audio_content (DCPTime time) const;
std::string video_identifier () const;
return;
}
- /* Work out what we are going to emit next */
- switch (_content->video->frame_type ()) {
- case VIDEO_FRAME_TYPE_2D:
- Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
- break;
- case VIDEO_FRAME_TYPE_3D:
- {
- /* We receive the same frame index twice for 3D; hence we know which
- frame this one is.
- */
- bool const same = (_last_emitted && _last_emitted.get() == frame);
- Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
- _last_emitted = frame;
- break;
- }
- case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
- frame /= 2;
- break;
- case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
- Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
- Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
- break;
- case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
- Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
- Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
- break;
- case VIDEO_FRAME_TYPE_3D_LEFT:
- Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
- break;
- case VIDEO_FRAME_TYPE_3D_RIGHT:
- Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
- break;
- default:
- DCPOMATIC_ASSERT (false);
+ FrameRateChange const frc = _content->film()->active_frame_rate_change (_content->position());
+ for (int i = 0; i < frc.repeat; ++i) {
+ switch (_content->video->frame_type ()) {
+ case VIDEO_FRAME_TYPE_2D:
+ Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
+ break;
+ case VIDEO_FRAME_TYPE_3D:
+ {
+ /* We receive the same frame index twice for 3D; hence we know which
+ frame this one is.
+ */
+ bool const same = (_last_emitted && _last_emitted.get() == frame);
+ Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+ _last_emitted = frame;
+ break;
+ }
+ case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+ Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+ frame /= 2;
+ break;
+ case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+ break;
+ case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+ break;
+ case VIDEO_FRAME_TYPE_3D_LEFT:
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+ break;
+ case VIDEO_FRAME_TYPE_3D_RIGHT:
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+ break;
+ default:
+ DCPOMATIC_ASSERT (false);
+ }
+
+ ++frame;
}
_position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
-Subproject commit 83a0643c345454864dafabfb9d4703db30dd0bc0
+Subproject commit adec5472fc49df236d9ef7609f54c56c65d09479