using boost::optional;
VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c, shared_ptr<Log> log)
+ : DecoderPart (parent, log)
#ifdef DCPOMATIC_DEBUG
- : test_gaps (0)
- , _parent (parent),
- _content (c)
-#else
- : _parent (parent)
- , _content (c)
+ , test_gaps (0)
#endif
- , _log (log)
+ , _content (c)
, _last_seek_accurate (true)
- , _ignore (false)
{
_black_image.reset (new Image (AV_PIX_FMT_RGB24, _content->video->size(), true));
_black_image->make_black ();
list<ContentVideo> output;
BOOST_FOREACH (ContentVideo const & i, _decoded) {
- if (i.frame == frame) {
+ if (i.frame.index() == frame) {
output.push_back (i);
}
}
return list<ContentVideo> ();
}
- /* At this stage, if we have get_video()ed before, _decoded will contain the last frame that this
- method returned (and possibly a few more). If the requested frame is not in _decoded and it is not the next
- one after the end of _decoded we need to seek.
- */
-
_log->log (String::compose ("VD has request for %1", frame), LogEntry::TYPE_DEBUG_DECODE);
- if (_decoded.empty() || frame < _decoded.front().frame || frame > (_decoded.back().frame + 1)) {
- _parent->seek (ContentTime::from_frames (frame, _content->active_video_frame_rate()), accurate);
+ /* See if we have frame, and suggest a seek if not */
+
+ list<ContentVideo>::const_iterator i = _decoded.begin ();
+ while (i != _decoded.end() && i->frame.index() != frame) {
+ _log->log (String::compose ("VD has stored %1 which is no good", i->frame.index()), LogEntry::TYPE_DEBUG_DECODE);
+ ++i;
+ }
+
+ if (i == _decoded.end()) {
+ Frame seek_frame = frame;
+ if (_content->video->frame_type() == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+ /* 3D alternate is a special case as the frame index in the content is not the same
+ as the frame index we are talking about here.
+ */
+ seek_frame *= 2;
+ }
+ _log->log (String::compose ("VD suggests seek to %1", seek_frame), LogEntry::TYPE_DEBUG_DECODE);
+ maybe_seek (ContentTime::from_frames (seek_frame, _content->active_video_frame_rate()), accurate);
}
- unsigned int const frames_wanted = _content->video->frame_type() == VIDEO_FRAME_TYPE_2D ? 1 : 2;
+ /* Work out the number of frames that we should return; we
+ must return all frames in our content at the requested `time'
+ (i.e. frame)
+ */
+ unsigned int frames_wanted = 0;
+ switch (_content->video->frame_type()) {
+ case VIDEO_FRAME_TYPE_2D:
+ case VIDEO_FRAME_TYPE_3D_LEFT:
+ case VIDEO_FRAME_TYPE_3D_RIGHT:
+ frames_wanted = 1;
+ break;
+ case VIDEO_FRAME_TYPE_3D:
+ case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+ case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+ case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+ frames_wanted = 2;
+ break;
+ default:
+ DCPOMATIC_ASSERT (false);
+ }
list<ContentVideo> dec;
break;
}
- if (!_decoded.empty() && _decoded.front().frame > frame) {
+ if (!_decoded.empty() && _decoded.front().frame.index() > frame) {
/* We're never going to get the frame we want. Perhaps the caller is asking
* for a video frame before the content's video starts (if its audio
* begins before its video, for example).
}
/* Clean up _decoded; keep the frame we are returning, if any (which may have two images
- for 3D), but nothing before that */
- while (!_decoded.empty() && !dec.empty() && _decoded.front().frame < dec.front().frame) {
+ for 3D), but nothing before that
+ */
+ while (!_decoded.empty() && !dec.empty() && _decoded.front().frame.index() < dec.front().frame.index()) {
+ _log->log (String::compose ("VD discards %1", _decoded.front().frame.index()), LogEntry::TYPE_DEBUG_DECODE);
_decoded.pop_front ();
}
test_gaps++;
#endif
_decoded.push_back (
- ContentVideo (filler_image, eye, filler_part, i)
+ ContentVideo (filler_image, VideoFrame (i, eye), filler_part)
);
}
}
* adding both left and right eye frames.
*/
void
-VideoDecoder::fill_both_eyes (Frame from_frame, Eyes from_eye, Frame to_frame, Eyes to_eye)
+VideoDecoder::fill_both_eyes (VideoFrame from, VideoFrame to)
{
/* Fill with black... */
shared_ptr<const ImageProxy> filler_left_image (new RawImageProxy (_black_image));
/* ...unless there's some video we can fill with */
for (list<ContentVideo>::const_reverse_iterator i = _decoded.rbegin(); i != _decoded.rend(); ++i) {
- if (i->eyes == EYES_LEFT && !filler_left_image) {
+ if (i->frame.eyes() == EYES_LEFT && !filler_left_image) {
filler_left_image = i->image;
filler_left_part = i->part;
- } else if (i->eyes == EYES_RIGHT && !filler_right_image) {
+ } else if (i->frame.eyes() == EYES_RIGHT && !filler_right_image) {
filler_right_image = i->image;
filler_right_part = i->part;
}
}
}
- while (from_frame != to_frame || from_eye != to_eye) {
+ while (from != to) {
#ifdef DCPOMATIC_DEBUG
test_gaps++;
_decoded.push_back (
ContentVideo (
- from_eye == EYES_LEFT ? filler_left_image : filler_right_image,
- from_eye,
- from_eye == EYES_LEFT ? filler_left_part : filler_right_part,
- from_frame
+ from.eyes() == EYES_LEFT ? filler_left_image : filler_right_image,
+ from,
+ from.eyes() == EYES_LEFT ? filler_left_part : filler_right_part
)
);
- if (from_eye == EYES_LEFT) {
- from_eye = EYES_RIGHT;
- } else {
- from_eye = EYES_LEFT;
- ++from_frame;
- }
+ ++from;
}
}
-/** Called by decoder classes when they have a video frame ready */
+/** Called by decoder classes when they have a video frame ready.
+ * @param frame Frame index within the content; this does not take into account 3D
+ * so for 3D_ALTERNATE this value goes:
+ * 0: frame 0 left
+ * 1: frame 0 right
+ * 2: frame 1 left
+ * 3: frame 1 right
+ * and so on.
+ */
void
VideoDecoder::give (shared_ptr<const ImageProxy> image, Frame frame)
{
- if (_ignore) {
+ if (ignore ()) {
return;
}
list<ContentVideo> to_push;
switch (_content->video->frame_type ()) {
case VIDEO_FRAME_TYPE_2D:
- to_push.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
+ to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_BOTH), PART_WHOLE));
break;
case VIDEO_FRAME_TYPE_3D:
- case VIDEO_FRAME_TYPE_3D_ALTERNATE:
{
- /* We receive the same frame index twice for 3D-alternate; hence we know which
+ /* We receive the same frame index twice for 3D; hence we know which
frame this one is.
*/
- bool const same = (!_decoded.empty() && frame == _decoded.back().frame);
- to_push.push_back (ContentVideo (image, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
+ bool const same = (!_decoded.empty() && frame == _decoded.back().frame.index());
+ to_push.push_back (ContentVideo (image, VideoFrame (frame, same ? EYES_RIGHT : EYES_LEFT), PART_WHOLE));
break;
}
+ case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+ to_push.push_back (ContentVideo (image, VideoFrame (frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT), PART_WHOLE));
+ break;
case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
- to_push.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
- to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_RIGHT_HALF, frame));
+ to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_LEFT), PART_LEFT_HALF));
+ to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_RIGHT), PART_RIGHT_HALF));
break;
case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
- to_push.push_back (ContentVideo (image, EYES_LEFT, PART_TOP_HALF, frame));
- to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_BOTTOM_HALF, frame));
+ to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_LEFT), PART_TOP_HALF));
+ to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_RIGHT), PART_BOTTOM_HALF));
break;
case VIDEO_FRAME_TYPE_3D_LEFT:
- to_push.push_back (ContentVideo (image, EYES_LEFT, PART_WHOLE, frame));
+ to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_LEFT), PART_WHOLE));
break;
case VIDEO_FRAME_TYPE_3D_RIGHT:
- to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
+ to_push.push_back (ContentVideo (image, VideoFrame (frame, EYES_RIGHT), PART_WHOLE));
break;
default:
DCPOMATIC_ASSERT (false);
and the things we are about to push.
*/
- optional<Frame> from_frame;
- optional<Eyes> from_eye;
+ optional<VideoFrame> from;
if (_decoded.empty() && _last_seek_time && _last_seek_accurate) {
- from_frame = _last_seek_time->frames_round (_content->active_video_frame_rate ());
- from_eye = EYES_LEFT;
+ from = VideoFrame (
+ _last_seek_time->frames_round (_content->active_video_frame_rate ()),
+ _content->video->frame_type() == VIDEO_FRAME_TYPE_2D ? EYES_BOTH : EYES_LEFT
+ );
} else if (!_decoded.empty ()) {
- switch (_content->video->frame_type()) {
- case VIDEO_FRAME_TYPE_2D:
- case VIDEO_FRAME_TYPE_3D_LEFT:
- case VIDEO_FRAME_TYPE_3D_RIGHT:
- from_frame = _decoded.back().frame + 1;
- break;
- case VIDEO_FRAME_TYPE_3D:
- case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
- case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
- case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- /* Get the last frame that we have */
- from_frame = _decoded.back().frame;
- from_eye = _decoded.back().eyes;
- /* And increment */
- if (from_eye.get() == EYES_LEFT) {
- from_eye = EYES_RIGHT;
- } else {
- from_eye = EYES_LEFT;
- from_frame = from_frame.get() + 1;
- }
+ /* Get the last frame we have */
+ from = _decoded.back().frame;
+ /* And move onto the first frame we need */
+ ++(*from);
+ if (_content->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || _content->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
+ /* The previous ++ will increment a 3D-left-eye to the same index right-eye. If we are dealing with
+ a single-eye source we need an extra ++ to move back to the same eye.
+ */
+ ++(*from);
}
}
/* If we've pre-rolled on a seek we may now receive out-of-order frames
(frames before the last seek time) which we can just ignore.
*/
-
- if (from_frame && from_frame.get() > to_push.front().frame) {
+ if (from && (*from) > to_push.front().frame) {
return;
}
- if (from_frame) {
- switch (_content->video->frame_type ()) {
- case VIDEO_FRAME_TYPE_2D:
- fill_one_eye (from_frame.get(), to_push.front().frame, EYES_BOTH);
- break;
- case VIDEO_FRAME_TYPE_3D:
- case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
- case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
- case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- fill_both_eyes (from_frame.get(), from_eye.get(), to_push.front().frame, to_push.front().eyes);
- break;
- case VIDEO_FRAME_TYPE_3D_LEFT:
- fill_one_eye (from_frame.get(), to_push.front().frame, EYES_LEFT);
- break;
- case VIDEO_FRAME_TYPE_3D_RIGHT:
- fill_one_eye (from_frame.get(), to_push.front().frame, EYES_RIGHT);
- break;
+ int const max_decoded_size = 96;
+
+ /* If _decoded is already `full' there is no point in adding anything more to it,
+ as the new stuff will just be removed again.
+ */
+ if (_decoded.size() < max_decoded_size) {
+ if (from) {
+ switch (_content->video->frame_type ()) {
+ case VIDEO_FRAME_TYPE_2D:
+ fill_one_eye (from->index(), to_push.front().frame.index(), EYES_BOTH);
+ break;
+ case VIDEO_FRAME_TYPE_3D:
+ case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+ case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+ case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+ fill_both_eyes (from.get(), to_push.front().frame);
+ break;
+ case VIDEO_FRAME_TYPE_3D_LEFT:
+ fill_one_eye (from->index(), to_push.front().frame.index(), EYES_LEFT);
+ break;
+ case VIDEO_FRAME_TYPE_3D_RIGHT:
+ fill_one_eye (from->index(), to_push.front().frame.index(), EYES_RIGHT);
+ break;
+ }
}
- }
- copy (to_push.begin(), to_push.end(), back_inserter (_decoded));
+ copy (to_push.begin(), to_push.end(), back_inserter (_decoded));
+ }
/* We can't let this build up too much or we will run out of memory. There is a
`best' value for the allowed size of _decoded which balances memory use
with decoding efficiency (lack of seeks). Throwing away video frames here
is not a problem for correctness, so do it.
*/
- while (_decoded.size() > 96) {
+ while (_decoded.size() > max_decoded_size) {
_decoded.pop_back ();
}
}
+/* Handle a seek in our content: discard everything decoded so far and
+   record where we sought to (and whether the seek was accurate).
+   give() later reads _last_seek_time / _last_seek_accurate to decide
+   where post-seek filling should start.
+*/
void
VideoDecoder::seek (ContentTime s, bool accurate)
{
+	_log->log (String::compose ("VD seek to %1", to_string(s)), LogEntry::TYPE_DEBUG_DECODE);
	_decoded.clear ();
	_last_seek_time = s;
	_last_seek_accurate = accurate;
}
-
-/** Set this decoder never to produce any data */
-void
-VideoDecoder::set_ignore ()
-{
- _ignore = true;
-}