#include "image.h"
#include "image_proxy.h"
#include "raw_image_proxy.h"
-#include "content_video.h"
+#include "raw_image_proxy.h"
+#include "film.h"
+#include "log.h"
#include "i18n.h"
#else
: _video_content (c)
#endif
- , _same (false)
+ , _last_seek_accurate (true)
+ , _ignore_video (false)
{
_black_image.reset (new Image (PIX_FMT_RGB24, _video_content->video_size(), true));
_black_image->make_black ();
}
list<ContentVideo>
-VideoDecoder::decoded_video (VideoFrame frame)
+VideoDecoder::decoded_video (Frame frame)
{
list<ContentVideo> output;
-
+
for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
if (i->frame == frame) {
output.push_back (*i);
* @return Frames; there may be none (if there is no video there), 1 for 2D or 2 for 3D.
*/
list<ContentVideo>
-VideoDecoder::get_video (VideoFrame frame, bool accurate)
+VideoDecoder::get_video (Frame frame, bool accurate)
{
/* At this stage, if we have get_video()ed before, _decoded_video will contain the last frame that this
method returned (and possibly a few more). If the requested frame is not in _decoded_video and it is not the next
one after the end of _decoded_video we need to seek.
*/
-
+
if (_decoded_video.empty() || frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1)) {
seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
}
return dec;
}
-/** Fill _decoded_video up to, but not including, the specified frame */
+/** Fill _decoded_video from `from' up to, but not including, `to' */
void
-VideoDecoder::fill_up_to_2d (VideoFrame frame)
+VideoDecoder::fill_2d (Frame from, Frame to)
{
- if (frame == 0) {
+ if (to == 0) {
/* Already OK */
return;
}
filler_part = _decoded_video.back().part;
}
- VideoFrame filler_frame = _decoded_video.empty() ? 0 : (_decoded_video.back().frame + 1);
- while (filler_frame < frame) {
-
+ for (Frame i = from; i < to; ++i) {
#ifdef DCPOMATIC_DEBUG
test_gaps++;
#endif
-
_decoded_video.push_back (
- ContentVideo (filler_image, EYES_BOTH, filler_part, filler_frame)
+ ContentVideo (filler_image, EYES_BOTH, filler_part, i)
);
-
- ++filler_frame;
}
}
-/** Fill _decoded_video up to, but not including, the specified frame and eye */
+/** Fill _decoded_video from `from' up to, but not including, `to' */
void
-VideoDecoder::fill_up_to_3d (VideoFrame frame, Eyes eye)
+VideoDecoder::fill_3d (Frame from, Frame to, Eyes eye)
{
- if (frame == 0 && eye == EYES_LEFT) {
+ if (to == 0 && eye == EYES_LEFT) {
/* Already OK */
return;
}
}
}
- VideoFrame filler_frame = _decoded_video.empty() ? 0 : _decoded_video.back().frame;
+ Frame filler_frame = from;
Eyes filler_eye = _decoded_video.empty() ? EYES_LEFT : _decoded_video.back().eyes;
if (_decoded_video.empty ()) {
filler_eye = EYES_LEFT;
}
- while (filler_frame != frame || filler_eye != eye) {
+ while (filler_frame != to || filler_eye != eye) {
#ifdef DCPOMATIC_DEBUG
test_gaps++;
}
}
}
-
+
/** Called by subclasses when they have a video frame ready */
void
-VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
+VideoDecoder::video (shared_ptr<const ImageProxy> image, Frame frame)
{
+ if (_ignore_video) {
+ return;
+ }
+
/* We may receive the same frame index twice for 3D, and we need to know
when that happens.
*/
- _same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);
+ bool const same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);
/* Work out what we are going to push into _decoded_video next */
list<ContentVideo> to_push;
to_push.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
break;
case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- to_push.push_back (ContentVideo (image, _same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
+ to_push.push_back (ContentVideo (image, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
break;
case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
to_push.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
break;
default:
- assert (false);
+ DCPOMATIC_ASSERT (false);
}
/* Now VideoDecoder is required never to have gaps in the frames that it presents
and the things we are about to push.
*/
- if (_video_content->video_frame_type() == VIDEO_FRAME_TYPE_2D) {
- fill_up_to_2d (to_push.front().frame);
- } else {
- fill_up_to_3d (to_push.front().frame, to_push.front().eyes);
+ boost::optional<Frame> from;
+ boost::optional<Frame> to;
+
+ if (_decoded_video.empty() && _last_seek_time && _last_seek_accurate) {
+ from = _last_seek_time->frames (_video_content->video_frame_rate ());
+ to = to_push.front().frame;
+ } else if (!_decoded_video.empty ()) {
+ from = _decoded_video.back().frame + 1;
+ to = to_push.front().frame;
+ }
+
+ /* It has been known that this method receives frames out of order; the
+    cause is not yet understood, so we just ignore any such frames.
+ */
+
+ if (from && to && from.get() > to.get()) {
+ _video_content->film()->log()->log (
+ String::compose ("Ignoring out-of-order decoded frame %1 after %2", to.get(), from.get()), Log::TYPE_WARNING
+ );
+ return;
+ }
+
+ if (from) {
+ if (_video_content->video_frame_type() == VIDEO_FRAME_TYPE_2D) {
+ fill_2d (from.get(), to.get ());
+ } else {
+ fill_3d (from.get(), to.get(), to_push.front().eyes);
+ }
}
copy (to_push.begin(), to_push.end(), back_inserter (_decoded_video));
+
+ /* We can't let this build up too much or we will run out of memory. We need to allow
+ the most frames that can exist between blocks of sound in a multiplexed file.
+ */
+ DCPOMATIC_ASSERT (_decoded_video.size() <= 96);
}
void
-VideoDecoder::seek (ContentTime, bool)
+VideoDecoder::seek (ContentTime s, bool accurate)
{
_decoded_video.clear ();
+ _last_seek_time = s;
+ _last_seek_accurate = accurate;
}
+/** Set this decoder never to produce any video data */
+void
+VideoDecoder::set_ignore_video ()
+{
+ _ignore_video = true;
+}