/*
    Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
*/
#include "video_decoder.h"
-#include "subtitle.h"
-#include "film.h"
#include "image.h"
+#include "raw_image_proxy.h"
+#include "film.h"
#include "log.h"
-#include "job.h"
+#include "compose.hpp"
+#include <iostream>
#include "i18n.h"
+using std::cout;
+using std::list;
+using std::max;
+using std::back_inserter;
using boost::shared_ptr;
using boost::optional;
/** Construct a VideoDecoder for some video content.
 *  @param c Content that this decoder will decode.
 */
VideoDecoder::VideoDecoder (shared_ptr<const VideoContent> c)
#ifdef DCPOMATIC_DEBUG
	: test_gaps (0)
	, _video_content (c)
#else
	: _video_content (c)
#endif
	, _last_seek_accurate (true)
	, _ignore_video (false)
{
	/* Pre-made black frame, sized to the content; used as filler for gaps
	   in the decoded video (see fill_one_eye / fill_both_eyes).
	*/
	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_content->video_size(), true));
	_black_image->make_black ();
}
-/** Called by subclasses to tell the world that some video data is ready.
- * We find a subtitle then emit it for listeners.
- * @param image frame to emit.
- * @param t Time of the frame within the source, in seconds.
- */
-void
-VideoDecoder::emit_video (shared_ptr<Image> image, double t)
+list<ContentVideo>
+VideoDecoder::decoded_video (Frame frame)
{
- shared_ptr<Subtitle> sub;
- if (_timed_subtitle && _timed_subtitle->displayed_at (t)) {
- sub = _timed_subtitle->subtitle ();
+ list<ContentVideo> output;
+
+ for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
+ if (i->frame == frame) {
+ output.push_back (*i);
+ }
}
- signal_video (image, false, sub, t);
+ return output;
}
-bool
-VideoDecoder::have_last_video () const
+/** Get all frames which exist in the content at a given frame index.
+ * @param frame Frame index.
+ * @param accurate true to try hard to return frames at the precise time that was requested, otherwise frames nearby may be returned.
+ * @return Frames; there may be none (if there is no video there), 1 for 2D or 2 for 3D.
+ */
+list<ContentVideo>
+VideoDecoder::get_video (Frame frame, bool accurate)
{
- return _last_image;
+ /* At this stage, if we have get_video()ed before, _decoded_video will contain the last frame that this
+ method returned (and possibly a few more). If the requested frame is not in _decoded_video and it is not the next
+ one after the end of _decoded_video we need to seek.
+ */
+
+ _video_content->film()->log()->log (String::compose ("VD has request for %1", frame), LogEntry::TYPE_DEBUG_DECODE);
+
+ if (_decoded_video.empty() || frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1)) {
+ seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
+ }
+
+ list<ContentVideo> dec;
+
+ /* Now enough pass() calls should either:
+ * (a) give us what we want, or
+ * (b) give us something after what we want, indicating that we will never get what we want, or
+ * (c) hit the end of the decoder.
+ */
+ if (accurate) {
+ /* We are being accurate, so we want the right frame.
+ * This could all be one statement but it's split up for clarity.
+ */
+ while (true) {
+ if (!decoded_video(frame).empty ()) {
+ /* We got what we want */
+ break;
+ }
+
+ if (pass (PASS_REASON_VIDEO, accurate)) {
+ /* The decoder has nothing more for us */
+ break;
+ }
+
+ if (!_decoded_video.empty() && _decoded_video.front().frame > frame) {
+ /* We're never going to get the frame we want. Perhaps the caller is asking
+ * for a video frame before the content's video starts (if its audio
+ * begins before its video, for example).
+ */
+ break;
+ }
+ }
+
+ dec = decoded_video (frame);
+ } else {
+ /* Any frame will do: use the first one that comes out of pass() */
+ while (_decoded_video.empty() && !pass (PASS_REASON_VIDEO, accurate)) {}
+ if (!_decoded_video.empty ()) {
+ dec.push_back (_decoded_video.front ());
+ }
+ }
+
+ /* Clean up _decoded_video; keep the frame we are returning (which may have two images
+ for 3D), but nothing before that */
+ while (!_decoded_video.empty() && _decoded_video.front().frame < dec.front().frame) {
+ _decoded_video.pop_front ();
+ }
+
+ return dec;
}
-/** Called by subclasses to repeat the last video frame that we
- * passed to emit_video(). If emit_video hasn't yet been called,
- * we will generate a black frame.
+/** Fill _decoded_video from `from' up to, but not including, `to' with
+ * a frame for one particular Eyes value (which could be EYES_BOTH,
+ * EYES_LEFT or EYES_RIGHT)
*/
void
-VideoDecoder::repeat_last_video (double t)
+VideoDecoder::fill_one_eye (Frame from, Frame to, Eyes eye)
{
- if (!_last_image) {
- _last_image.reset (new SimpleImage (pixel_format(), native_size(), true));
- _last_image->make_black ();
+ if (to == 0) {
+ /* Already OK */
+ return;
+ }
+
+ /* Fill with black... */
+ shared_ptr<const ImageProxy> filler_image (new RawImageProxy (_black_image));
+ Part filler_part = PART_WHOLE;
+
+ /* ...unless there's some video we can fill with */
+ if (!_decoded_video.empty ()) {
+ filler_image = _decoded_video.back().image;
+ filler_part = _decoded_video.back().part;
}
- signal_video (_last_image, true, _last_subtitle, t);
+ for (Frame i = from; i < to; ++i) {
+#ifdef DCPOMATIC_DEBUG
+ test_gaps++;
+#endif
+ _decoded_video.push_back (
+ ContentVideo (filler_image, eye, filler_part, i)
+ );
+ }
}
-/** Emit our signal to say that some video data is ready.
- * @param image Video frame.
- * @param same true if `image' is the same as the last one we emitted.
- * @param sub Subtitle for this frame, or 0.
+/** Fill _decoded_video from `from' up to, but not including, `to'
+ * adding both left and right eye frames.
*/
void
-VideoDecoder::signal_video (shared_ptr<Image> image, bool same, shared_ptr<Subtitle> sub, double t)
+VideoDecoder::fill_both_eyes (Frame from, Frame to, Eyes eye)
{
- TIMING (N_("Decoder emits %1"), _video_frame);
- Video (image, same, sub);
- ++_video_frame;
+ if (to == 0 && eye == EYES_LEFT) {
+ /* Already OK */
+ return;
+ }
+
+ /* Fill with black... */
+ shared_ptr<const ImageProxy> filler_left_image (new RawImageProxy (_black_image));
+ shared_ptr<const ImageProxy> filler_right_image (new RawImageProxy (_black_image));
+ Part filler_left_part = PART_WHOLE;
+ Part filler_right_part = PART_WHOLE;
+
+ /* ...unless there's some video we can fill with */
+ for (list<ContentVideo>::const_reverse_iterator i = _decoded_video.rbegin(); i != _decoded_video.rend(); ++i) {
+ if (i->eyes == EYES_LEFT && !filler_left_image) {
+ filler_left_image = i->image;
+ filler_left_part = i->part;
+ } else if (i->eyes == EYES_RIGHT && !filler_right_image) {
+ filler_right_image = i->image;
+ filler_right_part = i->part;
+ }
+
+ if (filler_left_image && filler_right_image) {
+ break;
+ }
+ }
- _last_image = image;
- _last_subtitle = sub;
- _last_source_time = t;
+ Frame filler_frame = from;
+ Eyes filler_eye = _decoded_video.empty() ? EYES_LEFT : _decoded_video.back().eyes;
+
+ if (_decoded_video.empty ()) {
+ filler_frame = 0;
+ filler_eye = EYES_LEFT;
+ } else if (_decoded_video.back().eyes == EYES_LEFT) {
+ filler_frame = _decoded_video.back().frame;
+ filler_eye = EYES_RIGHT;
+ } else if (_decoded_video.back().eyes == EYES_RIGHT) {
+ filler_frame = _decoded_video.back().frame + 1;
+ filler_eye = EYES_LEFT;
+ }
+
+ while (filler_frame != to || filler_eye != eye) {
+
+#ifdef DCPOMATIC_DEBUG
+ test_gaps++;
+#endif
+
+ _decoded_video.push_back (
+ ContentVideo (
+ filler_eye == EYES_LEFT ? filler_left_image : filler_right_image,
+ filler_eye,
+ filler_eye == EYES_LEFT ? filler_left_part : filler_right_part,
+ filler_frame
+ )
+ );
+
+ if (filler_eye == EYES_LEFT) {
+ filler_eye = EYES_RIGHT;
+ } else {
+ filler_eye = EYES_LEFT;
+ ++filler_frame;
+ }
+ }
}
-/** Set up the current subtitle. This will be put onto frames that
- * fit within its time specification. s may be 0 to say that there
- * is no current subtitle.
- * @param s New current subtitle, or 0.
- */
+/** Called by subclasses when they have a video frame ready */
void
-VideoDecoder::emit_subtitle (shared_ptr<TimedSubtitle> s)
+VideoDecoder::video (shared_ptr<const ImageProxy> image, Frame frame)
{
- _timed_subtitle = s;
-
- if (_timed_subtitle) {
- Position const p = _timed_subtitle->subtitle()->position ();
- _timed_subtitle->subtitle()->set_position (Position (p.x - _film->crop().left, p.y - _film->crop().top));
+ if (_ignore_video) {
+ return;
+ }
+
+ _video_content->film()->log()->log (String::compose ("VD receives %1", frame), LogEntry::TYPE_DEBUG_DECODE);
+
+ /* We may receive the same frame index twice for 3D, and we need to know
+ when that happens.
+ */
+ bool const same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);
+
+ /* Work out what we are going to push into _decoded_video next */
+ list<ContentVideo> to_push;
+ switch (_video_content->video_frame_type ()) {
+ case VIDEO_FRAME_TYPE_2D:
+ to_push.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
+ break;
+ case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+ to_push.push_back (ContentVideo (image, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
+ break;
+ case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+ to_push.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
+ to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_RIGHT_HALF, frame));
+ break;
+ case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+ to_push.push_back (ContentVideo (image, EYES_LEFT, PART_TOP_HALF, frame));
+ to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_BOTTOM_HALF, frame));
+ break;
+ case VIDEO_FRAME_TYPE_3D_LEFT:
+ to_push.push_back (ContentVideo (image, EYES_LEFT, PART_WHOLE, frame));
+ break;
+ case VIDEO_FRAME_TYPE_3D_RIGHT:
+ to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
+ break;
+ default:
+ DCPOMATIC_ASSERT (false);
+ }
+
+ /* Now VideoDecoder is required never to have gaps in the frames that it presents
+ via get_video(). Hence we need to fill in any gap between the last thing in _decoded_video
+ and the things we are about to push.
+ */
+
+ optional<Frame> from;
+ optional<Frame> to;
+
+ if (_decoded_video.empty() && _last_seek_time && _last_seek_accurate) {
+ from = _last_seek_time->frames_round (_video_content->video_frame_rate ());
+ to = to_push.front().frame;
+ } else if (!_decoded_video.empty ()) {
+ from = _decoded_video.back().frame + 1;
+ to = to_push.front().frame;
+ }
+
+ /* If we've pre-rolled on a seek we may now receive out-of-order frames
+ (frames before the last seek time) which we can just ignore.
+ */
+
+ if (from && to && from.get() > to.get()) {
+ return;
+ }
+
+ if (from) {
+ switch (_video_content->video_frame_type ()) {
+ case VIDEO_FRAME_TYPE_2D:
+ fill_one_eye (from.get(), to.get (), EYES_BOTH);
+ break;
+ case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+ case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+ case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+ fill_both_eyes (from.get(), to.get(), to_push.front().eyes);
+ break;
+ case VIDEO_FRAME_TYPE_3D_LEFT:
+ fill_one_eye (from.get(), to.get (), EYES_LEFT);
+ break;
+ case VIDEO_FRAME_TYPE_3D_RIGHT:
+ fill_one_eye (from.get(), to.get (), EYES_RIGHT);
+ }
+ }
+
+ copy (to_push.begin(), to_push.end(), back_inserter (_decoded_video));
+
+ /* We can't let this build up too much or we will run out of memory. There is a
+ `best' value for the allowed size of _decoded_video which balances memory use
+ with decoding efficiency (lack of seeks). Throwing away video frames here
+ is not a problem for correctness, so do it.
+ */
+ while (_decoded_video.size() > 96) {
+ _decoded_video.pop_back ();
}
}
/** Note a seek.  Discards any buffered frames and records the seek time and
 *  accuracy, which video() uses to work out where gap-filling should start
 *  after an accurate seek.
 *  @param s Seek time.
 *  @param accurate true if the seek is frame-accurate.
 */
void
VideoDecoder::seek (ContentTime s, bool accurate)
{
	_decoded_video.clear ();
	_last_seek_time = s;
	_last_seek_accurate = accurate;
}
/** Set this decoder never to produce any video data; video() checks this
 *  flag and discards incoming frames when it is set.
 */
void
VideoDecoder::set_ignore_video ()
{
	_ignore_video = true;
}