X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fvideo_decoder.cc;h=fcc262163ecab19844fdd992b641d4d3a2d12c31;hb=e60bb3e51bd1508b149e6b8f6608f09b5196ae26;hp=bd609d1683ddd0fca0ee82ca9da8bd7eeaed725b;hpb=034feb503b0a38eb82c21ae8d9f83522fc63a25c;p=dcpomatic.git

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index bd609d168..fcc262163 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -19,12 +19,18 @@
 
 #include "video_decoder.h"
 #include "image.h"
-#include "content_video.h"
+#include "image_proxy.h"
+#include "raw_image_proxy.h"
+#include "raw_image_proxy.h"
+#include "film.h"
+#include "log.h"
 
 #include "i18n.h"
 
 using std::cout;
 using std::list;
+using std::max;
+using std::back_inserter;
 using boost::shared_ptr;
 using boost::optional;
 
@@ -35,42 +41,57 @@ VideoDecoder::VideoDecoder (shared_ptr<const VideoContent> c)
 #else
 	: _video_content (c)
 #endif
+	, _last_seek_accurate (true)
+	, _ignore_video (false)
 {
-
+	_black_image.reset (new Image (PIX_FMT_RGB24, _video_content->video_size(), true));
+	_black_image->make_black ();
 }
 
-optional<ContentVideo>
-VideoDecoder::decoded_video (VideoFrame frame)
+list<ContentVideo>
+VideoDecoder::decoded_video (Frame frame)
 {
+	list<ContentVideo> output;
+
 	for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
 		if (i->frame == frame) {
-			return *i;
+			output.push_back (*i);
 		}
 	}
 
-	return optional<ContentVideo> ();
+	return output;
 }
 
-optional<ContentVideo>
-VideoDecoder::get_video (VideoFrame frame, bool accurate)
+/** Get all frames which exist in the content at a given frame index.
+ *  @param frame Frame index.
+ *  @param accurate true to try hard to return frames at the precise time that was requested, otherwise frames nearby may be returned.
+ *  @return Frames; there may be none (if there is no video there), 1 for 2D or 2 for 3D.
+ */
+list<ContentVideo>
+VideoDecoder::get_video (Frame frame, bool accurate)
 {
-	if (_decoded_video.empty() || (frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1))) {
-		/* Either we have no decoded data, or what we do have is a long way from what we want: seek */
+	/* At this stage, if we have get_video()ed before, _decoded_video will contain the last frame that this
+	   method returned (and possibly a few more). If the requested frame is not in _decoded_video and it is not the next
+	   one after the end of _decoded_video we need to seek.
+	*/
+
+	if (_decoded_video.empty() || frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1)) {
 		seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
 	}
 
-	optional<ContentVideo> dec;
+	list<ContentVideo> dec;
 
 	/* Now enough pass() calls should either:
 	 *  (a) give us what we want, or
-	 *  (b) hit the end of the decoder.
+	 *  (b) give us something after what we want, indicating that we will never get what we want, or
+	 *  (c) hit the end of the decoder.
 	 */
 	if (accurate) {
 		/* We are being accurate, so we want the right frame.
 		 * This could all be one statement but it's split up for clarity.
 		 */
 		while (true) {
-			if (decoded_video (frame)) {
+			if (!decoded_video(frame).empty ()) {
 				/* We got what we want */
 				break;
 			}
@@ -94,77 +115,210 @@ VideoDecoder::get_video (VideoFrame frame, bool accurate)
 		/* Any frame will do: use the first one that comes out of pass() */
 		while (_decoded_video.empty() && !pass ()) {}
 		if (!_decoded_video.empty ()) {
-			dec = _decoded_video.front ();
+			dec.push_back (_decoded_video.front ());
 		}
 	}
 
-	/* Clean up decoded_video */
-	while (!_decoded_video.empty() && _decoded_video.front().frame < (frame - 1)) {
+	/* Clean up _decoded_video; keep the frame we are returning (which may have two images
+	   for 3D), but nothing before that */
+	while (!_decoded_video.empty() && _decoded_video.front().frame < dec.front().frame) {
 		_decoded_video.pop_front ();
 	}
 
 	return dec;
 }
 
+/** Fill _decoded_video from `from' up to, but not including, `to' */
+void
+VideoDecoder::fill_2d (Frame from, Frame to)
+{
+	if (to == 0) {
+		/* Already OK */
+		return;
+	}
+
+	/* Fill with black... */
+	boost::shared_ptr<const ImageProxy> filler_image (new RawImageProxy (_black_image));
+	Part filler_part = PART_WHOLE;
 
-/** Called by subclasses when they have a video frame ready */
+	/* ...unless there's some video we can fill with */
+	if (!_decoded_video.empty ()) {
+		filler_image = _decoded_video.back().image;
+		filler_part = _decoded_video.back().part;
+	}
+
+	for (Frame i = from; i < to; ++i) {
+#ifdef DCPOMATIC_DEBUG
+		test_gaps++;
+#endif
+		_decoded_video.push_back (
+			ContentVideo (filler_image, EYES_BOTH, filler_part, i)
+			);
+	}
+}
+
+/** Fill _decoded_video from `from' up to, but not including, `to' */
 void
-VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
+VideoDecoder::fill_3d (Frame from, Frame to, Eyes eye)
 {
-	/* We should not receive the same thing twice */
-	assert (_decoded_video.empty() || frame != _decoded_video.back().frame);
+	if (to == 0 && eye == EYES_LEFT) {
+		/* Already OK */
+		return;
+	}
+
+	/* Fill with black... */
+	boost::shared_ptr<const ImageProxy> filler_left_image (new RawImageProxy (_black_image));
+	boost::shared_ptr<const ImageProxy> filler_right_image (new RawImageProxy (_black_image));
+	Part filler_left_part = PART_WHOLE;
+	Part filler_right_part = PART_WHOLE;
+
+	/* ...unless there's some video we can fill with */
+	for (list<ContentVideo>::const_reverse_iterator i = _decoded_video.rbegin(); i != _decoded_video.rend(); ++i) {
+		if (i->eyes == EYES_LEFT && !filler_left_image) {
+			filler_left_image = i->image;
+			filler_left_part = i->part;
+		} else if (i->eyes == EYES_RIGHT && !filler_right_image) {
+			filler_right_image = i->image;
+			filler_right_part = i->part;
+		}
 
-	/* Fill in gaps */
-	/* XXX: 3D */
+		if (filler_left_image && filler_right_image) {
+			break;
+		}
+	}
+
+	Frame filler_frame = from;
+	Eyes filler_eye = _decoded_video.empty() ? EYES_LEFT : _decoded_video.back().eyes;
+
+	if (_decoded_video.empty ()) {
+		filler_frame = 0;
+		filler_eye = EYES_LEFT;
+	} else if (_decoded_video.back().eyes == EYES_LEFT) {
+		filler_frame = _decoded_video.back().frame;
+		filler_eye = EYES_RIGHT;
+	} else if (_decoded_video.back().eyes == EYES_RIGHT) {
+		filler_frame = _decoded_video.back().frame + 1;
+		filler_eye = EYES_LEFT;
+	}
+
+	while (filler_frame != to || filler_eye != eye) {
 
-	while (!_decoded_video.empty () && (_decoded_video.back().frame + 1) < frame) {
 #ifdef DCPOMATIC_DEBUG
 		test_gaps++;
 #endif
+
 		_decoded_video.push_back (
 			ContentVideo (
-				_decoded_video.back().image,
-				_decoded_video.back().eyes,
-				_decoded_video.back().frame + 1
+				filler_eye == EYES_LEFT ? filler_left_image : filler_right_image,
+				filler_eye,
+				filler_eye == EYES_LEFT ? filler_left_part : filler_right_part,
+				filler_frame
 				)
 			);
+
+		if (filler_eye == EYES_LEFT) {
+			filler_eye = EYES_RIGHT;
+		} else {
+			filler_eye = EYES_LEFT;
+			++filler_frame;
+		}
+	}
+}
+
+/** Called by subclasses when they have a video frame ready */
+void
+VideoDecoder::video (shared_ptr<const ImageProxy> image, Frame frame)
+{
+	if (_ignore_video) {
+		return;
 	}
-	
+
+	/* We may receive the same frame index twice for 3D, and we need to know
+	   when that happens.
+	*/
+	bool const same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);
+
+	/* Work out what we are going to push into _decoded_video next */
+	list<ContentVideo> to_push;
 	switch (_video_content->video_frame_type ()) {
 	case VIDEO_FRAME_TYPE_2D:
-		_decoded_video.push_back (ContentVideo (image, EYES_BOTH, frame));
+		to_push.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
 		break;
 	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
-		_decoded_video.push_back (ContentVideo (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, frame));
+		to_push.push_back (ContentVideo (image, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
 		break;
 	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-	{
-		int const half = image->size().width / 2;
-		_decoded_video.push_back (ContentVideo (image->crop (Crop (0, half, 0, 0), true), EYES_LEFT, frame));
-		_decoded_video.push_back (ContentVideo (image->crop (Crop (half, 0, 0, 0), true), EYES_RIGHT, frame));
+		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
+		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_RIGHT_HALF, frame));
 		break;
-	}
 	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-	{
-		int const half = image->size().height / 2;
-		_decoded_video.push_back (ContentVideo (image->crop (Crop (0, 0, 0, half), true), EYES_LEFT, frame));
-		_decoded_video.push_back (ContentVideo (image->crop (Crop (0, 0, half, 0), true), EYES_RIGHT, frame));
+		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_TOP_HALF, frame));
+		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_BOTTOM_HALF, frame));
 		break;
-	}
 	case VIDEO_FRAME_TYPE_3D_LEFT:
-		_decoded_video.push_back (ContentVideo (image, EYES_LEFT, frame));
+		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_WHOLE, frame));
 		break;
 	case VIDEO_FRAME_TYPE_3D_RIGHT:
-		_decoded_video.push_back (ContentVideo (image, EYES_RIGHT, frame));
+		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
 		break;
 	default:
-		assert (false);
+		DCPOMATIC_ASSERT (false);
+	}
+
+	/* Now VideoDecoder is required never to have gaps in the frames that it presents
+	   via get_video(). Hence we need to fill in any gap between the last thing in _decoded_video
+	   and the things we are about to push.
+	*/
+
+	boost::optional<Frame> from;
+	boost::optional<Frame> to;
+
+	if (_decoded_video.empty() && _last_seek_time && _last_seek_accurate) {
+		from = _last_seek_time->frames (_video_content->video_frame_rate ());
+		to = to_push.front().frame;
+	} else if (!_decoded_video.empty ()) {
+		from = _decoded_video.back().frame + 1;
+		to = to_push.front().frame;
+	}
+
+	/* It has been known that this method receives frames out of order; at this
+	   point I'm not sure why, but we'll just ignore them.
+	*/
+
+	if (from && to && from.get() > to.get()) {
+		_video_content->film()->log()->log (
+			String::compose ("Ignoring out-of-order decoded frame %1 after %2", to.get(), from.get()), Log::TYPE_WARNING
+			);
+		return;
+	}
+
+	if (from) {
+		if (_video_content->video_frame_type() == VIDEO_FRAME_TYPE_2D) {
+			fill_2d (from.get(), to.get ());
+		} else {
+			fill_3d (from.get(), to.get(), to_push.front().eyes);
+		}
 	}
+
+	copy (to_push.begin(), to_push.end(), back_inserter (_decoded_video));
+
+	/* We can't let this build up too much or we will run out of memory. We need to allow
+	   the most frames that can exist between blocks of sound in a multiplexed file.
+	*/
+	DCPOMATIC_ASSERT (_decoded_video.size() <= 96);
 }
 
 void
-VideoDecoder::seek (ContentTime, bool)
+VideoDecoder::seek (ContentTime s, bool accurate)
 {
 	_decoded_video.clear ();
+	_last_seek_time = s;
+	_last_seek_accurate = accurate;
 }
+
+/** Set this player never to produce any video data */
+void
+VideoDecoder::set_ignore_video ()
+{
+	_ignore_video = true;
+}