X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fvideo_decoder.cc;h=fcc262163ecab19844fdd992b641d4d3a2d12c31;hb=e60bb3e51bd1508b149e6b8f6608f09b5196ae26;hp=cab1a979fc2dc48502015e7b73940f7889f3ea16;hpb=09d7c1aeab76040e1dcbd829fbf734c4f4706180;p=dcpomatic.git

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index cab1a979f..fcc262163 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -21,7 +21,9 @@
 #include "image.h"
 #include "image_proxy.h"
 #include "raw_image_proxy.h"
-#include "content_video.h"
+#include "raw_image_proxy.h"
+#include "film.h"
+#include "log.h"
 
 #include "i18n.h"
 
@@ -39,18 +41,18 @@ VideoDecoder::VideoDecoder (shared_ptr<VideoContent> c)
 #else
 	: _video_content (c)
 #endif
-	, _same (false)
 	, _last_seek_accurate (true)
+	, _ignore_video (false)
 {
 	_black_image.reset (new Image (PIX_FMT_RGB24, _video_content->video_size(), true));
 	_black_image->make_black ();
 }
 
 list<ContentVideo>
-VideoDecoder::decoded_video (VideoFrame frame)
+VideoDecoder::decoded_video (Frame frame)
 {
 	list<ContentVideo> output;
-	
+
 	for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
 		if (i->frame == frame) {
 			output.push_back (*i);
@@ -66,7 +68,7 @@ VideoDecoder::decoded_video (VideoFrame frame)
  * @return Frames; there may be none (if there is no video there), 1 for 2D or 2 for 3D.
  */
 list<ContentVideo>
-VideoDecoder::get_video (VideoFrame frame, bool accurate)
+VideoDecoder::get_video (Frame frame, bool accurate)
 {
 	/* At this stage, if we have get_video()ed before, _decoded_video will contain the last frame that this method
 	   returned (and possibly a few more). If the requested frame is not in _decoded_video and it is not the next
@@ -128,7 +130,7 @@ VideoDecoder::get_video (VideoFrame frame, bool accurate)
 
 /** Fill _decoded_video from `from' up to, but not including, `to' */
 void
-VideoDecoder::fill_2d (VideoFrame from, VideoFrame to)
+VideoDecoder::fill_2d (Frame from, Frame to)
 {
 	if (to == 0) {
 		/* Already OK */
@@ -145,24 +147,19 @@ VideoDecoder::fill_2d (VideoFrame from, VideoFrame to)
 		filler_part = _decoded_video.back().part;
 	}
 
-	VideoFrame filler_frame = from;
-	
-	while (filler_frame < to) {
-		
+	for (Frame i = from; i < to; ++i) {
 #ifdef DCPOMATIC_DEBUG
 		test_gaps++;
 #endif
 		_decoded_video.push_back (
-			ContentVideo (filler_image, EYES_BOTH, filler_part, filler_frame)
+			ContentVideo (filler_image, EYES_BOTH, filler_part, i)
 			);
-
-		++filler_frame;
 	}
 }
 
 /** Fill _decoded_video from `from' up to, but not including, `to' */
 void
-VideoDecoder::fill_3d (VideoFrame from, VideoFrame to, Eyes eye)
+VideoDecoder::fill_3d (Frame from, Frame to, Eyes eye)
 {
 	if (to == 0 && eye == EYES_LEFT) {
 		/* Already OK */
@@ -190,7 +187,7 @@ VideoDecoder::fill_3d (VideoFrame from, VideoFrame to, Eyes eye)
 		}
 	}
 
-	VideoFrame filler_frame = from;
+	Frame filler_frame = from;
 	Eyes filler_eye = _decoded_video.empty() ? EYES_LEFT : _decoded_video.back().eyes;
 
 	if (_decoded_video.empty ()) {
@@ -227,15 +224,19 @@ VideoDecoder::fill_3d (VideoFrame from, VideoFrame to, Eyes eye)
 		}
 	}
 }
-	
+
 /** Called by subclasses when they have a video frame ready */
 void
-VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
+VideoDecoder::video (shared_ptr<const ImageProxy> image, Frame frame)
 {
+	if (_ignore_video) {
+		return;
+	}
+
	/* We may receive the same frame index twice for 3D, and we need to know
	   when that happens.
	*/
-	_same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);
+	bool const same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);
 
 	/* Work out what we are going to push into _decoded_video next */
 	list<ContentVideo> to_push;
@@ -244,7 +245,7 @@ VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
 		to_push.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
 		break;
 	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
-		to_push.push_back (ContentVideo (image, _same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
+		to_push.push_back (ContentVideo (image, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
 		break;
 	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
 		to_push.push_back (ContentVideo (image, EYES_LEFT, PART_LEFT_HALF, frame));
@@ -261,7 +262,7 @@ VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
 		to_push.push_back (ContentVideo (image, EYES_RIGHT, PART_WHOLE, frame));
 		break;
 	default:
-		assert (false);
+		DCPOMATIC_ASSERT (false);
 	}
 
 	/* Now VideoDecoder is required never to have gaps in the frames that it presents
@@ -269,9 +270,9 @@ VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
 	   and the things we are about to push.
 	*/
 
-	boost::optional<VideoFrame> from;
-	boost::optional<VideoFrame> to;
-	
+	boost::optional<Frame> from;
+	boost::optional<Frame> to;
+
 	if (_decoded_video.empty() && _last_seek_time && _last_seek_accurate) {
 		from = _last_seek_time->frames (_video_content->video_frame_rate ());
 		to = to_push.front().frame;
@@ -280,6 +281,17 @@ VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
 		to = to_push.front().frame;
 	}
 
+	/* It has been known that this method receives frames out of order; at this
+	   point I'm not sure why, but we'll just ignore them.
+	*/
+
+	if (from && to && from.get() > to.get()) {
+		_video_content->film()->log()->log (
+			String::compose ("Ignoring out-of-order decoded frame %1 after %2", to.get(), from.get()), Log::TYPE_WARNING
+			);
+		return;
+	}
+
 	if (from) {
 		if (_video_content->video_frame_type() == VIDEO_FRAME_TYPE_2D) {
 			fill_2d (from.get(), to.get ());
@@ -289,6 +301,11 @@ VideoDecoder::video (shared_ptr<const ImageProxy> image, VideoFrame frame)
 	}
 
 	copy (to_push.begin(), to_push.end(), back_inserter (_decoded_video));
+
+	/* We can't let this build up too much or we will run out of memory. We need to allow
+	   the most frames that can exist between blocks of sound in a multiplexed file.
+	*/
+	DCPOMATIC_ASSERT (_decoded_video.size() <= 96);
 }
 
 void
@@ -298,3 +315,10 @@ VideoDecoder::seek (ContentTime s, bool accurate)
 	_last_seek_time = s;
 	_last_seek_accurate = accurate;
 }
+
+/** Set this player never to produce any video data */
+void
+VideoDecoder::set_ignore_video ()
+{
+	_ignore_video = true;
+}
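
For readers following the logic of this patch rather than applying it, the sketch below is a stand-alone illustration, not dcpomatic code, of the three behaviours added to VideoDecoder::video(): the early return once the decoder has been told to ignore video, the guard that drops out-of-order frames with a warning, and the cap on how many decoded frames may accumulate before something is assumed to have gone wrong. ToyDecoder, set_ignore() and the plain list of Frame indices are invented stand-ins; the real decoder buffers ContentVideo objects, fills gaps through fill_2d()/fill_3d(), and (as above) replaces the old _same member with a local `same` flag.

/* A minimal sketch of the bookkeeping this patch adds to VideoDecoder::video().
   Not dcpomatic code: ToyDecoder and set_ignore() are stand-ins for illustration.
*/
#include <cassert>
#include <cstdint>
#include <iostream>
#include <list>

typedef std::int64_t Frame;

class ToyDecoder
{
public:
	ToyDecoder ()
		: _ignore (false)
	{}

	/* Stand-in for VideoDecoder::set_ignore_video() */
	void set_ignore ()
	{
		_ignore = true;
	}

	/* Stand-in for VideoDecoder::video(); frames normally arrive in order */
	void video (Frame frame)
	{
		if (_ignore) {
			/* Decoder has been told never to produce video */
			return;
		}

		if (!_decoded.empty() && frame < _decoded.back()) {
			/* Out-of-order frame: warn and drop it, as the patch does */
			std::cout << "ignoring out-of-order frame " << frame << "\n";
			return;
		}

		/* Fill any gap so the buffer never has holes; the real code repeats
		   the last image (or black) via fill_2d() / fill_3d().
		*/
		Frame const next = _decoded.empty() ? frame : _decoded.back() + 1;
		for (Frame i = next; i < frame; ++i) {
			_decoded.push_back (i);
		}

		_decoded.push_back (frame);

		/* Equivalent of DCPOMATIC_ASSERT (_decoded_video.size() <= 96) */
		assert (_decoded.size() <= 96);
	}

	std::list<Frame> const & decoded () const
	{
		return _decoded;
	}

private:
	bool _ignore;
	std::list<Frame> _decoded;
};

int main ()
{
	ToyDecoder d;
	d.video (0);
	d.video (3);	/* frames 1 and 2 are filled in to keep the buffer contiguous */
	d.video (2);	/* arrives out of order and is reported, then ignored */

	for (std::list<Frame>::const_iterator i = d.decoded().begin(); i != d.decoded().end(); ++i) {
		std::cout << *i << "\n";
	}

	return 0;
}

Compiled and run, this prints frames 0 through 3 once each, with the late frame 2 reported and dropped: the contiguous-buffer guarantee that the new warning and assertion in the real decoder are there to protect.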