X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fvideo_decoder.cc;h=3625e074fba0460fc63729a00bc30c446f7d769b;hb=14cccb179fff7bbbf422e13f9d2e3264239c93c7;hp=bd609d1683ddd0fca0ee82ca9da8bd7eeaed725b;hpb=2c0478d2b33906845b9d910668b12fe3e8f03a7c;p=dcpomatic.git

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index bd609d168..3625e074f 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,170 +1,106 @@
 /*
-    Copyright (C) 2012-2014 Carl Hetherington
+    Copyright (C) 2012-2018 Carl Hetherington
 
-    This program is free software; you can redistribute it and/or modify
+    This file is part of DCP-o-matic.
+
+    DCP-o-matic is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
 
-    This program is distributed in the hope that it will be useful,
+    DCP-o-matic is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
 
 */
 
 #include "video_decoder.h"
 #include "image.h"
-#include "content_video.h"
+#include "raw_image_proxy.h"
+#include "film.h"
+#include "log.h"
+#include "compose.hpp"
+#include
+#include
 
 #include "i18n.h"
 
 using std::cout;
 using std::list;
+using std::max;
+using std::back_inserter;
 using boost::shared_ptr;
 using boost::optional;
 
-VideoDecoder::VideoDecoder (shared_ptr<const VideoContent> c)
-#ifdef DCPOMATIC_DEBUG
-	: test_gaps (0)
-	, _video_content (c)
-#else
-	: _video_content (c)
-#endif
-{
-
-}
-
-optional<ContentVideo>
-VideoDecoder::decoded_video (VideoFrame frame)
-{
-	for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
-		if (i->frame == frame) {
-			return *i;
-		}
-	}
-
-	return optional<ContentVideo> ();
-}
-
-optional<ContentVideo>
-VideoDecoder::get_video (VideoFrame frame, bool accurate)
+VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c, shared_ptr<Log> log)
+	: DecoderPart (parent, log)
+	, _content (c)
 {
-	if (_decoded_video.empty() || (frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1))) {
-		/* Either we have no decoded data, or what we do have is a long way from what we want: seek */
-		seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
-	}
-
-	optional<ContentVideo> dec;
-
-	/* Now enough pass() calls should either:
-	 * (a) give us what we want, or
-	 * (b) hit the end of the decoder.
-	 */
-	if (accurate) {
-		/* We are being accurate, so we want the right frame.
-		 * This could all be one statement but it's split up for clarity.
-		 */
-		while (true) {
-			if (decoded_video (frame)) {
-				/* We got what we want */
-				break;
-			}
-
-			if (pass ()) {
-				/* The decoder has nothing more for us */
-				break;
-			}
-
-			if (!_decoded_video.empty() && _decoded_video.front().frame > frame) {
-				/* We're never going to get the frame we want.  Perhaps the caller is asking
-				 * for a video frame before the content's video starts (if its audio
-				 * begins before its video, for example).
-				 */
-				break;
-			}
-		}
-
-		dec = decoded_video (frame);
-	} else {
-		/* Any frame will do: use the first one that comes out of pass() */
-		while (_decoded_video.empty() && !pass ()) {}
-		if (!_decoded_video.empty ()) {
-			dec = _decoded_video.front ();
-		}
-	}
-
-	/* Clean up decoded_video */
-	while (!_decoded_video.empty() && _decoded_video.front().frame < (frame - 1)) {
-		_decoded_video.pop_front ();
-	}
 
-	return dec;
 }
 
-
-/** Called by subclasses when they have a video frame ready */
+/** Called by decoder classes when they have a video frame ready.
+ *  @param frame Frame index within the content; this does not take into account 3D
+ *  so for 3D_ALTERNATE this value goes:
+ *      0: frame 0 left
+ *      1: frame 0 right
+ *      2: frame 1 left
+ *      3: frame 1 right
+ *  and so on.
+ */
 void
-VideoDecoder::video (shared_ptr<const Image> image, VideoFrame frame)
+VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
 {
-	/* We should not receive the same thing twice */
-	assert (_decoded_video.empty() || frame != _decoded_video.back().frame);
-
-	/* Fill in gaps */
-	/* XXX: 3D */
-
-	while (!_decoded_video.empty () && (_decoded_video.back().frame + 1) < frame) {
-#ifdef DCPOMATIC_DEBUG
-		test_gaps++;
-#endif
-		_decoded_video.push_back (
-			ContentVideo (
-				_decoded_video.back().image,
-				_decoded_video.back().eyes,
-				_decoded_video.back().frame + 1
-				)
-			);
+	if (ignore ()) {
+		return;
 	}
-
-	switch (_video_content->video_frame_type ()) {
+
+	switch (_content->video->frame_type ()) {
 	case VIDEO_FRAME_TYPE_2D:
-		_decoded_video.push_back (ContentVideo (image, EYES_BOTH, frame));
+		Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
+		break;
+	case VIDEO_FRAME_TYPE_3D:
+	{
+		/* We receive the same frame index twice for 3D; hence we know which
+		   frame this one is.
+		*/
+		bool const same = (_last_emitted && _last_emitted.get() == frame);
+		Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+		_last_emitted = frame;
 		break;
+	}
 	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
-		_decoded_video.push_back (ContentVideo (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, frame));
+		Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+		frame /= 2;
 		break;
 	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-	{
-		int const half = image->size().width / 2;
-		_decoded_video.push_back (ContentVideo (image->crop (Crop (0, half, 0, 0), true), EYES_LEFT, frame));
-		_decoded_video.push_back (ContentVideo (image->crop (Crop (half, 0, 0, 0), true), EYES_RIGHT, frame));
+		Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
+		Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
 		break;
-	}
 	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-	{
-		int const half = image->size().height / 2;
-		_decoded_video.push_back (ContentVideo (image->crop (Crop (0, 0, 0, half), true), EYES_LEFT, frame));
-		_decoded_video.push_back (ContentVideo (image->crop (Crop (0, 0, half, 0), true), EYES_RIGHT, frame));
+		Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
+		Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
 		break;
-	}
 	case VIDEO_FRAME_TYPE_3D_LEFT:
-		_decoded_video.push_back (ContentVideo (image, EYES_LEFT, frame));
+		Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
 		break;
 	case VIDEO_FRAME_TYPE_3D_RIGHT:
-		_decoded_video.push_back (ContentVideo (image, EYES_RIGHT, frame));
+		Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
 		break;
 	default:
-		assert (false);
+		DCPOMATIC_ASSERT (false);
 	}
+
+	_position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
 }
 
 void
-VideoDecoder::seek (ContentTime, bool)
+VideoDecoder::seek ()
 {
-	_decoded_video.clear ();
+	_position = ContentTime();
+	_last_emitted.reset ();
 }
-
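
As a reading aid for the hunk above: the new emit() no longer pushes cropped images onto _decoded_video as the old video() did; it maps the raw frame index it receives from the decoder to a content frame plus an eye and emits that via the Data signal. The standalone sketch below illustrates only that index/eye mapping. It is not DCP-o-matic's real API: the FrameType and Eyes enums, the Frame typedef and the map_frame() helper are simplified stand-ins invented for illustration, and the LEFT_RIGHT / TOP_BOTTOM layouts (which also carry a Part) are omitted.

#include <cstdint>
#include <iostream>
#include <optional>
#include <utility>

/* Simplified stand-ins for the frame-type and eye enumerations used in the diff. */
enum class FrameType { TWO_D, THREE_D, THREE_D_ALTERNATE, THREE_D_LEFT, THREE_D_RIGHT };
enum class Eyes { BOTH, LEFT, RIGHT };
typedef int64_t Frame;

/* Map a raw decoder frame index to the content frame and eye it represents,
   mirroring the switch in the new emit().  For 3D_ALTERNATE the raw index counts
   left and right images separately, so it is halved and its parity picks the eye;
   for "true" 3D the same index arrives twice and the second arrival is taken to
   be the right eye (this is what _last_emitted tracks in the diff). */
std::pair<Frame, Eyes>
map_frame (FrameType type, Frame raw, std::optional<Frame>& last_emitted)
{
	switch (type) {
	case FrameType::THREE_D:
	{
		bool const same = last_emitted && *last_emitted == raw;
		last_emitted = raw;
		return { raw, same ? Eyes::RIGHT : Eyes::LEFT };
	}
	case FrameType::THREE_D_ALTERNATE:
		/* Even raw indices are the left eye, odd the right; both belong to frame raw / 2 */
		return { raw / 2, (raw % 2) ? Eyes::RIGHT : Eyes::LEFT };
	case FrameType::THREE_D_LEFT:
		return { raw, Eyes::LEFT };
	case FrameType::THREE_D_RIGHT:
		return { raw, Eyes::RIGHT };
	case FrameType::TWO_D:
	default:
		return { raw, Eyes::BOTH };
	}
}

int main ()
{
	std::optional<Frame> last;
	/* 3D_ALTERNATE: raw indices 0..3 become frame 0 left, 0 right, 1 left, 1 right,
	   matching the numbering in the doc comment added above emit(). */
	for (Frame raw = 0; raw < 4; ++raw) {
		auto const m = map_frame (FrameType::THREE_D_ALTERNATE, raw, last);
		std::cout << "raw " << raw << " -> frame " << m.first
			  << (m.second == Eyes::LEFT ? " left\n" : " right\n");
	}
	return 0;
}

Built as C++17 and run, the loop prints raw indices 0 to 3 mapping to frame 0 left, frame 0 right, frame 1 left and frame 1 right, which is the behaviour the new 3D_ALTERNATE case (frame / 2, parity selects the eye) encodes.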