X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fvideo_decoder.cc;h=3625e074fba0460fc63729a00bc30c446f7d769b;hb=14cccb179fff7bbbf422e13f9d2e3264239c93c7;hp=4c05d5fcdf335e931414e72e569125b4ef535fe2;hpb=8dd455ba867122056e2093e259a9a045aeeea451;p=dcpomatic.git

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index 4c05d5fcd..3625e074f 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,100 +1,106 @@
 /*
-    Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
 
-    This program is free software; you can redistribute it and/or modify
+    This file is part of DCP-o-matic.
+
+    DCP-o-matic is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
 
-    This program is distributed in the hope that it will be useful,
+    DCP-o-matic is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
 
 */
 
 #include "video_decoder.h"
-#include "subtitle.h"
-#include "film.h"
 #include "image.h"
+#include "raw_image_proxy.h"
+#include "film.h"
 #include "log.h"
-#include "options.h"
-#include "job.h"
+#include "compose.hpp"
+#include
+#include
+#include "i18n.h"
+
+using std::cout;
+using std::list;
+using std::max;
+using std::back_inserter;
 using boost::shared_ptr;
 using boost::optional;
 
-VideoDecoder::VideoDecoder (shared_ptr f, shared_ptr o, Job* j)
-	: Decoder (f, o, j)
-	, _video_frame (0)
-	, _last_source_frame (0)
+VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr c, shared_ptr log)
+	: DecoderPart (parent, log)
+	, _content (c)
 {
 
 }
 
-/** Called by subclasses to tell the world that some video data is ready.
- *  We find a subtitle then emit it for listeners.
- *  @param frame to emit.
+/** Called by decoder classes when they have a video frame ready.
+ *  @param frame Frame index within the content; this does not take into account 3D
+ *  so for 3D_ALTERNATE this value goes:
+ *      0: frame 0 left
+ *      1: frame 0 right
+ *      2: frame 1 left
+ *      3: frame 1 right
+ *  and so on.
  */
 void
-VideoDecoder::emit_video (shared_ptr image, SourceFrame f)
+VideoDecoder::emit (shared_ptr image, Frame frame)
 {
-	shared_ptr sub;
-	if (_timed_subtitle && _timed_subtitle->displayed_at (f / _film->frames_per_second())) {
-		sub = _timed_subtitle->subtitle ();
+	if (ignore ()) {
+		return;
 	}
 
-	signal_video (image, sub);
-	_last_source_frame = f;
-}
-
-void
-VideoDecoder::repeat_last_video ()
-{
-	if (!_last_image) {
-		_last_image.reset (new SimpleImage (pixel_format(), native_size(), false));
-		_last_image->make_black ();
+	switch (_content->video->frame_type ()) {
+	case VIDEO_FRAME_TYPE_2D:
+		Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
+		break;
+	case VIDEO_FRAME_TYPE_3D:
+	{
+		/* We receive the same frame index twice for 3D; hence we know which
+		   frame this one is.
+		*/
+		bool const same = (_last_emitted && _last_emitted.get() == frame);
+		Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+		_last_emitted = frame;
+		break;
 	}
-
-	signal_video (_last_image, _last_subtitle);
-}
-
-void
-VideoDecoder::signal_video (shared_ptr image, shared_ptr sub)
-{
-	TIMING ("Decoder emits %1", _video_frame);
-	Video (image, sub);
-	++_video_frame;
-
-	_last_image = image;
-	_last_subtitle = sub;
-}
-
-void
-VideoDecoder::emit_subtitle (shared_ptr s)
-{
-	_timed_subtitle = s;
-
-	if (_timed_subtitle) {
-		Position const p = _timed_subtitle->subtitle()->position ();
-		_timed_subtitle->subtitle()->set_position (Position (p.x - _film->crop().left, p.y - _film->crop().top));
+	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+		Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+		frame /= 2;
+		break;
+	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+		Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
+		Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+		break;
+	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+		Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
+		Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+		break;
+	case VIDEO_FRAME_TYPE_3D_LEFT:
+		Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+		break;
+	case VIDEO_FRAME_TYPE_3D_RIGHT:
+		Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+		break;
+	default:
+		DCPOMATIC_ASSERT (false);
 	}
-}
 
-void
-VideoDecoder::set_subtitle_stream (shared_ptr s)
-{
-	_subtitle_stream = s;
+	_position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
 }
 
 void
-VideoDecoder::set_progress () const
+VideoDecoder::seek ()
 {
-	if (_job && _film->length()) {
-		_job->set_progress (float (_video_frame) / _film->length().get());
-	}
+	_position = ContentTime();
+	_last_emitted.reset ();
 }
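
The switch in VideoDecoder::emit() above turns a raw decoder frame index into a ContentVideo carrying an eye (left, right or both) and an image part. As a reading aid, here is a small self-contained C++ sketch of the same index arithmetic for the 2D and 3D_ALTERNATE cases; it is not part of the patch, and the names FrameType, Mapped and map_frame are invented for this illustration rather than taken from DCP-o-matic.

/* Illustrative sketch only -- not part of the patch above.  The names
   (FrameType, Mapped, map_frame) are invented for this example; they
   mimic the index arithmetic of VideoDecoder::emit() without using
   any DCP-o-matic types. */
#include <cassert>
#include <iostream>

enum Eyes { EYES_BOTH, EYES_LEFT, EYES_RIGHT };
enum FrameType { TYPE_2D, TYPE_3D_ALTERNATE };

struct Mapped {
	long frame;  /* content frame index after any 3D adjustment */
	Eyes eyes;   /* which eye the emitted image belongs to */
};

/* Map a raw decoder frame index to (frame, eye), following the same
   arithmetic as the 2D and 3D_ALTERNATE cases of the switch in emit(). */
static Mapped map_frame (FrameType type, long raw)
{
	switch (type) {
	case TYPE_2D:
		return Mapped { raw, EYES_BOTH };
	case TYPE_3D_ALTERNATE:
		/* Even raw indices are the left eye, odd ones the right; the
		   content frame index is the raw index divided by two. */
		return Mapped { raw / 2, (raw % 2) ? EYES_RIGHT : EYES_LEFT };
	}
	assert (false);
	return Mapped { 0, EYES_BOTH };
}

int main ()
{
	/* Reproduce the sequence given in the patch's doc comment:
	   0: frame 0 left, 1: frame 0 right, 2: frame 1 left, 3: frame 1 right */
	for (long raw = 0; raw < 4; ++raw) {
		Mapped m = map_frame (TYPE_3D_ALTERNATE, raw);
		std::cout << raw << ": frame " << m.frame << " "
			  << (m.eyes == EYES_RIGHT ? "right" : "left") << "\n";
	}
	return 0;
}

Compiled and run, this prints the same sequence that the patch's doc comment describes for 3D_ALTERNATE content.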