X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;ds=inline;f=src%2Flib%2Fvideo_decoder.cc;h=cf21f885a3fbc6af45ba2efde1cfc18ad645eb9b;hb=c103d8c1306e5fb3937b3a6c430a3fff32653fa3;hp=4b46b111f743922211fc42596120ebb56bd386ae;hpb=8fedaaa75c4586a4cc7ffb393bd71d1fdb091dc8;p=dcpomatic.git

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index 4b46b111f..cf21f885a 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2020 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -18,25 +18,26 @@
 */
 
-#include "video_decoder.h"
-#include "image.h"
-#include "raw_image_proxy.h"
+
+#include "compose.hpp"
 #include "film.h"
-#include "log.h"
 #include "frame_interval_checker.h"
-#include "compose.hpp"
+#include "image.h"
+#include "j2k_image_proxy.h"
+#include "log.h"
+#include "raw_image_proxy.h"
+#include "video_decoder.h"
 #include <iostream>
 
 #include "i18n.h"
 
+
 using std::cout;
-using std::list;
-using std::max;
-using std::back_inserter;
+using std::dynamic_pointer_cast;
 using std::shared_ptr;
-using boost::optional;
 using namespace dcpomatic;
 
+
 VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
 	: DecoderPart (parent)
 	, _content (c)
@@ -45,6 +46,7 @@ VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
 
 }
 
+
 /** Called by decoder classes when they have a video frame ready.
  *  @param frame Frame index within the content; this does not take into account 3D
  *  so for 3D_ALTERNATE this value goes:
@@ -61,10 +63,10 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 		return;
 	}
 
-	double const afr = _content->active_video_frame_rate(film);
-	VideoFrameType const vft = _content->video->frame_type();
+	auto const afr = _content->active_video_frame_rate(film);
+	auto const vft = _content->video->frame_type();
 
-	ContentTime frame_time = ContentTime::from_frames (decoder_frame, afr);
+	auto frame_time = ContentTime::from_frames (decoder_frame, afr);
 
 	/* Do some heuristics to try and spot the case where the user sets content to 3D
 	 * when it is not.  We try to tell this by looking at the differences in time between
@@ -94,21 +96,46 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 	if (!_position) {
 		/* This is the first data we have received since initialisation or seek.  Set
 		   the position based on the frame that was given.  After this first time
-		   we just cound frames, since (as with audio) it seems that ContentTimes
+		   we just count frames, since (as with audio) it seems that ContentTimes
 		   are unreliable from FFmpegDecoder.  They are much better than audio times
 		   but still we get the occasional one which is duplicated.  In this case
 		   ffmpeg seems to carry on regardless, processing the video frame as normal.
 		   If we drop the frame with the duplicated timestamp we obviously lose sync.
 		*/
-		_position = ContentTime::from_frames (decoder_frame, afr);
+
 		if (vft == VideoFrameType::THREE_D_ALTERNATE) {
 			frame = decoder_frame / 2;
-			_last_emitted_eyes = Eyes::RIGHT;
+			eyes = (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
 		} else {
 			frame = decoder_frame;
+			if (vft == VideoFrameType::THREE_D) {
+				auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+				/* At the moment only DCP decoders produce VideoFrameType::THREE_D, so only the J2KImageProxy
+				 * knows which eye it is.
+				 */
+				if (j2k && j2k->eye()) {
+					eyes = j2k->eye().get() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
+				}
+			}
 		}
+
+		_position = ContentTime::from_frames (frame, afr);
 	} else {
-		if (vft == VideoFrameType::THREE_D || vft == VideoFrameType::THREE_D_ALTERNATE) {
+		if (vft == VideoFrameType::THREE_D) {
+			auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+			if (j2k && j2k->eye()) {
+				if (j2k->eye() == dcp::Eye::LEFT) {
+					frame = _position->frames_round(afr) + 1;
+					eyes = Eyes::LEFT;
+				} else {
+					frame = _position->frames_round(afr);
+					eyes = Eyes::RIGHT;
+				}
+			} else {
+				/* This should not happen; see above */
+				frame = _position->frames_round(afr) + 1;
+			}
+		} else if (vft == VideoFrameType::THREE_D_ALTERNATE) {
 			DCPOMATIC_ASSERT (_last_emitted_eyes);
 			if (_last_emitted_eyes.get() == Eyes::RIGHT) {
 				frame = _position->frames_round(afr) + 1;
@@ -124,15 +151,9 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 
 	switch (vft) {
 	case VideoFrameType::TWO_D:
-		Data (ContentVideo (image, frame, Eyes::BOTH, Part::WHOLE));
-		break;
 	case VideoFrameType::THREE_D:
-	{
 		Data (ContentVideo (image, frame, eyes, Part::WHOLE));
-		_last_emitted_frame = frame;
-		_last_emitted_eyes = eyes;
 		break;
-	}
 	case VideoFrameType::THREE_D_ALTERNATE:
 	{
 		Data (ContentVideo (image, frame, eyes, Part::WHOLE));
@@ -160,11 +181,11 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 	_position = ContentTime::from_frames (frame, afr);
 }
 
+
 void
 VideoDecoder::seek ()
 {
 	_position = boost::none;
-	_last_emitted_frame.reset ();
 	_last_emitted_eyes.reset ();
 	_frame_interval_checker.reset (new FrameIntervalChecker());
 }
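
For context, the first-frame handling for VideoFrameType::THREE_D_ALTERNATE now derives both the content frame index and the eye from the raw decoder frame, rather than assuming a RIGHT eye was the last one emitted. A minimal standalone sketch of that mapping, using simplified stand-in types rather than the real DCP-o-matic classes:

// Sketch only: decoder frames 0,1,2,3,... fold into content frames
// 0L,0R,1L,1R,... as in the THREE_D_ALTERNATE branch of the patch.
#include <cassert>
#include <utility>

enum class Eyes { LEFT, RIGHT };

std::pair<int, Eyes> alternate_frame_and_eye(int decoder_frame)
{
	// Two decoder frames make up one content frame; even indices are
	// left eyes, odd indices are right eyes.
	return {decoder_frame / 2, (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT};
}

int main()
{
	assert((alternate_frame_and_eye(0) == std::pair<int, Eyes>{0, Eyes::LEFT}));
	assert((alternate_frame_and_eye(1) == std::pair<int, Eyes>{0, Eyes::RIGHT}));
	assert((alternate_frame_and_eye(4) == std::pair<int, Eyes>{2, Eyes::LEFT}));
	return 0;
}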
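
The other change of substance is the new VideoFrameType::THREE_D branch, which reads the eye from the J2KImageProxy (per the patch comment, only the DCP decoder currently produces THREE_D) and advances the content frame only on a LEFT image. A rough illustration of that counting, again with hypothetical stand-in types rather than DCP-o-matic's own:

// Sketch only: a LEFT image starts the next content frame, a RIGHT image
// completes the frame we are already positioned at.
#include <cassert>

enum class Eye { LEFT, RIGHT };

struct Emitted {
	int frame;
	Eye eye;
};

// position_frame plays the role of _position->frames_round(afr) in the patch.
Emitted next_3d_emission(int position_frame, Eye eye_from_proxy)
{
	if (eye_from_proxy == Eye::LEFT) {
		return {position_frame + 1, Eye::LEFT};
	}
	return {position_frame, Eye::RIGHT};
}

int main()
{
	// Having just emitted frame 7, a LEFT image begins frame 8...
	auto left = next_3d_emission(7, Eye::LEFT);
	assert(left.frame == 8 && left.eye == Eye::LEFT);

	// ...and the RIGHT image that follows (position now 8) stays on frame 8.
	auto right = next_3d_emission(8, Eye::RIGHT);
	assert(right.frame == 8 && right.eye == Eye::RIGHT);
	return 0;
}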