X-Git-Url: https://git.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fvideo_decoder.cc;h=cf21f885a3fbc6af45ba2efde1cfc18ad645eb9b;hp=8a8a457476e2cacd5056189366bed0c888ffbaf1;hb=c103d8c1306e5fb3937b3a6c430a3fff32653fa3;hpb=8412288821bdf9808bf11fec061068baf310b67f

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index 8a8a45747..cf21f885a 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -18,31 +18,35 @@
 
 */
 
-#include "video_decoder.h"
-#include "image.h"
-#include "raw_image_proxy.h"
+
+#include "compose.hpp"
 #include "film.h"
+#include "frame_interval_checker.h"
+#include "image.h"
+#include "j2k_image_proxy.h"
 #include "log.h"
-#include "compose.hpp"
-#include 
+#include "raw_image_proxy.h"
+#include "video_decoder.h"
 #include <iostream>
 
 #include "i18n.h"
 
+
 using std::cout;
-using std::list;
-using std::max;
-using std::back_inserter;
-using boost::shared_ptr;
-using boost::optional;
-
-VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c, shared_ptr<Log> log)
-	: DecoderPart (parent, log)
+using std::dynamic_pointer_cast;
+using std::shared_ptr;
+using namespace dcpomatic;
+
+
+VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
+	: DecoderPart (parent)
 	, _content (c)
+	, _frame_interval_checker (new FrameIntervalChecker())
 {
 
 }
 
+
 /** Called by decoder classes when they have a video frame ready.
  *  @param frame Frame index within the content; this does not take into account 3D
  *  so for 3D_ALTERNATE this value goes:
@@ -53,47 +57,135 @@ VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c, shared
  *  and so on.
  */
 void
-VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
+VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, Frame decoder_frame)
 {
 	if (ignore ()) {
 		return;
 	}
 
-	/* Work out what we are going to emit next */
-	switch (_content->video->frame_type ()) {
-	case VIDEO_FRAME_TYPE_2D:
-		Data (ContentVideo (image, VideoFrame (frame, EYES_BOTH), PART_WHOLE));
+	auto const afr = _content->active_video_frame_rate(film);
+	auto const vft = _content->video->frame_type();
+
+	auto frame_time = ContentTime::from_frames (decoder_frame, afr);
+
+	/* Do some heuristics to try and spot the case where the user sets content to 3D
+	 * when it is not.  We try to tell this by looking at the differences in time between
+	 * the first few frames.  Real 3D content should have two frames for each timestamp.
+	 */
+	if (_frame_interval_checker) {
+		_frame_interval_checker->feed (frame_time, afr);
+		if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VideoFrameType::THREE_D) {
+			boost::throw_exception (
+				DecodeError(
+					String::compose(
+						_("The content file %1 is set as 3D but does not appear to contain 3D images.  Please set it to 2D.  "
+						  "You can still make a 3D DCP from this content by ticking the 3D option in the DCP video tab."),
+						_content->path(0)
+					)
+				)
+			);
+		}
+
+		if (_frame_interval_checker->guess() != FrameIntervalChecker::AGAIN) {
+			_frame_interval_checker.reset ();
+		}
+	}
+
+	Frame frame;
+	Eyes eyes = Eyes::BOTH;
+	if (!_position) {
+		/* This is the first data we have received since initialisation or seek.  Set
+		   the position based on the frame that was given.  After this first time
+		   we just count frames, since (as with audio) it seems that ContentTimes
+		   are unreliable from FFmpegDecoder.  They are much better than audio times
+		   but still we get the occasional one which is duplicated.  In this case
+		   ffmpeg seems to carry on regardless, processing the video frame as normal.
+		   If we drop the frame with the duplicated timestamp we obviously lose sync.
+		*/
+
+		if (vft == VideoFrameType::THREE_D_ALTERNATE) {
+			frame = decoder_frame / 2;
+			eyes = (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
+		} else {
+			frame = decoder_frame;
+			if (vft == VideoFrameType::THREE_D) {
+				auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+				/* At the moment only DCP decoders producers VideoFrameType::THREE_D, so only the J2KImageProxy
+				 * knows which eye it is.
+				 */
+				if (j2k && j2k->eye()) {
+					eyes = j2k->eye().get() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
+				}
+			}
+		}
+
+		_position = ContentTime::from_frames (frame, afr);
+	} else {
+		if (vft == VideoFrameType::THREE_D) {
+			auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+			if (j2k && j2k->eye()) {
+				if (j2k->eye() == dcp::Eye::LEFT) {
+					frame = _position->frames_round(afr) + 1;
+					eyes = Eyes::LEFT;
+				} else {
+					frame = _position->frames_round(afr);
+					eyes = Eyes::RIGHT;
+				}
+			} else {
+				/* This should not happen; see above */
+				frame = _position->frames_round(afr) + 1;
+			}
+		} else if (vft == VideoFrameType::THREE_D_ALTERNATE) {
+			DCPOMATIC_ASSERT (_last_emitted_eyes);
+			if (_last_emitted_eyes.get() == Eyes::RIGHT) {
+				frame = _position->frames_round(afr) + 1;
+				eyes = Eyes::LEFT;
+			} else {
+				frame = _position->frames_round(afr);
+				eyes = Eyes::RIGHT;
+			}
+		} else {
+			frame = _position->frames_round(afr) + 1;
+		}
+	}
+
+	switch (vft) {
+	case VideoFrameType::TWO_D:
+	case VideoFrameType::THREE_D:
+		Data (ContentVideo (image, frame, eyes, Part::WHOLE));
 		break;
-	case VIDEO_FRAME_TYPE_3D:
+	case VideoFrameType::THREE_D_ALTERNATE:
 	{
-		/* We receive the same frame index twice for 3D; hence we know which
-		   frame this one is.
-		*/
-		bool const same = (_last_emitted && _last_emitted.get() == frame);
-		Data (ContentVideo (image, VideoFrame (frame, same ? EYES_RIGHT : EYES_LEFT), PART_WHOLE));
-		_last_emitted = frame;
+		Data (ContentVideo (image, frame, eyes, Part::WHOLE));
+		_last_emitted_eyes = eyes;
 		break;
 	}
-	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
-		Data (ContentVideo (image, VideoFrame (frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT), PART_WHOLE));
-		break;
-	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-		Data (ContentVideo (image, VideoFrame (frame, EYES_LEFT), PART_LEFT_HALF));
-		Data (ContentVideo (image, VideoFrame (frame, EYES_RIGHT), PART_RIGHT_HALF));
+	case VideoFrameType::THREE_D_LEFT_RIGHT:
+		Data (ContentVideo (image, frame, Eyes::LEFT, Part::LEFT_HALF));
+		Data (ContentVideo (image, frame, Eyes::RIGHT, Part::RIGHT_HALF));
 		break;
-	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-		Data (ContentVideo (image, VideoFrame (frame, EYES_LEFT), PART_TOP_HALF));
-		Data (ContentVideo (image, VideoFrame (frame, EYES_RIGHT), PART_BOTTOM_HALF));
+	case VideoFrameType::THREE_D_TOP_BOTTOM:
+		Data (ContentVideo (image, frame, Eyes::LEFT, Part::TOP_HALF));
+		Data (ContentVideo (image, frame, Eyes::RIGHT, Part::BOTTOM_HALF));
 		break;
-	case VIDEO_FRAME_TYPE_3D_LEFT:
-		Data (ContentVideo (image, VideoFrame (frame, EYES_LEFT), PART_WHOLE));
+	case VideoFrameType::THREE_D_LEFT:
+		Data (ContentVideo (image, frame, Eyes::LEFT, Part::WHOLE));
 		break;
-	case VIDEO_FRAME_TYPE_3D_RIGHT:
-		Data (ContentVideo (image, VideoFrame (frame, EYES_RIGHT), PART_WHOLE));
+	case VideoFrameType::THREE_D_RIGHT:
+		Data (ContentVideo (image, frame, Eyes::RIGHT, Part::WHOLE));
 		break;
 	default:
 		DCPOMATIC_ASSERT (false);
 	}
 
-	_position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
+	_position = ContentTime::from_frames (frame, afr);
+}
+
+
+void
+VideoDecoder::seek ()
+{
+	_position = boost::none;
+	_last_emitted_eyes.reset ();
+	_frame_interval_checker.reset (new FrameIntervalChecker());
 }
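
The new code above guards against content wrongly flagged as 3D by watching the spacing of the first few frame timestamps. As a rough illustration only (a hypothetical stand-in, not the real FrameIntervalChecker declared in frame_interval_checker.h), a checker of that kind can be sketched like this: genuine 3D material delivers two frames per timestamp, so if almost none of the gaps between consecutive frame times are near zero the stream is probably 2D.

// Hypothetical sketch of a frame-interval heuristic (not dcpomatic code).
// Feed it each frame's time in seconds; after a few samples it guesses
// whether the stream looks like genuine 3D (two frames per timestamp).
#include <cmath>
#include <vector>

class NaiveIntervalChecker
{
public:
	enum class Guess { AGAIN, PROBABLY_3D, PROBABLY_NOT_3D };

	void feed (double frame_time_seconds, double frame_rate)
	{
		if (!_times.empty()) {
			/* A gap much smaller than one frame period means two frames shared a timestamp */
			if (std::abs(frame_time_seconds - _times.back()) < 0.25 / frame_rate) {
				++_near_zero_gaps;
			}
		}
		_times.push_back (frame_time_seconds);
	}

	Guess guess () const
	{
		if (_times.size() < 8) {
			return Guess::AGAIN;  /* not enough samples yet */
		}
		/* Real 3D should repeat roughly every other timestamp */
		return _near_zero_gaps * 3 >= static_cast<int>(_times.size()) ? Guess::PROBABLY_3D : Guess::PROBABLY_NOT_3D;
	}

private:
	std::vector<double> _times;
	int _near_zero_gaps = 0;
};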
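
The first-frame branch of the new emit() also maps a raw decoder frame index to a content frame and eye. A minimal, self-contained sketch of that mapping for 3D-alternate content, using made-up names rather than dcpomatic's types and assuming left-eye frames come first:

// Illustrative only: for 3D-alternate content, two consecutive decoder
// frames make up one stereoscopic content frame, left eye first.
#include <cstdint>
#include <iostream>
#include <utility>

enum class Eye { LEFT, RIGHT };

std::pair<int64_t, Eye>
alternate_frame_and_eye (int64_t decoder_frame)
{
	/* Mirrors the decoder_frame / 2 and decoder_frame % 2 logic in emit() */
	return std::make_pair (decoder_frame / 2, (decoder_frame % 2) ? Eye::RIGHT : Eye::LEFT);
}

int main ()
{
	for (int64_t f = 0; f < 6; ++f) {
		auto const mapped = alternate_frame_and_eye (f);
		std::cout << "decoder frame " << f
			  << " -> content frame " << mapped.first
			  << (mapped.second == Eye::LEFT ? " LEFT" : " RIGHT") << "\n";
	}
	return 0;
}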