X-Git-Url: https://git.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fvideo_decoder.cc;h=cf21f885a3fbc6af45ba2efde1cfc18ad645eb9b;hp=19a99419753f402413652248a82e0df80a6905ee;hb=c103d8c1306e5fb3937b3a6c430a3fff32653fa3;hpb=98342fb53eae4d32440fc69c279f2ca0fef785b5

diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index 19a994197..cf21f885a 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -18,32 +18,35 @@
 
 */
 
-#include "video_decoder.h"
-#include "image.h"
-#include "raw_image_proxy.h"
+
+#include "compose.hpp"
 #include "film.h"
+#include "frame_interval_checker.h"
+#include "image.h"
+#include "j2k_image_proxy.h"
 #include "log.h"
-#include "compose.hpp"
-#include <boost/foreach.hpp>
+#include "raw_image_proxy.h"
+#include "video_decoder.h"
 #include <iostream>
 
 #include "i18n.h"
 
+
 using std::cout;
-using std::list;
-using std::max;
-using std::back_inserter;
-using boost::shared_ptr;
-using boost::optional;
+using std::dynamic_pointer_cast;
+using std::shared_ptr;
 using namespace dcpomatic;
 
+
 VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
 	: DecoderPart (parent)
 	, _content (c)
+	, _frame_interval_checker (new FrameIntervalChecker())
 {
 
 }
 
+
 /** Called by decoder classes when they have a video frame ready.
  *  @param frame Frame index within the content; this does not take into account 3D
  *  so for 3D_ALTERNATE this value goes:
@@ -60,89 +63,116 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 		return;
 	}
 
-	double const afr = _content->active_video_frame_rate(film);
+	auto const afr = _content->active_video_frame_rate(film);
+	auto const vft = _content->video->frame_type();
+
+	auto frame_time = ContentTime::from_frames (decoder_frame, afr);
+
+	/* Do some heuristics to try and spot the case where the user sets content to 3D
+	 * when it is not. We try to tell this by looking at the differences in time between
+	 * the first few frames. Real 3D content should have two frames for each timestamp.
+	 */
+	if (_frame_interval_checker) {
+		_frame_interval_checker->feed (frame_time, afr);
+		if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VideoFrameType::THREE_D) {
+			boost::throw_exception (
+				DecodeError(
+					String::compose(
+						_("The content file %1 is set as 3D but does not appear to contain 3D images. Please set it to 2D. "
+						  "You can still make a 3D DCP from this content by ticking the 3D option in the DCP video tab."),
+						_content->path(0)
+						)
+					)
+				);
+		}
+
+		if (_frame_interval_checker->guess() != FrameIntervalChecker::AGAIN) {
+			_frame_interval_checker.reset ();
+		}
+	}
 
 	Frame frame;
+	Eyes eyes = Eyes::BOTH;
 	if (!_position) {
 		/* This is the first data we have received since initialisation or seek. Set
 		   the position based on the frame that was given. After this first time
-		   we just cound frames, since (as with audio) it seems that ContentTimes
+		   we just count frames, since (as with audio) it seems that ContentTimes
 		   are unreliable from FFmpegDecoder. They are much better than audio times
 		   but still we get the occasional one which is duplicated. In this case
 		   ffmpeg seems to carry on regardless, processing the video frame as normal.
 		   If we drop the frame with the duplicated timestamp we obviously lose sync.
 		*/
-		_position = ContentTime::from_frames (decoder_frame, afr);
-		if (_content->video->frame_type() == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+
+		if (vft == VideoFrameType::THREE_D_ALTERNATE) {
 			frame = decoder_frame / 2;
-			_last_emitted_eyes = EYES_RIGHT;
+			eyes = (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
 		} else {
 			frame = decoder_frame;
+			if (vft == VideoFrameType::THREE_D) {
+				auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+				/* At the moment only DCP decoders produce VideoFrameType::THREE_D, so only the J2KImageProxy
+				 * knows which eye it is.
+				 */
+				if (j2k && j2k->eye()) {
+					eyes = j2k->eye().get() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
+				}
+			}
 		}
+
+		_position = ContentTime::from_frames (frame, afr);
 	} else {
-		if (_content->video->frame_type() == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+		if (vft == VideoFrameType::THREE_D) {
+			auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+			if (j2k && j2k->eye()) {
+				if (j2k->eye() == dcp::Eye::LEFT) {
+					frame = _position->frames_round(afr) + 1;
+					eyes = Eyes::LEFT;
+				} else {
+					frame = _position->frames_round(afr);
+					eyes = Eyes::RIGHT;
+				}
+			} else {
+				/* This should not happen; see above */
+				frame = _position->frames_round(afr) + 1;
+			}
+		} else if (vft == VideoFrameType::THREE_D_ALTERNATE) {
 			DCPOMATIC_ASSERT (_last_emitted_eyes);
-			if (_last_emitted_eyes.get() == EYES_RIGHT) {
+			if (_last_emitted_eyes.get() == Eyes::RIGHT) {
 				frame = _position->frames_round(afr) + 1;
+				eyes = Eyes::LEFT;
 			} else {
 				frame = _position->frames_round(afr);
+				eyes = Eyes::RIGHT;
 			}
 		} else {
 			frame = _position->frames_round(afr) + 1;
 		}
 	}
 
-	switch (_content->video->frame_type ()) {
-	case VIDEO_FRAME_TYPE_2D:
-		Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
-		break;
-	case VIDEO_FRAME_TYPE_3D:
-	{
-		/* We should receive the same frame index twice for 3D; hence we know which
-		   frame this one is.
-		*/
-		bool const same = (_last_emitted_frame && _last_emitted_frame.get() == frame);
-		if (!same && _last_emitted_eyes && *_last_emitted_eyes == EYES_LEFT) {
-			/* We just got a new frame index but the last frame was left-eye; it looks like
-			   this content is not really 3D.
-			*/
-			boost::throw_exception (
-				DecodeError(
-					String::compose(
-						_("The content file %1 is set as 3D but does not appear to contain 3D images. Please set it to 2D. "
-						  "You can still make a 3D DCP from this content by ticking the 3D option in the DCP video tab."),
-						_content->path(0)
-						)
-					)
-				);
-		}
-		Eyes const eyes = same ? EYES_RIGHT : EYES_LEFT;
-		Data (ContentVideo (image, frame, eyes, PART_WHOLE));
-		_last_emitted_frame = frame;
-		_last_emitted_eyes = eyes;
+	switch (vft) {
+	case VideoFrameType::TWO_D:
+	case VideoFrameType::THREE_D:
+		Data (ContentVideo (image, frame, eyes, Part::WHOLE));
 		break;
-	}
-	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+	case VideoFrameType::THREE_D_ALTERNATE:
 	{
-		DCPOMATIC_ASSERT (_last_emitted_eyes);
-		Eyes const eyes = _last_emitted_eyes.get() == EYES_LEFT ? EYES_RIGHT : EYES_LEFT;
-		Data (ContentVideo (image, frame, eyes, PART_WHOLE));
+		Data (ContentVideo (image, frame, eyes, Part::WHOLE));
 		_last_emitted_eyes = eyes;
 		break;
 	}
-	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-		Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
-		Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+	case VideoFrameType::THREE_D_LEFT_RIGHT:
+		Data (ContentVideo (image, frame, Eyes::LEFT, Part::LEFT_HALF));
+		Data (ContentVideo (image, frame, Eyes::RIGHT, Part::RIGHT_HALF));
 		break;
-	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-		Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
-		Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+	case VideoFrameType::THREE_D_TOP_BOTTOM:
+		Data (ContentVideo (image, frame, Eyes::LEFT, Part::TOP_HALF));
+		Data (ContentVideo (image, frame, Eyes::RIGHT, Part::BOTTOM_HALF));
 		break;
-	case VIDEO_FRAME_TYPE_3D_LEFT:
-		Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+	case VideoFrameType::THREE_D_LEFT:
+		Data (ContentVideo (image, frame, Eyes::LEFT, Part::WHOLE));
 		break;
-	case VIDEO_FRAME_TYPE_3D_RIGHT:
-		Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+	case VideoFrameType::THREE_D_RIGHT:
+		Data (ContentVideo (image, frame, Eyes::RIGHT, Part::WHOLE));
 		break;
 	default:
 		DCPOMATIC_ASSERT (false);
@@ -151,10 +181,11 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
 	_position = ContentTime::from_frames (frame, afr);
 }
 
+
 void
 VideoDecoder::seek ()
 {
-	_position = boost::optional<ContentTime>();
-	_last_emitted_frame.reset ();
+	_position = boost::none;
 	_last_emitted_eyes.reset ();
+	_frame_interval_checker.reset (new FrameIntervalChecker());
 }
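
Note on the new 3D heuristic (illustrative, not part of the diff above): the added code feeds each frame's ContentTime to a FrameIntervalChecker and, once the guess is no longer AGAIN, drops the checker; if the guess is PROBABLY_NOT_3D for content marked THREE_D it raises the DecodeError shown. The sketch below shows the underlying idea: genuine 3D content should deliver two frames per timestamp, so roughly half of the successive time gaps should be near zero. The class name SimpleIntervalChecker, the sample size of 16 frames and the gap thresholds are assumptions made for this example and are not taken from DCP-o-matic's actual FrameIntervalChecker.

	// Illustrative sketch only (not DCP-o-matic code): guess whether content marked
	// as 3D really carries two frames per timestamp by inspecting the gaps between
	// successive frame times.  All names and thresholds here are assumptions.

	#include <cstddef>
	#include <cstdint>
	#include <vector>

	enum class Guess3D { AGAIN, PROBABLY_3D, PROBABLY_NOT_3D };

	class SimpleIntervalChecker
	{
	public:
		/* time is in ticks, e.g. the numerator of a ContentTime-like value */
		void feed (int64_t time, double frame_rate, int64_t ticks_per_second)
		{
			if (!_times.empty()) {
				auto const gap = time - _times.back();
				auto const period = static_cast<int64_t>(ticks_per_second / frame_rate);
				/* A gap much smaller than one frame period suggests a second
				   (other-eye) frame sharing the same timestamp. */
				if (gap < period / 4) {
					++_small_gaps;
				}
			}
			_times.push_back (time);
		}

		Guess3D guess () const
		{
			if (_times.size() < _frames_needed) {
				return Guess3D::AGAIN;
			}
			/* Real 3D should give a near-zero gap for roughly every other frame;
			   much less than that and the content is probably 2D. */
			return (_small_gaps * 3 >= _times.size()) ? Guess3D::PROBABLY_3D : Guess3D::PROBABLY_NOT_3D;
		}

	private:
		std::vector<int64_t> _times;
		std::size_t _small_gaps = 0;
		std::size_t _frames_needed = 16;
	};

As in the diff, such a checker only needs to run over the first few frames after a seek or at the start of decoding, after which it can be discarded, so the cost is a handful of comparisons per decode.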