/*
    Copyright (C) 2012-2020 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.
#include "raw_image_proxy.h"
#include "film.h"
#include "log.h"
#include "frame_interval_checker.h"
#include "compose.hpp"
#include <boost/foreach.hpp>
#include <iostream>

using std::list;
using std::max;
using std::back_inserter;
using std::shared_ptr;
using boost::optional;
using namespace dcpomatic;
-VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c, shared_ptr<Log> log)
- : DecoderPart (parent, log)
+VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
+ : DecoderPart (parent)
, _content (c)
+ , _frame_interval_checker (new FrameIntervalChecker())
{
}
* and so on.
*/
void
-VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
+VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, Frame decoder_frame)
{
if (ignore ()) {
return;
}
- FrameRateChange const frc = _content->film()->active_frame_rate_change (_content->position());
- for (int i = 0; i < frc.repeat; ++i) {
- switch (_content->video->frame_type ()) {
- case VIDEO_FRAME_TYPE_2D:
- Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
- break;
- case VIDEO_FRAME_TYPE_3D:
- {
- /* We receive the same frame index twice for 3D; hence we know which
- frame this one is.
- */
- bool const same = (_last_emitted && _last_emitted.get() == frame);
- Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
- _last_emitted = frame;
- break;
+ double const afr = _content->active_video_frame_rate(film);
+ VideoFrameType const vft = _content->video->frame_type();
+
+ ContentTime frame_time = ContentTime::from_frames (decoder_frame, afr);
+
+ /* Do some heuristics to try and spot the case where the user sets content to 3D
+ * when it is not. We try to tell this by looking at the differences in time between
+ * the first few frames. Real 3D content should have two frames for each timestamp.
+ */
+ if (_frame_interval_checker) {
+ _frame_interval_checker->feed (frame_time, afr);
+ if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VIDEO_FRAME_TYPE_3D) {
+ boost::throw_exception (
+ DecodeError(
+ String::compose(
+ _("The content file %1 is set as 3D but does not appear to contain 3D images. Please set it to 2D. "
+ "You can still make a 3D DCP from this content by ticking the 3D option in the DCP video tab."),
+ _content->path(0)
+ )
+ )
+ );
+ }
+
+ if (_frame_interval_checker->guess() != FrameIntervalChecker::AGAIN) {
+ _frame_interval_checker.reset ();
+ }
+ }
+
+ Frame frame;
+ Eyes eyes = EYES_BOTH;
+ if (!_position) {
+ /* This is the first data we have received since initialisation or seek. Set
+ the position based on the frame that was given. After this first time
+ we just cound frames, since (as with audio) it seems that ContentTimes
+ are unreliable from FFmpegDecoder. They are much better than audio times
+ but still we get the occasional one which is duplicated. In this case
+ ffmpeg seems to carry on regardless, processing the video frame as normal.
+ If we drop the frame with the duplicated timestamp we obviously lose sync.
+ */
+ _position = ContentTime::from_frames (decoder_frame, afr);
+ if (vft == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+ frame = decoder_frame / 2;
+ _last_emitted_eyes = EYES_RIGHT;
+ } else {
+ frame = decoder_frame;
}
- case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
- frame /= 2;
- break;
- case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
- Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
- Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
- break;
- case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
- Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
- Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
- break;
- case VIDEO_FRAME_TYPE_3D_LEFT:
- Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
- break;
- case VIDEO_FRAME_TYPE_3D_RIGHT:
- Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
- break;
- default:
- DCPOMATIC_ASSERT (false);
+ } else {
+ if (vft == VIDEO_FRAME_TYPE_3D || vft == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+ DCPOMATIC_ASSERT (_last_emitted_eyes);
+ if (_last_emitted_eyes.get() == EYES_RIGHT) {
+ frame = _position->frames_round(afr) + 1;
+ eyes = EYES_LEFT;
+ } else {
+ frame = _position->frames_round(afr);
+ eyes = EYES_RIGHT;
+ }
+ } else {
+ frame = _position->frames_round(afr) + 1;
}
+ }
- ++frame;
+ switch (vft) {
+ case VIDEO_FRAME_TYPE_2D:
+ Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
+ break;
+ case VIDEO_FRAME_TYPE_3D:
+ {
+ Data (ContentVideo (image, frame, eyes, PART_WHOLE));
+ _last_emitted_frame = frame;
+ _last_emitted_eyes = eyes;
+ break;
+ }
+ case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+ {
+ Data (ContentVideo (image, frame, eyes, PART_WHOLE));
+ _last_emitted_eyes = eyes;
+ break;
+ }
+ case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+ break;
+ case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+ break;
+ case VIDEO_FRAME_TYPE_3D_LEFT:
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+ break;
+ case VIDEO_FRAME_TYPE_3D_RIGHT:
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+ break;
+ default:
+ DCPOMATIC_ASSERT (false);
}
- _position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
+ _position = ContentTime::from_frames (frame, afr);
+}
+
+void
+VideoDecoder::seek ()
+{
+ _position = boost::none;
+ _last_emitted_frame.reset ();
+ _last_emitted_eyes.reset ();
+ _frame_interval_checker.reset (new FrameIntervalChecker());
}