/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
- This program is free software; you can redistribute it and/or modify
+ This file is part of DCP-o-matic.
+
+ DCP-o-matic is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
- This program is distributed in the hope that it will be useful,
+ DCP-o-matic is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "video_decoder.h"
-#include "subtitle.h"
+
+#include "compose.hpp"
#include "film.h"
+#include "frame_interval_checker.h"
#include "image.h"
+#include "j2k_image_proxy.h"
#include "log.h"
-#include "job.h"
+#include "raw_image_proxy.h"
+#include "video_decoder.h"
+#include <iostream>
#include "i18n.h"
-using boost::shared_ptr;
-using boost::optional;
-VideoDecoder::VideoDecoder (shared_ptr<const Film> f)
- : Decoder (f)
- , _video_frame (0)
- , _last_source_time (0)
-{
+using std::cout;
+using std::dynamic_pointer_cast;
+using std::shared_ptr;
+using namespace dcpomatic;
-}
-/** Called by subclasses to tell the world that some video data is ready.
- * We find a subtitle then emit it for listeners.
- * @param image frame to emit.
- * @param t Time of the frame within the source, in seconds.
- */
-void
-VideoDecoder::emit_video (shared_ptr<Image> image, double t)
+VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
+ : DecoderPart (parent)
+ , _content (c)
+ , _frame_interval_checker (new FrameIntervalChecker())
{
- shared_ptr<Subtitle> sub;
- if (_timed_subtitle && _timed_subtitle->displayed_at (t)) {
- sub = _timed_subtitle->subtitle ();
- }
- signal_video (image, false, sub);
- _last_source_time = t;
}
-/** Called by subclasses to repeat the last video frame that we
- * passed to emit_video(). If emit_video hasn't yet been called,
- * we will generate a black frame.
+
+/** Called by decoder classes when they have a video frame ready.
+ * @param decoder_frame Frame index within the content; this does not take into account 3D
+ * so for 3D_ALTERNATE this value goes:
+ * 0: frame 0 left
+ * 1: frame 0 right
+ * 2: frame 1 left
+ * 3: frame 1 right
+ * and so on.
*/
void
-VideoDecoder::repeat_last_video ()
+VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, Frame decoder_frame)
{
- if (!_last_image) {
- _last_image.reset (new SimpleImage (pixel_format(), native_size(), true));
- _last_image->make_black ();
+ if (ignore ()) {
+ return;
}
- signal_video (_last_image, true, _last_subtitle);
-}
-
-/** Emit our signal to say that some video data is ready.
- * @param image Video frame.
- * @param same true if `image' is the same as the last one we emitted.
- * @param sub Subtitle for this frame, or 0.
- */
-void
-VideoDecoder::signal_video (shared_ptr<Image> image, bool same, shared_ptr<Subtitle> sub)
-{
- TIMING (N_("Decoder emits %1"), _video_frame);
- Video (image, same, sub);
- ++_video_frame;
+ auto const afr = _content->active_video_frame_rate(film);
+ auto const vft = _content->video->frame_type();
+
+ auto frame_time = ContentTime::from_frames (decoder_frame, afr);
+
+ /* Do some heuristics to try and spot the case where the user sets content to 3D
+ * when it is not. We try to tell this by looking at the differences in time between
+ * the first few frames. Real 3D content should have two frames for each timestamp.
+ */
+ if (_frame_interval_checker) {
+ _frame_interval_checker->feed (frame_time, afr);
+ if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VideoFrameType::THREE_D) {
+ boost::throw_exception (
+ DecodeError(
+ String::compose(
+ _("The content file %1 is set as 3D but does not appear to contain 3D images. Please set it to 2D. "
+ "You can still make a 3D DCP from this content by ticking the 3D option in the DCP video tab."),
+ _content->path(0)
+ )
+ )
+ );
+ }
+
+ if (_frame_interval_checker->guess() != FrameIntervalChecker::AGAIN) {
+ _frame_interval_checker.reset ();
+ }
+ }
- _last_image = image;
- _last_subtitle = sub;
-}
+ Frame frame;
+ Eyes eyes = Eyes::BOTH;
+ if (!_position) {
+ /* This is the first data we have received since initialisation or seek. Set
+ the position based on the frame that was given. After this first time
+ we just count frames, since (as with audio) it seems that ContentTimes
+ are unreliable from FFmpegDecoder. They are much better than audio times
+ but still we get the occasional one which is duplicated. In this case
+ ffmpeg seems to carry on regardless, processing the video frame as normal.
+ If we drop the frame with the duplicated timestamp we obviously lose sync.
+ */
+
+ if (vft == VideoFrameType::THREE_D_ALTERNATE) {
+ frame = decoder_frame / 2;
+ eyes = (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
+ } else {
+ frame = decoder_frame;
+ if (vft == VideoFrameType::THREE_D) {
+ auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+			/* At the moment only DCP decoders produce VideoFrameType::THREE_D, so only the J2KImageProxy
+ * knows which eye it is.
+ */
+ if (j2k && j2k->eye()) {
+ eyes = j2k->eye().get() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
+ }
+ }
+ }
+
+ _position = ContentTime::from_frames (frame, afr);
+ } else {
+ if (vft == VideoFrameType::THREE_D) {
+ auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+ if (j2k && j2k->eye()) {
+ if (j2k->eye() == dcp::Eye::LEFT) {
+ frame = _position->frames_round(afr) + 1;
+ eyes = Eyes::LEFT;
+ } else {
+ frame = _position->frames_round(afr);
+ eyes = Eyes::RIGHT;
+ }
+ } else {
+ /* This should not happen; see above */
+ frame = _position->frames_round(afr) + 1;
+ }
+ } else if (vft == VideoFrameType::THREE_D_ALTERNATE) {
+ DCPOMATIC_ASSERT (_last_emitted_eyes);
+ if (_last_emitted_eyes.get() == Eyes::RIGHT) {
+ frame = _position->frames_round(afr) + 1;
+ eyes = Eyes::LEFT;
+ } else {
+ frame = _position->frames_round(afr);
+ eyes = Eyes::RIGHT;
+ }
+ } else {
+ frame = _position->frames_round(afr) + 1;
+ }
+ }
-/** Set up the current subtitle. This will be put onto frames that
- * fit within its time specification. s may be 0 to say that there
- * is no current subtitle.
- * @param s New current subtitle, or 0.
- */
-void
-VideoDecoder::emit_subtitle (shared_ptr<TimedSubtitle> s)
-{
- _timed_subtitle = s;
-
- if (_timed_subtitle) {
- Position const p = _timed_subtitle->subtitle()->position ();
- _timed_subtitle->subtitle()->set_position (Position (p.x - _film->crop().left, p.y - _film->crop().top));
+ switch (vft) {
+ case VideoFrameType::TWO_D:
+ case VideoFrameType::THREE_D:
+ Data (ContentVideo (image, frame, eyes, Part::WHOLE));
+ break;
+ case VideoFrameType::THREE_D_ALTERNATE:
+ {
+ Data (ContentVideo (image, frame, eyes, Part::WHOLE));
+ _last_emitted_eyes = eyes;
+ break;
+ }
+ case VideoFrameType::THREE_D_LEFT_RIGHT:
+ Data (ContentVideo (image, frame, Eyes::LEFT, Part::LEFT_HALF));
+ Data (ContentVideo (image, frame, Eyes::RIGHT, Part::RIGHT_HALF));
+ break;
+ case VideoFrameType::THREE_D_TOP_BOTTOM:
+ Data (ContentVideo (image, frame, Eyes::LEFT, Part::TOP_HALF));
+ Data (ContentVideo (image, frame, Eyes::RIGHT, Part::BOTTOM_HALF));
+ break;
+ case VideoFrameType::THREE_D_LEFT:
+ Data (ContentVideo (image, frame, Eyes::LEFT, Part::WHOLE));
+ break;
+ case VideoFrameType::THREE_D_RIGHT:
+ Data (ContentVideo (image, frame, Eyes::RIGHT, Part::WHOLE));
+ break;
+ default:
+ DCPOMATIC_ASSERT (false);
}
+
+ _position = ContentTime::from_frames (frame, afr);
}
+
void
-VideoDecoder::set_progress (Job* j) const
+VideoDecoder::seek ()
{
- assert (j);
-
- if (_film->video_length()) {
- j->set_progress (float (_video_frame) / _film->video_length());
- }
+ _position = boost::none;
+ _last_emitted_eyes.reset ();
+ _frame_interval_checker.reset (new FrameIntervalChecker());
}