Change how video timing is done.
author Carl Hetherington <cth@carlh.net>
Thu, 29 Sep 2022 08:17:50 +0000 (10:17 +0200)
committer Carl Hetherington <cth@carlh.net>
Thu, 22 Dec 2022 23:11:11 +0000 (00:11 +0100)
This commit changes the approach to video timing.  Previously we
would (more or less) try to use every video frame from the content
in the output, hoping that the frames arrived at a constant rate.

That is not always the case, however.  Now we preserve the PTS of
each video frame, and when one arrives we output whatever DCP video
frames we can (at the regular DCP frame rate).

Hopefully this will solve a range of sync problems, though it could
also introduce new ones.
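
To illustrate the idea, here is a simplified, standalone sketch of the
pacing scheme described above.  It is not the actual Player code: all
names and types here are illustrative only, and real times are DCPTime
ticks rather than doubles.

        // Sketch only: content frames arrive with a preserved PTS; DCP
        // frames are emitted at the fixed DCP rate up to that point,
        // re-using the newest content frame for each output slot.
        #include <cstdio>
        #include <memory>
        #include <optional>

        struct Image {};

        class Pacer
        {
        public:
                explicit Pacer(double dcp_rate)
                        : _dcp_frame(1.0 / dcp_rate)
                {}

                /* Called when the decoder produces a frame whose PTS is `time' seconds */
                void content_frame(std::shared_ptr<Image> image, double time)
                {
                        if (!_next) {
                                /* First frame since a seek: start the DCP clock here */
                                _next = time;
                        }
                        _last = image;
                        /* Emit DCP frames at the regular DCP rate up to (roughly) this PTS */
                        while (*_next < time + _dcp_frame / 2) {
                                emit_dcp_frame(_last, *_next);
                                *_next += _dcp_frame;
                        }
                }

        private:
                void emit_dcp_frame(std::shared_ptr<Image>, double time)
                {
                        std::printf("DCP frame at %fs\n", time);
                }

                double _dcp_frame;
                std::optional<double> _next;
                std::shared_ptr<Image> _last;
        };

        int main()
        {
                Pacer pacer(24);
                /* 25fps content into a 24fps DCP: roughly one content
                   frame per second ends up filling no output slot */
                for (int i = 0; i < 6; ++i) {
                        pacer.content_frame(std::make_shared<Image>(), i / 25.0);
                }
                return 0;
        }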

20 files changed:
src/lib/content_video.h
src/lib/dcp_decoder.cc
src/lib/ffmpeg_decoder.cc
src/lib/image_decoder.cc
src/lib/player.cc
src/lib/player.h
src/lib/player_video.cc
src/lib/player_video.h
src/lib/shuffler.cc
src/lib/util.cc
src/lib/video_content.cc
src/lib/video_content.h
src/lib/video_decoder.cc
src/lib/video_decoder.h
src/lib/video_mxf_decoder.cc
test/client_server_test.cc
test/content_test.cc
test/ffmpeg_decoder_seek_test.cc
test/low_bitrate_test.cc
test/shuffler_test.cc

index 4fdab717a2965749bbd78f7a9939e1403eb15496..1c145f60294f4e7d1e1523cd8cf3abbe16eb12e5 100644 (file)
@@ -23,6 +23,7 @@
 #define DCPOMATIC_CONTENT_VIDEO_H
 
 
+#include "dcpomatic_time.h"
 #include "types.h"
 
 
@@ -36,22 +37,22 @@ class ContentVideo
 {
 public:
        ContentVideo ()
-               : frame (0)
-               , eyes (Eyes::LEFT)
+               : eyes (Eyes::LEFT)
                , part (Part::WHOLE)
        {}
 
-       ContentVideo (std::shared_ptr<const ImageProxy> i, Frame f, Eyes e, Part p)
+       ContentVideo (std::shared_ptr<const ImageProxy> i, dcpomatic::ContentTime t, Eyes e, Part p)
                : image (i)
-               , frame (f)
+               , time (t)
                , eyes (e)
                , part (p)
        {}
 
        std::shared_ptr<const ImageProxy> image;
-       Frame frame;
+       dcpomatic::ContentTime time;
        Eyes eyes;
        Part part;
 };
 
+
 #endif
index 9064627ba9cd937a4930631e17f4665837553ee6..3ccb6ccc671ff3ad2335cc31b14f739a375f89bc 100644 (file)
@@ -177,7 +177,7 @@ DCPDecoder::pass ()
                                        AV_PIX_FMT_XYZ12LE,
                                        _forced_reduction
                                        ),
-                               _offset + frame
+                               ContentTime::from_frames(_offset + frame, vfr)
                                );
                } else {
                        video->emit (
@@ -189,7 +189,7 @@ DCPDecoder::pass ()
                                        AV_PIX_FMT_XYZ12LE,
                                        _forced_reduction
                                        ),
-                               _offset + frame
+                               ContentTime::from_frames(_offset + frame, vfr)
                                );
 
                        video->emit (
@@ -201,7 +201,7 @@ DCPDecoder::pass ()
                                        AV_PIX_FMT_XYZ12LE,
                                        _forced_reduction
                                        ),
-                               _offset + frame
+                               ContentTime::from_frames(_offset + frame, vfr)
                                );
                }
        }
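
The hunks above swap a raw frame index for ContentTime::from_frames().
A back-of-envelope sketch of that conversion, for reference; it assumes
the 96000 ticks-per-second time base that dcpomatic_time.h uses for its
time types, and is not the library code itself:

        #include <cmath>
        #include <cstdint>
        #include <cstdio>

        /* Roughly what ContentTime::from_frames(frame, rate) computes,
           assuming a 96kHz tick base. */
        static int64_t content_time_from_frames(int64_t frame, double rate)
        {
                return std::llrint(frame * 96000.0 / rate);
        }

        int main()
        {
                /* Frame 42 of 25fps content starts 1.68s in: 161280 ticks */
                std::printf("%lld\n", static_cast<long long>(content_time_from_frames(42, 25.0)));
                return 0;
        }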
index 29f661fc01f56f6f21cadda982a2baee96183523..b717836e2eaba593729f853ff65b9d913d0763b0 100644 (file)
@@ -178,9 +178,8 @@ FFmpegDecoder::flush_fill()
        full_length = full_length.ceil (frc.source);
        if (video && !video->ignore()) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
-               auto const f = full_length.frames_round (vfr);
-               auto const v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
-               if (v < f) {
+               auto const v = video->position(film()).get_value_or(ContentTime()) + ContentTime::from_frames(1, vfr);
+               if (v < full_length) {
                        video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
                        did_something = true;
                }
@@ -623,7 +622,7 @@ FFmpegDecoder::process_video_frame ()
                        video->emit (
                                film(),
                                make_shared<RawImageProxy>(image),
-                               llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
+                               ContentTime::from_seconds(pts)
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
index 26a9ad624b31fb168633c7aa3f721f44edf403db..d743c83958d1ab148c62e8e3693bae4e532d21d5 100644 (file)
@@ -79,7 +79,7 @@ ImageDecoder::pass ()
                }
        }
 
-       video->emit (film(), _image, _frame_video_position);
+       video->emit(film(), _image, dcpomatic::ContentTime::from_frames(_frame_video_position, _image_content->video_frame_rate().get_value_or(24)));
        ++_frame_video_position;
        return false;
 }
index 3c3032bd12ed7a9162a2b7c668213139c17a3b00..db179eabb5b7d3d67b8b63ef6a64be26c39be881 100644 (file)
@@ -71,10 +71,8 @@ using std::dynamic_pointer_cast;
 using std::list;
 using std::make_pair;
 using std::make_shared;
-using std::make_shared;
 using std::max;
 using std::min;
-using std::min;
 using std::pair;
 using std::shared_ptr;
 using std::vector;
@@ -394,7 +392,6 @@ Player::setup_pieces ()
        _silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);
 
        _next_video_time = boost::none;
-       _next_video_eyes = Eyes::BOTH;
        _next_audio_time = boost::none;
 }
 
@@ -507,7 +504,7 @@ Player::black_player_video_frame (Eyes eyes) const
        boost::mutex::scoped_lock lm(_black_image_mutex);
 
        return std::make_shared<PlayerVideo> (
-               std::make_shared<const RawImageProxy>(_black_image),
+               make_shared<const RawImageProxy>(_black_image),
                Crop(),
                optional<double>(),
                _video_container_size,
@@ -517,7 +514,7 @@ Player::black_player_video_frame (Eyes eyes) const
                PresetColourConversion::all().front().conversion,
                VideoRange::FULL,
                std::weak_ptr<Content>(),
-               boost::optional<Frame>(),
+               boost::optional<dcpomatic::ContentTime>(),
                false
        );
 }
@@ -685,7 +682,7 @@ Player::pass ()
 
        if (_playback_length.load() == DCPTime() || !film) {
                /* Special; just give one black frame */
-               emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
+               use_video(black_player_video_frame(Eyes::BOTH), DCPTime(), one_video_frame());
                return true;
        }
 
@@ -743,18 +740,23 @@ Player::pass ()
                LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
                auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
-               if (dcp && !_play_referenced && dcp->reference_audio()) {
-                       /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
-                          to `hide' the fact that no audio was emitted during the referenced DCP (though
-                          we need to behave as though it was).
-                       */
-                       _next_audio_time = dcp->end(film);
+               if (dcp && !_play_referenced) {
+                       if (dcp->reference_video()) {
+                               _next_video_time = dcp->end(film);
+                       }
+                       if (dcp->reference_audio()) {
+                               /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
+                                  to `hide' the fact that no audio was emitted during the referenced DCP (though
+                                  we need to behave as though it was).
+                               */
+                               _next_audio_time = dcp->end(film);
+                       }
                }
                break;
        }
        case BLACK:
                LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
-               emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
+               use_video(black_player_video_frame(Eyes::BOTH), _black.position(), _black.period_at_position().to);
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
@@ -855,24 +857,13 @@ Player::pass ()
        }
 
        if (done) {
+               emit_video_until(film->length());
+
                if (_shuffler) {
                        _shuffler->flush ();
                }
                for (auto const& i: _delay) {
-                       do_emit_video(i.first, i.second);
-               }
-
-               /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
-                * However, if we have L and R video files, and one is shorter than the other,
-                * the fill code in ::video mostly takes care of filling in the gaps.
-                * However, since it fills at the point when it knows there is more video coming
-                * at time t (so it should fill any gap up to t) it can't do anything right at the
-                * end.  This is particularly bad news if the last frame emitted is a LEFT
-                * eye, as the MXF writer will complain about the 3D sequence being wrong.
-                * Here's a hack to workaround that particular case.
-                */
-               if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
-                       do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
+                       emit_video(i.first, i.second);
                }
        }
 
@@ -944,6 +935,57 @@ increment_eyes (Eyes e)
 }
 
 
+void
+Player::emit_video_until(DCPTime time)
+{
+       auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
+               /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
+                  player before the video that requires them.
+               */
+               _delay.push_back(make_pair(pv, time));
+
+               if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+                       _next_video_time = time + one_video_frame();
+               }
+
+               if (_delay.size() < 3) {
+                       return;
+               }
+
+               auto to_do = _delay.front();
+               _delay.pop_front();
+               emit_video(to_do.first, to_do.second);
+       };
+
+       auto const age_threshold = one_video_frame() * 2;
+
+       while (_next_video_time.get_value_or({}) < time) {
+               auto left = _last_video[Eyes::LEFT];
+               auto right = _last_video[Eyes::RIGHT];
+               auto both = _last_video[Eyes::BOTH];
+
+               auto const next = _next_video_time.get_value_or({});
+
+               if (
+                       left.first &&
+                       right.first &&
+                       (!both.first || (left.second >= both.second && right.second >= both.second)) &&
+                       (left.second - next) < age_threshold &&
+                       (right.second - next) < age_threshold
+                  ) {
+                       frame(left.first, next);
+                       frame(right.first, next);
+               } else if (both.first && (both.second - next) < age_threshold) {
+                       frame(both.first, next);
+                       LOG_DEBUG_PLAYER("Content %1 selected for DCP %2 (age %3)", to_string(both.second), to_string(next), to_string(both.second - next));
+               } else {
+                       frame(black_player_video_frame(Eyes::BOTH), next);
+                       LOG_DEBUG_PLAYER("Black selected for DCP %1", to_string(next));
+               }
+       }
+}
+
+
 void
 Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
 {
@@ -965,20 +1007,23 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
-       FrameRateChange frc(film, piece->content);
-       if (frc.skip && (video.frame % 2) == 1) {
-               return;
+       auto const three_d = film->three_d();
+
+       if (!three_d) {
+               if (video.eyes == Eyes::LEFT) {
+                       /* Use left-eye images for both eyes... */
+                       video.eyes = Eyes::BOTH;
+               } else if (video.eyes == Eyes::RIGHT) {
+                       /* ...and discard the right */
+                       return;
+               }
        }
 
-       /* Time of the first frame we will emit */
-       DCPTime const time = content_video_to_dcp (piece, video.frame);
-       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+       /* Time of the frame we just received within the DCP */
+       auto const time = content_time_to_dcp(piece, video.time);
+       LOG_DEBUG_PLAYER("Received video frame %1 %2 eyes %3", to_string(video.time), to_string(time), static_cast<int>(video.eyes));
 
-       /* Discard if it's before the content's period or the last accurate seek.  We can't discard
-          if it's after the content's period here as in that case we still need to fill any gap between
-          `now' and the end of the content's period.
-       */
-       if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
+       if (time < piece->content->position()) {
                return;
        }
 
@@ -986,86 +1031,42 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
-       /* Fill gaps that we discover now that we have some video which needs to be emitted.
-          This is where we need to fill to.
-       */
-       DCPTime fill_to = min(time, piece->content->end(film));
-
-       if (_next_video_time) {
-               DCPTime fill_from = max (*_next_video_time, piece->content->position());
-
-               /* Fill if we have more than half a frame to do */
-               if ((fill_to - fill_from) > one_video_frame() / 2) {
-                       auto last = _last_video.find (weak_piece);
-                       if (film->three_d()) {
-                               auto fill_to_eyes = video.eyes;
-                               if (fill_to_eyes == Eyes::BOTH) {
-                                       fill_to_eyes = Eyes::LEFT;
-                               }
-                               if (fill_to == piece->content->end(film)) {
-                                       /* Don't fill after the end of the content */
-                                       fill_to_eyes = Eyes::LEFT;
-                               }
-                               auto j = fill_from;
-                               auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
-                               if (eyes == Eyes::BOTH) {
-                                       eyes = Eyes::LEFT;
-                               }
-                               while (j < fill_to || eyes != fill_to_eyes) {
-                                       if (last != _last_video.end()) {
-                                               LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
-                                               auto copy = last->second->shallow_copy();
-                                               copy->set_eyes (eyes);
-                                               emit_video (copy, j);
-                                       } else {
-                                               LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
-                                               emit_video (black_player_video_frame(eyes), j);
-                                       }
-                                       if (eyes == Eyes::RIGHT) {
-                                               j += one_video_frame();
-                                       }
-                                       eyes = increment_eyes (eyes);
-                               }
-                       } else {
-                               for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
-                                       if (last != _last_video.end()) {
-                                               emit_video (last->second, j);
-                                       } else {
-                                               emit_video (black_player_video_frame(Eyes::BOTH), j);
-                                       }
-                               }
-                       }
-               }
+       if (!_next_video_time) {
+               _next_video_time = time.round(film->video_frame_rate());
        }
 
        auto const content_video = piece->content->video;
-
-       _last_video[weak_piece] = std::make_shared<PlayerVideo>(
-               video.image,
-               content_video->actual_crop(),
-               content_video->fade(film, video.frame),
-               scale_for_display(
-                       content_video->scaled_size(film->frame_size()),
+       use_video(
+               std::make_shared<PlayerVideo>(
+                       video.image,
+                       content_video->actual_crop(),
+                       content_video->fade(film, video.time),
+                       scale_for_display(
+                               content_video->scaled_size(film->frame_size()),
+                               _video_container_size,
+                               film->frame_size(),
+                               content_video->pixel_quanta()
+                               ),
                        _video_container_size,
-                       film->frame_size(),
-                       content_video->pixel_quanta()
+                       video.eyes,
+                       video.part,
+                       content_video->colour_conversion(),
+                       content_video->range(),
+                       piece->content,
+                       video.time,
+                       false
                        ),
-               _video_container_size,
-               video.eyes,
-               video.part,
-               content_video->colour_conversion(),
-               content_video->range(),
-               piece->content,
-               video.frame,
-               false
-               );
+               time,
+               piece->content->end(film)
+       );
+}
 
-       DCPTime t = time;
-       for (int i = 0; i < frc.repeat; ++i) {
-               if (t < piece->content->end(film)) {
-                       emit_video (_last_video[weak_piece], t);
-               }
-               t += one_video_frame ();
+void
+Player::use_video(shared_ptr<PlayerVideo> pv, DCPTime time, DCPTime end)
+{
+       _last_video[pv->eyes()] = { pv, time };
+       if (pv->eyes() != Eyes::LEFT) {
+               emit_video_until(std::min(time + one_video_frame() / 2, end));
        }
 }
 
@@ -1347,18 +1348,18 @@ Player::seek (DCPTime time, bool accurate)
 
        if (accurate) {
                _next_video_time = time;
-               _next_video_eyes = Eyes::LEFT;
                _next_audio_time = time;
        } else {
                _next_video_time = boost::none;
-               _next_video_eyes = boost::none;
                _next_audio_time = boost::none;
        }
 
        _black.set_position (time);
        _silent.set_position (time);
 
-       _last_video.clear ();
+       _last_video[Eyes::LEFT] = {};
+       _last_video[Eyes::RIGHT] = {};
+       _last_video[Eyes::BOTH] = {};
 
        for (auto& state: _stream_states) {
                state.second.last_push_end = boost::none;
@@ -1367,43 +1368,7 @@ Player::seek (DCPTime time, bool accurate)
 
 
 void
-Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
-{
-       auto film = _film.lock();
-       DCPOMATIC_ASSERT(film);
-
-       if (!film->three_d()) {
-               if (pv->eyes() == Eyes::LEFT) {
-                       /* Use left-eye images for both eyes... */
-                       pv->set_eyes (Eyes::BOTH);
-               } else if (pv->eyes() == Eyes::RIGHT) {
-                       /* ...and discard the right */
-                       return;
-               }
-       }
-
-       /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
-          player before the video that requires them.
-       */
-       _delay.push_back (make_pair (pv, time));
-
-       if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-               _next_video_time = time + one_video_frame();
-       }
-       _next_video_eyes = increment_eyes (pv->eyes());
-
-       if (_delay.size() < 3) {
-               return;
-       }
-
-       auto to_do = _delay.front();
-       _delay.pop_front();
-       do_emit_video (to_do.first, to_do.second);
-}
-
-
-void
-Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
+Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time)
 {
        if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
                std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
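
A worked example of the new pacing in player.cc, using illustrative
numbers: with a 24fps DCP, one_video_frame() is 1/24s (about 41.7ms), so
use_video() asks emit_video_until() to fill slots up to the frame's DCP
time plus about 20.8ms.  Feed it 25fps content (frames at 0, 40, 80,
120ms, ...): the frame at 0ms fills the slot at 0 and advances
_next_video_time to 41.7ms; the frame at 40ms fills the 41.7ms slot; and
so on, with content times slipping behind slot times until one content
frame fills no slot at all.  In other words, roughly one content frame
per second is dropped, instead of the output drifting out of sync.  The
age_threshold of two frames stops a stored frame being shown in slots
more than two frames before its proper time, so gaps before content are
filled with black rather than with an early copy of the next frame.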
index 45dcd9900c256bce1996e3b92f4ec4699dfe83bd..8d95816ca4d249a014338344c9ed7502d1a32831 100644 (file)
@@ -152,6 +152,8 @@ private:
        dcpomatic::ContentTime dcp_to_content_time (std::shared_ptr<const Piece> piece, dcpomatic::DCPTime t) const;
        dcpomatic::DCPTime content_time_to_dcp (std::shared_ptr<const Piece> piece, dcpomatic::ContentTime t) const;
        std::shared_ptr<PlayerVideo> black_player_video_frame (Eyes eyes) const;
+       void emit_video_until(dcpomatic::DCPTime time);
+       void insert_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
 
        void video (std::weak_ptr<Piece>, ContentVideo);
        void audio (std::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
@@ -166,8 +168,8 @@ private:
                std::shared_ptr<const AudioBuffers> audio, dcpomatic::DCPTime time, dcpomatic::DCPTime discard_to
                ) const;
        boost::optional<PositionImage> open_subtitles_for_frame (dcpomatic::DCPTime time) const;
-       void emit_video (std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
-       void do_emit_video (std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
+       void emit_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
+       void use_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
        void emit_audio (std::shared_ptr<AudioBuffers> data, dcpomatic::DCPTime time);
        std::shared_ptr<const Playlist> playlist () const;
 
@@ -208,15 +210,12 @@ private:
 
        /** Time of the next video that we will emit, or the time of the last accurate seek */
        boost::optional<dcpomatic::DCPTime> _next_video_time;
-       /** Eyes of the next video that we will emit */
-       boost::optional<Eyes> _next_video_eyes;
        /** Time of the next audio that we will emit, or the time of the last accurate seek */
        boost::optional<dcpomatic::DCPTime> _next_audio_time;
 
        boost::atomic<boost::optional<int>> _dcp_decode_reduction;
 
-       typedef std::map<std::weak_ptr<Piece>, std::shared_ptr<PlayerVideo>, std::owner_less<std::weak_ptr<Piece>>> LastVideoMap;
-       LastVideoMap _last_video;
+       EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes> _last_video;
 
        AudioMerger _audio_merger;
        std::unique_ptr<Shuffler> _shuffler;
index d45bf9f432b90c996feb870baf1580fc3a5796a4..b020ca1cd8172ba1cd5193c5658d42c621166171 100644 (file)
@@ -45,6 +45,7 @@ using std::weak_ptr;
 using boost::optional;
 using dcp::Data;
 using dcp::raw_convert;
+using namespace dcpomatic;
 
 
 PlayerVideo::PlayerVideo (
@@ -58,7 +59,7 @@ PlayerVideo::PlayerVideo (
        optional<ColourConversion> colour_conversion,
        VideoRange video_range,
        weak_ptr<Content> content,
-       optional<Frame> video_frame,
+       optional<ContentTime> video_time,
        bool error
        )
        : _in (in)
@@ -71,7 +72,7 @@ PlayerVideo::PlayerVideo (
        , _colour_conversion (colour_conversion)
        , _video_range (video_range)
        , _content (content)
-       , _video_frame (video_frame)
+       , _video_time(video_time)
        , _error (error)
 {
 
@@ -343,7 +344,7 @@ PlayerVideo::shallow_copy () const
                _colour_conversion,
                _video_range,
                _content,
-               _video_frame,
+               _video_time,
                _error
                );
 }
@@ -356,12 +357,12 @@ bool
 PlayerVideo::reset_metadata (shared_ptr<const Film> film, dcp::Size player_video_container_size)
 {
        auto content = _content.lock();
-       if (!content || !_video_frame) {
+       if (!content || !_video_time) {
                return false;
        }
 
        _crop = content->video->actual_crop();
-       _fade = content->video->fade(film, _video_frame.get());
+       _fade = content->video->fade(film, _video_time.get());
        _inter_size = scale_for_display(
                content->video->scaled_size(film->frame_size()),
                player_video_container_size,
index f2781c1a0c51c3e099dd0009604e04a89ce2c3df..10b2078a00af83e73b1505b97aa2801145c9edde 100644 (file)
@@ -59,7 +59,7 @@ public:
                boost::optional<ColourConversion> colour_conversion,
                VideoRange video_range,
                std::weak_ptr<Content> content,
-               boost::optional<Frame> video_frame,
+               boost::optional<dcpomatic::ContentTime> video_time,
                bool error
                );
 
@@ -141,8 +141,8 @@ private:
        boost::optional<PositionImage> _text;
        /** Content that we came from.  This is so that reset_metadata() can work. */
        std::weak_ptr<Content> _content;
-       /** Video frame that we came from.  Again, this is for reset_metadata() */
-       boost::optional<Frame> _video_frame;
+       /** Video time that we came from.  Again, this is for reset_metadata() */
+       boost::optional<dcpomatic::ContentTime> _video_time;
 
        mutable boost::mutex _mutex;
        mutable std::shared_ptr<Image> _image;
index 5a4faf4d15fa4c0f74617bd2bd03ecd57f1ba114..a4ea0f5dc05a923d00d8da3ea7a34983fc4522c8 100644 (file)
@@ -40,8 +40,8 @@ int const Shuffler::_max_size = 64;
 struct Comparator
 {
        bool operator()(Shuffler::Store const & a, Shuffler::Store const & b) {
-               if (a.second.frame != b.second.frame) {
-                       return a.second.frame < b.second.frame;
+               if (a.second.time != b.second.time) {
+                       return a.second.time < b.second.time;
                }
                return a.second.eyes < b.second.eyes;
        }
@@ -51,7 +51,7 @@ struct Comparator
 void
 Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video)
 {
-       LOG_DEBUG_THREE_D ("Shuffler::video frame=%1 eyes=%2 part=%3", video.frame, static_cast<int>(video.eyes), static_cast<int>(video.part));
+       LOG_DEBUG_THREE_D("Shuffler::video time=%1 eyes=%2 part=%3", to_string(video.time), static_cast<int>(video.eyes), static_cast<int>(video.part));
 
        if (video.eyes != Eyes::LEFT && video.eyes != Eyes::RIGHT) {
                /* Pass through anything that we don't care about */
@@ -79,13 +79,13 @@ Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                        !_store.empty() &&
                        _last &&
                        (
-                               (_store.front().second.frame == _last->frame       && _store.front().second.eyes == Eyes::RIGHT && _last->eyes == Eyes::LEFT) ||
-                               (_store.front().second.frame >= (_last->frame + 1) && _store.front().second.eyes == Eyes::LEFT  && _last->eyes == Eyes::RIGHT)
+                               (_store.front().second.time == _last->time && _store.front().second.eyes == Eyes::RIGHT && _last->eyes == Eyes::LEFT) ||
+                               (_store.front().second.time > _last->time  && _store.front().second.eyes == Eyes::LEFT  && _last->eyes == Eyes::RIGHT)
                                );
 
                if (!store_front_in_sequence) {
-                       string const store = _store.empty() ? "store empty" : String::compose("store front frame=%1 eyes=%2", _store.front().second.frame, static_cast<int>(_store.front().second.eyes));
-                       string const last = _last ? String::compose("last frame=%1 eyes=%2", _last->frame, static_cast<int>(_last->eyes)) : "no last";
+                       string const store = _store.empty() ? "store empty" : String::compose("store front time=%1 eyes=%2", to_string(_store.front().second.time), static_cast<int>(_store.front().second.eyes));
+                       string const last = _last ? String::compose("last time=%1 eyes=%2", to_string(_last->time), static_cast<int>(_last->eyes)) : "no last";
                        LOG_DEBUG_THREE_D("Shuffler not in sequence: %1 %2", store, last);
                }
 
@@ -98,10 +98,10 @@ Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                }
 
                if (_store.size() > _max_size) {
-                       LOG_WARNING ("Shuffler is full after receiving frame %1; 3D sync may be incorrect.", video.frame);
+                       LOG_WARNING("Shuffler is full after receiving frame at %1; 3D sync may be incorrect.", to_string(video.time));
                }
 
-               LOG_DEBUG_THREE_D("Shuffler emits frame=%1 eyes=%2 store=%3", _store.front().second.frame, static_cast<int>(_store.front().second.eyes), _store.size());
+               LOG_DEBUG_THREE_D("Shuffler emits time=%1 eyes=%2 store=%3", to_string(_store.front().second.time), static_cast<int>(_store.front().second.eyes), _store.size());
                Video (_store.front().first, _store.front().second);
                _last = _store.front().second;
                _store.pop_front ();
index 086a99f243784439f6877065e8c89953f0114b20..95787293b654f2dd560fbd0379596f94bcaa6224 100644 (file)
@@ -815,16 +815,6 @@ remap (shared_ptr<const AudioBuffers> input, int output_channels, AudioMapping m
        return mapped;
 }
 
-Eyes
-increment_eyes (Eyes e)
-{
-       if (e == Eyes::LEFT) {
-               return Eyes::RIGHT;
-       }
-
-       return Eyes::LEFT;
-}
-
 
 size_t
 utf8_strlen (string s)
index c10a94f43f372a4ca184ab4edddcf8577367de58..9abb2b39483e2529b1896eee30e37c6a21aa198a 100644 (file)
@@ -411,27 +411,29 @@ VideoContent::size_after_crop () const
 }
 
 
-/** @param f Frame index within the whole (untrimmed) content.
+/** @param time Time within the whole (untrimmed) content.
  *  @return Fade factor (between 0 and 1) or unset if there is no fade.
  */
 optional<double>
-VideoContent::fade (shared_ptr<const Film> film, Frame f) const
+VideoContent::fade(shared_ptr<const Film> film, ContentTime time) const
 {
-       DCPOMATIC_ASSERT (f >= 0);
+       DCPOMATIC_ASSERT(time.get() >= 0);
 
        double const vfr = _parent->active_video_frame_rate(film);
 
-       auto const ts = _parent->trim_start().frames_round(vfr);
-       if ((f - ts) < fade_in()) {
-               return double (f - ts) / fade_in();
+       auto const ts = _parent->trim_start();
+       auto const fade_in_time = ContentTime::from_frames(fade_in(), vfr);
+       if ((time - ts) < fade_in_time) {
+               return double(ContentTime(time - ts).get()) / fade_in_time.get();
        }
 
-       auto fade_out_start = length() - _parent->trim_end().frames_round(vfr) - fade_out();
-       if (f >= fade_out_start) {
-               return 1 - double (f - fade_out_start) / fade_out();
+       auto const fade_out_time = ContentTime::from_frames(fade_out(), vfr);
+       auto fade_out_start = ContentTime::from_frames(length(), vfr) - _parent->trim_end() - fade_out_time;
+       if (time >= fade_out_start) {
+               return 1 - double(ContentTime(time - fade_out_start).get()) / fade_out_time.get();
        }
 
-       return optional<double> ();
+       return {};
 }
 
 string
index 9f6cc105ea42155fec19d9776ff016a368e96280..25e43b0ebc8de4b37176e3dc90c38cfe26091013 100644 (file)
@@ -208,7 +208,7 @@ public:
        dcp::Size size_after_crop () const;
        dcp::Size scaled_size (dcp::Size container_size);
 
-       boost::optional<double> fade (std::shared_ptr<const Film> film, Frame) const;
+       boost::optional<double> fade(std::shared_ptr<const Film> film, dcpomatic::ContentTime time) const;
 
        std::string processing_description (std::shared_ptr<const Film> film);
 
index cf21f885a3fbc6af45ba2efde1cfc18ad645eb9b..c628fddd983e2015a81b1bd7d13b52b0d9424a0a 100644 (file)
@@ -20,7 +20,6 @@
 
 
 #include "compose.hpp"
-#include "film.h"
 #include "frame_interval_checker.h"
 #include "image.h"
 #include "j2k_image_proxy.h"
@@ -47,17 +46,9 @@ VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
 }
 
 
-/** Called by decoder classes when they have a video frame ready.
- *  @param frame Frame index within the content; this does not take into account 3D
- *  so for 3D_ALTERNATE this value goes:
- *     0: frame 0 left
- *     1: frame 0 right
- *     2: frame 1 left
- *     3: frame 1 right
- *  and so on.
- */
+/** Called by decoder classes when they have a video frame ready */
 void
-VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, Frame decoder_frame)
+VideoDecoder::emit(shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, ContentTime time)
 {
        if (ignore ()) {
                return;
@@ -66,14 +57,12 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
        auto const afr = _content->active_video_frame_rate(film);
        auto const vft = _content->video->frame_type();
 
-       auto frame_time = ContentTime::from_frames (decoder_frame, afr);
-
        /* Do some heuristics to try and spot the case where the user sets content to 3D
         * when it is not.  We try to tell this by looking at the differences in time between
         * the first few frames.  Real 3D content should have two frames for each timestamp.
         */
        if (_frame_interval_checker) {
-               _frame_interval_checker->feed (frame_time, afr);
+               _frame_interval_checker->feed(time, afr);
                if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VideoFrameType::THREE_D) {
                        boost::throw_exception (
                                DecodeError(
@@ -91,94 +80,54 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
                }
        }
 
-       Frame frame;
-       Eyes eyes = Eyes::BOTH;
-       if (!_position) {
-               /* This is the first data we have received since initialisation or seek.  Set
-                  the position based on the frame that was given.  After this first time
-                  we just count frames, since (as with audio) it seems that ContentTimes
-                  are unreliable from FFmpegDecoder.  They are much better than audio times
-                  but still we get the occasional one which is duplicated.  In this case
-                  ffmpeg seems to carry on regardless, processing the video frame as normal.
-                  If we drop the frame with the duplicated timestamp we obviously lose sync.
-               */
-
-               if (vft == VideoFrameType::THREE_D_ALTERNATE) {
-                       frame = decoder_frame / 2;
-                       eyes = (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
-               } else {
-                       frame = decoder_frame;
-                       if (vft == VideoFrameType::THREE_D) {
-                               auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
-                               /* At the moment only DCP decoders producers VideoFrameType::THREE_D, so only the J2KImageProxy
-                                * knows which eye it is.
-                                */
-                               if (j2k && j2k->eye()) {
-                                       eyes = j2k->eye().get() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
-                               }
-                       }
-               }
-
-               _position = ContentTime::from_frames (frame, afr);
-       } else {
-               if (vft == VideoFrameType::THREE_D) {
-                       auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
-                       if (j2k && j2k->eye()) {
-                               if (j2k->eye() == dcp::Eye::LEFT) {
-                                       frame = _position->frames_round(afr) + 1;
-                                       eyes = Eyes::LEFT;
-                               } else {
-                                       frame = _position->frames_round(afr);
-                                       eyes = Eyes::RIGHT;
-                               }
-                       } else {
-                               /* This should not happen; see above */
-                               frame = _position->frames_round(afr) + 1;
-                       }
-               } else if (vft == VideoFrameType::THREE_D_ALTERNATE) {
-                       DCPOMATIC_ASSERT (_last_emitted_eyes);
-                       if (_last_emitted_eyes.get() == Eyes::RIGHT) {
-                               frame = _position->frames_round(afr) + 1;
-                               eyes = Eyes::LEFT;
-                       } else {
-                               frame = _position->frames_round(afr);
-                               eyes = Eyes::RIGHT;
-                       }
-               } else {
-                       frame = _position->frames_round(afr) + 1;
-               }
-       }
-
        switch (vft) {
        case VideoFrameType::TWO_D:
+               Data(ContentVideo(image, time, Eyes::BOTH, Part::WHOLE));
+               break;
        case VideoFrameType::THREE_D:
-               Data (ContentVideo (image, frame, eyes, Part::WHOLE));
+       {
+               auto eyes = Eyes::LEFT;
+               auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+               if (j2k && j2k->eye()) {
+                       eyes = *j2k->eye() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
+               }
+
+               Data(ContentVideo(image, time, eyes, Part::WHOLE));
                break;
+       }
        case VideoFrameType::THREE_D_ALTERNATE:
        {
-               Data (ContentVideo (image, frame, eyes, Part::WHOLE));
+               Eyes eyes;
+               if (_last_emitted_eyes) {
+                       eyes = _last_emitted_eyes.get() == Eyes::LEFT ? Eyes::RIGHT : Eyes::LEFT;
+               } else {
+                       /* We don't know what eye this frame is, so just guess */
+                       auto frame = time.frames_round(_content->video_frame_rate().get_value_or(24));
+                       eyes = (frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
+               }
+               Data(ContentVideo(image, time, eyes, Part::WHOLE));
                _last_emitted_eyes = eyes;
                break;
        }
        case VideoFrameType::THREE_D_LEFT_RIGHT:
-               Data (ContentVideo (image, frame, Eyes::LEFT, Part::LEFT_HALF));
-               Data (ContentVideo (image, frame, Eyes::RIGHT, Part::RIGHT_HALF));
+               Data(ContentVideo(image, time, Eyes::LEFT, Part::LEFT_HALF));
+               Data(ContentVideo(image, time, Eyes::RIGHT, Part::RIGHT_HALF));
                break;
        case VideoFrameType::THREE_D_TOP_BOTTOM:
-               Data (ContentVideo (image, frame, Eyes::LEFT, Part::TOP_HALF));
-               Data (ContentVideo (image, frame, Eyes::RIGHT, Part::BOTTOM_HALF));
+               Data(ContentVideo(image, time, Eyes::LEFT, Part::TOP_HALF));
+               Data(ContentVideo(image, time, Eyes::RIGHT, Part::BOTTOM_HALF));
                break;
        case VideoFrameType::THREE_D_LEFT:
-               Data (ContentVideo (image, frame, Eyes::LEFT, Part::WHOLE));
+               Data(ContentVideo(image, time, Eyes::LEFT, Part::WHOLE));
                break;
        case VideoFrameType::THREE_D_RIGHT:
-               Data (ContentVideo (image, frame, Eyes::RIGHT, Part::WHOLE));
+               Data(ContentVideo(image, time, Eyes::RIGHT, Part::WHOLE));
                break;
        default:
                DCPOMATIC_ASSERT (false);
        }
 
-       _position = ContentTime::from_frames (frame, afr);
+       _position = time;
 }
 
 
index f6ee17425b6d77b48d6b074c5d369f6cfbd888c3..b609404c49230a66b9788564cb9d60cad886e4b1 100644 (file)
@@ -60,7 +60,7 @@ public:
        }
 
        void seek () override;
-       void emit (std::shared_ptr<const Film> film, std::shared_ptr<const ImageProxy>, Frame frame);
+       void emit(std::shared_ptr<const Film> film, std::shared_ptr<const ImageProxy>, dcpomatic::ContentTime time);
 
        boost::signals2::signal<void (ContentVideo)> Data;
 
index 40d3a461afbd4ebb4fd57765d5bc80a5f42d8c39..9f451297b992d9a9fa0ecc7297aedfc340412b73 100644 (file)
@@ -76,18 +76,18 @@ VideoMXFDecoder::pass ()
                video->emit (
                        film(),
                        std::make_shared<J2KImageProxy>(_mono_reader->get_frame(frame), _size, AV_PIX_FMT_XYZ12LE, optional<int>()),
-                       frame
+                       _next
                        );
        } else {
                video->emit (
                        film(),
                        std::make_shared<J2KImageProxy>(_stereo_reader->get_frame(frame), _size, dcp::Eye::LEFT, AV_PIX_FMT_XYZ12LE, optional<int>()),
-                       frame
+                       _next
                        );
                video->emit (
                        film(),
                        std::make_shared<J2KImageProxy>(_stereo_reader->get_frame(frame), _size, dcp::Eye::RIGHT, AV_PIX_FMT_XYZ12LE, optional<int>()),
-                       frame
+                       _next
                        );
        }
 
index 7a99f72274758a1bace8f1e9f637fe7e1c61d502..32af60cbe6c0b4efd755bcdff20f14521f7a1a76 100644 (file)
@@ -51,6 +51,7 @@ using std::weak_ptr;
 using boost::thread;
 using boost::optional;
 using dcp::ArrayData;
+using namespace dcpomatic;
 
 
 void
@@ -105,7 +106,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_rgb)
                ColourConversion(),
                VideoRange::FULL,
                weak_ptr<Content>(),
-               optional<Frame>(),
+               optional<ContentTime>(),
                false
                );
 
@@ -188,7 +189,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_yuv)
                ColourConversion(),
                VideoRange::FULL,
                weak_ptr<Content>(),
-               optional<Frame>(),
+               optional<ContentTime>(),
                false
                );
 
@@ -258,7 +259,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_j2k)
                ColourConversion(),
                VideoRange::FULL,
                weak_ptr<Content>(),
-               optional<Frame>(),
+               optional<ContentTime>(),
                false
                );
 
@@ -283,7 +284,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_j2k)
                PresetColourConversion::all().front().conversion,
                VideoRange::FULL,
                weak_ptr<Content>(),
-               optional<Frame>(),
+               optional<ContentTime>(),
                false
                );
 
index 12e653875a43d7e3587d81d5a46d7a4076e641b7..1c167a79f11687edf4735b2bad477cae4f664897 100644 (file)
@@ -152,7 +152,7 @@ BOOST_AUTO_TEST_CASE (content_test6)
                );
 
        make_and_verify_dcp (film);
-       check_dcp (TestPaths::private_data() / "fha", film);
+       check_dcp (TestPaths::private_data() / "v2.18.x" / "fha", film);
 
        cl.run ();
 }
index 4dceae86ba6f6416e7add63be7c0093265fad6e8..f38ef3564a2004a0b49efb2fda7c929563c2fd39 100644 (file)
@@ -64,18 +64,18 @@ store (ContentVideo v)
 
 
 static void
-check (shared_ptr<FFmpegDecoder> decoder, int frame)
+check (shared_ptr<FFmpegDecoder> decoder, ContentTime time)
 {
        BOOST_REQUIRE (decoder->ffmpeg_content()->video_frame_rate ());
-       decoder->seek (ContentTime::from_frames (frame, decoder->ffmpeg_content()->video_frame_rate().get()), true);
+       decoder->seek(time, true);
        stored = optional<ContentVideo> ();
        while (!decoder->pass() && !stored) {}
-       BOOST_CHECK (stored->frame <= frame);
+       BOOST_CHECK(stored->time <= time);
 }
 
 
 static void
-test (boost::filesystem::path file, vector<int> frames)
+test (boost::filesystem::path file, vector<ContentTime> times)
 {
        auto path = TestPaths::private_data() / file;
        BOOST_REQUIRE (boost::filesystem::exists (path));
@@ -87,7 +87,7 @@ test (boost::filesystem::path file, vector<int> frames)
        auto decoder = make_shared<FFmpegDecoder>(film, content, false);
        decoder->video->Data.connect (bind (&store, _1));
 
-       for (auto i: frames) {
+       for (auto i: times) {
                check (decoder, i);
        }
 }
@@ -95,10 +95,43 @@ test (boost::filesystem::path file, vector<int> frames)
 
 BOOST_AUTO_TEST_CASE (ffmpeg_decoder_seek_test)
 {
-       vector<int> frames = { 0, 42, 999, 0 };
-
-       test ("boon_telly.mkv", frames);
-       test ("Sintel_Trailer1.480p.DivX_Plus_HD.mkv", frames);
-       test ("prophet_long_clip.mkv", { 15, 42, 999, 15 });
-       test ("dolby_aurora.vob", { 0, 125, 250, 41 });
+       test(
+               "boon_telly.mkv",
+               {
+                       ContentTime::from_frames(0, 29.97),
+                       ContentTime::from_frames(42, 29.97),
+                       ContentTime::from_frames(999, 29.97),
+                       ContentTime::from_frames(0, 29.97),
+               }
+           );
+
+       test(
+               "Sintel_Trailer1.480p.DivX_Plus_HD.mkv",
+               {
+                       ContentTime::from_frames(0, 24),
+                       ContentTime::from_frames(42, 24),
+                       ContentTime::from_frames(999, 24),
+                       ContentTime::from_frames(0, 24),
+               }
+           );
+
+       test(
+               "prophet_long_clip.mkv",
+               {
+                       ContentTime::from_frames(15, 23.976),
+                       ContentTime::from_frames(42, 23.976),
+                       ContentTime::from_frames(999, 23.976),
+                       ContentTime::from_frames(15, 23.976)
+               }
+           );
+
+       test(
+               "dolby_aurora.vob",
+               {
+                       ContentTime::from_frames(0, 25),
+                       ContentTime::from_frames(125, 25),
+                       ContentTime::from_frames(250, 25),
+                       ContentTime::from_frames(41, 25)
+               }
+           );
 }
index 7050dd771acc6dd74dd8a57f0e30b8d177010470..52b8d54be118c33559b9f1722d58141cbd34c116 100644 (file)
@@ -31,6 +31,7 @@ extern "C" {
 
 
 using std::make_shared;
+using namespace dcpomatic;
 
 
 BOOST_AUTO_TEST_CASE (low_bitrate_test)
@@ -51,7 +52,7 @@ BOOST_AUTO_TEST_CASE (low_bitrate_test)
                boost::optional<ColourConversion>(),
                VideoRange::FULL,
                std::weak_ptr<Content>(),
-               boost::optional<Frame>(),
+               boost::optional<ContentTime>(),
                false
                );
 
index d1c5b85338f53272ead9d19e4388a9cea0dd12dc..0187300569c0a84afb3ac019dc889b8dfc0d1d2d 100644 (file)
@@ -10,14 +10,15 @@ using boost::optional;
 #if BOOST_VERSION >= 106100
 using namespace boost::placeholders;
 #endif
+using namespace dcpomatic;
 
 
 static void
-push (Shuffler& s, int frame, Eyes eyes)
+push(Shuffler& s, int frame, Eyes eyes)
 {
        shared_ptr<Piece> piece (new Piece (shared_ptr<Content>(), shared_ptr<Decoder>(), FrameRateChange(24, 24)));
        ContentVideo cv;
-       cv.frame = frame;
+       cv.time = ContentTime::from_frames(frame, 24);
        cv.eyes = eyes;
        s.video (piece, cv);
 }
@@ -33,8 +34,9 @@ receive (weak_ptr<Piece>, ContentVideo cv)
 static void
 check (int frame, Eyes eyes, int line)
 {
+       auto const time = ContentTime::from_frames(frame, 24);
        BOOST_REQUIRE_MESSAGE (!pending_cv.empty(), "Check at " << line << " failed.");
-       BOOST_CHECK_MESSAGE (pending_cv.front().frame == frame, "Check at " << line << " failed.");
+       BOOST_CHECK_MESSAGE (pending_cv.front().time == time, "Check at " << line << " failed.");
        BOOST_CHECK_MESSAGE (pending_cv.front().eyes == eyes, "Check at " << line << " failed.");
        pending_cv.pop_front();
 }