diff options
| author | Carl Hetherington <cth@carlh.net> | 2022-09-29 10:17:50 +0200 |
|---|---|---|
| committer | Carl Hetherington <cth@carlh.net> | 2022-11-27 18:41:23 +0100 |
| commit | de957554b73ec357cf004b52d345b4d52e27d198 (patch) | |
| tree | 8f5a3a13ba6aad822b6a7f9d0a12c637a2c67157 | |
| parent | 21599c87b22714f0d255751a4f509859691e01ee (diff) | |
Change how video timing is done.
This commit changes the approach to video timing. Previously,
we would (more-or-less) try to use every video frame from the content
in the output, hoping that they came at a constant frame rate.
This is not always the case, however. Here we preserve the PTS
of video frames, and then when one arrives we output whatever
DCP video frames we can (at the regular DCP frame rate).
Hopefully this will solve a range of sync problems, but it
could also introduce new ones.
| -rw-r--r-- | src/lib/content_video.h | 15 | ||||
| -rw-r--r-- | src/lib/dcp_decoder.cc | 6 | ||||
| -rw-r--r-- | src/lib/ffmpeg_decoder.cc | 7 | ||||
| -rw-r--r-- | src/lib/image_decoder.cc | 2 | ||||
| -rw-r--r-- | src/lib/player.cc | 269 | ||||
| -rw-r--r-- | src/lib/player.h | 11 | ||||
| -rw-r--r-- | src/lib/player_video.cc | 11 | ||||
| -rw-r--r-- | src/lib/player_video.h | 6 | ||||
| -rw-r--r-- | src/lib/shuffler.cc | 18 | ||||
| -rw-r--r-- | src/lib/util.cc | 10 | ||||
| -rw-r--r-- | src/lib/util.h | 1 | ||||
| -rw-r--r-- | src/lib/video_content.cc | 22 | ||||
| -rw-r--r-- | src/lib/video_content.h | 2 | ||||
| -rw-r--r-- | src/lib/video_decoder.cc | 111 | ||||
| -rw-r--r-- | src/lib/video_decoder.h | 2 | ||||
| -rw-r--r-- | src/lib/video_mxf_decoder.cc | 6 | ||||
| -rw-r--r-- | test/client_server_test.cc | 9 | ||||
| -rw-r--r-- | test/content_test.cc | 2 | ||||
| -rw-r--r-- | test/ffmpeg_decoder_seek_test.cc | 55 | ||||
| -rw-r--r-- | test/low_bitrate_test.cc | 3 | ||||
| -rw-r--r-- | test/shuffler_test.cc | 8 |
21 files changed, 261 insertions, 315 deletions
diff --git a/src/lib/content_video.h b/src/lib/content_video.h index 8ca18576e..1c145f602 100644 --- a/src/lib/content_video.h +++ b/src/lib/content_video.h @@ -18,13 +18,18 @@ */ + #ifndef DCPOMATIC_CONTENT_VIDEO_H #define DCPOMATIC_CONTENT_VIDEO_H + +#include "dcpomatic_time.h" #include "types.h" + class ImageProxy; + /** @class ContentVideo * @brief A frame of video straight out of some content. */ @@ -32,22 +37,22 @@ class ContentVideo { public: ContentVideo () - : frame (0) - , eyes (Eyes::LEFT) + : eyes (Eyes::LEFT) , part (Part::WHOLE) {} - ContentVideo (std::shared_ptr<const ImageProxy> i, Frame f, Eyes e, Part p) + ContentVideo (std::shared_ptr<const ImageProxy> i, dcpomatic::ContentTime t, Eyes e, Part p) : image (i) - , frame (f) + , time (t) , eyes (e) , part (p) {} std::shared_ptr<const ImageProxy> image; - Frame frame; + dcpomatic::ContentTime time; Eyes eyes; Part part; }; + #endif diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc index 9064627ba..3ccb6ccc6 100644 --- a/src/lib/dcp_decoder.cc +++ b/src/lib/dcp_decoder.cc @@ -177,7 +177,7 @@ DCPDecoder::pass () AV_PIX_FMT_XYZ12LE, _forced_reduction ), - _offset + frame + ContentTime::from_frames(_offset + frame, vfr) ); } else { video->emit ( @@ -189,7 +189,7 @@ DCPDecoder::pass () AV_PIX_FMT_XYZ12LE, _forced_reduction ), - _offset + frame + ContentTime::from_frames(_offset + frame, vfr) ); video->emit ( @@ -201,7 +201,7 @@ DCPDecoder::pass () AV_PIX_FMT_XYZ12LE, _forced_reduction ), - _offset + frame + ContentTime::from_frames(_offset + frame, vfr) ); } } diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc index 29f661fc0..b717836e2 100644 --- a/src/lib/ffmpeg_decoder.cc +++ b/src/lib/ffmpeg_decoder.cc @@ -178,9 +178,8 @@ FFmpegDecoder::flush_fill() full_length = full_length.ceil (frc.source); if (video && !video->ignore()) { double const vfr = _ffmpeg_content->video_frame_rate().get(); - auto const f = full_length.frames_round (vfr); - auto const v = 
video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1; - if (v < f) { + auto const v = video->position(film()).get_value_or(ContentTime()) + ContentTime::from_frames(1, vfr); + if (v < full_length) { video->emit(film(), make_shared<const RawImageProxy>(_black_image), v); did_something = true; } @@ -623,7 +622,7 @@ FFmpegDecoder::process_video_frame () video->emit ( film(), make_shared<RawImageProxy>(image), - llrint(pts * _ffmpeg_content->active_video_frame_rate(film())) + ContentTime::from_seconds(pts) ); } else { LOG_WARNING_NC ("Dropping frame without PTS"); diff --git a/src/lib/image_decoder.cc b/src/lib/image_decoder.cc index 59dc4e873..ecbbd5134 100644 --- a/src/lib/image_decoder.cc +++ b/src/lib/image_decoder.cc @@ -78,7 +78,7 @@ ImageDecoder::pass () } } - video->emit (film(), _image, _frame_video_position); + video->emit(film(), _image, dcpomatic::ContentTime::from_frames(_frame_video_position, _image_content->video_frame_rate().get_value_or(24))); ++_frame_video_position; return false; } diff --git a/src/lib/player.cc b/src/lib/player.cc index 40f726d10..b3c869508 100644 --- a/src/lib/player.cc +++ b/src/lib/player.cc @@ -71,10 +71,8 @@ using std::dynamic_pointer_cast; using std::list; using std::make_pair; using std::make_shared; -using std::make_shared; using std::max; using std::min; -using std::min; using std::pair; using std::shared_ptr; using std::vector; @@ -394,7 +392,6 @@ Player::setup_pieces () _silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length); _next_video_time = boost::none; - _next_video_eyes = Eyes::BOTH; _next_audio_time = boost::none; } @@ -507,7 +504,7 @@ Player::black_player_video_frame (Eyes eyes) const boost::mutex::scoped_lock lm(_black_image_mutex); return std::make_shared<PlayerVideo> ( - std::make_shared<const RawImageProxy>(_black_image), + make_shared<const RawImageProxy>(_black_image), Crop(), optional<double>(), _video_container_size, @@ -517,7 +514,7 @@ 
Player::black_player_video_frame (Eyes eyes) const PresetColourConversion::all().front().conversion, VideoRange::FULL, std::weak_ptr<Content>(), - boost::optional<Frame>(), + boost::optional<dcpomatic::ContentTime>(), false ); } @@ -685,7 +682,7 @@ Player::pass () if (_playback_length.load() == DCPTime() || !film) { /* Special; just give one black frame */ - emit_video (black_player_video_frame(Eyes::BOTH), DCPTime()); + use_video(black_player_video_frame(Eyes::BOTH), DCPTime(), one_video_frame()); return true; } @@ -743,18 +740,23 @@ Player::pass () LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0)); earliest_content->done = earliest_content->decoder->pass (); auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content); - if (dcp && !_play_referenced && dcp->reference_audio()) { - /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time - to `hide' the fact that no audio was emitted during the referenced DCP (though - we need to behave as though it was). - */ - _next_audio_time = dcp->end(film); + if (dcp && !_play_referenced) { + if (dcp->reference_video()) { + _next_video_time = dcp->end(film); + } + if (dcp->reference_audio()) { + /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time + to `hide' the fact that no audio was emitted during the referenced DCP (though + we need to behave as though it was). 
+ */ + _next_audio_time = dcp->end(film); + } } break; } case BLACK: LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position())); - emit_video (black_player_video_frame(Eyes::BOTH), _black.position()); + use_video(black_player_video_frame(Eyes::BOTH), _black.position(), _black.period_at_position().to); _black.set_position (_black.position() + one_video_frame()); break; case SILENT: @@ -855,24 +857,13 @@ Player::pass () } if (done) { + emit_video_until(film->length()); + if (_shuffler) { _shuffler->flush (); } for (auto const& i: _delay) { - do_emit_video(i.first, i.second); - } - - /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow). - * However, if we have L and R video files, and one is shorter than the other, - * the fill code in ::video mostly takes care of filling in the gaps. - * However, since it fills at the point when it knows there is more video coming - * at time t (so it should fill any gap up to t) it can't do anything right at the - * end. This is particularly bad news if the last frame emitted is a LEFT - * eye, as the MXF writer will complain about the 3D sequence being wrong. - * Here's a hack to workaround that particular case. - */ - if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) { - do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time); + emit_video(i.first, i.second); } } @@ -933,6 +924,57 @@ Player::open_subtitles_for_frame (DCPTime time) const void +Player::emit_video_until(DCPTime time) +{ + auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) { + /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the + player before the video that requires them. 
+ */ + _delay.push_back(make_pair(pv, time)); + + if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) { + _next_video_time = time + one_video_frame(); + } + + if (_delay.size() < 3) { + return; + } + + auto to_do = _delay.front(); + _delay.pop_front(); + emit_video(to_do.first, to_do.second); + }; + + auto const age_threshold = one_video_frame() * 2; + + while (_next_video_time.get_value_or({}) < time) { + auto left = _last_video[Eyes::LEFT]; + auto right = _last_video[Eyes::RIGHT]; + auto both = _last_video[Eyes::BOTH]; + + auto const next = _next_video_time.get_value_or({}); + + if ( + left.first && + right.first && + (!both.first || (left.second >= both.second && right.second >= both.second)) && + (left.second - next) < age_threshold && + (right.second - next) < age_threshold + ) { + frame(left.first, next); + frame(right.first, next); + } else if (both.first && (both.second - next) < age_threshold) { + frame(both.first, next); + LOG_DEBUG_PLAYER("Content %1 selected for DCP %2 (age %3)", to_string(both.second), to_string(next), to_string(both.second - next)); + } else { + frame(black_player_video_frame(Eyes::BOTH), next); + LOG_DEBUG_PLAYER("Black selected for DCP %1", to_string(next)); + } + } +} + + +void Player::video (weak_ptr<Piece> weak_piece, ContentVideo video) { if (_suspended) { @@ -953,20 +995,23 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video) return; } - FrameRateChange frc(film, piece->content); - if (frc.skip && (video.frame % 2) == 1) { - return; + auto const three_d = film->three_d(); + + if (!three_d) { + if (video.eyes == Eyes::LEFT) { + /* Use left-eye images for both eyes... 
*/ + video.eyes = Eyes::BOTH; + } else if (video.eyes == Eyes::RIGHT) { + /* ...and discard the right */ + return; + } } - /* Time of the first frame we will emit */ - DCPTime const time = content_video_to_dcp (piece, video.frame); - LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time)); + /* Time of the frame we just received within the DCP */ + auto const time = content_time_to_dcp(piece, video.time); + LOG_DEBUG_PLAYER("Received video frame %1 %2 eyes %3", to_string(video.time), to_string(time), static_cast<int>(video.eyes)); - /* Discard if it's before the content's period or the last accurate seek. We can't discard - if it's after the content's period here as in that case we still need to fill any gap between - `now' and the end of the content's period. - */ - if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) { + if (time < piece->content->position()) { return; } @@ -974,86 +1019,42 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video) return; } - /* Fill gaps that we discover now that we have some video which needs to be emitted. - This is where we need to fill to. 
- */ - DCPTime fill_to = min(time, piece->content->end(film)); - - if (_next_video_time) { - DCPTime fill_from = max (*_next_video_time, piece->content->position()); - - /* Fill if we have more than half a frame to do */ - if ((fill_to - fill_from) > one_video_frame() / 2) { - auto last = _last_video.find (weak_piece); - if (film->three_d()) { - auto fill_to_eyes = video.eyes; - if (fill_to_eyes == Eyes::BOTH) { - fill_to_eyes = Eyes::LEFT; - } - if (fill_to == piece->content->end(film)) { - /* Don't fill after the end of the content */ - fill_to_eyes = Eyes::LEFT; - } - auto j = fill_from; - auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT); - if (eyes == Eyes::BOTH) { - eyes = Eyes::LEFT; - } - while (j < fill_to || eyes != fill_to_eyes) { - if (last != _last_video.end()) { - LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j)); - auto copy = last->second->shallow_copy(); - copy->set_eyes (eyes); - emit_video (copy, j); - } else { - LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j)); - emit_video (black_player_video_frame(eyes), j); - } - if (eyes == Eyes::RIGHT) { - j += one_video_frame(); - } - eyes = increment_eyes (eyes); - } - } else { - for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) { - if (last != _last_video.end()) { - emit_video (last->second, j); - } else { - emit_video (black_player_video_frame(Eyes::BOTH), j); - } - } - } - } + if (!_next_video_time) { + _next_video_time = time.round(film->video_frame_rate()); } auto const content_video = piece->content->video; - - _last_video[weak_piece] = std::make_shared<PlayerVideo>( - video.image, - content_video->actual_crop(), - content_video->fade(film, video.frame), - scale_for_display( - content_video->scaled_size(film->frame_size()), + use_video( + std::make_shared<PlayerVideo>( + video.image, + content_video->actual_crop(), + content_video->fade(film, video.time), + scale_for_display( + content_video->scaled_size(film->frame_size()), + 
_video_container_size, + film->frame_size(), + content_video->pixel_quanta() + ), _video_container_size, - film->frame_size(), - content_video->pixel_quanta() + video.eyes, + video.part, + content_video->colour_conversion(), + content_video->range(), + piece->content, + video.time, + false ), - _video_container_size, - video.eyes, - video.part, - content_video->colour_conversion(), - content_video->range(), - piece->content, - video.frame, - false - ); + time, + piece->content->end(film) + ); +} - DCPTime t = time; - for (int i = 0; i < frc.repeat; ++i) { - if (t < piece->content->end(film)) { - emit_video (_last_video[weak_piece], t); - } - t += one_video_frame (); +void +Player::use_video(shared_ptr<PlayerVideo> pv, DCPTime time, DCPTime end) +{ + _last_video[pv->eyes()] = { pv, time }; + if (pv->eyes() != Eyes::LEFT) { + emit_video_until(std::min(time + one_video_frame() / 2, end)); } } @@ -1335,18 +1336,18 @@ Player::seek (DCPTime time, bool accurate) if (accurate) { _next_video_time = time; - _next_video_eyes = Eyes::LEFT; _next_audio_time = time; } else { _next_video_time = boost::none; - _next_video_eyes = boost::none; _next_audio_time = boost::none; } _black.set_position (time); _silent.set_position (time); - _last_video.clear (); + _last_video[Eyes::LEFT] = {}; + _last_video[Eyes::RIGHT] = {}; + _last_video[Eyes::BOTH] = {}; for (auto& state: _stream_states) { state.second.last_push_end = {}; @@ -1355,43 +1356,7 @@ Player::seek (DCPTime time, bool accurate) void -Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time) -{ - auto film = _film.lock(); - DCPOMATIC_ASSERT(film); - - if (!film->three_d()) { - if (pv->eyes() == Eyes::LEFT) { - /* Use left-eye images for both eyes... */ - pv->set_eyes (Eyes::BOTH); - } else if (pv->eyes() == Eyes::RIGHT) { - /* ...and discard the right */ - return; - } - } - - /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the - player before the video that requires them. 
- */ - _delay.push_back (make_pair (pv, time)); - - if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) { - _next_video_time = time + one_video_frame(); - } - _next_video_eyes = increment_eyes (pv->eyes()); - - if (_delay.size() < 3) { - return; - } - - auto to_do = _delay.front(); - _delay.pop_front(); - do_emit_video (to_do.first, to_do.second); -} - - -void -Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time) +Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time) { if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) { std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); }); diff --git a/src/lib/player.h b/src/lib/player.h index 45dcd9900..8d95816ca 100644 --- a/src/lib/player.h +++ b/src/lib/player.h @@ -152,6 +152,8 @@ private: dcpomatic::ContentTime dcp_to_content_time (std::shared_ptr<const Piece> piece, dcpomatic::DCPTime t) const; dcpomatic::DCPTime content_time_to_dcp (std::shared_ptr<const Piece> piece, dcpomatic::ContentTime t) const; std::shared_ptr<PlayerVideo> black_player_video_frame (Eyes eyes) const; + void emit_video_until(dcpomatic::DCPTime time); + void insert_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end); void video (std::weak_ptr<Piece>, ContentVideo); void audio (std::weak_ptr<Piece>, AudioStreamPtr, ContentAudio); @@ -166,8 +168,8 @@ private: std::shared_ptr<const AudioBuffers> audio, dcpomatic::DCPTime time, dcpomatic::DCPTime discard_to ) const; boost::optional<PositionImage> open_subtitles_for_frame (dcpomatic::DCPTime time) const; - void emit_video (std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time); - void do_emit_video (std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time); + void emit_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time); + void use_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end); void emit_audio (std::shared_ptr<AudioBuffers> data, 
dcpomatic::DCPTime time); std::shared_ptr<const Playlist> playlist () const; @@ -208,15 +210,12 @@ private: /** Time of the next video that we will emit, or the time of the last accurate seek */ boost::optional<dcpomatic::DCPTime> _next_video_time; - /** Eyes of the next video that we will emit */ - boost::optional<Eyes> _next_video_eyes; /** Time of the next audio that we will emit, or the time of the last accurate seek */ boost::optional<dcpomatic::DCPTime> _next_audio_time; boost::atomic<boost::optional<int>> _dcp_decode_reduction; - typedef std::map<std::weak_ptr<Piece>, std::shared_ptr<PlayerVideo>, std::owner_less<std::weak_ptr<Piece>>> LastVideoMap; - LastVideoMap _last_video; + EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes> _last_video; AudioMerger _audio_merger; std::unique_ptr<Shuffler> _shuffler; diff --git a/src/lib/player_video.cc b/src/lib/player_video.cc index d45bf9f43..b020ca1cd 100644 --- a/src/lib/player_video.cc +++ b/src/lib/player_video.cc @@ -45,6 +45,7 @@ using std::weak_ptr; using boost::optional; using dcp::Data; using dcp::raw_convert; +using namespace dcpomatic; PlayerVideo::PlayerVideo ( @@ -58,7 +59,7 @@ PlayerVideo::PlayerVideo ( optional<ColourConversion> colour_conversion, VideoRange video_range, weak_ptr<Content> content, - optional<Frame> video_frame, + optional<ContentTime> video_time, bool error ) : _in (in) @@ -71,7 +72,7 @@ PlayerVideo::PlayerVideo ( , _colour_conversion (colour_conversion) , _video_range (video_range) , _content (content) - , _video_frame (video_frame) + , _video_time(video_time) , _error (error) { @@ -343,7 +344,7 @@ PlayerVideo::shallow_copy () const _colour_conversion, _video_range, _content, - _video_frame, + _video_time, _error ); } @@ -356,12 +357,12 @@ bool PlayerVideo::reset_metadata (shared_ptr<const Film> film, dcp::Size player_video_container_size) { auto content = _content.lock(); - if (!content || !_video_frame) { + if (!content || !_video_time) { return 
false; } _crop = content->video->actual_crop(); - _fade = content->video->fade(film, _video_frame.get()); + _fade = content->video->fade(film, _video_time.get()); _inter_size = scale_for_display( content->video->scaled_size(film->frame_size()), player_video_container_size, diff --git a/src/lib/player_video.h b/src/lib/player_video.h index f2781c1a0..10b2078a0 100644 --- a/src/lib/player_video.h +++ b/src/lib/player_video.h @@ -59,7 +59,7 @@ public: boost::optional<ColourConversion> colour_conversion, VideoRange video_range, std::weak_ptr<Content> content, - boost::optional<Frame> video_frame, + boost::optional<dcpomatic::ContentTime> video_time, bool error ); @@ -141,8 +141,8 @@ private: boost::optional<PositionImage> _text; /** Content that we came from. This is so that reset_metadata() can work. */ std::weak_ptr<Content> _content; - /** Video frame that we came from. Again, this is for reset_metadata() */ - boost::optional<Frame> _video_frame; + /** Video time that we came from. Again, this is for reset_metadata() */ + boost::optional<dcpomatic::ContentTime> _video_time; mutable boost::mutex _mutex; mutable std::shared_ptr<Image> _image; diff --git a/src/lib/shuffler.cc b/src/lib/shuffler.cc index 5a4faf4d1..a4ea0f5dc 100644 --- a/src/lib/shuffler.cc +++ b/src/lib/shuffler.cc @@ -40,8 +40,8 @@ int const Shuffler::_max_size = 64; struct Comparator { bool operator()(Shuffler::Store const & a, Shuffler::Store const & b) { - if (a.second.frame != b.second.frame) { - return a.second.frame < b.second.frame; + if (a.second.time != b.second.time) { + return a.second.time < b.second.time; } return a.second.eyes < b.second.eyes; } @@ -51,7 +51,7 @@ struct Comparator void Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video) { - LOG_DEBUG_THREE_D ("Shuffler::video frame=%1 eyes=%2 part=%3", video.frame, static_cast<int>(video.eyes), static_cast<int>(video.part)); + LOG_DEBUG_THREE_D("Shuffler::video time=%1 eyes=%2 part=%3", to_string(video.time), 
static_cast<int>(video.eyes), static_cast<int>(video.part)); if (video.eyes != Eyes::LEFT && video.eyes != Eyes::RIGHT) { /* Pass through anything that we don't care about */ @@ -79,13 +79,13 @@ Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video) !_store.empty() && _last && ( - (_store.front().second.frame == _last->frame && _store.front().second.eyes == Eyes::RIGHT && _last->eyes == Eyes::LEFT) || - (_store.front().second.frame >= (_last->frame + 1) && _store.front().second.eyes == Eyes::LEFT && _last->eyes == Eyes::RIGHT) + (_store.front().second.time == _last->time && _store.front().second.eyes == Eyes::RIGHT && _last->eyes == Eyes::LEFT) || + (_store.front().second.time > _last->time && _store.front().second.eyes == Eyes::LEFT && _last->eyes == Eyes::RIGHT) ); if (!store_front_in_sequence) { - string const store = _store.empty() ? "store empty" : String::compose("store front frame=%1 eyes=%2", _store.front().second.frame, static_cast<int>(_store.front().second.eyes)); - string const last = _last ? String::compose("last frame=%1 eyes=%2", _last->frame, static_cast<int>(_last->eyes)) : "no last"; + string const store = _store.empty() ? "store empty" : String::compose("store front time=%1 eyes=%2", to_string(_store.front().second.time), static_cast<int>(_store.front().second.eyes)); + string const last = _last ? 
String::compose("last time=%1 eyes=%2", to_string(_last->time), static_cast<int>(_last->eyes)) : "no last"; LOG_DEBUG_THREE_D("Shuffler not in sequence: %1 %2", store, last); } @@ -98,10 +98,10 @@ Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video) } if (_store.size() > _max_size) { - LOG_WARNING ("Shuffler is full after receiving frame %1; 3D sync may be incorrect.", video.frame); + LOG_WARNING("Shuffler is full after receiving frame at %1; 3D sync may be incorrect.", to_string(video.time)); } - LOG_DEBUG_THREE_D("Shuffler emits frame=%1 eyes=%2 store=%3", _store.front().second.frame, static_cast<int>(_store.front().second.eyes), _store.size()); + LOG_DEBUG_THREE_D("Shuffler emits time=%1 eyes=%2 store=%3", to_string(_store.front().second.time), static_cast<int>(_store.front().second.eyes), _store.size()); Video (_store.front().first, _store.front().second); _last = _store.front().second; _store.pop_front (); diff --git a/src/lib/util.cc b/src/lib/util.cc index 829e6de97..d561ad825 100644 --- a/src/lib/util.cc +++ b/src/lib/util.cc @@ -873,16 +873,6 @@ remap (shared_ptr<const AudioBuffers> input, int output_channels, AudioMapping m return mapped; } -Eyes -increment_eyes (Eyes e) -{ - if (e == Eyes::LEFT) { - return Eyes::RIGHT; - } - - return Eyes::LEFT; -} - size_t utf8_strlen (string s) diff --git a/src/lib/util.h b/src/lib/util.h index cf03423c7..328ad08d5 100644 --- a/src/lib/util.h +++ b/src/lib/util.h @@ -115,7 +115,6 @@ extern float relaxed_string_to_float (std::string); extern std::string careful_string_filter (std::string); extern std::pair<int, int> audio_channel_types (std::list<int> mapped, int channels); extern std::shared_ptr<AudioBuffers> remap (std::shared_ptr<const AudioBuffers> input, int output_channels, AudioMapping map); -extern Eyes increment_eyes (Eyes e); extern size_t utf8_strlen (std::string s); extern std::string day_of_week_to_string (boost::gregorian::greg_weekday d); extern void emit_subtitle_image 
(dcpomatic::ContentTimePeriod period, dcp::SubtitleImage sub, dcp::Size size, std::shared_ptr<TextDecoder> decoder); diff --git a/src/lib/video_content.cc b/src/lib/video_content.cc index c10a94f43..9abb2b394 100644 --- a/src/lib/video_content.cc +++ b/src/lib/video_content.cc @@ -411,27 +411,29 @@ VideoContent::size_after_crop () const } -/** @param f Frame index within the whole (untrimmed) content. +/** @param time Time within the whole (untrimmed) content. * @return Fade factor (between 0 and 1) or unset if there is no fade. */ optional<double> -VideoContent::fade (shared_ptr<const Film> film, Frame f) const +VideoContent::fade(shared_ptr<const Film> film, ContentTime time) const { - DCPOMATIC_ASSERT (f >= 0); + DCPOMATIC_ASSERT(time.get() >= 0); double const vfr = _parent->active_video_frame_rate(film); - auto const ts = _parent->trim_start().frames_round(vfr); - if ((f - ts) < fade_in()) { - return double (f - ts) / fade_in(); + auto const ts = _parent->trim_start(); + auto const fade_in_time = ContentTime::from_frames(fade_in(), vfr); + if ((time - ts) < fade_in_time) { + return double(ContentTime(time - ts).get()) / fade_in_time.get(); } - auto fade_out_start = length() - _parent->trim_end().frames_round(vfr) - fade_out(); - if (f >= fade_out_start) { - return 1 - double (f - fade_out_start) / fade_out(); + auto const fade_out_time = ContentTime::from_frames(fade_out(), vfr); + auto fade_out_start = ContentTime::from_frames(length(), vfr) - _parent->trim_end() - fade_out_time; + if (time >= fade_out_start) { + return 1 - double(ContentTime(time - fade_out_start).get()) / fade_out_time.get(); } - return optional<double> (); + return {}; } string diff --git a/src/lib/video_content.h b/src/lib/video_content.h index cff141e4e..a6da1b958 100644 --- a/src/lib/video_content.h +++ b/src/lib/video_content.h @@ -205,7 +205,7 @@ public: dcp::Size size_after_crop () const; dcp::Size scaled_size (dcp::Size container_size); - boost::optional<double> fade 
(std::shared_ptr<const Film> film, Frame) const; + boost::optional<double> fade(std::shared_ptr<const Film> film, dcpomatic::ContentTime time) const; std::string processing_description (std::shared_ptr<const Film> film); diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc index cf21f885a..c628fddd9 100644 --- a/src/lib/video_decoder.cc +++ b/src/lib/video_decoder.cc @@ -20,7 +20,6 @@ #include "compose.hpp" -#include "film.h" #include "frame_interval_checker.h" #include "image.h" #include "j2k_image_proxy.h" @@ -47,17 +46,9 @@ VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c) } -/** Called by decoder classes when they have a video frame ready. - * @param frame Frame index within the content; this does not take into account 3D - * so for 3D_ALTERNATE this value goes: - * 0: frame 0 left - * 1: frame 0 right - * 2: frame 1 left - * 3: frame 1 right - * and so on. - */ +/** Called by decoder classes when they have a video frame ready */ void -VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, Frame decoder_frame) +VideoDecoder::emit(shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, ContentTime time) { if (ignore ()) { return; @@ -66,14 +57,12 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im auto const afr = _content->active_video_frame_rate(film); auto const vft = _content->video->frame_type(); - auto frame_time = ContentTime::from_frames (decoder_frame, afr); - /* Do some heuristics to try and spot the case where the user sets content to 3D * when it is not. We try to tell this by looking at the differences in time between * the first few frames. Real 3D content should have two frames for each timestamp. 
*/ if (_frame_interval_checker) { - _frame_interval_checker->feed (frame_time, afr); + _frame_interval_checker->feed(time, afr); if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VideoFrameType::THREE_D) { boost::throw_exception ( DecodeError( @@ -91,94 +80,54 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im } } - Frame frame; - Eyes eyes = Eyes::BOTH; - if (!_position) { - /* This is the first data we have received since initialisation or seek. Set - the position based on the frame that was given. After this first time - we just count frames, since (as with audio) it seems that ContentTimes - are unreliable from FFmpegDecoder. They are much better than audio times - but still we get the occasional one which is duplicated. In this case - ffmpeg seems to carry on regardless, processing the video frame as normal. - If we drop the frame with the duplicated timestamp we obviously lose sync. - */ - - if (vft == VideoFrameType::THREE_D_ALTERNATE) { - frame = decoder_frame / 2; - eyes = (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT; - } else { - frame = decoder_frame; - if (vft == VideoFrameType::THREE_D) { - auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image); - /* At the moment only DCP decoders producers VideoFrameType::THREE_D, so only the J2KImageProxy - * knows which eye it is. - */ - if (j2k && j2k->eye()) { - eyes = j2k->eye().get() == dcp::Eye::LEFT ? 
Eyes::LEFT : Eyes::RIGHT; - } - } - } - - _position = ContentTime::from_frames (frame, afr); - } else { - if (vft == VideoFrameType::THREE_D) { - auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image); - if (j2k && j2k->eye()) { - if (j2k->eye() == dcp::Eye::LEFT) { - frame = _position->frames_round(afr) + 1; - eyes = Eyes::LEFT; - } else { - frame = _position->frames_round(afr); - eyes = Eyes::RIGHT; - } - } else { - /* This should not happen; see above */ - frame = _position->frames_round(afr) + 1; - } - } else if (vft == VideoFrameType::THREE_D_ALTERNATE) { - DCPOMATIC_ASSERT (_last_emitted_eyes); - if (_last_emitted_eyes.get() == Eyes::RIGHT) { - frame = _position->frames_round(afr) + 1; - eyes = Eyes::LEFT; - } else { - frame = _position->frames_round(afr); - eyes = Eyes::RIGHT; - } - } else { - frame = _position->frames_round(afr) + 1; - } - } - switch (vft) { case VideoFrameType::TWO_D: + Data(ContentVideo(image, time, Eyes::BOTH, Part::WHOLE)); + break; case VideoFrameType::THREE_D: - Data (ContentVideo (image, frame, eyes, Part::WHOLE)); + { + auto eyes = Eyes::LEFT; + auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image); + if (j2k && j2k->eye()) { + eyes = *j2k->eye() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT; + } + + Data(ContentVideo(image, time, eyes, Part::WHOLE)); break; + } case VideoFrameType::THREE_D_ALTERNATE: { - Data (ContentVideo (image, frame, eyes, Part::WHOLE)); + Eyes eyes; + if (_last_emitted_eyes) { + eyes = _last_emitted_eyes.get() == Eyes::LEFT ? Eyes::RIGHT : Eyes::LEFT; + } else { + /* We don't know what eye this frame is, so just guess */ + auto frame = time.frames_round(_content->video_frame_rate().get_value_or(24)); + eyes = (frame % 2) ? 
Eyes::RIGHT : Eyes::LEFT; + } + Data(ContentVideo(image, time, eyes, Part::WHOLE)); _last_emitted_eyes = eyes; break; } case VideoFrameType::THREE_D_LEFT_RIGHT: - Data (ContentVideo (image, frame, Eyes::LEFT, Part::LEFT_HALF)); - Data (ContentVideo (image, frame, Eyes::RIGHT, Part::RIGHT_HALF)); + Data(ContentVideo(image, time, Eyes::LEFT, Part::LEFT_HALF)); + Data(ContentVideo(image, time, Eyes::RIGHT, Part::RIGHT_HALF)); break; case VideoFrameType::THREE_D_TOP_BOTTOM: - Data (ContentVideo (image, frame, Eyes::LEFT, Part::TOP_HALF)); - Data (ContentVideo (image, frame, Eyes::RIGHT, Part::BOTTOM_HALF)); + Data(ContentVideo(image, time, Eyes::LEFT, Part::TOP_HALF)); + Data(ContentVideo(image, time, Eyes::RIGHT, Part::BOTTOM_HALF)); break; case VideoFrameType::THREE_D_LEFT: - Data (ContentVideo (image, frame, Eyes::LEFT, Part::WHOLE)); + Data(ContentVideo(image, time, Eyes::LEFT, Part::WHOLE)); break; case VideoFrameType::THREE_D_RIGHT: - Data (ContentVideo (image, frame, Eyes::RIGHT, Part::WHOLE)); + Data(ContentVideo(image, time, Eyes::RIGHT, Part::WHOLE)); break; default: DCPOMATIC_ASSERT (false); } - _position = ContentTime::from_frames (frame, afr); + _position = time; } diff --git a/src/lib/video_decoder.h b/src/lib/video_decoder.h index 828ac66a2..007222404 100644 --- a/src/lib/video_decoder.h +++ b/src/lib/video_decoder.h @@ -61,7 +61,7 @@ public: } void seek () override; - void emit (std::shared_ptr<const Film> film, std::shared_ptr<const ImageProxy>, Frame frame); + void emit(std::shared_ptr<const Film> film, std::shared_ptr<const ImageProxy>, dcpomatic::ContentTime time); boost::signals2::signal<void (ContentVideo)> Data; diff --git a/src/lib/video_mxf_decoder.cc b/src/lib/video_mxf_decoder.cc index 92cab0259..911b8c0bb 100644 --- a/src/lib/video_mxf_decoder.cc +++ b/src/lib/video_mxf_decoder.cc @@ -91,18 +91,18 @@ VideoMXFDecoder::pass () video->emit ( film(), std::make_shared<J2KImageProxy>(_mono_reader->get_frame(frame), _size, AV_PIX_FMT_XYZ12LE, 
optional<int>()), - frame + _next ); } else { video->emit ( film(), std::make_shared<J2KImageProxy>(_stereo_reader->get_frame(frame), _size, dcp::Eye::LEFT, AV_PIX_FMT_XYZ12LE, optional<int>()), - frame + _next ); video->emit ( film(), std::make_shared<J2KImageProxy>(_stereo_reader->get_frame(frame), _size, dcp::Eye::RIGHT, AV_PIX_FMT_XYZ12LE, optional<int>()), - frame + _next ); } diff --git a/test/client_server_test.cc b/test/client_server_test.cc index 7a99f7227..32af60cbe 100644 --- a/test/client_server_test.cc +++ b/test/client_server_test.cc @@ -51,6 +51,7 @@ using std::weak_ptr; using boost::thread; using boost::optional; using dcp::ArrayData; +using namespace dcpomatic; void @@ -105,7 +106,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_rgb) ColourConversion(), VideoRange::FULL, weak_ptr<Content>(), - optional<Frame>(), + optional<ContentTime>(), false ); @@ -188,7 +189,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_yuv) ColourConversion(), VideoRange::FULL, weak_ptr<Content>(), - optional<Frame>(), + optional<ContentTime>(), false ); @@ -258,7 +259,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_j2k) ColourConversion(), VideoRange::FULL, weak_ptr<Content>(), - optional<Frame>(), + optional<ContentTime>(), false ); @@ -283,7 +284,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_j2k) PresetColourConversion::all().front().conversion, VideoRange::FULL, weak_ptr<Content>(), - optional<Frame>(), + optional<ContentTime>(), false ); diff --git a/test/content_test.cc b/test/content_test.cc index 12e653875..1c167a79f 100644 --- a/test/content_test.cc +++ b/test/content_test.cc @@ -152,7 +152,7 @@ BOOST_AUTO_TEST_CASE (content_test6) ); make_and_verify_dcp (film); - check_dcp (TestPaths::private_data() / "fha", film); + check_dcp (TestPaths::private_data() / "v2.18.x" / "fha", film); cl.run (); } diff --git a/test/ffmpeg_decoder_seek_test.cc b/test/ffmpeg_decoder_seek_test.cc index 4dceae86b..f38ef3564 100644 --- a/test/ffmpeg_decoder_seek_test.cc +++ 
b/test/ffmpeg_decoder_seek_test.cc @@ -64,18 +64,18 @@ store (ContentVideo v) static void -check (shared_ptr<FFmpegDecoder> decoder, int frame) +check (shared_ptr<FFmpegDecoder> decoder, ContentTime time) { BOOST_REQUIRE (decoder->ffmpeg_content()->video_frame_rate ()); - decoder->seek (ContentTime::from_frames (frame, decoder->ffmpeg_content()->video_frame_rate().get()), true); + decoder->seek(time, true); stored = optional<ContentVideo> (); while (!decoder->pass() && !stored) {} - BOOST_CHECK (stored->frame <= frame); + BOOST_CHECK(stored->time <= time); } static void -test (boost::filesystem::path file, vector<int> frames) +test (boost::filesystem::path file, vector<ContentTime> times) { auto path = TestPaths::private_data() / file; BOOST_REQUIRE (boost::filesystem::exists (path)); @@ -87,7 +87,7 @@ test (boost::filesystem::path file, vector<int> frames) auto decoder = make_shared<FFmpegDecoder>(film, content, false); decoder->video->Data.connect (bind (&store, _1)); - for (auto i: frames) { + for (auto i: times) { check (decoder, i); } } @@ -95,10 +95,43 @@ test (boost::filesystem::path file, vector<int> frames) BOOST_AUTO_TEST_CASE (ffmpeg_decoder_seek_test) { - vector<int> frames = { 0, 42, 999, 0 }; - - test ("boon_telly.mkv", frames); - test ("Sintel_Trailer1.480p.DivX_Plus_HD.mkv", frames); - test ("prophet_long_clip.mkv", { 15, 42, 999, 15 }); - test ("dolby_aurora.vob", { 0, 125, 250, 41 }); + test( + "boon_telly.mkv", + { + ContentTime::from_frames(0, 29.97), + ContentTime::from_frames(42, 29.97), + ContentTime::from_frames(999, 29.97), + ContentTime::from_frames(0, 29.97), + } + ); + + test( + "Sintel_Trailer1.480p.DivX_Plus_HD.mkv", + { + ContentTime::from_frames(0, 24), + ContentTime::from_frames(42, 24), + ContentTime::from_frames(999, 24), + ContentTime::from_frames(0, 24), + } + ); + + test( + "prophet_long_clip.mkv", + { + ContentTime::from_frames(15, 23.976), + ContentTime::from_frames(42, 23.976), + ContentTime::from_frames(999, 23.976), + 
ContentTime::from_frames(15, 23.976) + } + ); + + test( + "dolby_aurora.vob", + { + ContentTime::from_frames(0, 25), + ContentTime::from_frames(125, 25), + ContentTime::from_frames(250, 25), + ContentTime::from_frames(41, 25) + } + ); } diff --git a/test/low_bitrate_test.cc b/test/low_bitrate_test.cc index 7050dd771..52b8d54be 100644 --- a/test/low_bitrate_test.cc +++ b/test/low_bitrate_test.cc @@ -31,6 +31,7 @@ extern "C" { using std::make_shared; +using namespace dcpomatic; BOOST_AUTO_TEST_CASE (low_bitrate_test) @@ -51,7 +52,7 @@ BOOST_AUTO_TEST_CASE (low_bitrate_test) boost::optional<ColourConversion>(), VideoRange::FULL, std::weak_ptr<Content>(), - boost::optional<Frame>(), + boost::optional<ContentTime>(), false ); diff --git a/test/shuffler_test.cc b/test/shuffler_test.cc index d1c5b8533..018730056 100644 --- a/test/shuffler_test.cc +++ b/test/shuffler_test.cc @@ -10,14 +10,15 @@ using boost::optional; #if BOOST_VERSION >= 106100 using namespace boost::placeholders; #endif +using namespace dcpomatic; static void -push (Shuffler& s, int frame, Eyes eyes) +push(Shuffler& s, int frame, Eyes eyes) { shared_ptr<Piece> piece (new Piece (shared_ptr<Content>(), shared_ptr<Decoder>(), FrameRateChange(24, 24))); ContentVideo cv; - cv.frame = frame; + cv.time = ContentTime::from_frames(frame, 24); cv.eyes = eyes; s.video (piece, cv); } @@ -33,8 +34,9 @@ receive (weak_ptr<Piece>, ContentVideo cv) static void check (int frame, Eyes eyes, int line) { + auto const time = ContentTime::from_frames(frame, 24); BOOST_REQUIRE_MESSAGE (!pending_cv.empty(), "Check at " << line << " failed."); - BOOST_CHECK_MESSAGE (pending_cv.front().frame == frame, "Check at " << line << " failed."); + BOOST_CHECK_MESSAGE (pending_cv.front().time == time, "Check at " << line << " failed."); BOOST_CHECK_MESSAGE (pending_cv.front().eyes == eyes, "Check at " << line << " failed."); pending_cv.pop_front(); } |
