#define DCPOMATIC_CONTENT_VIDEO_H

#include "dcpomatic_time.h"
#include "types.h"

/** A single frame of video arriving from some piece of content: the image
 *  itself, the time at which it occurs within the whole (untrimmed) content,
 *  which eye it is destined for and which part of the image carries the
 *  picture.
 */
class ContentVideo
{
public:
	ContentVideo ()
		: eyes (Eyes::LEFT)
		, part (Part::WHOLE)
	{}

	ContentVideo (std::shared_ptr<const ImageProxy> i, dcpomatic::ContentTime t, Eyes e, Part p)
		: image (i)
		, time (t)
		, eyes (e)
		, part (p)
	{}

	/** Proxy for the image data (may decode lazily) */
	std::shared_ptr<const ImageProxy> image;
	/** Time of this frame within the whole (untrimmed) content */
	dcpomatic::ContentTime time;
	Eyes eyes;
	Part part;
};

#endif
AV_PIX_FMT_XYZ12LE,
_forced_reduction
),
- _offset + frame
+ ContentTime::from_frames(_offset + frame, vfr)
);
} else {
video->emit (
AV_PIX_FMT_XYZ12LE,
_forced_reduction
),
- _offset + frame
+ ContentTime::from_frames(_offset + frame, vfr)
);
video->emit (
AV_PIX_FMT_XYZ12LE,
_forced_reduction
),
- _offset + frame
+ ContentTime::from_frames(_offset + frame, vfr)
);
}
}
full_length = full_length.ceil (frc.source);
if (video && !video->ignore()) {
double const vfr = _ffmpeg_content->video_frame_rate().get();
- auto const f = full_length.frames_round (vfr);
- auto const v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
- if (v < f) {
+ auto const v = video->position(film()).get_value_or(ContentTime()) + ContentTime::from_frames(1, vfr);
+ if (v < full_length) {
video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
did_something = true;
}
video->emit (
film(),
make_shared<RawImageProxy>(image),
- llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
+ ContentTime::from_seconds(pts)
);
} else {
LOG_WARNING_NC ("Dropping frame without PTS");
}
}
- video->emit (film(), _image, _frame_video_position);
+ video->emit(film(), _image, dcpomatic::ContentTime::from_frames(_frame_video_position, _image_content->video_frame_rate().get_value_or(24)));
++_frame_video_position;
return false;
}
using std::list;
using std::make_pair;
using std::make_shared;
-using std::make_shared;
using std::max;
using std::min;
-using std::min;
using std::pair;
using std::shared_ptr;
using std::vector;
_silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);
_next_video_time = boost::none;
- _next_video_eyes = Eyes::BOTH;
_next_audio_time = boost::none;
}
boost::mutex::scoped_lock lm(_black_image_mutex);
return std::make_shared<PlayerVideo> (
- std::make_shared<const RawImageProxy>(_black_image),
+ make_shared<const RawImageProxy>(_black_image),
Crop(),
optional<double>(),
_video_container_size,
PresetColourConversion::all().front().conversion,
VideoRange::FULL,
std::weak_ptr<Content>(),
- boost::optional<Frame>(),
+ boost::optional<dcpomatic::ContentTime>(),
false
);
}
if (_playback_length.load() == DCPTime() || !film) {
/* Special; just give one black frame */
- emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
+ use_video(black_player_video_frame(Eyes::BOTH), DCPTime(), one_video_frame());
return true;
}
LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
earliest_content->done = earliest_content->decoder->pass ();
auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
- if (dcp && !_play_referenced && dcp->reference_audio()) {
- /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
- to `hide' the fact that no audio was emitted during the referenced DCP (though
- we need to behave as though it was).
- */
- _next_audio_time = dcp->end(film);
+ if (dcp && !_play_referenced) {
+ if (dcp->reference_video()) {
+ _next_video_time = dcp->end(film);
+ }
+ if (dcp->reference_audio()) {
+ /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
+ to `hide' the fact that no audio was emitted during the referenced DCP (though
+ we need to behave as though it was).
+ */
+ _next_audio_time = dcp->end(film);
+ }
}
break;
}
case BLACK:
LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
if (film->three_d()) {
- emit_video(black_player_video_frame(Eyes::LEFT), _black.position());
- emit_video(black_player_video_frame(Eyes::RIGHT), _black.position());
+ use_video(black_player_video_frame(Eyes::LEFT), _black.position(), _black.period_at_position().to);
+ use_video(black_player_video_frame(Eyes::RIGHT), _black.position(), _black.period_at_position().to);
} else {
- emit_video(black_player_video_frame(Eyes::BOTH), _black.position());
+ use_video(black_player_video_frame(Eyes::BOTH), _black.position(), _black.period_at_position().to);
}
_black.set_position (_black.position() + one_video_frame());
break;
}
if (done) {
+ emit_video_until(film->length());
+
if (_shuffler) {
_shuffler->flush ();
}
for (auto const& i: _delay) {
- do_emit_video(i.first, i.second);
- }
-
- /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
- * However, if we have L and R video files, and one is shorter than the other,
- * the fill code in ::video mostly takes care of filling in the gaps.
- * However, since it fills at the point when it knows there is more video coming
- * at time t (so it should fill any gap up to t) it can't do anything right at the
- * end. This is particularly bad news if the last frame emitted is a LEFT
- * eye, as the MXF writer will complain about the 3D sequence being wrong.
- * Here's a hack to workaround that particular case.
- */
- if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
- do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
+ emit_video(i.first, i.second);
}
}
}
-static
-Eyes
-increment_eyes (Eyes e)
+void
+Player::emit_video_until(DCPTime time)
{
- if (e == Eyes::LEFT) {
- return Eyes::RIGHT;
- }
+ auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
+ /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
+ player before the video that requires them.
+ */
+ _delay.push_back(make_pair(pv, time));
+
+ if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+ _next_video_time = time + one_video_frame();
+ }
+
+ if (_delay.size() < 3) {
+ return;
+ }
- return Eyes::LEFT;
+ auto to_do = _delay.front();
+ _delay.pop_front();
+ emit_video(to_do.first, to_do.second);
+ };
+
+ auto const age_threshold = one_video_frame() * 2;
+
+ while (_next_video_time.get_value_or({}) < time) {
+ auto left = _last_video[Eyes::LEFT];
+ auto right = _last_video[Eyes::RIGHT];
+ auto both = _last_video[Eyes::BOTH];
+
+ auto const next = _next_video_time.get_value_or({});
+
+ if (
+ left.first &&
+ right.first &&
+ (!both.first || (left.second >= both.second && right.second >= both.second)) &&
+ (left.second - next) < age_threshold &&
+ (right.second - next) < age_threshold
+ ) {
+ frame(left.first, next);
+ frame(right.first, next);
+ } else if (both.first && (both.second - next) < age_threshold) {
+ frame(both.first, next);
+ LOG_DEBUG_PLAYER("Content %1 selected for DCP %2 (age %3)", to_string(both.second), to_string(next), to_string(both.second - next));
+ } else {
+ auto film = _film.lock();
+ if (film && film->three_d()) {
+ frame(black_player_video_frame(Eyes::LEFT), next);
+ frame(black_player_video_frame(Eyes::RIGHT), next);
+ } else {
+ frame(black_player_video_frame(Eyes::BOTH), next);
+ }
+ LOG_DEBUG_PLAYER("Black selected for DCP %1", to_string(next));
+ }
+ }
}
return;
}
- FrameRateChange frc(film, piece->content);
- if (frc.skip && (video.frame % 2) == 1) {
- return;
- }
-
vector<Eyes> eyes_to_emit;
if (!film->three_d()) {
eyes_to_emit = { video.eyes };
}
- /* Time of the first frame we will emit */
- DCPTime const time = content_video_to_dcp (piece, video.frame);
- LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+ /* Time of the frame we just received within the DCP */
+ auto const time = content_time_to_dcp(piece, video.time);
+ LOG_DEBUG_PLAYER("Received video frame %1 %2 eyes %3", to_string(video.time), to_string(time), static_cast<int>(video.eyes));
- /* Discard if it's before the content's period or the last accurate seek. We can't discard
- if it's after the content's period here as in that case we still need to fill any gap between
- `now' and the end of the content's period.
- */
- if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
+ if (time < piece->content->position()) {
return;
}
return;
}
- /* Fill gaps that we discover now that we have some video which needs to be emitted.
- This is where we need to fill to.
- */
- DCPTime fill_to = min(time, piece->content->end(film));
-
- if (_next_video_time) {
- DCPTime fill_from = max (*_next_video_time, piece->content->position());
-
- /* Fill if we have more than half a frame to do */
- if ((fill_to - fill_from) > one_video_frame() / 2) {
- auto last = _last_video.find (weak_piece);
- if (film->three_d()) {
- auto fill_to_eyes = eyes_to_emit[0];
- if (fill_to_eyes == Eyes::BOTH) {
- fill_to_eyes = Eyes::LEFT;
- }
- if (fill_to == piece->content->end(film)) {
- /* Don't fill after the end of the content */
- fill_to_eyes = Eyes::LEFT;
- }
- auto j = fill_from;
- auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
- if (eyes == Eyes::BOTH) {
- eyes = Eyes::LEFT;
- }
- while (j < fill_to || eyes != fill_to_eyes) {
- if (last != _last_video.end()) {
- LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
- auto copy = last->second->shallow_copy();
- copy->set_eyes (eyes);
- emit_video (copy, j);
- } else {
- LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
- emit_video (black_player_video_frame(eyes), j);
- }
- if (eyes == Eyes::RIGHT) {
- j += one_video_frame();
- }
- eyes = increment_eyes (eyes);
- }
- } else {
- for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
- if (last != _last_video.end()) {
- emit_video (last->second, j);
- } else {
- emit_video (black_player_video_frame(Eyes::BOTH), j);
- }
- }
- }
- }
+ if (!_next_video_time) {
+ _next_video_time = time.round(film->video_frame_rate());
}
auto const content_video = piece->content->video;
for (auto eyes: eyes_to_emit) {
- _last_video[weak_piece] = std::make_shared<PlayerVideo>(
- video.image,
- content_video->actual_crop(),
- content_video->fade(film, video.frame),
- scale_for_display(
- content_video->scaled_size(film->frame_size()),
+ use_video(
+ std::make_shared<PlayerVideo>(
+ video.image,
+ content_video->actual_crop(),
+ content_video->fade(film, video.time),
+ scale_for_display(
+ content_video->scaled_size(film->frame_size()),
+ _video_container_size,
+ film->frame_size(),
+ content_video->pixel_quanta()
+ ),
_video_container_size,
- film->frame_size(),
- content_video->pixel_quanta()
+ eyes,
+ video.part,
+ content_video->colour_conversion(),
+ content_video->range(),
+ piece->content,
+ video.time,
+ false
),
- _video_container_size,
- eyes,
- video.part,
- content_video->colour_conversion(),
- content_video->range(),
- piece->content,
- video.frame,
- false
+ time,
+ piece->content->end(film)
);
+ }
+}
- DCPTime t = time;
- for (int i = 0; i < frc.repeat; ++i) {
- if (t < piece->content->end(film)) {
- emit_video (_last_video[weak_piece], t);
- }
- t += one_video_frame ();
- }
+void
+Player::use_video(shared_ptr<PlayerVideo> pv, DCPTime time, DCPTime end)
+{
+ _last_video[pv->eyes()] = { pv, time };
+ if (pv->eyes() != Eyes::LEFT) {
+ emit_video_until(std::min(time + one_video_frame() / 2, end));
}
}
if (accurate) {
_next_video_time = time;
- _next_video_eyes = Eyes::LEFT;
_next_audio_time = time;
} else {
_next_video_time = boost::none;
- _next_video_eyes = boost::none;
_next_audio_time = boost::none;
}
_black.set_position (time);
_silent.set_position (time);
- _last_video.clear ();
+ _last_video[Eyes::LEFT] = {};
+ _last_video[Eyes::RIGHT] = {};
+ _last_video[Eyes::BOTH] = {};
for (auto& state: _stream_states) {
state.second.last_push_end = boost::none;
void
-Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
-{
- auto film = _film.lock();
- DCPOMATIC_ASSERT(film);
-
- /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
- player before the video that requires them.
- */
- _delay.push_back (make_pair (pv, time));
-
- if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
- _next_video_time = time + one_video_frame();
- }
- _next_video_eyes = increment_eyes (pv->eyes());
-
- if (_delay.size() < 3) {
- return;
- }
-
- auto to_do = _delay.front();
- _delay.pop_front();
- do_emit_video (to_do.first, to_do.second);
-}
-
-
-void
-Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
+Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time)
{
if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
dcpomatic::ContentTime dcp_to_content_time (std::shared_ptr<const Piece> piece, dcpomatic::DCPTime t) const;
dcpomatic::DCPTime content_time_to_dcp (std::shared_ptr<const Piece> piece, dcpomatic::ContentTime t) const;
std::shared_ptr<PlayerVideo> black_player_video_frame (Eyes eyes) const;
+ void emit_video_until(dcpomatic::DCPTime time);
void video (std::weak_ptr<Piece>, ContentVideo);
void audio (std::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
std::shared_ptr<const AudioBuffers> audio, dcpomatic::DCPTime time, dcpomatic::DCPTime discard_to
) const;
boost::optional<PositionImage> open_subtitles_for_frame (dcpomatic::DCPTime time) const;
- void emit_video (std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
- void do_emit_video (std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
+ void emit_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
+ void use_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
void emit_audio (std::shared_ptr<AudioBuffers> data, dcpomatic::DCPTime time);
std::shared_ptr<const Playlist> playlist () const;
/** Time of the next video that we will emit, or the time of the last accurate seek */
boost::optional<dcpomatic::DCPTime> _next_video_time;
- /** Eyes of the next video that we will emit */
- boost::optional<Eyes> _next_video_eyes;
/** Time of the next audio that we will emit, or the time of the last accurate seek */
boost::optional<dcpomatic::DCPTime> _next_audio_time;
boost::atomic<boost::optional<int>> _dcp_decode_reduction;
- typedef std::map<std::weak_ptr<Piece>, std::shared_ptr<PlayerVideo>, std::owner_less<std::weak_ptr<Piece>>> LastVideoMap;
- LastVideoMap _last_video;
+ EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes> _last_video;
AudioMerger _audio_merger;
std::unique_ptr<Shuffler> _shuffler;
using boost::optional;
using dcp::Data;
using dcp::raw_convert;
+using namespace dcpomatic;
PlayerVideo::PlayerVideo (
optional<ColourConversion> colour_conversion,
VideoRange video_range,
weak_ptr<Content> content,
- optional<Frame> video_frame,
+ optional<ContentTime> video_time,
bool error
)
: _in (in)
, _colour_conversion (colour_conversion)
, _video_range (video_range)
, _content (content)
- , _video_frame (video_frame)
+ , _video_time(video_time)
, _error (error)
{
_colour_conversion,
_video_range,
_content,
- _video_frame,
+ _video_time,
_error
);
}
PlayerVideo::reset_metadata (shared_ptr<const Film> film, dcp::Size player_video_container_size)
{
auto content = _content.lock();
- if (!content || !_video_frame) {
+ if (!content || !_video_time) {
return false;
}
_crop = content->video->actual_crop();
- _fade = content->video->fade(film, _video_frame.get());
+ _fade = content->video->fade(film, _video_time.get());
_inter_size = scale_for_display(
content->video->scaled_size(film->frame_size()),
player_video_container_size,
boost::optional<ColourConversion> colour_conversion,
VideoRange video_range,
std::weak_ptr<Content> content,
- boost::optional<Frame> video_frame,
+ boost::optional<dcpomatic::ContentTime> video_time,
bool error
);
boost::optional<PositionImage> _text;
/** Content that we came from. This is so that reset_metadata() can work. */
std::weak_ptr<Content> _content;
- /** Video frame that we came from. Again, this is for reset_metadata() */
- boost::optional<Frame> _video_frame;
+ /** Video time that we came from. Again, this is for reset_metadata() */
+ boost::optional<dcpomatic::ContentTime> _video_time;
mutable boost::mutex _mutex;
mutable std::shared_ptr<Image> _image;
struct Comparator
{
bool operator()(Shuffler::Store const & a, Shuffler::Store const & b) {
- if (a.second.frame != b.second.frame) {
- return a.second.frame < b.second.frame;
+ if (a.second.time != b.second.time) {
+ return a.second.time < b.second.time;
}
return a.second.eyes < b.second.eyes;
}
void
Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
- LOG_DEBUG_THREE_D ("Shuffler::video frame=%1 eyes=%2 part=%3", video.frame, static_cast<int>(video.eyes), static_cast<int>(video.part));
+ LOG_DEBUG_THREE_D("Shuffler::video time=%1 eyes=%2 part=%3", to_string(video.time), static_cast<int>(video.eyes), static_cast<int>(video.part));
if (video.eyes != Eyes::LEFT && video.eyes != Eyes::RIGHT) {
/* Pass through anything that we don't care about */
!_store.empty() &&
_last &&
(
- (_store.front().second.frame == _last->frame && _store.front().second.eyes == Eyes::RIGHT && _last->eyes == Eyes::LEFT) ||
- (_store.front().second.frame >= (_last->frame + 1) && _store.front().second.eyes == Eyes::LEFT && _last->eyes == Eyes::RIGHT)
+ (_store.front().second.time == _last->time && _store.front().second.eyes == Eyes::RIGHT && _last->eyes == Eyes::LEFT) ||
+ (_store.front().second.time > _last->time && _store.front().second.eyes == Eyes::LEFT && _last->eyes == Eyes::RIGHT)
);
if (!store_front_in_sequence) {
- string const store = _store.empty() ? "store empty" : String::compose("store front frame=%1 eyes=%2", _store.front().second.frame, static_cast<int>(_store.front().second.eyes));
- string const last = _last ? String::compose("last frame=%1 eyes=%2", _last->frame, static_cast<int>(_last->eyes)) : "no last";
+ string const store = _store.empty() ? "store empty" : String::compose("store front time=%1 eyes=%2", to_string(_store.front().second.time), static_cast<int>(_store.front().second.eyes));
+ string const last = _last ? String::compose("last time=%1 eyes=%2", to_string(_last->time), static_cast<int>(_last->eyes)) : "no last";
LOG_DEBUG_THREE_D("Shuffler not in sequence: %1 %2", store, last);
}
}
if (_store.size() > _max_size) {
- LOG_WARNING ("Shuffler is full after receiving frame %1; 3D sync may be incorrect.", video.frame);
+ LOG_WARNING("Shuffler is full after receiving frame at %1; 3D sync may be incorrect.", to_string(video.time));
}
- LOG_DEBUG_THREE_D("Shuffler emits frame=%1 eyes=%2 store=%3", _store.front().second.frame, static_cast<int>(_store.front().second.eyes), _store.size());
+ LOG_DEBUG_THREE_D("Shuffler emits time=%1 eyes=%2 store=%3", to_string(_store.front().second.time), static_cast<int>(_store.front().second.eyes), _store.size());
Video (_store.front().first, _store.front().second);
_last = _store.front().second;
_store.pop_front ();
return mapped;
}
-Eyes
-increment_eyes (Eyes e)
-{
- if (e == Eyes::LEFT) {
- return Eyes::RIGHT;
- }
-
- return Eyes::LEFT;
-}
-
size_t
utf8_strlen (string s)
}
-/** @param f Frame index within the whole (untrimmed) content.
+/** @param time Time within the whole (untrimmed) content.
* @return Fade factor (between 0 and 1) or unset if there is no fade.
*/
optional<double>
-VideoContent::fade (shared_ptr<const Film> film, Frame f) const
+VideoContent::fade(shared_ptr<const Film> film, ContentTime time) const
{
- DCPOMATIC_ASSERT (f >= 0);
+ DCPOMATIC_ASSERT(time.get() >= 0);
double const vfr = _parent->active_video_frame_rate(film);
- auto const ts = _parent->trim_start().frames_round(vfr);
- if ((f - ts) < fade_in()) {
- return double (f - ts) / fade_in();
+ auto const ts = _parent->trim_start();
+ auto const fade_in_time = ContentTime::from_frames(fade_in(), vfr);
+ if ((time - ts) < fade_in_time) {
+ return double(ContentTime(time - ts).get()) / fade_in_time.get();
}
- auto fade_out_start = length() - _parent->trim_end().frames_round(vfr) - fade_out();
- if (f >= fade_out_start) {
- return 1 - double (f - fade_out_start) / fade_out();
+ auto const fade_out_time = ContentTime::from_frames(fade_out(), vfr);
+ auto fade_out_start = ContentTime::from_frames(length(), vfr) - _parent->trim_end() - fade_out_time;
+ if (time >= fade_out_start) {
+ return 1 - double(ContentTime(time - fade_out_start).get()) / fade_out_time.get();
}
- return optional<double> ();
+ return {};
}
string
dcp::Size size_after_crop () const;
dcp::Size scaled_size (dcp::Size container_size);
- boost::optional<double> fade (std::shared_ptr<const Film> film, Frame) const;
+ boost::optional<double> fade(std::shared_ptr<const Film> film, dcpomatic::ContentTime time) const;
std::string processing_description (std::shared_ptr<const Film> film);
#include "compose.hpp"
-#include "film.h"
#include "frame_interval_checker.h"
#include "image.h"
#include "j2k_image_proxy.h"
}
-/** Called by decoder classes when they have a video frame ready.
- * @param frame Frame index within the content; this does not take into account 3D
- * so for 3D_ALTERNATE this value goes:
- * 0: frame 0 left
- * 1: frame 0 right
- * 2: frame 1 left
- * 3: frame 1 right
- * and so on.
- */
+/** Called by decoder classes when they have a video frame ready */
void
-VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, Frame decoder_frame)
+VideoDecoder::emit(shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, ContentTime time)
{
if (ignore ()) {
return;
auto const afr = _content->active_video_frame_rate(film);
auto const vft = _content->video->frame_type();
- auto frame_time = ContentTime::from_frames (decoder_frame, afr);
-
/* Do some heuristics to try and spot the case where the user sets content to 3D
* when it is not. We try to tell this by looking at the differences in time between
* the first few frames. Real 3D content should have two frames for each timestamp.
*/
if (_frame_interval_checker) {
- _frame_interval_checker->feed (frame_time, afr);
+ _frame_interval_checker->feed(time, afr);
if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VideoFrameType::THREE_D) {
boost::throw_exception (
DecodeError(
}
}
- Frame frame;
- Eyes eyes = Eyes::BOTH;
- if (!_position) {
- /* This is the first data we have received since initialisation or seek. Set
- the position based on the frame that was given. After this first time
- we just count frames, since (as with audio) it seems that ContentTimes
- are unreliable from FFmpegDecoder. They are much better than audio times
- but still we get the occasional one which is duplicated. In this case
- ffmpeg seems to carry on regardless, processing the video frame as normal.
- If we drop the frame with the duplicated timestamp we obviously lose sync.
- */
-
- if (vft == VideoFrameType::THREE_D_ALTERNATE) {
- frame = decoder_frame / 2;
- eyes = (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
- } else {
- frame = decoder_frame;
- if (vft == VideoFrameType::THREE_D) {
- auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
- /* At the moment only DCP decoders producers VideoFrameType::THREE_D, so only the J2KImageProxy
- * knows which eye it is.
- */
- if (j2k && j2k->eye()) {
- eyes = j2k->eye().get() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
- }
- }
- }
-
- _position = ContentTime::from_frames (frame, afr);
- } else {
- if (vft == VideoFrameType::THREE_D) {
- auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
- if (j2k && j2k->eye()) {
- if (j2k->eye() == dcp::Eye::LEFT) {
- frame = _position->frames_round(afr) + 1;
- eyes = Eyes::LEFT;
- } else {
- frame = _position->frames_round(afr);
- eyes = Eyes::RIGHT;
- }
- } else {
- /* This should not happen; see above */
- frame = _position->frames_round(afr) + 1;
- }
- } else if (vft == VideoFrameType::THREE_D_ALTERNATE) {
- DCPOMATIC_ASSERT (_last_emitted_eyes);
- if (_last_emitted_eyes.get() == Eyes::RIGHT) {
- frame = _position->frames_round(afr) + 1;
- eyes = Eyes::LEFT;
- } else {
- frame = _position->frames_round(afr);
- eyes = Eyes::RIGHT;
- }
- } else {
- frame = _position->frames_round(afr) + 1;
- }
- }
-
switch (vft) {
case VideoFrameType::TWO_D:
+ Data(ContentVideo(image, time, Eyes::BOTH, Part::WHOLE));
+ break;
case VideoFrameType::THREE_D:
- Data (ContentVideo (image, frame, eyes, Part::WHOLE));
+ {
+ auto eyes = Eyes::LEFT;
+ auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+ if (j2k && j2k->eye()) {
+ eyes = *j2k->eye() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
+ }
+
+ Data(ContentVideo(image, time, eyes, Part::WHOLE));
break;
+ }
case VideoFrameType::THREE_D_ALTERNATE:
{
- Data (ContentVideo (image, frame, eyes, Part::WHOLE));
+ Eyes eyes;
+ if (_last_emitted_eyes) {
+ eyes = _last_emitted_eyes.get() == Eyes::LEFT ? Eyes::RIGHT : Eyes::LEFT;
+ } else {
+ /* We don't know what eye this frame is, so just guess */
+ auto frame = time.frames_round(_content->video_frame_rate().get_value_or(24));
+ eyes = (frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
+ }
+ Data(ContentVideo(image, time, eyes, Part::WHOLE));
_last_emitted_eyes = eyes;
break;
}
case VideoFrameType::THREE_D_LEFT_RIGHT:
- Data (ContentVideo (image, frame, Eyes::LEFT, Part::LEFT_HALF));
- Data (ContentVideo (image, frame, Eyes::RIGHT, Part::RIGHT_HALF));
+ Data(ContentVideo(image, time, Eyes::LEFT, Part::LEFT_HALF));
+ Data(ContentVideo(image, time, Eyes::RIGHT, Part::RIGHT_HALF));
break;
case VideoFrameType::THREE_D_TOP_BOTTOM:
- Data (ContentVideo (image, frame, Eyes::LEFT, Part::TOP_HALF));
- Data (ContentVideo (image, frame, Eyes::RIGHT, Part::BOTTOM_HALF));
+ Data(ContentVideo(image, time, Eyes::LEFT, Part::TOP_HALF));
+ Data(ContentVideo(image, time, Eyes::RIGHT, Part::BOTTOM_HALF));
break;
case VideoFrameType::THREE_D_LEFT:
- Data (ContentVideo (image, frame, Eyes::LEFT, Part::WHOLE));
+ Data(ContentVideo(image, time, Eyes::LEFT, Part::WHOLE));
break;
case VideoFrameType::THREE_D_RIGHT:
- Data (ContentVideo (image, frame, Eyes::RIGHT, Part::WHOLE));
+ Data(ContentVideo(image, time, Eyes::RIGHT, Part::WHOLE));
break;
default:
DCPOMATIC_ASSERT (false);
}
- _position = ContentTime::from_frames (frame, afr);
+ _position = time;
}
}
void seek () override;
- void emit (std::shared_ptr<const Film> film, std::shared_ptr<const ImageProxy>, Frame frame);
+ void emit(std::shared_ptr<const Film> film, std::shared_ptr<const ImageProxy>, dcpomatic::ContentTime time);
boost::signals2::signal<void (ContentVideo)> Data;
video->emit (
film(),
std::make_shared<J2KImageProxy>(_mono_reader->get_frame(frame), _size, AV_PIX_FMT_XYZ12LE, optional<int>()),
- frame
+ _next
);
} else {
video->emit (
film(),
std::make_shared<J2KImageProxy>(_stereo_reader->get_frame(frame), _size, dcp::Eye::LEFT, AV_PIX_FMT_XYZ12LE, optional<int>()),
- frame
+ _next
);
video->emit (
film(),
std::make_shared<J2KImageProxy>(_stereo_reader->get_frame(frame), _size, dcp::Eye::RIGHT, AV_PIX_FMT_XYZ12LE, optional<int>()),
- frame
+ _next
);
}
using boost::thread;
using boost::optional;
using dcp::ArrayData;
+using namespace dcpomatic;
void
ColourConversion(),
VideoRange::FULL,
weak_ptr<Content>(),
- optional<Frame>(),
+ optional<ContentTime>(),
false
);
ColourConversion(),
VideoRange::FULL,
weak_ptr<Content>(),
- optional<Frame>(),
+ optional<ContentTime>(),
false
);
ColourConversion(),
VideoRange::FULL,
weak_ptr<Content>(),
- optional<Frame>(),
+ optional<ContentTime>(),
false
);
PresetColourConversion::all().front().conversion,
VideoRange::FULL,
weak_ptr<Content>(),
- optional<Frame>(),
+ optional<ContentTime>(),
false
);
film->set_audio_channels(16);
make_and_verify_dcp (film);
- check_dcp (TestPaths::private_data() / "fha", film);
+ check_dcp (TestPaths::private_data() / "v2.18.x" / "fha", film);
cl.run ();
}
static void
-check (shared_ptr<FFmpegDecoder> decoder, int frame)
+check (shared_ptr<FFmpegDecoder> decoder, ContentTime time)
{
BOOST_REQUIRE (decoder->ffmpeg_content()->video_frame_rate ());
- decoder->seek (ContentTime::from_frames (frame, decoder->ffmpeg_content()->video_frame_rate().get()), true);
+ decoder->seek(time, true);
stored = optional<ContentVideo> ();
while (!decoder->pass() && !stored) {}
- BOOST_CHECK (stored->frame <= frame);
+ BOOST_CHECK(stored->time <= time);
}
static void
-test (boost::filesystem::path file, vector<int> frames)
+test (boost::filesystem::path file, vector<ContentTime> times)
{
auto path = TestPaths::private_data() / file;
BOOST_REQUIRE (boost::filesystem::exists (path));
auto decoder = make_shared<FFmpegDecoder>(film, content, false);
decoder->video->Data.connect (bind (&store, _1));
- for (auto i: frames) {
+ for (auto i: times) {
check (decoder, i);
}
}
BOOST_AUTO_TEST_CASE (ffmpeg_decoder_seek_test)
{
	/* Build a list of seek targets from frame indices at a given frame rate */
	auto times_at = [](double rate, vector<int> frames) {
		vector<ContentTime> times;
		for (auto frame: frames) {
			times.push_back(ContentTime::from_frames(frame, rate));
		}
		return times;
	};

	test("boon_telly.mkv", times_at(29.97, {0, 42, 999, 0}));
	test("Sintel_Trailer1.480p.DivX_Plus_HD.mkv", times_at(24, {0, 42, 999, 0}));
	test("prophet_long_clip.mkv", times_at(23.976, {15, 42, 999, 15}));
	test("dolby_aurora.vob", times_at(25, {0, 125, 250, 41}));
}
using std::make_shared;
+using namespace dcpomatic;
BOOST_AUTO_TEST_CASE (low_bitrate_test)
boost::optional<ColourConversion>(),
VideoRange::FULL,
std::weak_ptr<Content>(),
- boost::optional<Frame>(),
+ boost::optional<ContentTime>(),
false
);
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
#endif
+using namespace dcpomatic;
static void
-push (Shuffler& s, int frame, Eyes eyes)
+push(Shuffler& s, int frame, Eyes eyes)
{
auto piece = make_shared<Piece>(shared_ptr<Content>(), shared_ptr<Decoder>(), FrameRateChange(24, 24));
ContentVideo cv;
- cv.frame = frame;
+ cv.time = ContentTime::from_frames(frame, 24);
cv.eyes = eyes;
s.video (piece, cv);
}
static void
check (int frame, Eyes eyes, int line)
{
+ auto const time = ContentTime::from_frames(frame, 24);
BOOST_REQUIRE_MESSAGE (!pending_cv.empty(), "Check at " << line << " failed.");
- BOOST_CHECK_MESSAGE (pending_cv.front().frame == frame, "Check at " << line << " failed.");
+ BOOST_CHECK_MESSAGE (pending_cv.front().time == time, "Check at " << line << " failed.");
BOOST_CHECK_MESSAGE (pending_cv.front().eyes == eyes, "Check at " << line << " failed.");
pending_cv.pop_front();
}