* and so on.
*/
void
-VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, Frame frame)
+VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, Frame decoder_frame)
{
if (ignore ()) {
return;
}
+ double const afr = _content->active_video_frame_rate(film);
+
+ Frame frame;
+ if (!_position) {
+ /* This is the first data we have received since initialisation or seek. Set
+ the position based on the frame that was given. After this first time
+ we just count frames, since (as with audio) it seems that ContentTimes
+ are unreliable from FFmpegDecoder. They are much better than audio times
+ but still we get the occasional one which is duplicated. In this case
+ ffmpeg seems to carry on regardless, processing the video frame as normal.
+ If we drop the frame with the duplicated timestamp we obviously lose sync.
+ */
+ _position = ContentTime::from_frames (decoder_frame, afr);
+ if (_content->video->frame_type() == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+ /* 3D-alternate content delivers two decoder frames (one per eye) per
+ output frame, so halve.  Seeding _last_emitted_eyes with RIGHT makes
+ the switch below flip to LEFT for this first emission. */
+ frame = decoder_frame / 2;
+ _last_emitted_eyes = EYES_RIGHT;
+ } else {
+ frame = decoder_frame;
+ }
+ } else {
+ if (_content->video->frame_type() == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+ DCPOMATIC_ASSERT (_last_emitted_eyes);
+ /* Only advance to the next output frame once both eyes have gone out */
+ if (_last_emitted_eyes.get() == EYES_RIGHT) {
+ frame = _position->frames_round(afr) + 1;
+ } else {
+ frame = _position->frames_round(afr);
+ }
+ } else {
+ frame = _position->frames_round(afr) + 1;
+ }
+ }
+
switch (_content->video->frame_type ()) {
case VIDEO_FRAME_TYPE_2D:
Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
break;
}
/* NOTE(review): the '}' above closes the switch before the following 'case' --
context lines appear to be missing from this hunk; verify against the full file. */
case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
- frame /= 2;
+ {
+ DCPOMATIC_ASSERT (_last_emitted_eyes);
+ /* Alternate-eye stream: emit the opposite eye to whichever went out last */
+ Eyes const eyes = _last_emitted_eyes.get() == EYES_LEFT ? EYES_RIGHT : EYES_LEFT;
+ Data (ContentVideo (image, frame, eyes, PART_WHOLE));
+ _last_emitted_eyes = eyes;
break;
+ }
case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
/* NOTE(review): an unconditional assert immediately after emitting both eyes looks
wrong -- a 'break' and further cases (e.g. TOP_BOTTOM, LEFT, RIGHT) are likely
elided from this chunk; confirm against the full file. */
DCPOMATIC_ASSERT (false);
}
- _position = ContentTime::from_frames (frame, _content->active_video_frame_rate(film));
+ _position = ContentTime::from_frames (frame, afr);
}
void
friend struct ffmpeg_pts_offset_test;
friend void ffmpeg_decoder_sequential_test_one (boost::filesystem::path file, float fps, int gaps, int video_length);
-ContentTime position (boost::shared_ptr<const Film>) const {
+boost::optional<ContentTime> position (boost::shared_ptr<const Film>) const {
/* Empty until the first frame has been emitted after construction or a seek */
return _position;
}
private:
boost::shared_ptr<const Content> _content;
-/** Frame of last thing to be emitted */
+/** Frame of last thing to be emitted; only used for 3D */
boost::optional<Frame> _last_emitted_frame;
/* NOTE(review): the rewritten emit() no longer reads _last_emitted_frame --
candidate for removal if nothing else uses it; confirm other users first. */
/** Eyes of the last ContentVideo emitted; drives 3D_ALTERNATE left/right sequencing */
boost::optional<Eyes> _last_emitted_eyes;
-ContentTime _position;
+boost::optional<ContentTime> _position;
};
#endif