void
Piece::video (shared_ptr<const ImageProxy> image, Frame frame, Eyes eyes, Part part)
{
	/* Stamp the frame with its DCP time up-front (via content_video_to_dcp) so that
	   consumers of the emitted PieceVideo do not each have to recompute it. */
	Video (PieceVideo(image, frame, content_video_to_dcp(frame), eyes, part));
}
class Decoder;
class PlayerVideo;
class Resampler;

/* Unit-test structs which are granted friend access below */
struct check_reuse_old_data_test;
struct overlap_video_test1;
struct player_time_calculation_test2;
class Piece
void update_pull_to (dcpomatic::DCPTime& pull_to) const;
void set_last_push_end (AudioStreamPtr stream, dcpomatic::DCPTime last_push_end);
dcpomatic::DCPTime resampled_audio_to_dcp (Frame f) const;
boost::optional<dcpomatic::DCPTime> content_time_to_dcp (std::shared_ptr<const Content> content, dcpomatic::ContentTime t) const;

private:
friend struct overlap_video_test1;
friend struct check_reuse_old_data_test;
friend struct player_time_calculation_test2;

void video (std::shared_ptr<const ImageProxy> image, Frame frame, Eyes eyes, Part part);
void audio (std::shared_ptr<AudioStream> stream, std::shared_ptr<const AudioBuffers> audio, Frame frame);
void flush ();
bool done () const;

/* Private: Piece::video stamps the DCP time onto the PieceVideo it emits,
   so callers outside Piece no longer need this conversion directly. */
dcpomatic::DCPTime content_video_to_dcp (Frame f) const;
dcpomatic::ContentTime dcp_to_content_time (dcpomatic::DCPTime t) const;

std::weak_ptr<const Film> _film;
#define DCPOMATIC_PIECE_VIDEO_H

/* dcpomatic_time.h is required for the dcpomatic::DCPTime member of PieceVideo */
#include "dcpomatic_time.h"
#include "types.h"
public:
PieceVideo () {}
- PieceVideo (std::shared_ptr<const ImageProxy> i, Frame f, Eyes e, Part p)
+ PieceVideo (std::shared_ptr<const ImageProxy> i, Frame f, dcpomatic::DCPTime t, Eyes e, Part p)
: image (i)
, frame (f)
+ , time (t)
, eyes (e)
, part (p)
{}
std::shared_ptr<const ImageProxy> image;
Frame frame = 0;
+ dcpomatic::DCPTime time;
Eyes eyes = Eyes::LEFT;
Part part = Part::WHOLE;
};
return;
}
- /* Time of the first frame we will emit */
- DCPTime const time = piece->content_video_to_dcp (video.frame);
- LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+ LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(video.time));
/* Discard if it's before the content's period or the last accurate seek. We can't discard
if it's after the content's period here as in that case we still need to fill any gap between
`now' and the end of the content's period.
*/
- if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
+ if (video.time < piece->position() || (_last_video_time && video.time < *_last_video_time)) {
return;
}
- if (piece->ignore_video_at(time)) {
+ if (piece->ignore_video_at(video.time)) {
return;
}
/* Fill gaps that we discover now that we have some video which needs to be emitted.
This is where we need to fill to.
*/
- DCPTime fill_to = min (time, piece->end());
+ DCPTime fill_to = min (video.time, piece->end());
if (_last_video_time) {
DCPTime fill_from = max (*_last_video_time, piece->position());
_last_video[wp] = piece->player_video (video, _video_container_size);
- DCPTime t = time;
+ DCPTime t = video.time;
for (int i = 0; i < frc.repeat; ++i) {
if (t < piece->end()) {
emit_video (_last_video[wp], t);