Change how video timing is done: emit video frames stamped with a ContentTime rather than a frame index.
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index 250bce9558513da6bd7d4899dfee416fe4feb641..765b9fa62b127976d000ff70ee68b2b8ba514fd8 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -143,11 +143,10 @@ FFmpegDecoder::flush ()
        full_length = full_length.ceil (frc.source);
        if (video) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
-               auto const f = full_length.frames_round (vfr);
-               auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
-               while (v < f) {
-                       video->emit (film(), make_shared<const RawImageProxy>(_black_image), v);
-                       ++v;
+               auto v = video->position(film()).get_value_or(ContentTime()) + ContentTime::from_frames(1, vfr);
+               while (v < full_length) {
+                       video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
+                       v += ContentTime::from_frames(1, vfr);
                }
        }
 
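The padding loop in flush() now works in ContentTime rather than frame indices: instead of rounding full_length down to a frame count and counting integers, it steps a ContentTime forward by one frame period per emitted black frame. A minimal sketch of that stepping, using a cut-down stand-in for dcpomatic's ContentTime (the 96 kHz fixed-point representation and the interface are assumptions about the real class):

    // Sketch only: a simplified ContentTime showing the stepping logic.
    #include <cmath>
    #include <cstdint>

    struct ContentTime {
        static constexpr int64_t HZ = 96000;  // ticks per second (assumed)
        int64_t t = 0;

        static ContentTime from_frames(int64_t f, double rate) {
            return ContentTime{ static_cast<int64_t>(std::llrint(f * HZ / rate)) };
        }
        bool operator<(ContentTime o) const { return t < o.t; }
        ContentTime& operator+=(ContentTime o) { t += o.t; return *this; }
    };

    // Mirror of the new loop: pad from just after `position` up to (never past)
    // `full_length`, one video frame period at a time.
    int black_frames_needed(ContentTime position, ContentTime full_length, double vfr)
    {
        int n = 0;
        auto v = position;
        v += ContentTime::from_frames(1, vfr);
        while (v < full_length) {
            ++n;  // stands in for video->emit(...)
            v += ContentTime::from_frames(1, vfr);
        }
        return n;
    }
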
@@ -219,14 +218,15 @@ FFmpegDecoder::pass ()
 /** @param frame Frame whose data is an array of pointers to buffers.
  *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
  */
+static
 shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (AVFrame* frame)
+deinterleave_audio(shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
 {
        auto format = static_cast<AVSampleFormat>(frame->format);
 
        /* XXX: can't we use swr_convert() to do the format conversion? */
 
-       int const channels = frame->channels;
+       int const channels = frame->ch_layout.nb_channels;
        int const frames = frame->nb_samples;
        int const total_samples = frames * channels;
        auto audio = make_shared<AudioBuffers>(channels, frames);
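Two changes here: deinterleave_audio() becomes a file-local (static) free function taking the stream explicitly, since the frame's own channel count no longer tells us how many channels the stream declares; and the count itself now comes from frame->ch_layout.nb_channels, because FFmpeg's AVChannelLayout API (introduced around FFmpeg 5.1) deprecates AVFrame::channels and later releases remove it. If older FFmpeg still had to be supported, a guard along these lines would do (the exact libavutil version cutoff is an assumption; check your headers):

    // Sketch: tolerate both channel-count APIs across FFmpeg versions.
    extern "C" {
    #include <libavutil/frame.h>
    #include <libavutil/version.h>
    }

    static int
    frame_channel_count(AVFrame const* frame)
    {
    #if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)  // assumed cutoff
        return frame->ch_layout.nb_channels;  // AVChannelLayout API (FFmpeg >= 5.1)
    #else
        return frame->channels;               // deprecated and later removed
    #endif
    }
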
@@ -326,12 +326,12 @@ FFmpegDecoder::deinterleave_audio (AVFrame* frame)
        case AV_SAMPLE_FMT_FLTP:
        {
                auto p = reinterpret_cast<float**> (frame->data);
-               DCPOMATIC_ASSERT (frame->channels <= channels);
+               DCPOMATIC_ASSERT(channels <= stream->channels());
                /* Sometimes there aren't as many channels in the frame as in the stream */
-               for (int i = 0; i < frame->channels; ++i) {
+               for (int i = 0; i < channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
-               for (int i = frame->channels; i < channels; ++i) {
+               for (int i = channels; i < stream->channels(); ++i) {
                        audio->make_silent (i);
                }
        }
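The planar-float case now asserts the opposite way round: a frame may legitimately carry fewer channels than its stream declares (as the existing comment notes), so the copy covers the frame's channels and the rest are silenced. In outline, with a stand-in buffer type rather than dcpomatic's real AudioBuffers:

    // Sketch of the copy-then-silence pattern for planar float audio.
    #include <algorithm>
    #include <cstring>
    #include <vector>

    struct Buffers {
        std::vector<std::vector<float>> ch;
        Buffers(int channels, int frames) : ch(channels, std::vector<float>(frames, 0.0f)) {}
    };

    void
    deinterleave_fltp(Buffers& out, float const* const* src, int frame_channels, int stream_channels, int frames)
    {
        for (int i = 0; i < frame_channels; ++i) {
            std::memcpy(out.ch[i].data(), src[i], frames * sizeof(float));  // channels present in the frame
        }
        for (int i = frame_channels; i < stream_channels; ++i) {
            std::fill(out.ch[i].begin(), out.ch[i].end(), 0.0f);  // declared but absent: silence
        }
    }
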
@@ -450,7 +450,7 @@ void
 FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
 {
        auto frame = audio_frame (stream);
-       auto data = deinterleave_audio (frame);
+       auto data = deinterleave_audio(stream, frame);
 
        auto const time_base = stream->stream(_format_context)->time_base;
 
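process_audio_frame() simply threads the stream through to the free function. The time_base it fetches is what converts FFmpeg's integer timestamps into seconds; the standard conversion is an av_q2d() multiply, roughly:

    // Sketch: stream timestamps to seconds via the stream's time base.
    extern "C" {
    #include <libavutil/rational.h>
    }
    #include <cstdint>

    static double
    pts_to_seconds(int64_t pts, AVRational time_base)
    {
        return pts * av_q2d(time_base);  // e.g. pts 48000 at 1/48000 -> 1.0 s
    }
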
@@ -527,7 +527,7 @@ FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
                r = avcodec_receive_frame (context, frame);
                if (r == AVERROR(EAGAIN)) {
                        /* More input is required */
-                       LOG_DEBUG_PLAYER_NC("EAGAIN after trying to receive auido frame");
+                       LOG_DEBUG_PLAYER_NC("EAGAIN after trying to receive audio frame");
                        return;
                }
 
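Besides the log-message typo fix, this hunk shows the usual shape of FFmpeg's send/receive decoding API, where EAGAIN from avcodec_receive_frame() just means the decoder wants more input rather than signalling an error. The canonical loop looks roughly like this (error handling trimmed):

    // Sketch of the standard send/receive decode loop.
    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    static void
    decode_packet(AVCodecContext* context, AVPacket* packet, AVFrame* frame)
    {
        if (avcodec_send_packet(context, packet) < 0) {
            return;  // real code should log or raise here
        }
        while (true) {
            int const r = avcodec_receive_frame(context, frame);
            if (r == AVERROR(EAGAIN) || r == AVERROR_EOF) {
                return;  // EAGAIN: more input needed; EOF: decoder drained
            }
            if (r < 0) {
                return;  // genuine decode error
            }
            // ... hand `frame` on for processing ...
        }
    }
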
@@ -610,7 +610,7 @@ FFmpegDecoder::process_video_frame ()
                        video->emit (
                                film(),
                                make_shared<RawImageProxy>(image),
-                               llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
+                               ContentTime::from_seconds(pts)
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
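This final hunk is the point of the commit: rather than rounding the PTS to a frame index with llrint(pts * rate) inside the decoder, it emits a ContentTime built from the PTS in seconds, so rounding to frames happens later, in one place, with full precision. The two conversions side by side (96000 ticks per second is the assumed ContentTime resolution from the sketch above):

    #include <cmath>
    #include <cstdint>

    // Old approach: round straight to a frame index at decode time.
    int64_t frame_index_of(double pts_seconds, double rate)
    {
        return std::llrint(pts_seconds * rate);
    }

    // New approach: keep a high-resolution timestamp and defer frame rounding.
    int64_t content_ticks_of(double pts_seconds)
    {
        return std::llrint(pts_seconds * 96000);  // assumed ContentTime tick rate
    }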