full_length = full_length.ceil (frc.source);
if (video) {
double const vfr = _ffmpeg_content->video_frame_rate().get();
- auto const f = full_length.frames_round (vfr);
- auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
- while (v < f) {
- video->emit (film(), make_shared<const RawImageProxy>(_black_image), v);
- ++v;
+ auto v = video->position(film()).get_value_or(ContentTime()) + ContentTime::from_frames(1, vfr);
+ while (v < full_length) {
+ video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
+ v += ContentTime::from_frames(1, vfr);
}
}
/** Deinterleave audio from @p frame into a new AudioBuffers.
 *  frame->data is an array of pointers to sample buffers: only the first
 *  buffer is used for non-planar (interleaved) data, otherwise there is
 *  one buffer per channel.
 */
+static
shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (AVFrame* frame)
+deinterleave_audio(shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
{
auto format = static_cast<AVSampleFormat>(frame->format);
/* XXX: can't we use swr_convert() to do the format conversion? */
- int const channels = frame->channels;
+ int const channels = frame->ch_layout.nb_channels;
int const frames = frame->nb_samples;
int const total_samples = frames * channels;
auto audio = make_shared<AudioBuffers>(channels, frames);
case AV_SAMPLE_FMT_FLTP:
{
auto p = reinterpret_cast<float**> (frame->data);
- DCPOMATIC_ASSERT (frame->channels <= channels);
+ DCPOMATIC_ASSERT(channels <= stream->channels());
/* Sometimes there aren't as many channels in the frame as in the stream */
- for (int i = 0; i < frame->channels; ++i) {
+ for (int i = 0; i < channels; ++i) {
memcpy (data[i], p[i], frames * sizeof(float));
}
- for (int i = frame->channels; i < channels; ++i) {
+ for (int i = channels; i < stream->channels(); ++i) {
audio->make_silent (i);
}
}
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
auto frame = audio_frame (stream);
- auto data = deinterleave_audio (frame);
+ auto data = deinterleave_audio(stream, frame);
auto const time_base = stream->stream(_format_context)->time_base;
r = avcodec_receive_frame (context, frame);
if (r == AVERROR(EAGAIN)) {
/* More input is required */
- LOG_DEBUG_PLAYER_NC("EAGAIN after trying to receive auido frame");
+ LOG_DEBUG_PLAYER_NC("EAGAIN after trying to receive audio frame");
return;
}
video->emit (
film(),
make_shared<RawImageProxy>(image),
- llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
+ ContentTime::from_seconds(pts)
);
} else {
LOG_WARNING_NC ("Dropping frame without PTS");