diff options
| author | Carl Hetherington <cth@carlh.net> | 2022-10-01 12:04:12 +0200 |
|---|---|---|
| committer | Carl Hetherington <cth@carlh.net> | 2022-10-01 12:04:12 +0200 |
| commit | dba7e1137282b52a1bd6ad1d56fe6371a8c97e30 (patch) | |
| tree | 3a640d849c45d583a853900fa0c1fc6392f919ec /src/lib/ffmpeg_decoder.cc | |
| parent | 3d369d6ce80caf87520237f4e8009b7fd55aa91b (diff) | |
Fix checking of frame channels vs stream channels.
The comment says that we're handling differences between channel
counts in the frame and stream but the code wasn't doing that.
Diffstat (limited to 'src/lib/ffmpeg_decoder.cc')
| -rw-r--r-- | src/lib/ffmpeg_decoder.cc | 11 |
1 file changed, 6 insertions, 5 deletions
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index ba96d71ff..a5b36d04f 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -219,8 +219,9 @@ FFmpegDecoder::pass ()
 /** @param data pointer to array of pointers to buffers.
  *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
  */
+static
 shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (AVFrame* frame)
+deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
 {
 	auto format = static_cast<AVSampleFormat>(frame->format);
@@ -326,12 +327,12 @@ FFmpegDecoder::deinterleave_audio (AVFrame* frame)
 	case AV_SAMPLE_FMT_FLTP:
 	{
 		auto p = reinterpret_cast<float**> (frame->data);
-		DCPOMATIC_ASSERT (frame->channels <= channels);
+		DCPOMATIC_ASSERT (channels <= stream->channels());
 		/* Sometimes there aren't as many channels in the frame as in the stream */
-		for (int i = 0; i < frame->channels; ++i) {
+		for (int i = 0; i < channels; ++i) {
 			memcpy (data[i], p[i], frames * sizeof(float));
 		}
-		for (int i = frame->channels; i < channels; ++i) {
+		for (int i = channels; i < stream->channels(); ++i) {
 			audio->make_silent (i);
 		}
 	}
@@ -450,7 +451,7 @@ void
 FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
 {
 	auto frame = audio_frame (stream);
-	auto data = deinterleave_audio (frame);
+	auto data = deinterleave_audio (stream, frame);

 	auto const time_base = stream->stream(_format_context)->time_base;
