Revert "Re-allow audio channel 15 to be mapped so that users can add"
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index 350362478e92dfb7522e9f8ffdd7d666ddd4d656..3c0f48804106f57677baeca55267df1f561d3a0f 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -206,9 +206,14 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
 {
        DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
 
+DCPOMATIC_DISABLE_WARNINGS
        int const size = av_samples_get_buffer_size (
                0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
                );
+DCPOMATIC_ENABLE_WARNINGS
+
+       /* XXX: can't we just use _frame->nb_samples directly here? */
+       /* XXX: can't we use swr_convert() to do the format conversion? */
 
        /* Deinterleave and convert to float */
 
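Note on the XXX comment in the hunk above: it asks whether swr_convert() could replace the hand-written de-interleave/convert-to-float code. A minimal sketch of that idea follows, using the pre-AVChannelLayout libswresample API that matches the rest of this file; the helper name and the caller-supplied output buffers are illustrative only and are not part of this commit (error handling omitted).

extern "C" {
#include <libavutil/channel_layout.h>
#include <libswresample/swresample.h>
}

/* Convert one decoded AVFrame to planar float.  `out' must point to `channels'
   buffers, each with room for frame->nb_samples floats.  A context would
   normally be created once and reused rather than per-frame.
*/
static void
convert_to_float_planar (AVFrame* frame, int channels, int rate, AVSampleFormat fmt, uint8_t** out)
{
	SwrContext* swr = swr_alloc_set_opts (
		0,
		av_get_default_channel_layout (channels), AV_SAMPLE_FMT_FLTP, rate,
		av_get_default_channel_layout (channels), fmt, rate,
		0, 0
		);
	swr_init (swr);
	swr_convert (swr, out, frame->nb_samples, const_cast<uint8_t const **> (frame->data), frame->nb_samples);
	swr_free (&swr);
}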
@@ -336,7 +341,9 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
 AVSampleFormat
 FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
 {
+DCPOMATIC_DISABLE_WARNINGS
        return stream->stream (_format_context)->codec->sample_fmt;
+DCPOMATIC_ENABLE_WARNINGS
 }
 
 int
@@ -398,15 +405,21 @@ FFmpegDecoder::seek (ContentTime time, bool accurate)
                avcodec_flush_buffers (video_codec_context());
        }
 
+DCPOMATIC_DISABLE_WARNINGS
        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
                avcodec_flush_buffers (i->stream(_format_context)->codec);
        }
+DCPOMATIC_ENABLE_WARNINGS
 
        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }
 
        _have_current_subtitle = false;
+
+       BOOST_FOREACH (optional<ContentTime>& i, _next_time) {
+               i = optional<ContentTime>();
+       }
 }
 
 void
@@ -431,6 +444,7 @@ FFmpegDecoder::decode_audio_packet ()
                return;
        }
 
+DCPOMATIC_DISABLE_WARNINGS
        while (copy_packet.size > 0) {
 
                int frame_finished;
@@ -455,12 +469,14 @@ FFmpegDecoder::decode_audio_packet ()
                        shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);
 
                        ContentTime ct;
-                       if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
+                       if (_frame->pts == AV_NOPTS_VALUE) {
                                /* In some streams we see not every frame coming through with a timestamp; for those
                                   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
                                   particularly noticeable with TrueHD streams (see #1111).
                                */
-                               ct = *_next_time[stream_index];
+                               if (_next_time[stream_index]) {
+                                       ct = *_next_time[stream_index];
+                               }
                        } else {
                                ct = ContentTime::from_seconds (
                                        av_frame_get_best_effort_timestamp (_frame) *
@@ -490,6 +506,7 @@ FFmpegDecoder::decode_audio_packet ()
                                        to_string(_pts_offset)
                                        );
                        }
+DCPOMATIC_ENABLE_WARNINGS
 
                        /* Give this data provided there is some, and its time is sane */
                        if (ct >= ContentTime() && data->frames() > 0) {
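For context on the AV_NOPTS_VALUE comment above: once a frame has been given a time (either from its own timestamp or from _next_time), the decoder can predict where the next frame of that stream should start from the sample count, so a run of TrueHD frames without PTS still gets monotonically increasing times. A hedged sketch of that bookkeeping is below; the exact update in ffmpeg_decoder.cc may differ, and ContentTime::from_frames() / stream->frame_rate() are assumed helpers.

/* After emitting `data' at time `ct', remember where the next frame of this
   stream should start, so a following frame with AV_NOPTS_VALUE can be placed.
*/
_next_time[stream_index] = ct + ContentTime::from_frames (data->frames(), stream->frame_rate());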
@@ -508,9 +525,11 @@ FFmpegDecoder::decode_video_packet ()
        DCPOMATIC_ASSERT (_video_stream);
 
        int frame_finished;
+DCPOMATIC_DISABLE_WARNINGS
        if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
                return false;
        }
+DCPOMATIC_ENABLE_WARNINGS
 
        boost::mutex::scoped_lock lm (_filter_graphs_mutex);
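The DCPOMATIC_DISABLE_WARNINGS / DCPOMATIC_ENABLE_WARNINGS pairs added throughout this patch suppress the deprecation warnings that newer FFmpeg versions attach to AVStream::codec and avcodec_decode_video2(). The definition below is a hypothetical sketch of what such macros typically expand to; the project's real definitions live in a shared warnings header not shown in this diff and may cover more warnings and more compilers.

/* Hypothetical definition, shown for context only. */
#ifdef __GNUC__
#define DCPOMATIC_DISABLE_WARNINGS \
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#define DCPOMATIC_ENABLE_WARNINGS \
	_Pragma("GCC diagnostic pop")
#else
#define DCPOMATIC_DISABLE_WARNINGS
#define DCPOMATIC_ENABLE_WARNINGS
#endif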