Treat .psd as an image file.
[dcpomatic.git] / src / lib / ffmpeg_examiner.cc
index 23c2f1a72609189a010ba041f9e02f292f96b9c3..ed867b47529baf11b32e20626d348dee26ca3e94 100644 (file)
@@ -124,8 +124,11 @@ DCPOMATIC_DISABLE_WARNINGS
         */
        string temporal_reference;
        while (true) {
-               int r = av_read_frame (_format_context, &_packet);
+               auto packet = av_packet_alloc ();
+               DCPOMATIC_ASSERT (packet);
+               int r = av_read_frame (_format_context, packet);
                if (r < 0) {
+                       av_packet_free (&packet);
                        break;
                }
 
@@ -137,25 +140,25 @@ DCPOMATIC_DISABLE_WARNINGS
                        }
                }
 
-               AVCodecContext* context = _format_context->streams[_packet.stream_index]->codec;
+               auto context = _format_context->streams[packet->stream_index]->codec;
 DCPOMATIC_ENABLE_WARNINGS
 
-               if (_video_stream && _packet.stream_index == _video_stream.get()) {
-                       video_packet (context, temporal_reference);
+               if (_video_stream && packet->stream_index == _video_stream.get()) {
+                       video_packet (context, temporal_reference, packet);
                }
 
                bool got_all_audio = true;
 
                for (size_t i = 0; i < _audio_streams.size(); ++i) {
-                       if (_audio_streams[i]->uses_index (_format_context, _packet.stream_index)) {
-                               audio_packet (context, _audio_streams[i]);
+                       if (_audio_streams[i]->uses_index(_format_context, packet->stream_index)) {
+                               audio_packet (context, _audio_streams[i], packet);
                        }
                        if (!_audio_streams[i]->first_audio) {
                                got_all_audio = false;
                        }
                }
 
-               av_packet_unref (&_packet);
+               av_packet_free (&packet);
 
                if (_first_video && got_all_audio && temporal_reference.size() >= (PULLDOWN_CHECK_FRAMES * 2)) {
                        /* All done */
@@ -163,10 +166,31 @@ DCPOMATIC_ENABLE_WARNINGS
                }
        }
 
+       if (_video_stream) {
+               AVPacket packet;
+               av_init_packet (&packet);
+               packet.data = nullptr;
+               packet.size = 0;
+DCPOMATIC_DISABLE_WARNINGS
+               auto context = _format_context->streams[*_video_stream]->codec;
+DCPOMATIC_ENABLE_WARNINGS
+               while (video_packet(context, temporal_reference, &packet)) {}
+       }
+
+       for (auto i: _audio_streams) {
+               AVPacket packet;
+               av_init_packet (&packet);
+               packet.data = nullptr;
+               packet.size = 0;
+DCPOMATIC_DISABLE_WARNINGS
+               audio_packet (i->stream(_format_context)->codec, i, &packet);
+DCPOMATIC_ENABLE_WARNINGS
+       }
+
        if (_video_stream) {
                /* This code taken from get_rotation() in ffmpeg:cmdutils.c */
-               AVStream* stream = _format_context->streams[*_video_stream];
-               AVDictionaryEntry* rotate_tag = av_dict_get (stream->metadata, "rotate", 0, 0);
+               auto stream = _format_context->streams[*_video_stream];
+               auto rotate_tag = av_dict_get (stream->metadata, "rotate", 0, 0);
                uint8_t* displaymatrix = av_stream_get_side_data (stream, AV_PKT_DATA_DISPLAYMATRIX, 0);
                _rotation = 0;
 
@@ -196,39 +220,44 @@ DCPOMATIC_ENABLE_WARNINGS
 
 /** @param temporal_reference A string to which we should add two characters per frame;
  *  the first   is T or B depending on whether it's top- or bottom-field first,
- *  ths seconds is 3 or 2 depending on whether "repeat_pict" is true or not.
+ *  the second  is 3 or 2 depending on whether "repeat_pict" is true or not.
+ *  @return true if some video was decoded, otherwise false.
  */
-void
-FFmpegExaminer::video_packet (AVCodecContext* context, string& temporal_reference)
+bool
+FFmpegExaminer::video_packet (AVCodecContext* context, string& temporal_reference, AVPacket* packet)
 {
        DCPOMATIC_ASSERT (_video_stream);
 
        if (_first_video && !_need_video_length && temporal_reference.size() >= (PULLDOWN_CHECK_FRAMES * 2)) {
-               return;
+               return false;
        }
 
        int frame_finished;
 DCPOMATIC_DISABLE_WARNINGS
-       if (avcodec_decode_video2 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
+       if (avcodec_decode_video2 (context, _frame, &frame_finished, packet) < 0 || !frame_finished) {
+               return false;
+       }
 DCPOMATIC_ENABLE_WARNINGS
-               if (!_first_video) {
-                       _first_video = frame_time (_format_context->streams[_video_stream.get()]);
-               }
-               if (_need_video_length) {
-                       _video_length = frame_time (
-                               _format_context->streams[_video_stream.get()]
-                               ).get_value_or (ContentTime ()).frames_round (video_frame_rate().get ());
-               }
-               if (temporal_reference.size() < (PULLDOWN_CHECK_FRAMES * 2)) {
-                       temporal_reference += (_frame->top_field_first ? "T" : "B");
-                       temporal_reference += (_frame->repeat_pict ? "3" : "2");
-               }
+
+       if (!_first_video) {
+               _first_video = frame_time (_format_context->streams[_video_stream.get()]);
+       }
+       if (_need_video_length) {
+               _video_length = frame_time (
+                       _format_context->streams[_video_stream.get()]
+                       ).get_value_or (ContentTime ()).frames_round (video_frame_rate().get ());
        }
+       if (temporal_reference.size() < (PULLDOWN_CHECK_FRAMES * 2)) {
+               temporal_reference += (_frame->top_field_first ? "T" : "B");
+               temporal_reference += (_frame->repeat_pict ? "3" : "2");
+       }
+
+       return true;
 }
 
 
 void
-FFmpegExaminer::audio_packet (AVCodecContext* context, shared_ptr<FFmpegAudioStream> stream)
+FFmpegExaminer::audio_packet (AVCodecContext* context, shared_ptr<FFmpegAudioStream> stream, AVPacket* packet)
 {
        if (stream->first_audio) {
                return;
@@ -236,7 +265,7 @@ FFmpegExaminer::audio_packet (AVCodecContext* context, shared_ptr<FFmpegAudioStr
 
        int frame_finished;
 DCPOMATIC_DISABLE_WARNINGS
-       if (avcodec_decode_audio4 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
+       if (avcodec_decode_audio4 (context, _frame, &frame_finished, packet) >= 0 && frame_finished) {
 DCPOMATIC_ENABLE_WARNINGS
                stream->first_audio = frame_time (stream->stream (_format_context));
        }
@@ -286,7 +315,7 @@ FFmpegExaminer::sample_aspect_ratio () const
        AVRational sar = av_guess_sample_aspect_ratio (_format_context, _format_context->streams[_video_stream.get()], 0);
        if (sar.num == 0) {
                /* I assume this means that we don't know */
-               return optional<double> ();
+               return {};
        }
        return double (sar.num) / sar.den;
 }
@@ -294,7 +323,7 @@ FFmpegExaminer::sample_aspect_ratio () const
 string
 FFmpegExaminer::subtitle_stream_name (AVStream* s) const
 {
-       string n = stream_name (s);
+       auto n = stream_name (s);
 
        if (n.empty()) {
                n = _("unknown");
@@ -431,9 +460,9 @@ FFmpegExaminer::range () const
        switch (color_range()) {
        case AVCOL_RANGE_MPEG:
        case AVCOL_RANGE_UNSPECIFIED:
-               return VIDEO_RANGE_VIDEO;
+               return VideoRange::VIDEO;
        case AVCOL_RANGE_JPEG:
        default:
-               return VIDEO_RANGE_FULL;
+               return VideoRange::FULL;
        }
 }