Merge master.
[dcpomatic.git] / src / lib / ffmpeg_examiner.cc
index e5d356a27b7b213ffeaaa2a04f6e09f9c168ca94..72db9bce1a3cd9abf1b3c2191eb5ed1f0180d10a 100644
@@ -24,9 +24,14 @@ extern "C" {
 #include "ffmpeg_examiner.h"
 #include "ffmpeg_content.h"
 
+#include "i18n.h"
+
 using std::string;
+using std::cout;
+using std::max;
 using std::stringstream;
 using boost::shared_ptr;
+using boost::optional;
 
 FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c)
        : FFmpeg (c)
@@ -47,40 +52,121 @@ FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c)
                        
                        _audio_streams.push_back (
                                shared_ptr<FFmpegAudioStream> (
-                                       new FFmpegAudioStream (stream_name (s), i, s->codec->sample_rate, s->codec->channels)
+                                       new FFmpegAudioStream (audio_stream_name (s), s->id, s->codec->sample_rate, s->codec->channels)
                                        )
                                );
 
                } else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
-                       _subtitle_streams.push_back (shared_ptr<FFmpegSubtitleStream> (new FFmpegSubtitleStream (stream_name (s), i)));
+                       _subtitle_streams.push_back (shared_ptr<FFmpegSubtitleStream> (new FFmpegSubtitleStream (subtitle_stream_name (s), s->id)));
+               }
+       }
+
+       /* Run through the file until we have found the first video frame and the first audio frame of each audio stream */
+
+       while (1) {
+               int r = av_read_frame (_format_context, &_packet);
+               if (r < 0) {
+                       break;
+               }
+
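+               /* Set non-zero by the decode calls below when a complete frame has been produced */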
+               int frame_finished;
+
+               AVCodecContext* context = _format_context->streams[_packet.stream_index]->codec;
+
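+               /* Video packet: decode it and note the time of the first frame that comes out */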
+               if (_packet.stream_index == _video_stream && !_first_video) {
+                       if (avcodec_decode_video2 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
+                               _first_video = frame_time (_format_context->streams[_video_stream]);
+                       }
+               } else {
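+                       /* Audio packet: decode it to find the first-audio time of whichever of our streams it belongs to */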
+                       for (size_t i = 0; i < _audio_streams.size(); ++i) {
+                               if (_audio_streams[i]->uses_index (_format_context, _packet.stream_index) && !_audio_streams[i]->first_audio) {
+                                       if (avcodec_decode_audio4 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
+                                               _audio_streams[i]->first_audio = frame_time (_audio_streams[i]->stream (_format_context));
+                                       }
+                               }
+                       }
+               }
+
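+               /* Check whether every audio stream now has its first-audio time */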
+               bool have_all_audio = true;
+               size_t i = 0;
+               while (i < _audio_streams.size() && have_all_audio) {
+                       have_all_audio = _audio_streams[i]->first_audio;
+                       ++i;
+               }
+
+               av_free_packet (&_packet);
+               
+               if (_first_video && have_all_audio) {
+                       break;
                }
        }
+}
+
+optional<ContentTime>
+FFmpegExaminer::frame_time (AVStream* s) const
+{
+       optional<ContentTime> t;
+       
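+       /* Use the frame's best-effort timestamp; it is AV_NOPTS_VALUE if no timestamp is available */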
+       int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
+       if (bet != AV_NOPTS_VALUE) {
+               t = ContentTime::from_seconds (bet * av_q2d (s->time_base));
+       }
 
+       return t;
 }
 
 float
 FFmpegExaminer::video_frame_rate () const
 {
-       AVStream* s = _format_context->streams[_video_stream];
-
-       if (s->avg_frame_rate.num && s->avg_frame_rate.den) {
-               return av_q2d (s->avg_frame_rate);
-       }
-
-       return av_q2d (s->r_frame_rate);
+       /* This use of r_frame_rate is debatable; there are a few different
+        * frame rates in the format context, but this one seems to be the most
+        * reliable.
+        */
+       return av_q2d (av_stream_get_r_frame_rate (_format_context->streams[_video_stream]));
 }
 
-libdcp::Size
+dcp::Size
 FFmpegExaminer::video_size () const
 {
-       return libdcp::Size (_video_codec_context->width, _video_codec_context->height);
+       return dcp::Size (video_codec_context()->width, video_codec_context()->height);
 }
 
-/** @return Length (in video frames) according to our content's header */
-ContentVideoFrame
+/** @return Length according to our content's header */
+ContentTime
 FFmpegExaminer::video_length () const
 {
-       return (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate();
+       ContentTime const length = ContentTime::from_seconds (double (_format_context->duration) / AV_TIME_BASE);
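+       /* Make sure we never return a zero length */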
+       return ContentTime (max (int64_t (1), length.get ()));
+}
+
+string
+FFmpegExaminer::audio_stream_name (AVStream* s) const
+{
+       stringstream n;
+
+       n << stream_name (s);
+
+       if (!n.str().empty()) {
+               n << "; ";
+       }
+
+       n << s->codec->channels << " channels";
+
+       return n.str ();
+}
+
+string
+FFmpegExaminer::subtitle_stream_name (AVStream* s) const
+{
+       stringstream n;
+
+       n << stream_name (s);
+
+       if (n.str().empty()) {
+               n << _("unknown");
+       }
+
+       return n.str ();
 }
 
 string
@@ -103,9 +189,5 @@ FFmpegExaminer::stream_name (AVStream* s) const
                }
        }
 
-       if (n.str().empty()) {
-               n << "unknown";
-       }
-
        return n.str ();
 }