Merge branch 'master' into speed-up
[dcpomatic.git] / src / lib / ffmpeg_decoder.cc
index 78d0c48c40f4b76bf56e4a73db1ff2807d3b81ab..acaf149f43ade599dff78d95c109d36c86133097 100644
@@ -47,17 +47,22 @@ extern "C" {
 #include "util.h"
 #include "log.h"
 #include "ffmpeg_decoder.h"
+#include "filter_graph.h"
 #include "subtitle.h"
 
 using std::cout;
 using std::string;
 using std::vector;
 using std::stringstream;
+using std::list;
 using boost::shared_ptr;
 using boost::optional;
+using boost::dynamic_pointer_cast;
 
 FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, shared_ptr<const Options> o, Job* j)
        : Decoder (f, o, j)
+       , VideoDecoder (f, o, j)
+       , AudioDecoder (f, o, j)
        , _format_context (0)
        , _video_stream (-1)
        , _frame (0)
@@ -115,9 +120,27 @@ FFmpegDecoder::setup_general ()
                if (s->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                        _video_stream = i;
                } else if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
-                       _audio_streams.push_back (AudioStream (stream_name (s), i, s->codec->sample_rate, s->codec->channel_layout));
+
+                       /* This is a hack; sometimes it seems that the stream's codec channel_layout isn't set up,
+                          so bodge it here.  No idea why we should have to do this.
+                       */
+                       
+                       if (s->codec->channel_layout == 0) {
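+                               /* av_get_default_channel_layout() guesses a layout from the
+                                  channel count; 2 channels gives a stereo layout, for example.
+                               */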
+                               s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels);
+                       }
+                       
+                       _audio_streams.push_back (
+                               shared_ptr<AudioStream> (
+                                       new FFmpegAudioStream (stream_name (s), i, s->codec->sample_rate, s->codec->channel_layout)
+                                       )
+                               );
+                       
                } else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
-                       _subtitle_streams.push_back (SubtitleStream (stream_name (s), i));
+                       _subtitle_streams.push_back (
+                               shared_ptr<SubtitleStream> (
+                                       new SubtitleStream (stream_name (s), i)
+                                       )
+                               );
                }
        }
 
@@ -141,14 +164,7 @@ FFmpegDecoder::setup_video ()
                throw DecodeError ("could not find video decoder");
        }
 
-       /* I think this prevents problems with green hash on decodes and
-          "changing frame properties on the fly is not supported by all filters"
-          messages with some content.  Although I'm not sure; needs checking.
-       */
-       AVDictionary* opts = 0;
-       av_dict_set (&opts, "threads", "1", 0);
-       
-       if (avcodec_open2 (_video_codec_context, _video_codec, &opts) < 0) {
+       if (avcodec_open2 (_video_codec_context, _video_codec, 0) < 0) {
                throw DecodeError ("could not open video decoder");
        }
 }
@@ -159,8 +175,11 @@ FFmpegDecoder::setup_audio ()
        if (!_audio_stream) {
                return;
        }
+
+       shared_ptr<FFmpegAudioStream> ffa = dynamic_pointer_cast<FFmpegAudioStream> (_audio_stream);
+       assert (ffa);
        
-       _audio_codec_context = _format_context->streams[_audio_stream.get().id()]->codec;
+       _audio_codec_context = _format_context->streams[ffa->id()]->codec;
        _audio_codec = avcodec_find_decoder (_audio_codec_context->codec_id);
 
        if (_audio_codec == 0) {
@@ -170,14 +189,6 @@ FFmpegDecoder::setup_audio ()
        if (avcodec_open2 (_audio_codec_context, _audio_codec, 0) < 0) {
                throw DecodeError ("could not open audio decoder");
        }
-
-       /* This is a hack; sometimes it seems that _audio_codec_context->channel_layout isn't set up,
-          so bodge it here.  No idea why we should have to do this.
-       */
-
-       if (_audio_codec_context->channel_layout == 0) {
-               _audio_codec_context->channel_layout = av_get_default_channel_layout (_audio_stream.get().channels());
-       }
 }
 
 void
@@ -187,7 +198,7 @@ FFmpegDecoder::setup_subtitle ()
                return;
        }
 
-       _subtitle_codec_context = _format_context->streams[_subtitle_stream.get().id()]->codec;
+       _subtitle_codec_context = _format_context->streams[_subtitle_stream->id()]->codec;
        _subtitle_codec = avcodec_find_decoder (_subtitle_codec_context->codec_id);
 
        if (_subtitle_codec == 0) {
@@ -223,17 +234,17 @@ FFmpegDecoder::pass ()
                int frame_finished;
 
                while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
-                       process_video (_frame);
+                       filter_and_emit_video (_frame);
                }
 
-               if (_audio_stream && _opt->decode_audio && _film->use_content_audio()) {
+               if (_audio_stream && _opt->decode_audio) {
                        while (avcodec_decode_audio4 (_audio_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
                                int const data_size = av_samples_get_buffer_size (
                                        0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1
                                        );
 
                                assert (_audio_codec_context->channels == _film->audio_channels());
-                               process_audio (deinterleave_audio (_frame->data[0], data_size));
+                               Audio (deinterleave_audio (_frame->data[0], data_size));
                        }
                }
 
@@ -241,6 +252,8 @@ FFmpegDecoder::pass ()
        }
 
        avcodec_get_frame_defaults (_frame);
+
+       shared_ptr<FFmpegAudioStream> ffa = dynamic_pointer_cast<FFmpegAudioStream> (_audio_stream);
        
        if (_packet.stream_index == _video_stream) {
 
@@ -259,6 +272,11 @@ FFmpegDecoder::pass ()
                        double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base)
                                * av_frame_get_best_effort_timestamp(_frame);
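+                       /* For example (illustrative values only): with a stream time base of
+                          1/90000 and a best-effort timestamp of 900000, av_q2d() gives
+                          1.0 / 90000 and source_pts_seconds comes out as 10.0.
+                       */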
 
+                       _film->log()->log (
+                               String::compose ("Source video frame ready; source at %1, output at %2", source_pts_seconds, out_pts_seconds),
+                               Log::VERBOSE
+                               );
+
                        if (!_first_video) {
                                _first_video = source_pts_seconds;
                        }
@@ -274,8 +292,8 @@ FFmpegDecoder::pass ()
                                        repeat_last_video ();
                                        _film->log()->log (
                                                String::compose (
-                                                       "Extra frame inserted at %1s; source frame %2, source PTS %3",
-                                                       out_pts_seconds, video_frame(), source_pts_seconds
+                                                       "Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)",
+                                                       out_pts_seconds, video_frame(), source_pts_seconds, frames_per_second()
                                                        )
                                                );
                                }
@@ -283,14 +301,14 @@ FFmpegDecoder::pass ()
 
                        if (delta > -one_frame) {
                                /* Process this frame */
-                               process_video (_frame);
+                               filter_and_emit_video (_frame);
                        } else {
                                /* Otherwise we are omitting a frame to keep things right */
                                _film->log()->log (String::compose ("Frame removed at %1s", out_pts_seconds));
                        }
                }
 
-       } else if (_audio_stream && _packet.stream_index == _audio_stream.get().id() && _opt->decode_audio && _film->use_content_audio()) {
+       } else if (ffa && _packet.stream_index == ffa->id() && _opt->decode_audio) {
 
                int frame_finished;
                if (avcodec_decode_audio4 (_audio_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
@@ -314,19 +332,19 @@ FFmpegDecoder::pass ()
                                        */
                        
                                        /* frames of silence that we must push */
-                                       int const s = rint ((_first_audio.get() - _first_video.get()) * _audio_stream.get().sample_rate ());
+                                       int const s = rint ((_first_audio.get() - _first_video.get()) * ffa->sample_rate ());
                                        
                                        _film->log()->log (
                                                String::compose (
-                                                       "First video at %1, first audio at %2, pushing %3 frames of silence for %4 channels (%5 bytes per sample)",
-                                                       _first_video.get(), _first_audio.get(), s, _audio_stream.get().channels(), bytes_per_audio_sample()
+                                                       "First video at %1, first audio at %2, pushing %3 audio frames of silence for %4 channels (%5 bytes per sample)",
+                                                       _first_video.get(), _first_audio.get(), s, ffa->channels(), bytes_per_audio_sample()
                                                        )
                                                );
                                        
                                        if (s) {
-                                               shared_ptr<AudioBuffers> audio (new AudioBuffers (_audio_stream.get().channels(), s));
+                                               shared_ptr<AudioBuffers> audio (new AudioBuffers (ffa->channels(), s));
                                                audio->make_silent ();
-                                               process_audio (audio);
+                                               Audio (audio);
                                        }
                                }
 
@@ -335,11 +353,11 @@ FFmpegDecoder::pass ()
                                        );
                                
                                assert (_audio_codec_context->channels == _film->audio_channels());
-                               process_audio (deinterleave_audio (_frame->data[0], data_size));
+                               Audio (deinterleave_audio (_frame->data[0], data_size));
                        }
                }
                        
-       } else if (_subtitle_stream && _packet.stream_index == _subtitle_stream.get().id() && _opt->decode_subtitles && _first_video) {
+       } else if (_subtitle_stream && _packet.stream_index == _subtitle_stream->id() && _opt->decode_subtitles && _first_video) {
 
                int got_subtitle;
                AVSubtitle sub;
@@ -348,9 +366,14 @@ FFmpegDecoder::pass ()
                           indicate that the previous subtitle should stop.
                        */
                        if (sub.num_rects > 0) {
-                               process_subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub, _first_video.get())));
+                               try {
+                                       emit_subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub, _first_video.get())));
+                               } catch (...) {
+                                       /* some problem with the subtitle; we probably didn't understand it */
+                               }
                        } else {
-                               process_subtitle (shared_ptr<TimedSubtitle> ());
+                               emit_subtitle (shared_ptr<TimedSubtitle> ());
                        }
                        avsubtitle_free (&sub);
                }
@@ -365,14 +388,17 @@ FFmpegDecoder::deinterleave_audio (uint8_t* data, int size)
 {
        assert (_film->audio_channels());
        assert (bytes_per_audio_sample());
+
+       shared_ptr<FFmpegAudioStream> ffa = dynamic_pointer_cast<FFmpegAudioStream> (_audio_stream);
+       assert (ffa);
        
        /* Deinterleave and convert to float */
 
-       assert ((size % (bytes_per_audio_sample() * _audio_stream.get().channels())) == 0);
+       assert ((size % (bytes_per_audio_sample() * ffa->channels())) == 0);
 
        int const total_samples = size / bytes_per_audio_sample();
        int const frames = total_samples / _film->audio_channels();
-       shared_ptr<AudioBuffers> audio (new AudioBuffers (_audio_stream.get().channels(), frames));
+       shared_ptr<AudioBuffers> audio (new AudioBuffers (ffa->channels(), frames));
 
        switch (audio_sample_format()) {
        case AV_SAMPLE_FMT_S16:
@@ -515,15 +541,99 @@ FFmpegDecoder::bytes_per_audio_sample () const
 }
 
 void
-FFmpegDecoder::set_audio_stream (optional<AudioStream> s)
+FFmpegDecoder::set_audio_stream (shared_ptr<AudioStream> s)
 {
-       Decoder::set_audio_stream (s);
+       AudioDecoder::set_audio_stream (s);
        setup_audio ();
 }
 
 void
-FFmpegDecoder::set_subtitle_stream (optional<SubtitleStream> s)
+FFmpegDecoder::set_subtitle_stream (shared_ptr<SubtitleStream> s)
 {
-       Decoder::set_subtitle_stream (s);
+       VideoDecoder::set_subtitle_stream (s);
        setup_subtitle ();
 }
+
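+/** Pass a decoded video frame through a FilterGraph, creating a new graph if we do
+ *  not already have one for this frame's size and pixel format, and emit the
+ *  resulting images.
+ *  @param frame Frame to process.
+ */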
+void
+FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
+{
+       shared_ptr<FilterGraph> graph;
+
+       list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
+       while (i != _filter_graphs.end() && !(*i)->can_process (Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
+               ++i;
+       }
+
+       if (i == _filter_graphs.end ()) {
+               graph.reset (new FilterGraph (_film, this, _opt->apply_crop, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
+               _filter_graphs.push_back (graph);
+               _film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format));
+       } else {
+               graph = *i;
+       }
+
+       list<shared_ptr<Image> > images = graph->process (frame);
+
+       for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
+               emit_video (*i);
+       }
+}
+
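+/** Create an FFmpegAudioStream from a string describing it (as produced by to_string()),
+ *  or return an empty shared_ptr if the string describes some other kind of stream.
+ *  @param t String describing the stream.
+ *  @param v Version of the description, or empty for version < 1 (which had no type field).
+ */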
+shared_ptr<FFmpegAudioStream>
+FFmpegAudioStream::create (string t, optional<int> v)
+{
+       if (!v) {
+               /* version < 1; no type in the string, and there are only FFmpeg streams anyway */
+               return shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream (t, v));
+       }
+
+       stringstream s (t);
+       string type;
+       s >> type;
+       if (type != "ffmpeg") {
+               return shared_ptr<FFmpegAudioStream> ();
+       }
+
+       return shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream (t, v));
+}
+
+FFmpegAudioStream::FFmpegAudioStream (string t, optional<int> version)
+{
+       stringstream n (t);
+       
+       int name_index = 4;
+       if (!version) {
+               name_index = 2;
+               int channels;
+               n >> _id >> channels;
+               _channel_layout = av_get_default_channel_layout (channels);
+               _sample_rate = 0;
+       } else {
+               string type;
+               /* Current (marked version 1) */
+               n >> type >> _id >> _sample_rate >> _channel_layout;
+               assert (type == "ffmpeg");
+       }
+
+       for (int i = 0; i < name_index; ++i) {
+               size_t const s = t.find (' ');
+               if (s != string::npos) {
+                       t = t.substr (s + 1);
+               }
+       }
+
+       _name = t;
+}
+
+string
+FFmpegAudioStream::to_string () const
+{
+       return String::compose ("ffmpeg %1 %2 %3 %4", _id, _sample_rate, _channel_layout, _name);
+}
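+
+/* A minimal sketch of the round trip (values here are illustrative only):
+
+      shared_ptr<FFmpegAudioStream> s = FFmpegAudioStream::create ("ffmpeg 1 48000 3 eng stereo", optional<int> (1));
+      assert (s && s->to_string() == "ffmpeg 1 48000 3 eng stereo");
+
+   The version-1 branch of the constructor reads the first four fields and then
+   strips them (name_index == 4), leaving "eng stereo" as the name.
+*/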
+
+/** @return Length (in video frames) according to our content's header */
+SourceFrame
+FFmpegDecoder::length () const
+{
+       return (double(_format_context->duration) / AV_TIME_BASE) * frames_per_second();
+}
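+
+/* As a worked example (assumed values): a container whose header reports a duration
+   of 90 * AV_TIME_BASE (i.e. 90 seconds) at 24 frames per second gives a length of
+   2160 video frames.
+*/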