Give Film a container; move crop into video content; other bits.
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index d5285b73abea9448a971670017b823a9dbd507e0..fcb2e82baabd84de4b0f6d53169a92831b920809 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -1,3 +1,5 @@
+/* -*- c-basic-offset: 8; default-tab-width: 8; -*- */
+
 /*
     Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
 
@@ -48,6 +50,7 @@ extern "C" {
 #include "ffmpeg_decoder.h"
 #include "filter_graph.h"
 #include "subtitle.h"
+#include "audio_buffers.h"
 
 #include "i18n.h"
 
@@ -65,8 +68,8 @@ boost::mutex FFmpegDecoder::_mutex;
 
 FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> f, shared_ptr<const FFmpegContent> c, bool video, bool audio, bool subtitles)
        : Decoder (f)
-       , VideoDecoder (f)
-       , AudioDecoder (f)
+       , VideoDecoder (f, c)
+       , AudioDecoder (f, c)
        , _ffmpeg_content (c)
        , _format_context (0)
        , _video_stream (-1)
@@ -138,11 +141,13 @@ FFmpegDecoder::setup_general ()
                        }
                        
                        _audio_streams.push_back (
-                               FFmpegAudioStream (stream_name (s), i, s->codec->sample_rate, s->codec->channels)
+                               shared_ptr<FFmpegAudioStream> (
+                                       new FFmpegAudioStream (stream_name (s), i, s->codec->sample_rate, s->codec->channels)
+                                       )
                                );
-                       
+
                } else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
-                       _subtitle_streams.push_back (FFmpegSubtitleStream (stream_name (s), i));
+                       _subtitle_streams.push_back (shared_ptr<FFmpegSubtitleStream> (new FFmpegSubtitleStream (stream_name (s), i)));
                }
        }
 
@@ -220,7 +225,7 @@ bool
 FFmpegDecoder::pass ()
 {
        int r = av_read_frame (_format_context, &_packet);
-       
+
        if (r < 0) {
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
@@ -236,12 +241,8 @@ FFmpegDecoder::pass ()
                
                /* XXX: should we reset _packet.data and size after each *_decode_* call? */
                
-               int frame_finished;
-
                if (_decode_video) {
-                       while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
-                               filter_and_emit_video ();
-                       }
+                       while (decode_video_packet ());
                }
 
                if (_ffmpeg_content->audio_stream() && _decode_audio) {
@@ -254,18 +255,7 @@ FFmpegDecoder::pass ()
        avcodec_get_frame_defaults (_frame);
 
        if (_packet.stream_index == _video_stream && _decode_video) {
-
-               int frame_finished;
-               int const r = avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet);
-               if (r >= 0 && frame_finished) {
-
-                       if (r != _packet.size) {
-                               _film->log()->log (String::compose (N_("Used only %1 bytes of %2 in packet"), r, _packet.size));
-                       }
-
-                       filter_and_emit_video ();
-               }
-
+               decode_video_packet ();
        } else if (_ffmpeg_content->audio_stream() && _packet.stream_index == _ffmpeg_content->audio_stream()->id && _decode_audio) {
                decode_audio_packet ();
        } else if (_ffmpeg_content->subtitle_stream() && _packet.stream_index == _ffmpeg_content->subtitle_stream()->id && _decode_subtitles) {
@@ -481,38 +471,6 @@ FFmpegDecoder::bytes_per_audio_sample () const
        return av_get_bytes_per_sample (audio_sample_format ());
 }
 
-void
-FFmpegDecoder::filter_and_emit_video ()
-{
-       boost::mutex::scoped_lock lm (_filter_graphs_mutex);
-       
-       shared_ptr<FilterGraph> graph;
-
-       list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
-       while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
-               ++i;
-       }
-
-       if (i == _filter_graphs.end ()) {
-               graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
-               _filter_graphs.push_back (graph);
-               _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
-       } else {
-               graph = *i;
-       }
-
-       list<shared_ptr<Image> > images = graph->process (_frame);
-
-       for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
-               int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
-               if (bet != AV_NOPTS_VALUE) {
-                       emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base));
-               } else {
-                       _film->log()->log ("Dropping frame without PTS");
-               }
-       }
-}
-
 bool
 FFmpegDecoder::seek (double p)
 {
@@ -582,7 +540,6 @@ void
 FFmpegDecoder::film_changed (Film::Property p)
 {
        switch (p) {
-       case Film::CROP:
        case Film::FILTERS:
        {
                boost::mutex::scoped_lock lm (_filter_graphs_mutex);
@@ -635,3 +592,45 @@ FFmpegDecoder::decode_audio_packet ()
                }
        }
 }
+
+bool
+FFmpegDecoder::decode_video_packet ()
+{
+       int frame_finished;
+       if (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
+               return false;
+       }
+               
+       boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+       
+       shared_ptr<FilterGraph> graph;
+       
+       list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
+       while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
+               ++i;
+       }
+       
+       if (i == _filter_graphs.end ()) {
+               graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+               _filter_graphs.push_back (graph);
+               _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
+       } else {
+               graph = *i;
+       }
+       
+       list<shared_ptr<Image> > images = graph->process (_frame);
+       
+       for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
+               int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
+               if (bet != AV_NOPTS_VALUE) {
+                       /* XXX: may need to insert extra frames / remove frames here ...
+                          (as per old Matcher)
+                       */
+                       emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ);
+               } else {
+                       _film->log()->log ("Dropping frame without PTS");
+               }
+       }
+
+       return true;
+}
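
A side note on the refactor above: pass() now drains each video packet with while (decode_video_packet ()), and the emitted timestamp is converted from stream time-base units into TIME_HZ ticks rather than the plain seconds that the old filter_and_emit_video() passed to emit_video(). The standalone sketch below illustrates only that conversion; the TIME_HZ value, the TimeBase struct and pts_to_ticks() are assumptions for illustration and are not part of the patch.

#include <cstdint>
#include <iostream>

/* Assumed tick rate for illustration only; the real TIME_HZ is defined elsewhere in DCP-o-matic. */
static int64_t const TIME_HZ = 96000;

struct TimeBase {
        int num;
        int den;
};

/* Mirror of the expression used in decode_video_packet():
     bet * av_q2d (time_base) * TIME_HZ
   i.e. best-effort PTS -> seconds -> internal ticks.
*/
static int64_t
pts_to_ticks (int64_t bet, TimeBase tb)
{
        double const seconds = bet * (static_cast<double> (tb.num) / tb.den);
        return static_cast<int64_t> (seconds * TIME_HZ);
}

int
main ()
{
        /* A 25 fps stream with a 1/25 time base: frame index 50 is 2 s in, i.e. 192000 ticks. */
        std::cout << pts_to_ticks (50, TimeBase { 1, 25 }) << "\n";
        return 0;
}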