Emit no audio from DCPs if none is mapped
[dcpomatic.git] / src / lib / video_filter_graph.cc
index b4198da723832a57b889e1eb94788ef81630e3c2..d5840c6d3b6790e8364599b4416f1ac232390ac4 100644
--- a/src/lib/video_filter_graph.cc
+++ b/src/lib/video_filter_graph.cc
 
 
 #include "compose.hpp"
+#include "dcpomatic_assert.h"
+#include "exceptions.h"
 #include "image.h"
 #include "video_filter_graph.h"
-#include "warnings.h"
+#include <dcp/scope_guard.h>
 extern "C" {
 #include <libavfilter/buffersrc.h>
 #include <libavfilter/buffersink.h>
@@ -38,7 +40,6 @@ using std::make_shared;
 using std::pair;
 using std::shared_ptr;
 using std::string;
-using std::vector;
 
 
 VideoFilterGraph::VideoFilterGraph (dcp::Size s, AVPixelFormat p, dcp::Fraction r)
@@ -50,16 +51,59 @@ VideoFilterGraph::VideoFilterGraph (dcp::Size s, AVPixelFormat p, dcp::Fraction
 }
 
 
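+/** Take an Image and process it using our configured filters, returning a
+ *  set of Images.  If this graph is a simple copy the input Image is
+ *  returned unchanged.
+ */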
+list<shared_ptr<const Image>>
+VideoFilterGraph::process(shared_ptr<const Image> image)
+{
+       if (_copy) {
+               return { image };
+       }
+
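+       /* Allocate a temporary AVFrame to carry the Image through the FFmpeg filter chain */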
+       auto frame = av_frame_alloc();
+       if (!frame) {
+               throw std::bad_alloc();
+       }
+
+       dcp::ScopeGuard sg = [&frame]() { av_frame_free(&frame); };
+
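+       /* Point the frame at the Image's planes and strides; no pixel data is copied */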
+       for (int i = 0; i < image->planes(); ++i) {
+               frame->data[i] = image->data()[i];
+               frame->linesize[i] = image->stride()[i];
+       }
+
+       frame->width = image->size().width;
+       frame->height = image->size().height;
+       frame->format = image->pixel_format();
+
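+       /* Push the frame into the filter chain */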
+       int r = av_buffersrc_write_frame(_buffer_src_context, frame);
+       if (r < 0) {
+               throw DecodeError(String::compose(N_("could not push buffer into filter chain (%1)."), r));
+       }
+
+       list<shared_ptr<const Image>> images;
+
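+       /* Pull filtered frames from the sink until none remain */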
+       while (true) {
+               if (av_buffersink_get_frame(_buffer_sink_context, _frame) < 0) {
+                       break;
+               }
+
+               images.push_back(make_shared<Image>(_frame, Image::Alignment::PADDED));
+               av_frame_unref (_frame);
+       }
+
+       return images;
+}
+
+
 /** Take an AVFrame and process it using our configured filters, returning a
  *  set of Images.  Caller handles memory management of the input frame.
  */
-list<pair<shared_ptr<Image>, int64_t>>
+list<pair<shared_ptr<const Image>, int64_t>>
 VideoFilterGraph::process (AVFrame* frame)
 {
-       list<pair<shared_ptr<Image>, int64_t>> images;
+       list<pair<shared_ptr<const Image>, int64_t>> images;
 
        if (_copy) {
-               images.push_back (make_pair(make_shared<Image>(frame, true), frame->best_effort_timestamp));
+               images.push_back (make_pair(make_shared<Image>(frame, Image::Alignment::PADDED), frame->best_effort_timestamp));
        } else {
                int r = av_buffersrc_write_frame (_buffer_src_context, frame);
                if (r < 0) {
@@ -71,7 +115,7 @@ VideoFilterGraph::process (AVFrame* frame)
                                break;
                        }
 
-                       images.push_back (make_pair(make_shared<Image>(_frame, true), frame->best_effort_timestamp));
+                       images.push_back (make_pair(make_shared<Image>(_frame, Image::Alignment::PADDED), frame->best_effort_timestamp));
                        av_frame_unref (_frame);
                }
        }
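
For context, a minimal sketch of how the new Image-based overload might be called. The helper name, the frame rate and the step that configures the filter chain are illustrative assumptions; none of them appear in this diff.

    // Hypothetical helper (not part of the patch) showing the new overload in use.
    #include "image.h"
    #include "video_filter_graph.h"
    #include <list>
    #include <memory>

    std::list<std::shared_ptr<const Image>>
    filter_image(std::shared_ptr<const Image> image)
    {
            // Build a graph matching the incoming Image; the 24fps rate is illustrative only.
            VideoFilterGraph graph(image->size(), image->pixel_format(), dcp::Fraction(24, 1));
            // ... the desired filter chain would be configured here (not shown in this diff) ...
            return graph.process(image);
    }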