X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fvideo_filter_graph.cc;h=d5840c6d3b6790e8364599b4416f1ac232390ac4;hb=HEAD;hp=f1141150b6c9ce042b6b15e837c4f5baceb887fc;hpb=7bc2134d658778e04f1756c255e604b4ab5a5831;p=dcpomatic.git

diff --git a/src/lib/video_filter_graph.cc b/src/lib/video_filter_graph.cc
index f1141150b..d5840c6d3 100644
--- a/src/lib/video_filter_graph.cc
+++ b/src/lib/video_filter_graph.cc
@@ -20,12 +20,15 @@
 #include "compose.hpp"
+#include "dcpomatic_assert.h"
+#include "exceptions.h"
 #include "image.h"
 #include "video_filter_graph.h"
-#include "warnings.h"
+#include <dcp/scope_guard.h>
 extern "C" {
 #include <libavfilter/buffersink.h>
 #include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
 }
 
 #include "i18n.h"
 
@@ -37,7 +40,6 @@
 using std::make_shared;
 using std::pair;
 using std::shared_ptr;
 using std::string;
-using std::vector;
 
 
 VideoFilterGraph::VideoFilterGraph (dcp::Size s, AVPixelFormat p, dcp::Fraction r)
@@ -49,17 +51,59 @@ VideoFilterGraph::VideoFilterGraph (dcp::Size s, AVPixelFormat p, dcp::Fraction
 }
 
 
+list<shared_ptr<const Image>>
+VideoFilterGraph::process(shared_ptr<const Image> image)
+{
+	if (_copy) {
+		return { image };
+	}
+
+	auto frame = av_frame_alloc();
+	if (!frame) {
+		throw std::bad_alloc();
+	}
+
+	dcp::ScopeGuard sg = [&frame]() { av_frame_free(&frame); };
+
+	for (int i = 0; i < image->planes(); ++i) {
+		frame->data[i] = image->data()[i];
+		frame->linesize[i] = image->stride()[i];
+	}
+
+	frame->width = image->size().width;
+	frame->height = image->size().height;
+	frame->format = image->pixel_format();
+
+	int r = av_buffersrc_write_frame(_buffer_src_context, frame);
+	if (r < 0) {
+		throw DecodeError(String::compose(N_("could not push buffer into filter chain (%1)."), r));
+	}
+
+	list<shared_ptr<const Image>> images;
+
+	while (true) {
+		if (av_buffersink_get_frame(_buffer_sink_context, _frame) < 0) {
+			break;
+		}
+
+		images.push_back(make_shared<Image>(_frame, Image::Alignment::PADDED));
+		av_frame_unref (_frame);
+	}
+
+	return images;
+}
+
+
 /** Take an AVFrame and process it using our configured filters, returning a
  *  set of Images.  Caller handles memory management of the input frame.
  */
-list<pair<shared_ptr<Image>, int64_t>>
+list<pair<shared_ptr<const Image>, int64_t>>
 VideoFilterGraph::process (AVFrame* frame)
 {
-	list<pair<shared_ptr<Image>, int64_t>> images;
+	list<pair<shared_ptr<const Image>, int64_t>> images;
 
-DCPOMATIC_DISABLE_WARNINGS
 	if (_copy) {
-		images.push_back (make_pair(make_shared<Image>(frame), av_frame_get_best_effort_timestamp (frame)));
+		images.push_back (make_pair(make_shared<Image>(frame, Image::Alignment::PADDED), frame->best_effort_timestamp));
 	} else {
 		int r = av_buffersrc_write_frame (_buffer_src_context, frame);
 		if (r < 0) {
@@ -71,11 +115,10 @@ DCPOMATIC_DISABLE_WARNINGS
 			break;
 		}
 
-			images.push_back (make_pair(make_shared<Image>(_frame), av_frame_get_best_effort_timestamp (_frame)));
+			images.push_back (make_pair(make_shared<Image>(_frame, Image::Alignment::PADDED), frame->best_effort_timestamp));
 			av_frame_unref (_frame);
 		}
 	}
-DCPOMATIC_ENABLE_WARNINGS
 
 	return images;
 }
@@ -107,15 +150,12 @@ VideoFilterGraph::src_parameters () const
 }
 
 
-void *
-VideoFilterGraph::sink_parameters () const
+void
+VideoFilterGraph::set_parameters (AVFilterContext* context) const
 {
-	auto sink_params = av_buffersink_params_alloc ();
-	auto pixel_fmts = new AVPixelFormat[2];
-	pixel_fmts[0] = _pixel_format;
-	pixel_fmts[1] = AV_PIX_FMT_NONE;
-	sink_params->pixel_fmts = pixel_fmts;
-	return sink_params;
+	AVPixelFormat pix_fmts[] = { _pixel_format, AV_PIX_FMT_NONE };
+	int r = av_opt_set_int_list (context, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+	DCPOMATIC_ASSERT (r >= 0);
 }
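
Note: both process() overloads in the diff above use the same libavfilter push/pull pattern: write one frame to the buffer source, then drain the buffer sink until it returns an error. The standalone sketch below shows that loop in isolation; the helper name filter_frame and its ownership convention (the caller frees the returned frames) are illustrative assumptions, not dcpomatic code.

// Sketch only: push one frame into an already-configured filter graph and
// drain whatever the sink produces.  "filter_frame" is a hypothetical helper;
// the caller owns the returned frames and frees them with av_frame_free().
extern "C" {
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/frame.h>
}
#include <vector>

std::vector<AVFrame*>
filter_frame(AVFilterContext* source, AVFilterContext* sink, AVFrame* in)
{
	std::vector<AVFrame*> out;

	// The graph is free to buffer input, so one frame in can mean zero or
	// more frames out of the sink.
	if (av_buffersrc_write_frame(source, in) < 0) {
		return out;
	}

	while (true) {
		auto filtered = av_frame_alloc();
		if (av_buffersink_get_frame(sink, filtered) < 0) {
			av_frame_free(&filtered);
			break;
		}
		out.push_back(filtered);
	}

	return out;
}

VideoFilterGraph itself reuses a single member frame and calls av_frame_unref() on it after copying, rather than allocating a fresh AVFrame per output as this sketch does.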
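Note: the new set_parameters() restricts the buffersink's accepted pixel formats with av_opt_set_int_list(), replacing the deprecated AVBufferSinkParams/av_buffersink_params_alloc() path removed above. The following rough sketch shows how such a sink might be created and configured, assuming an already-built AVFilterGraph; the helper name make_sink and its error handling are illustrative, not part of dcpomatic.

// Sketch only: create a buffersink in an existing AVFilterGraph and restrict
// its accepted pixel formats with av_opt_set_int_list().  "make_sink" is a
// hypothetical helper, not a dcpomatic function.
extern "C" {
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavutil/opt.h>
#include <libavutil/pixfmt.h>
}
#include <stdexcept>

AVFilterContext*
make_sink(AVFilterGraph* graph, AVPixelFormat format)
{
	AVFilterContext* sink = nullptr;
	int r = avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"), "out", nullptr, nullptr, graph);
	if (r < 0) {
		throw std::runtime_error("could not create buffersink");
	}

	// The list passed to av_opt_set_int_list() must end with the same
	// terminator value that is given as its fourth argument.
	AVPixelFormat pix_fmts[] = { format, AV_PIX_FMT_NONE };
	r = av_opt_set_int_list(sink, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (r < 0) {
		throw std::runtime_error("could not set pix_fmts on buffersink");
	}

	return sink;
}

The DCPOMATIC_ASSERT in the diff corresponds to the error check here: av_opt_set_int_list() returns a negative AVERROR code on failure, and AV_OPT_SEARCH_CHILDREN lets the option be located on the filter's private context.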