#include "filter.h"
#include "exceptions.h"
#include "image.h"
-#include "film.h"
#include "ffmpeg_decoder.h"
#include "i18n.h"
using std::stringstream;
using std::string;
using std::list;
+using std::cout;
using boost::shared_ptr;
+using boost::weak_ptr;
using libdcp::Size;
-/** Construct a FilterGraph for the settings in a film.
- * @param film Film.
- * @param decoder Decoder that we are using.
+/** Construct a FilterGraph for the settings in a piece of content.
+ * @param content Content.
* @param s Size of the images to process.
* @param p Pixel format of the images to process.
*/
-FilterGraph::FilterGraph (shared_ptr<Film> film, FFmpegDecoder* decoder, libdcp::Size s, AVPixelFormat p)
+FilterGraph::FilterGraph (shared_ptr<const FFmpegContent> content, libdcp::Size s, AVPixelFormat p)
: _buffer_src_context (0)
, _buffer_sink_context (0)
, _size (s)
, _pixel_format (p)
{
- string filters = Filter::ffmpeg_strings (film->filters()).first;
+ _frame = av_frame_alloc ();
+
+ string filters = Filter::ffmpeg_strings (content->filters()).first;
if (!filters.empty ()) {
- filters += N_(",");
+ filters += ",";
}
- filters += crop_string (Position (film->crop().left, film->crop().top), film->cropped_size (decoder->native_size()));
+ Crop crop = content->crop ();
+ libdcp::Size cropped_size = _size;
+ cropped_size.width -= crop.left + crop.right;
+ cropped_size.height -= crop.top + crop.bottom;
+ filters += crop_string (Position (crop.left, crop.top), cropped_size);
AVFilterGraph* graph = avfilter_graph_alloc();
if (graph == 0) {
}
stringstream a;
- a << _size.width << N_(":")
- << _size.height << N_(":")
- << _pixel_format << N_(":")
- << decoder->time_base_numerator() << N_(":")
- << decoder->time_base_denominator() << N_(":")
- << decoder->sample_aspect_ratio_numerator() << N_(":")
- << decoder->sample_aspect_ratio_denominator();
+ a << "video_size=" << _size.width << "x" << _size.height << ":"
+ << "pix_fmt=" << _pixel_format << ":"
+ << "time_base=1/1:"
+ << "pixel_aspect=1/1";
int r;
- if ((r = avfilter_graph_create_filter (&_buffer_src_context, buffer_src, N_("in"), a.str().c_str(), 0, graph)) < 0) {
+ if ((r = avfilter_graph_create_filter (&_buffer_src_context, buffer_src, "in", a.str().c_str(), 0, graph)) < 0) {
throw DecodeError (N_("could not create buffer source"));
}
throw DecodeError (N_("could not create buffer sink."));
}
+ av_free (sink_params);
+
AVFilterInOut* outputs = avfilter_inout_alloc ();
outputs->name = av_strdup(N_("in"));
outputs->filter_ctx = _buffer_src_context;
/* XXX: leaking `inputs' / `outputs' ? */
}
+FilterGraph::~FilterGraph ()
+{
+ av_frame_free (&_frame);
+}
+
/** Take an AVFrame and process it using our configured filters, returning a
 *  set of Images.  Caller handles memory management of the input frame.
 *  @param frame Frame to push into the filter chain.
 *  @return Images produced by the filter chain for this input (may be empty).
 *  @throws DecodeError if the frame cannot be pushed into, or pulled out of,
 *  the filter chain.
 */
list<shared_ptr<Image> >
FilterGraph::process (AVFrame* frame)
{
	list<shared_ptr<Image> > images;

	if (av_buffersrc_write_frame (_buffer_src_context, frame) < 0) {
		throw DecodeError (N_("could not push buffer into filter chain."));
	}

	/* Drain everything the sink has for us.  Per the FFmpeg documentation,
	   AVERROR(EAGAIN) means "no frame available right now" and AVERROR_EOF
	   means the chain is finished; any other negative value is a genuine
	   error.  The previous code broke out of the loop on every negative
	   return, silently swallowing real failures. */
	while (true) {
		int const r = av_buffersink_get_frame (_buffer_sink_context, _frame);
		if (r == AVERROR(EAGAIN) || r == AVERROR_EOF) {
			break;
		} else if (r < 0) {
			throw DecodeError (N_("could not get output from filter chain."));
		}

		/* SimpleImage presumably copies the frame's data — TODO confirm,
		   since we unref _frame immediately afterwards so we can re-use it. */
		images.push_back (shared_ptr<Image> (new SimpleImage (_frame)));
		av_frame_unref (_frame);
	}

	return images;
}