summary | refs | log | tree | commit | diff
path: root/src/lib
diff options
context:
space:
mode:
author    Carl Hetherington <cth@carlh.net>  2013-05-25 01:07:35 +0100
committer Carl Hetherington <cth@carlh.net>  2013-05-25 01:07:35 +0100
commit    996b0c06e23bcb6b300d7b8799df94993692e07d (patch)
tree      615ff0c372dac97321489e3cb7f316cb4cb9eeec /src/lib
parent    4f03da3aa12525cb8389ddefee629f5d0b2ac0aa (diff)
parent    907735ee6ca162583c7c9d20f5603a6db83a149f (diff)
Merge master and multifarious hackery.
Diffstat (limited to 'src/lib')
-rw-r--r--  src/lib/ab_transcode_job.cc     |   3
-rw-r--r--  src/lib/audio_decoder.cc        |   6
-rw-r--r--  src/lib/dcp_video_frame.cc      |  11
-rw-r--r--  src/lib/dcp_video_frame.h       |   3
-rw-r--r--  src/lib/encoder.cc              |   3
-rw-r--r--  src/lib/ffmpeg_compatibility.cc | 119
-rw-r--r--  src/lib/ffmpeg_compatibility.h  |  31
-rw-r--r--  src/lib/ffmpeg_content.cc       |  22
-rw-r--r--  src/lib/ffmpeg_content.h        |  12
-rw-r--r--  src/lib/ffmpeg_decoder.cc       |  34
-rw-r--r--  src/lib/ffmpeg_decoder.h        |   2
-rw-r--r--  src/lib/film.cc                 |  26
-rw-r--r--  src/lib/film.h                  |  10
-rw-r--r--  src/lib/filter_graph.cc         | 108
-rw-r--r--  src/lib/filter_graph.h          |   8
-rw-r--r--  src/lib/image.cc                | 204
-rw-r--r--  src/lib/image.h                 |  29
-rw-r--r--  src/lib/matcher.cc              | 224
-rw-r--r--  src/lib/matcher.h               |  77
-rw-r--r--  src/lib/player.cc               |   6
-rw-r--r--  src/lib/po/fr_FR.po             |   4
-rw-r--r--  src/lib/server.cc               |   3
-rw-r--r--  src/lib/sndfile_decoder.cc      |  10
-rw-r--r--  src/lib/wscript                 |   1
24 files changed, 475 insertions(+), 481 deletions(-)
diff --git a/src/lib/ab_transcode_job.cc b/src/lib/ab_transcode_job.cc
index 2bdff47de..9a883fdd9 100644
--- a/src/lib/ab_transcode_job.cc
+++ b/src/lib/ab_transcode_job.cc
@@ -38,7 +38,8 @@ ABTranscodeJob::ABTranscodeJob (shared_ptr<Film> f)
{
_film_b.reset (new Film (*_film));
_film_b->set_scaler (Config::instance()->reference_scaler ());
- _film_b->set_filters (Config::instance()->reference_filters ());
+ /* XXX */
+// _film_b->set_filters (Config::instance()->reference_filters ());
}
string
diff --git a/src/lib/audio_decoder.cc b/src/lib/audio_decoder.cc
index 8950e1546..9b8d15bf1 100644
--- a/src/lib/audio_decoder.cc
+++ b/src/lib/audio_decoder.cc
@@ -25,6 +25,8 @@
#include "i18n.h"
using std::stringstream;
+using std::list;
+using std::pair;
using boost::optional;
using boost::shared_ptr;
@@ -141,11 +143,11 @@ AudioDecoder::audio (shared_ptr<const AudioBuffers> data, Time time)
assert (film);
/* Remap channels */
- shared_ptr<AudioBuffers> dcp_mapped (film->dcp_audio_channels(), data->frames());
+ shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (film->dcp_audio_channels(), data->frames()));
dcp_mapped->make_silent ();
list<pair<int, libdcp::Channel> > map = _audio_content->audio_mapping().content_to_dcp ();
for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
- dcp_mapped->accumulate (data, i->first, i->second);
+ dcp_mapped->accumulate_channel (data.get(), i->first, i->second);
}
Audio (dcp_mapped, time);
diff --git a/src/lib/dcp_video_frame.cc b/src/lib/dcp_video_frame.cc
index da51665d1..1c1838df7 100644
--- a/src/lib/dcp_video_frame.cc
+++ b/src/lib/dcp_video_frame.cc
@@ -79,7 +79,7 @@ using libdcp::Size;
DCPVideoFrame::DCPVideoFrame (
shared_ptr<const Image> yuv, shared_ptr<Subtitle> sub,
Size out, int p, int subtitle_offset, float subtitle_scale,
- Scaler const * s, int f, int dcp_fps, string pp, int clut, int bw, shared_ptr<Log> l
+ Scaler const * s, int f, int dcp_fps, int clut, int bw, shared_ptr<Log> l
)
: _input (yuv)
, _subtitle (sub)
@@ -90,7 +90,6 @@ DCPVideoFrame::DCPVideoFrame (
, _scaler (s)
, _frame (f)
, _frames_per_second (dcp_fps)
- , _post_process (pp)
, _colour_lut (clut)
, _j2k_bandwidth (bw)
, _log (l)
@@ -156,10 +155,6 @@ DCPVideoFrame::~DCPVideoFrame ()
shared_ptr<EncodedData>
DCPVideoFrame::encode_locally ()
{
- if (!_post_process.empty ()) {
- _input = _input->post_process (_post_process, true);
- }
-
shared_ptr<Image> prepared = _input->scale_and_convert_to_rgb (_out_size, _padding, _scaler, true);
if (_subtitle) {
@@ -333,10 +328,6 @@ DCPVideoFrame::encode_remotely (ServerDescription const * serv)
<< N_("frame ") << _frame << N_("\n")
<< N_("frames_per_second ") << _frames_per_second << N_("\n");
- if (!_post_process.empty()) {
- s << N_("post_process ") << _post_process << N_("\n");
- }
-
s << N_("colour_lut ") << _colour_lut << N_("\n")
<< N_("j2k_bandwidth ") << _j2k_bandwidth << N_("\n");
diff --git a/src/lib/dcp_video_frame.h b/src/lib/dcp_video_frame.h
index 4ceb07d26..ba49c95a4 100644
--- a/src/lib/dcp_video_frame.h
+++ b/src/lib/dcp_video_frame.h
@@ -107,7 +107,7 @@ class DCPVideoFrame
public:
DCPVideoFrame (
boost::shared_ptr<const Image>, boost::shared_ptr<Subtitle>, libdcp::Size,
- int, int, float, Scaler const *, int, int, std::string, int, int, boost::shared_ptr<Log>
+ int, int, float, Scaler const *, int, int, int, int, boost::shared_ptr<Log>
);
virtual ~DCPVideoFrame ();
@@ -131,7 +131,6 @@ private:
Scaler const * _scaler; ///< scaler to use
int _frame; ///< frame index within the DCP's intrinsic duration
int _frames_per_second; ///< Frames per second that we will use for the DCP
- std::string _post_process; ///< FFmpeg post-processing string to use
int _colour_lut; ///< Colour look-up table to use
int _j2k_bandwidth; ///< J2K bandwidth to use
diff --git a/src/lib/encoder.cc b/src/lib/encoder.cc
index 52927c5d3..270bf3d43 100644
--- a/src/lib/encoder.cc
+++ b/src/lib/encoder.cc
@@ -207,14 +207,13 @@ Encoder::process_video (shared_ptr<const Image> image, bool same, shared_ptr<Sub
frame_done ();
} else {
/* Queue this new frame for encoding */
- pair<string, string> const s = Filter::ffmpeg_strings (_film->filters());
TIMING ("adding to queue of %1", _queue.size ());
/* XXX: padding */
_queue.push_back (shared_ptr<DCPVideoFrame> (
new DCPVideoFrame (
image, sub, _film->container()->dcp_size(), 0,
_film->subtitle_offset(), _film->subtitle_scale(),
- _film->scaler(), _video_frames_out, _film->dcp_video_frame_rate(), s.second,
+ _film->scaler(), _video_frames_out, _film->dcp_video_frame_rate(),
_film->colour_lut(), _film->j2k_bandwidth(),
_film->log()
)
diff --git a/src/lib/ffmpeg_compatibility.cc b/src/lib/ffmpeg_compatibility.cc
deleted file mode 100644
index 361fa7423..000000000
--- a/src/lib/ffmpeg_compatibility.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-extern "C" {
-#include <libavfilter/avfiltergraph.h>
-}
-#include "exceptions.h"
-
-#include "i18n.h"
-
-#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
-
-typedef struct {
- enum PixelFormat pix_fmt;
-} AVSinkContext;
-
-static int
-avsink_init (AVFilterContext* ctx, const char* args, void* opaque)
-{
- AVSinkContext* priv = (AVSinkContext *) ctx->priv;
- if (!opaque) {
- return AVERROR (EINVAL);
- }
-
- *priv = *(AVSinkContext *) opaque;
- return 0;
-}
-
-static void
-null_end_frame (AVFilterLink *)
-{
-
-}
-
-static int
-avsink_query_formats (AVFilterContext* ctx)
-{
- AVSinkContext* priv = (AVSinkContext *) ctx->priv;
- enum PixelFormat pix_fmts[] = {
- priv->pix_fmt,
- PIX_FMT_NONE
- };
-
- avfilter_set_common_formats (ctx, avfilter_make_format_list ((int *) pix_fmts));
- return 0;
-}
-
-#endif
-
-AVFilter*
-get_sink ()
-{
-#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
- /* XXX does this leak stuff? */
- AVFilter* buffer_sink = new AVFilter;
- buffer_sink->name = av_strdup (N_("avsink"));
- buffer_sink->priv_size = sizeof (AVSinkContext);
- buffer_sink->init = avsink_init;
- buffer_sink->query_formats = avsink_query_formats;
- buffer_sink->inputs = new AVFilterPad[2];
- AVFilterPad* i0 = const_cast<AVFilterPad*> (&buffer_sink->inputs[0]);
- i0->name = N_("default");
- i0->type = AVMEDIA_TYPE_VIDEO;
- i0->min_perms = AV_PERM_READ;
- i0->rej_perms = 0;
- i0->start_frame = 0;
- i0->get_video_buffer = 0;
- i0->get_audio_buffer = 0;
- i0->end_frame = null_end_frame;
- i0->draw_slice = 0;
- i0->filter_samples = 0;
- i0->poll_frame = 0;
- i0->request_frame = 0;
- i0->config_props = 0;
- const_cast<AVFilterPad*> (&buffer_sink->inputs[1])->name = 0;
- buffer_sink->outputs = new AVFilterPad[1];
- const_cast<AVFilterPad*> (&buffer_sink->outputs[0])->name = 0;
- return buffer_sink;
-#else
- AVFilter* buffer_sink = avfilter_get_by_name(N_("buffersink"));
- if (buffer_sink == 0) {
- throw DecodeError (N_("Could not create buffer sink filter"));
- }
-
- return buffer_sink;
-#endif
-}
-
-#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
-AVFilterInOut *
-avfilter_inout_alloc ()
-{
- return (AVFilterInOut *) av_malloc (sizeof (AVFilterInOut));
-}
-#endif
-
-#ifndef HAVE_AV_FRAME_GET_BEST_EFFORT_TIMESTAMP
-int64_t av_frame_get_best_effort_timestamp (AVFrame const * f)
-{
- return f->best_effort_timestamp;
-}
-
-#endif
diff --git a/src/lib/ffmpeg_compatibility.h b/src/lib/ffmpeg_compatibility.h
deleted file mode 100644
index 772d22c33..000000000
--- a/src/lib/ffmpeg_compatibility.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-struct AVFilterInOut;
-
-extern AVFilter* get_sink ();
-extern AVFilterInOut* avfilter_inout_alloc ();
-
-#ifndef HAVE_AV_PIXEL_FORMAT
-#define AVPixelFormat PixelFormat
-#endif
-
-#ifndef HAVE_AV_FRAME_GET_BEST_EFFORT_TIMESTAMP
-extern int64_t av_frame_get_best_effort_timestamp (AVFrame const *);
-#endif
diff --git a/src/lib/ffmpeg_content.cc b/src/lib/ffmpeg_content.cc
index ad7af07d8..55139ca56 100644
--- a/src/lib/ffmpeg_content.cc
+++ b/src/lib/ffmpeg_content.cc
@@ -25,6 +25,7 @@
#include "compose.hpp"
#include "job.h"
#include "util.h"
+#include "filter.h"
#include "log.h"
#include "i18n.h"
@@ -41,6 +42,7 @@ int const FFmpegContentProperty::SUBTITLE_STREAMS = 100;
int const FFmpegContentProperty::SUBTITLE_STREAM = 101;
int const FFmpegContentProperty::AUDIO_STREAMS = 102;
int const FFmpegContentProperty::AUDIO_STREAM = 103;
+int const FFmpegContentProperty::FILTERS = 104;
FFmpegContent::FFmpegContent (shared_ptr<const Film> f, boost::filesystem::path p)
: Content (f, p)
@@ -70,6 +72,11 @@ FFmpegContent::FFmpegContent (shared_ptr<const Film> f, shared_ptr<const cxml::N
_audio_stream = _audio_streams.back ();
}
}
+
+ c = node->node_children ("Filter");
+ for (list<shared_ptr<cxml::Node> >::iterator i = c.begin(); i != c.end(); ++i) {
+ _filters.push_back (Filter::from_id ((*i)->content ()));
+ }
}
FFmpegContent::FFmpegContent (FFmpegContent const & o)
@@ -109,6 +116,10 @@ FFmpegContent::as_xml (xmlpp::Node* node) const
}
(*i)->as_xml (t);
}
+
+ for (vector<Filter const *>::const_iterator i = _filters.begin(); i != _filters.end(); ++i) {
+ node->add_child("Filter")->add_child_text ((*i)->id ());
+ }
}
void
@@ -335,3 +346,14 @@ FFmpegContent::audio_mapping () const
return _audio_stream->mapping;
}
+void
+FFmpegContent::set_filters (vector<Filter const *> const & filters)
+{
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+ _filters = filters;
+ }
+
+ signal_changed (FFmpegContentProperty::FILTERS);
+}
+
diff --git a/src/lib/ffmpeg_content.h b/src/lib/ffmpeg_content.h
index d5b986996..8f5c773ee 100644
--- a/src/lib/ffmpeg_content.h
+++ b/src/lib/ffmpeg_content.h
@@ -26,6 +26,8 @@
#include "video_content.h"
#include "audio_content.h"
+class Filter;
+
class FFmpegAudioStream
{
public:
@@ -75,6 +77,7 @@ public:
static int const SUBTITLE_STREAM;
static int const AUDIO_STREAMS;
static int const AUDIO_STREAM;
+ static int const FILTERS;
};
class FFmpegContent : public VideoContent, public AudioContent
@@ -101,6 +104,8 @@ public:
int content_audio_frame_rate () const;
int output_audio_frame_rate () const;
AudioMapping audio_mapping () const;
+
+ void set_filters (std::vector<Filter const *> const &);
std::vector<boost::shared_ptr<FFmpegSubtitleStream> > subtitle_streams () const {
boost::mutex::scoped_lock lm (_mutex);
@@ -122,6 +127,11 @@ public:
return _audio_stream;
}
+ std::vector<Filter const *> filters () const {
+ boost::mutex::scoped_lock lm (_mutex);
+ return _filters;
+ }
+
void set_subtitle_stream (boost::shared_ptr<FFmpegSubtitleStream>);
void set_audio_stream (boost::shared_ptr<FFmpegAudioStream>);
@@ -130,6 +140,8 @@ private:
boost::shared_ptr<FFmpegSubtitleStream> _subtitle_stream;
std::vector<boost::shared_ptr<FFmpegAudioStream> > _audio_streams;
boost::shared_ptr<FFmpegAudioStream> _audio_stream;
+ /** Video filters that should be used when generating DCPs */
+ std::vector<Filter const *> _filters;
};
#endif
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index 047829d45..119e82851 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -507,22 +507,6 @@ FFmpegDecoder::do_seek (Time t, bool backwards, bool accurate)
return;
}
-void
-FFmpegDecoder::film_changed (Film::Property p)
-{
- switch (p) {
- case Film::FILTERS:
- {
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
- _filter_graphs.clear ();
- }
- break;
-
- default:
- break;
- }
-}
-
/** @return Length (in video frames) according to our content's header */
ContentVideoFrame
FFmpegDecoder::video_length () const
@@ -582,27 +566,35 @@ FFmpegDecoder::decode_video_packet ()
}
if (i == _filter_graphs.end ()) {
- graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
- _filter_graphs.push_back (graph);
-
shared_ptr<const Film> film = _film.lock ();
assert (film);
+
+ graph.reset (new FilterGraph (_ffmpeg_content, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+ _filter_graphs.push_back (graph);
+
film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
} else {
graph = *i;
}
-
list<shared_ptr<Image> > images = graph->process (_frame);
+
+ string post_process = Filter::ffmpeg_strings (_ffmpeg_content->filters()).second;
for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
+
+ shared_ptr<Image> image = *i;
+ if (!post_process.empty ()) {
+ image = image->post_process (post_process, true);
+ }
+
int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
if (bet != AV_NOPTS_VALUE) {
/* XXX: may need to insert extra frames / remove frames here ...
(as per old Matcher)
*/
Time const t = bet * av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ;
- video (*i, false, t);
+ video (image, false, t);
} else {
shared_ptr<const Film> film = _film.lock ();
assert (film);
diff --git a/src/lib/ffmpeg_decoder.h b/src/lib/ffmpeg_decoder.h
index dbcfe3be0..c37479612 100644
--- a/src/lib/ffmpeg_decoder.h
+++ b/src/lib/ffmpeg_decoder.h
@@ -112,8 +112,6 @@ private:
void maybe_add_subtitle ();
boost::shared_ptr<AudioBuffers> deinterleave_audio (uint8_t** data, int size);
- void film_changed (Film::Property);
-
std::string stream_name (AVStream* s) const;
boost::shared_ptr<const FFmpegContent> _ffmpeg_content;
diff --git a/src/lib/film.cc b/src/lib/film.cc
index a61a0d53d..fc1d2d8a4 100644
--- a/src/lib/film.cc
+++ b/src/lib/film.cc
@@ -142,7 +142,6 @@ Film::Film (Film const & o)
, _use_dci_name (o._use_dci_name)
, _dcp_content_type (o._dcp_content_type)
, _container (o._container)
- , _filters (o._filters)
, _scaler (o._scaler)
, _ab (o._ab)
, _with_subtitles (o._with_subtitles)
@@ -164,13 +163,10 @@ Film::video_state_identifier () const
assert (container ());
LocaleGuard lg;
- pair<string, string> f = Filter::ffmpeg_strings (filters());
-
stringstream s;
s << container()->id()
<< "_" << _playlist->video_digest()
<< "_" << _dcp_video_frame_rate
- << "_" << f.first << "_" << f.second
<< "_" << scaler()->id()
<< "_" << j2k_bandwidth()
<< "_" << lexical_cast<int> (colour_lut());
@@ -395,10 +391,6 @@ Film::write_metadata () const
root->add_child("Container")->add_child_text (_container->id ());
}
- for (vector<Filter const *>::const_iterator i = _filters.begin(); i != _filters.end(); ++i) {
- root->add_child("Filter")->add_child_text ((*i)->id ());
- }
-
root->add_child("Scaler")->add_child_text (_scaler->id ());
root->add_child("AB")->add_child_text (_ab ? "1" : "0");
root->add_child("WithSubtitles")->add_child_text (_with_subtitles ? "1" : "0");
@@ -447,13 +439,6 @@ Film::read_metadata ()
}
}
- {
- list<shared_ptr<cxml::Node> > c = f.node_children ("Filter");
- for (list<shared_ptr<cxml::Node> >::iterator i = c.begin(); i != c.end(); ++i) {
- _filters.push_back (Filter::from_id ((*i)->content ()));
- }
- }
-
_scaler = Scaler::from_id (f.string_child ("Scaler"));
_ab = f.bool_child ("AB");
_with_subtitles = f.bool_child ("WithSubtitles");
@@ -636,16 +621,6 @@ Film::set_container (Container const * c)
}
void
-Film::set_filters (vector<Filter const *> f)
-{
- {
- boost::mutex::scoped_lock lm (_state_mutex);
- _filters = f;
- }
- signal_changed (FILTERS);
-}
-
-void
Film::set_scaler (Scaler const * s)
{
{
@@ -821,7 +796,6 @@ Film::have_dcp () const
shared_ptr<Player>
Film::player () const
{
- boost::mutex::scoped_lock lm (_state_mutex);
return shared_ptr<Player> (new Player (shared_from_this (), _playlist));
}
diff --git a/src/lib/film.h b/src/lib/film.h
index f0ccd99e7..84f0b0233 100644
--- a/src/lib/film.h
+++ b/src/lib/film.h
@@ -103,7 +103,6 @@ public:
boost::shared_ptr<Playlist> playlist () const;
OutputAudioFrame dcp_audio_frame_rate () const;
- int dcp_audio_channels () const;
OutputAudioFrame time_to_audio_frames (Time) const;
OutputVideoFrame time_to_video_frames (Time) const;
@@ -133,7 +132,6 @@ public:
LOOP,
DCP_CONTENT_TYPE,
CONTAINER,
- FILTERS,
SCALER,
AB,
WITH_SUBTITLES,
@@ -173,11 +171,6 @@ public:
return _container;
}
- std::vector<Filter const *> filters () const {
- boost::mutex::scoped_lock lm (_state_mutex);
- return _filters;
- }
-
Scaler const * scaler () const {
boost::mutex::scoped_lock lm (_state_mutex);
return _scaler;
@@ -238,7 +231,6 @@ public:
void remove_content (boost::shared_ptr<Content>);
void set_dcp_content_type (DCPContentType const *);
void set_container (Container const *);
- void set_filters (std::vector<Filter const *>);
void set_scaler (Scaler const *);
void set_ab (bool);
void set_with_subtitles (bool);
@@ -291,8 +283,6 @@ private:
DCPContentType const * _dcp_content_type;
/** The container to put this Film in (flat, scope, etc.) */
Container const * _container;
- /** Video filters that should be used when generating DCPs */
- std::vector<Filter const *> _filters;
/** Scaler algorithm to use */
Scaler const * _scaler;
/** true to create an A/B comparison DCP, where the left half of the image
diff --git a/src/lib/filter_graph.cc b/src/lib/filter_graph.cc
index df8f1e9dd..4564033d5 100644
--- a/src/lib/filter_graph.cc
+++ b/src/lib/filter_graph.cc
@@ -23,24 +23,16 @@
extern "C" {
#include <libavfilter/avfiltergraph.h>
-#ifdef HAVE_BUFFERSRC_H
#include <libavfilter/buffersrc.h>
-#endif
-#if (LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR >= 53 && LIBAVFILTER_VERSION_MINOR <= 77) || LIBAVFILTER_VERSION_MAJOR == 3
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
-#elif LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
-#include <libavfilter/vsrc_buffer.h>
-#endif
#include <libavformat/avio.h>
}
#include "decoder.h"
#include "filter_graph.h"
-#include "ffmpeg_compatibility.h"
#include "filter.h"
#include "exceptions.h"
#include "image.h"
-#include "film.h"
#include "ffmpeg_decoder.h"
#include "i18n.h"
@@ -52,28 +44,26 @@ using boost::shared_ptr;
using boost::weak_ptr;
using libdcp::Size;
-/** Construct a FilterGraph for the settings in a film.
- * @param film Film.
- * @param decoder Decoder that we are using.
+/** Construct a FilterGraph for the settings in a piece of content.
+ * @param content Content.
* @param s Size of the images to process.
* @param p Pixel format of the images to process.
*/
-FilterGraph::FilterGraph (weak_ptr<const Film> weak_film, FFmpegDecoder* decoder, libdcp::Size s, AVPixelFormat p)
+FilterGraph::FilterGraph (shared_ptr<const FFmpegContent> content, libdcp::Size s, AVPixelFormat p)
: _buffer_src_context (0)
, _buffer_sink_context (0)
, _size (s)
, _pixel_format (p)
{
- shared_ptr<const Film> film = weak_film.lock ();
- assert (film);
+ _frame = av_frame_alloc ();
- string filters = Filter::ffmpeg_strings (film->filters()).first;
+ string filters = Filter::ffmpeg_strings (content->filters()).first;
if (!filters.empty ()) {
- filters += N_(",");
+ filters += ",";
}
- Crop crop = decoder->ffmpeg_content()->crop ();
- libdcp::Size cropped_size = decoder->video_size ();
+ Crop crop = content->crop ();
+ libdcp::Size cropped_size = _size;
cropped_size.width -= crop.left + crop.right;
cropped_size.height -= crop.top + crop.bottom;
filters += crop_string (Position (crop.left, crop.top), cropped_size);
@@ -88,17 +78,20 @@ FilterGraph::FilterGraph (weak_ptr<const Film> weak_film, FFmpegDecoder* decoder
throw DecodeError (N_("could not find buffer src filter"));
}
- AVFilter* buffer_sink = get_sink ();
+ AVFilter* buffer_sink = avfilter_get_by_name(N_("buffersink"));
+ if (buffer_sink == 0) {
+ throw DecodeError (N_("Could not create buffer sink filter"));
+ }
stringstream a;
- a << _size.width << N_(":")
- << _size.height << N_(":")
- << _pixel_format << N_(":")
- << "0:1:0:1";
+ a << "video_size=" << _size.width << "x" << _size.height << ":"
+ << "pix_fmt=" << _pixel_format << ":"
+ << "time_base=0/1:"
+ << "pixel_aspect=0/1";
int r;
- if ((r = avfilter_graph_create_filter (&_buffer_src_context, buffer_src, N_("in"), a.str().c_str(), 0, graph)) < 0) {
+ if ((r = avfilter_graph_create_filter (&_buffer_src_context, buffer_src, "in", a.str().c_str(), 0, graph)) < 0) {
throw DecodeError (N_("could not create buffer source"));
}
@@ -112,6 +105,8 @@ FilterGraph::FilterGraph (weak_ptr<const Film> weak_film, FFmpegDecoder* decoder
throw DecodeError (N_("could not create buffer sink."));
}
+ av_free (sink_params);
+
AVFilterInOut* outputs = avfilter_inout_alloc ();
outputs->name = av_strdup(N_("in"));
outputs->filter_ctx = _buffer_src_context;
@@ -124,15 +119,9 @@ FilterGraph::FilterGraph (weak_ptr<const Film> weak_film, FFmpegDecoder* decoder
inputs->pad_idx = 0;
inputs->next = 0;
-#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
- if (avfilter_graph_parse (graph, filters.c_str(), inputs, outputs, 0) < 0) {
- throw DecodeError (N_("could not set up filter graph."));
- }
-#else
if (avfilter_graph_parse (graph, filters.c_str(), &inputs, &outputs, 0) < 0) {
throw DecodeError (N_("could not set up filter graph."));
}
-#endif
if (avfilter_graph_config (graph, 0) < 0) {
throw DecodeError (N_("could not configure filter graph."));
@@ -141,66 +130,29 @@ FilterGraph::FilterGraph (weak_ptr<const Film> weak_film, FFmpegDecoder* decoder
/* XXX: leaking `inputs' / `outputs' ? */
}
+FilterGraph::~FilterGraph ()
+{
+ av_frame_free (&_frame);
+}
+
/** Take an AVFrame and process it using our configured filters, returning a
- * set of Images.
+ * set of Images. Caller handles memory management of the input frame.
*/
list<shared_ptr<Image> >
-FilterGraph::process (AVFrame const * frame)
+FilterGraph::process (AVFrame* frame)
{
list<shared_ptr<Image> > images;
-
-#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR >= 53 && LIBAVFILTER_VERSION_MINOR <= 61
-
- if (av_vsrc_buffer_add_frame (_buffer_src_context, frame, 0) < 0) {
- throw DecodeError (N_("could not push buffer into filter chain."));
- }
-
-#elif LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
-
- AVRational par;
- par.num = sample_aspect_ratio_numerator ();
- par.den = sample_aspect_ratio_denominator ();
-
- if (av_vsrc_buffer_add_frame (_buffer_src_context, frame, 0, par) < 0) {
- throw DecodeError (N_("could not push buffer into filter chain."));
- }
-
-#else
if (av_buffersrc_write_frame (_buffer_src_context, frame) < 0) {
throw DecodeError (N_("could not push buffer into filter chain."));
}
-#endif
-
-#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR >= 15 && LIBAVFILTER_VERSION_MINOR <= 61
- while (avfilter_poll_frame (_buffer_sink_context->inputs[0])) {
-#else
- while (av_buffersink_read (_buffer_sink_context, 0)) {
-#endif
-
-#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR >= 15
-
- int r = avfilter_request_frame (_buffer_sink_context->inputs[0]);
- if (r < 0) {
- throw DecodeError (N_("could not request filtered frame"));
- }
-
- AVFilterBufferRef* filter_buffer = _buffer_sink_context->inputs[0]->cur_buf;
-
-#else
-
- AVFilterBufferRef* filter_buffer;
- if (av_buffersink_get_buffer_ref (_buffer_sink_context, &filter_buffer, 0) < 0) {
- filter_buffer = 0;
+ while (1) {
+ if (av_buffersink_get_frame (_buffer_sink_context, _frame) < 0) {
+ break;
}
-#endif
-
- if (filter_buffer) {
- /* This takes ownership of filter_buffer */
- images.push_back (shared_ptr<Image> (new FilterBufferImage ((PixelFormat) frame->format, filter_buffer)));
- }
+ images.push_back (shared_ptr<Image> (new SimpleImage (_frame)));
}
return images;
diff --git a/src/lib/filter_graph.h b/src/lib/filter_graph.h
index c7a01f58e..e294812c2 100644
--- a/src/lib/filter_graph.h
+++ b/src/lib/filter_graph.h
@@ -25,11 +25,9 @@
#define DCPOMATIC_FILTER_GRAPH_H
#include "util.h"
-#include "ffmpeg_compatibility.h"
class Image;
class VideoFilter;
-class FFmpegDecoder;
/** @class FilterGraph
* @brief A graph of FFmpeg filters.
@@ -37,16 +35,18 @@ class FFmpegDecoder;
class FilterGraph
{
public:
- FilterGraph (boost::weak_ptr<const Film>, FFmpegDecoder* decoder, libdcp::Size s, AVPixelFormat p);
+ FilterGraph (boost::shared_ptr<const FFmpegContent> content, libdcp::Size s, AVPixelFormat p);
+ ~FilterGraph ();
bool can_process (libdcp::Size s, AVPixelFormat p) const;
- std::list<boost::shared_ptr<Image> > process (AVFrame const * frame);
+ std::list<boost::shared_ptr<Image> > process (AVFrame * frame);
private:
AVFilterContext* _buffer_src_context;
AVFilterContext* _buffer_sink_context;
libdcp::Size _size; ///< size of the images that this chain can process
AVPixelFormat _pixel_format; ///< pixel format of the images that this chain can process
+ AVFrame* _frame;
};
#endif
diff --git a/src/lib/image.cc b/src/lib/image.cc
index 1be41fecf..b166dfac6 100644
--- a/src/lib/image.cc
+++ b/src/lib/image.cc
@@ -35,6 +35,7 @@ extern "C" {
#include <libavfilter/avfiltergraph.h>
#include <libpostproc/postprocess.h>
#include <libavutil/pixfmt.h>
+#include <libavutil/pixdesc.h>
}
#include "image.h"
#include "exceptions.h"
@@ -58,55 +59,32 @@ Image::swap (Image& other)
int
Image::lines (int n) const
{
- switch (_pixel_format) {
- case PIX_FMT_YUV420P:
- if (n == 0) {
- return size().height;
- } else {
- return size().height / 2;
- }
- break;
- case PIX_FMT_RGB24:
- case PIX_FMT_RGBA:
- case PIX_FMT_YUV422P10LE:
- case PIX_FMT_YUV422P:
- case PIX_FMT_YUV444P:
- case PIX_FMT_YUV444P9BE:
- case PIX_FMT_YUV444P9LE:
- case PIX_FMT_YUV444P10BE:
- case PIX_FMT_YUV444P10LE:
- case PIX_FMT_UYVY422:
+ if (n == 0) {
return size().height;
- default:
+ }
+
+ AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+ if (!d) {
throw PixelFormatError (N_("lines()"), _pixel_format);
}
-
- return 0;
+
+ return size().height / pow(2, d->log2_chroma_h);
}
/** @return Number of components */
int
Image::components () const
{
- switch (_pixel_format) {
- case PIX_FMT_YUV420P:
- case PIX_FMT_YUV422P10LE:
- case PIX_FMT_YUV422P:
- case PIX_FMT_YUV444P:
- case PIX_FMT_YUV444P9BE:
- case PIX_FMT_YUV444P9LE:
- case PIX_FMT_YUV444P10BE:
- case PIX_FMT_YUV444P10LE:
- return 3;
- case PIX_FMT_RGB24:
- case PIX_FMT_RGBA:
- case PIX_FMT_UYVY422:
- return 1;
- default:
+ AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+ if (!d) {
throw PixelFormatError (N_("components()"), _pixel_format);
}
- return 0;
+ if ((d->flags & PIX_FMT_PLANAR) == 0) {
+ return 1;
+ }
+
+ return d->nb_components;
}
shared_ptr<Image>
@@ -296,12 +274,12 @@ Image::make_black ()
{
/* U/V black value for 8-bit colour */
static uint8_t const eight_bit_uv = (1 << 7) - 1;
-
/* U/V black value for 9-bit colour */
static uint16_t const nine_bit_uv = (1 << 8) - 1;
-
/* U/V black value for 10-bit colour */
static uint16_t const ten_bit_uv = (1 << 9) - 1;
+ /* U/V black value for 16-bit colour */
+ static uint16_t const sixteen_bit_uv = (1 << 15) - 1;
switch (_pixel_format) {
case PIX_FMT_YUV420P:
@@ -326,11 +304,17 @@ Image::make_black ()
case PIX_FMT_YUV444P10LE:
yuv_16_black (ten_bit_uv);
break;
+
+ case PIX_FMT_YUV422P16LE:
+ case PIX_FMT_YUV444P16LE:
+ yuv_16_black (sixteen_bit_uv);
+ break;
case PIX_FMT_YUV444P10BE:
case PIX_FMT_YUV422P10BE:
yuv_16_black (swap_16 (ten_bit_uv));
-
+ break;
+
case PIX_FMT_RGB24:
memset (data()[0], 0, lines(0) * stride()[0]);
break;
@@ -420,53 +404,36 @@ Image::write_to_socket (shared_ptr<Socket> socket) const
float
Image::bytes_per_pixel (int c) const
{
- if (c == 3) {
+ AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+ if (!d) {
+ throw PixelFormatError (N_("lines()"), _pixel_format);
+ }
+
+ if (c >= components()) {
return 0;
}
+
+ float bpp[4] = { 0, 0, 0, 0 };
+
+ bpp[0] = floor ((d->comp[0].depth_minus1 + 1 + 7) / 8);
+ if (d->nb_components > 1) {
+ bpp[1] = floor ((d->comp[1].depth_minus1 + 1 + 7) / 8) / pow (2, d->log2_chroma_w);
+ }
+ if (d->nb_components > 2) {
+ bpp[2] = floor ((d->comp[2].depth_minus1 + 1 + 7) / 8) / pow (2, d->log2_chroma_w);
+ }
+ if (d->nb_components > 3) {
+ bpp[3] = floor ((d->comp[3].depth_minus1 + 1 + 7) / 8) / pow (2, d->log2_chroma_w);
+ }
- switch (_pixel_format) {
- case PIX_FMT_RGB24:
- if (c == 0) {
- return 3;
- } else {
- return 0;
- }
- case PIX_FMT_RGBA:
- if (c == 0) {
- return 4;
- } else {
- return 0;
- }
- case PIX_FMT_YUV420P:
- case PIX_FMT_YUV422P:
- if (c == 0) {
- return 1;
- } else {
- return 0.5;
- }
- case PIX_FMT_YUV422P10LE:
- if (c == 0) {
- return 2;
- } else {
- return 1;
- }
- case PIX_FMT_UYVY422:
- return 2;
- case PIX_FMT_YUV444P:
- return 3;
- case PIX_FMT_YUV444P9BE:
- case PIX_FMT_YUV444P9LE:
- case PIX_FMT_YUV444P10LE:
- case PIX_FMT_YUV444P10BE:
- return 6;
- default:
- throw PixelFormatError (N_("bytes_per_pixel()"), _pixel_format);
+ if ((d->flags & PIX_FMT_PLANAR) == 0) {
+ /* Not planar; sum them up */
+ return bpp[0] + bpp[1] + bpp[2] + bpp[3];
}
- return 0;
+ return bpp[c];
}
-
/** Construct a SimpleImage of a given size and format, allocating memory
* as required.
*
@@ -502,10 +469,9 @@ SimpleImage::allocate ()
SimpleImage::SimpleImage (SimpleImage const & other)
: Image (other)
+ , _size (other._size)
+ , _aligned (other._aligned)
{
- _size = other._size;
- _aligned = other._aligned;
-
allocate ();
for (int i = 0; i < components(); ++i) {
@@ -519,6 +485,25 @@ SimpleImage::SimpleImage (SimpleImage const & other)
}
}
+SimpleImage::SimpleImage (AVFrame* frame)
+ : Image (static_cast<AVPixelFormat> (frame->format))
+ , _size (frame->width, frame->height)
+ , _aligned (true)
+{
+ allocate ();
+
+ for (int i = 0; i < components(); ++i) {
+ uint8_t* p = _data[i];
+ uint8_t* q = frame->data[i];
+ for (int j = 0; j < lines(i); ++j) {
+ memcpy (p, q, _line_size[i]);
+ p += stride()[i];
+ /* AVFrame's linesize is what we call `stride' */
+ q += frame->linesize[i];
+ }
+ }
+}
+
SimpleImage::SimpleImage (shared_ptr<const Image> other)
: Image (*other.get())
{
@@ -609,59 +594,6 @@ SimpleImage::aligned () const
return _aligned;
}
-FilterBufferImage::FilterBufferImage (AVPixelFormat p, AVFilterBufferRef* b)
- : Image (p)
- , _buffer (b)
-{
- _line_size = (int *) av_malloc (4 * sizeof (int));
- _line_size[0] = _line_size[1] = _line_size[2] = _line_size[3] = 0;
-
- for (int i = 0; i < components(); ++i) {
- _line_size[i] = size().width * bytes_per_pixel(i);
- }
-}
-
-FilterBufferImage::~FilterBufferImage ()
-{
- avfilter_unref_buffer (_buffer);
- av_free (_line_size);
-}
-
-uint8_t **
-FilterBufferImage::data () const
-{
- return _buffer->data;
-}
-
-int *
-FilterBufferImage::line_size () const
-{
- return _line_size;
-}
-
-int *
-FilterBufferImage::stride () const
-{
- /* I've seen images where the _buffer->linesize is larger than the width
- (by a small amount), suggesting that _buffer->linesize is what we call
- stride. But I'm not sure.
- */
- return _buffer->linesize;
-}
-
-libdcp::Size
-FilterBufferImage::size () const
-{
- return libdcp::Size (_buffer->video->w, _buffer->video->h);
-}
-
-bool
-FilterBufferImage::aligned () const
-{
- /* XXX? */
- return true;
-}
-
RGBPlusAlphaImage::RGBPlusAlphaImage (shared_ptr<const Image> im)
: SimpleImage (im->pixel_format(), im->size(), false)
{
diff --git a/src/lib/image.h b/src/lib/image.h
index de03d0e3f..34f87b188 100644
--- a/src/lib/image.h
+++ b/src/lib/image.h
@@ -32,10 +32,8 @@ extern "C" {
#include <libavfilter/avfilter.h>
}
#include "util.h"
-#include "ffmpeg_compatibility.h"
class Scaler;
-class RGBFrameImage;
class SimpleImage;
/** @class Image
@@ -92,6 +90,8 @@ protected:
virtual void swap (Image &);
float bytes_per_pixel (int) const;
+ friend class pixel_formats_test;
+
private:
void yuv_16_black (uint16_t);
static uint16_t swap_16 (uint16_t);
@@ -99,30 +99,6 @@ private:
AVPixelFormat _pixel_format; ///< FFmpeg's way of describing the pixel format of this Image
};
-/** @class FilterBufferImage
- * @brief An Image that is held in an AVFilterBufferRef.
- */
-class FilterBufferImage : public Image
-{
-public:
- FilterBufferImage (AVPixelFormat, AVFilterBufferRef *);
- ~FilterBufferImage ();
-
- uint8_t ** data () const;
- int * line_size () const;
- int * stride () const;
- libdcp::Size size () const;
- bool aligned () const;
-
-private:
- /* Not allowed */
- FilterBufferImage (FilterBufferImage const &);
- FilterBufferImage& operator= (FilterBufferImage const &);
-
- AVFilterBufferRef* _buffer;
- int* _line_size;
-};
-
/** @class SimpleImage
* @brief An Image for which memory is allocated using a `simple' av_malloc().
*/
@@ -130,6 +106,7 @@ class SimpleImage : public Image
{
public:
SimpleImage (AVPixelFormat, libdcp::Size, bool);
+ SimpleImage (AVFrame *);
SimpleImage (SimpleImage const &);
SimpleImage (boost::shared_ptr<const Image>);
SimpleImage& operator= (SimpleImage const &);
diff --git a/src/lib/matcher.cc b/src/lib/matcher.cc
new file mode 100644
index 000000000..4acb82afa
--- /dev/null
+++ b/src/lib/matcher.cc
@@ -0,0 +1,224 @@
+/*
+ Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include "matcher.h"
+#include "image.h"
+#include "log.h"
+
+#include "i18n.h"
+
+using std::min;
+using std::cout;
+using std::list;
+using boost::shared_ptr;
+
+Matcher::Matcher (shared_ptr<Log> log, int sample_rate, float frames_per_second)
+ : Processor (log)
+ , _sample_rate (sample_rate)
+ , _frames_per_second (frames_per_second)
+ , _video_frames (0)
+ , _audio_frames (0)
+ , _had_first_video (false)
+ , _had_first_audio (false)
+{
+
+}
+
+void
+Matcher::process_video (boost::shared_ptr<const Image> image, bool same, boost::shared_ptr<Subtitle> sub, double t)
+{
+ _pixel_format = image->pixel_format ();
+ _size = image->size ();
+
+ _log->log(String::compose("Matcher video @ %1 [audio=%2, video=%3, pending_audio=%4]", t, _audio_frames, _video_frames, _pending_audio.size()));
+
+ if (!_first_input || t < _first_input.get()) {
+ _first_input = t;
+ }
+
+ bool const this_is_first_video = !_had_first_video;
+ _had_first_video = true;
+
+ if (!_had_first_audio) {
+ /* No audio yet; we must postpone these data until we have some */
+ _pending_video.push_back (VideoRecord (image, same, sub, t));
+ } else if (this_is_first_video && _had_first_audio) {
+ /* First video since we got audio */
+ _pending_video.push_back (VideoRecord (image, same, sub, t));
+ fix_start ();
+ } else {
+ /* Normal running */
+
+ /* Difference between where this video is and where it should be */
+ double const delta = t - _first_input.get() - _video_frames / _frames_per_second;
+ double const one_frame = 1 / _frames_per_second;
+
+ if (delta > one_frame) {
+ /* Insert frames to make up the difference */
+ int const extra = rint (delta / one_frame);
+ for (int i = 0; i < extra; ++i) {
+ repeat_last_video ();
+ _log->log (String::compose ("Extra video frame inserted at %1s", _video_frames / _frames_per_second));
+ }
+ }
+
+ if (delta > -one_frame) {
+ Video (image, same, sub);
+ ++_video_frames;
+ } else {
+ /* We are omitting a frame to keep things right */
+ _log->log (String::compose ("Frame removed at %1s; delta %2; first input was at %3", t, delta, _first_input.get()));
+ }
+
+ _last_image = image;
+ _last_subtitle = sub;
+ }
+}
+
+void
+Matcher::process_audio (boost::shared_ptr<const AudioBuffers> b, double t)
+{
+ _channels = b->channels ();
+
+ _log->log (String::compose (
+ "Matcher audio (%1 frames) @ %2 [video=%3, audio=%4, pending_video=%5, pending_audio=%6]",
+ b->frames(), t, _video_frames, _audio_frames, _pending_video.size(), _pending_audio.size()
+ )
+ );
+
+ if (!_first_input || t < _first_input.get()) {
+ _first_input = t;
+ }
+
+ bool const this_is_first_audio = !_had_first_audio;
+ _had_first_audio = true;
+
+ if (!_had_first_video) {
+ /* No video yet; we must postpone these data until we have some */
+ _pending_audio.push_back (AudioRecord (b, t));
+ } else if (this_is_first_audio && _had_first_video) {
+ /* First audio since we got video */
+ _pending_audio.push_back (AudioRecord (b, t));
+ fix_start ();
+ } else {
+ /* Normal running. We assume audio time stamps are consecutive, so there's no equivalent of
+ the checking / insertion of repeat frames that there is for video.
+ */
+ Audio (b);
+ _audio_frames += b->frames ();
+ }
+}
+
+void
+Matcher::process_end ()
+{
+ if (_audio_frames == 0 || !_pixel_format || !_size || !_channels) {
+ /* We won't do anything */
+ return;
+ }
+
+ _log->log (String::compose ("Matcher has seen %1 video frames (which equals %2 audio frames) and %3 audio frames",
+ _video_frames, video_frames_to_audio_frames (_video_frames, _sample_rate, _frames_per_second), _audio_frames));
+
+ match ((double (_audio_frames) / _sample_rate) - (double (_video_frames) / _frames_per_second));
+}
+
+void
+Matcher::fix_start ()
+{
+ assert (!_pending_video.empty ());
+ assert (!_pending_audio.empty ());
+
+ _log->log (String::compose ("Fixing start; video at %1, audio at %2", _pending_video.front().time, _pending_audio.front().time));
+
+ match (_pending_video.front().time - _pending_audio.front().time);
+
+ for (list<VideoRecord>::iterator i = _pending_video.begin(); i != _pending_video.end(); ++i) {
+ process_video (i->image, i->same, i->subtitle, i->time);
+ }
+
+ _pending_video.clear ();
+
+ for (list<AudioRecord>::iterator i = _pending_audio.begin(); i != _pending_audio.end(); ++i) {
+ process_audio (i->audio, i->time);
+ }
+
+ _pending_audio.clear ();
+}
+
+void
+Matcher::match (double extra_video_needed)
+{
+ _log->log (String::compose ("Match %1", extra_video_needed));
+
+ if (extra_video_needed > 0) {
+
+ /* Emit black video frames */
+
+ int const black_video_frames = ceil (extra_video_needed * _frames_per_second);
+
+ _log->log (String::compose (N_("Emitting %1 frames of black video"), black_video_frames));
+
+ shared_ptr<Image> black (new SimpleImage (_pixel_format.get(), _size.get(), true));
+ black->make_black ();
+ for (int i = 0; i < black_video_frames; ++i) {
+ Video (black, i != 0, shared_ptr<Subtitle>());
+ ++_video_frames;
+ }
+
+ extra_video_needed -= black_video_frames / _frames_per_second;
+ }
+
+ if (extra_video_needed < 0) {
+
+ /* Emit silence */
+
+ int64_t to_do = -extra_video_needed * _sample_rate;
+ _log->log (String::compose (N_("Emitting %1 frames of silence"), to_do));
+
+ /* Do things in half second blocks as I think there may be limits
+ to what FFmpeg (and in particular the resampler) can cope with.
+ */
+ int64_t const block = _sample_rate / 2;
+ shared_ptr<AudioBuffers> b (new AudioBuffers (_channels.get(), block));
+ b->make_silent ();
+
+ while (to_do > 0) {
+ int64_t const this_time = min (to_do, block);
+ b->set_frames (this_time);
+ Audio (b);
+ _audio_frames += b->frames ();
+ to_do -= this_time;
+ }
+ }
+}
+
+void
+Matcher::repeat_last_video ()
+{
+ if (!_last_image) {
+ shared_ptr<Image> im (new SimpleImage (_pixel_format.get(), _size.get(), true));
+ im->make_black ();
+ _last_image = im;
+ }
+
+ Video (_last_image, true, _last_subtitle);
+ ++_video_frames;
+}
+
diff --git a/src/lib/matcher.h b/src/lib/matcher.h
new file mode 100644
index 000000000..61fd81436
--- /dev/null
+++ b/src/lib/matcher.h
@@ -0,0 +1,77 @@
+/*
+ Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include <boost/optional.hpp>
+#include "processor.h"
+
+class Matcher : public Processor, public TimedAudioSink, public TimedVideoSink, public AudioSource, public VideoSource
+{
+public:
+ Matcher (boost::shared_ptr<Log> log, int sample_rate, float frames_per_second);
+ void process_video (boost::shared_ptr<const Image> i, bool, boost::shared_ptr<Subtitle> s, double);
+ void process_audio (boost::shared_ptr<const AudioBuffers>, double);
+ void process_end ();
+
+private:
+ void fix_start ();
+ void match (double);
+ void repeat_last_video ();
+
+ int _sample_rate;
+ float _frames_per_second;
+ int _video_frames;
+ int64_t _audio_frames;
+ boost::optional<AVPixelFormat> _pixel_format;
+ boost::optional<libdcp::Size> _size;
+ boost::optional<int> _channels;
+
+ struct VideoRecord {
+ VideoRecord (boost::shared_ptr<const Image> i, bool s, boost::shared_ptr<Subtitle> u, double t)
+ : image (i)
+ , same (s)
+ , subtitle (u)
+ , time (t)
+ {}
+
+ boost::shared_ptr<const Image> image;
+ bool same;
+ boost::shared_ptr<Subtitle> subtitle;
+ double time;
+ };
+
+ struct AudioRecord {
+ AudioRecord (boost::shared_ptr<const AudioBuffers> a, double t)
+ : audio (a)
+ , time (t)
+ {}
+
+ boost::shared_ptr<const AudioBuffers> audio;
+ double time;
+ };
+
+ std::list<VideoRecord> _pending_video;
+ std::list<AudioRecord> _pending_audio;
+
+ boost::optional<double> _first_input;
+ boost::shared_ptr<const Image> _last_image;
+ boost::shared_ptr<Subtitle> _last_subtitle;
+
+ bool _had_first_video;
+ bool _had_first_audio;
+};
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 032b3d49b..ff13f95db 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -107,6 +107,10 @@ Player::pass ()
if (((*i)->decoder->next() + (*i)->content->start()) >= (*i)->content->end()) {
continue;
}
+
+ if (!_audio && dynamic_pointer_cast<SndfileContent> ((*i)->content)) {
+ continue;
+ }
Time const t = (*i)->content->start() + (*i)->decoder->next();
if (t < earliest_t) {
@@ -188,7 +192,7 @@ Player::process_audio (weak_ptr<Content> weak_content, shared_ptr<const AudioBuf
/* Now accumulate the new audio into our buffers */
_audio_buffers.ensure_size (_audio_buffers.frames() + audio->frames());
- _audio_buffers.accumulate_frames (audio, 0, 0, audio->frames ());
+ _audio_buffers.accumulate_frames (audio.get(), 0, 0, audio->frames ());
}
/** @return true on error */
diff --git a/src/lib/po/fr_FR.po b/src/lib/po/fr_FR.po
index d69e2f7d5..f7e362eda 100644
--- a/src/lib/po/fr_FR.po
+++ b/src/lib/po/fr_FR.po
@@ -8,7 +8,7 @@ msgstr ""
"Project-Id-Version: DCP-o-matic FRENCH\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2013-05-09 09:51+0100\n"
-"PO-Revision-Date: 2013-05-10 14:33+0100\n"
+"PO-Revision-Date: 2013-05-21 10:30+0100\n"
"Last-Translator: \n"
"Language-Team: \n"
"Language: \n"
@@ -489,7 +489,7 @@ msgstr "lecture du fichier impossible"
#: src/lib/exceptions.cc:44
msgid "could not read from file %1 (%2)"
-msgstr "création du dossier distant %1 impossible (%2)"
+msgstr "lecture du fichier impossible %1 (%2)"
#: src/lib/encoder.cc:137
#: src/lib/encoder.cc:314
diff --git a/src/lib/server.cc b/src/lib/server.cc
index ca0bec580..07b826946 100644
--- a/src/lib/server.cc
+++ b/src/lib/server.cc
@@ -111,7 +111,6 @@ Server::process (shared_ptr<Socket> socket)
string scaler_id = get_required_string (kv, N_("scaler"));
int frame = get_required_int (kv, N_("frame"));
int frames_per_second = get_required_int (kv, N_("frames_per_second"));
- string post_process = get_optional_string (kv, N_("post_process"));
int colour_lut_index = get_required_int (kv, N_("colour_lut"));
int j2k_bandwidth = get_required_int (kv, N_("j2k_bandwidth"));
Position subtitle_position (get_optional_int (kv, N_("subtitle_x")), get_optional_int (kv, N_("subtitle_y")));
@@ -136,7 +135,7 @@ Server::process (shared_ptr<Socket> socket)
DCPVideoFrame dcp_video_frame (
image, sub, out_size, padding, subtitle_offset, subtitle_scale,
- scaler, frame, frames_per_second, post_process, colour_lut_index, j2k_bandwidth, _log
+ scaler, frame, frames_per_second, colour_lut_index, j2k_bandwidth, _log
);
shared_ptr<EncodedData> encoded = dcp_video_frame.encode_locally ();
diff --git a/src/lib/sndfile_decoder.cc b/src/lib/sndfile_decoder.cc
index b6dac2e76..c4c6e5f4e 100644
--- a/src/lib/sndfile_decoder.cc
+++ b/src/lib/sndfile_decoder.cc
@@ -65,11 +65,11 @@ SndfileDecoder::pass ()
int const channels = _sndfile_content->audio_channels ();
- shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, this_time));
+ shared_ptr<AudioBuffers> data (new AudioBuffers (channels, this_time));
if (_sndfile_content->audio_channels() == 1) {
/* No de-interleaving required */
- sf_read_float (_sndfile, audio->data(0), this_time);
+ sf_read_float (_sndfile, data->data(0), this_time);
} else {
/* Deinterleave */
if (!_deinterleave_buffer) {
@@ -78,7 +78,7 @@ SndfileDecoder::pass ()
sf_readf_float (_sndfile, _deinterleave_buffer, this_time);
vector<float*> out_ptr (channels);
for (int i = 0; i < channels; ++i) {
- out_ptr[i] = audio->data(i);
+ out_ptr[i] = data->data(i);
}
float* in_ptr = _deinterleave_buffer;
for (int i = 0; i < this_time; ++i) {
@@ -88,8 +88,8 @@ SndfileDecoder::pass ()
}
}
- audio->set_frames (this_time);
- Audio (audio, double(_done) / audio_frame_rate());
+ data->set_frames (this_time);
+ audio (data, double(_done) / audio_frame_rate());
_done += this_time;
_remaining -= this_time;
}
diff --git a/src/lib/wscript b/src/lib/wscript
index e7349a9c4..54941df42 100644
--- a/src/lib/wscript
+++ b/src/lib/wscript
@@ -26,7 +26,6 @@ sources = """
examine_content_job.cc
exceptions.cc
filter_graph.cc
- ffmpeg_compatibility.cc
ffmpeg_content.cc
ffmpeg_decoder.cc
film.cc