* @brief Parent class for classes which can encode video and audio frames.
*/
+#include <iostream>
#include <boost/filesystem.hpp>
+#include <boost/lexical_cast.hpp>
+#include <libdcp/picture_asset.h>
#include "encoder.h"
#include "util.h"
#include "options.h"
#include "film.h"
#include "log.h"
#include "exceptions.h"
+#include "filter.h"
+#include "config.h"
+#include "dcp_video_frame.h"
+#include "server.h"
+#include "format.h"
+#include "cross.h"
+#include "writer.h"
+
+#include "i18n.h"
using std::pair;
+using std::string;
using std::stringstream;
using std::vector;
+using std::list;
+using std::cout;
+using std::make_pair;
using namespace boost;
/* Number of recent frame timestamps kept to compute the encoding-rate
   (frames-per-second) estimate; see the division by _history_size below. */
int const Encoder::_history_size = 25;
-/** @param f Film that we are encoding.
- * @param o Options.
- */
-Encoder::Encoder (shared_ptr<const Film> f, shared_ptr<const EncodeOptions> o)
+/** @param f Film that we are encoding */
+Encoder::Encoder (shared_ptr<Film> f)
: _film (f)
- , _opt (o)
- , _just_skipped (false)
- , _video_frame (0)
- , _audio_frame (0)
+ , _video_frames_in (0)
+ , _video_frames_out (0)
#ifdef HAVE_SWRESAMPLE
, _swr_context (0)
-#endif
- , _audio_frames_written (0)
+#endif
+ , _have_a_real_frame (false)
+ , _terminate (false)
{
- if (_film->audio_stream()) {
- /* Create sound output files with .tmp suffixes; we will rename
- them if and when we complete.
- */
- for (int i = 0; i < _film->audio_channels(); ++i) {
- SF_INFO sf_info;
- sf_info.samplerate = dcp_audio_sample_rate (_film->audio_stream()->sample_rate());
- /* We write mono files */
- sf_info.channels = 1;
- sf_info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_24;
- SNDFILE* f = sf_open (_opt->multichannel_audio_out_path (i, true).c_str (), SFM_WRITE, &sf_info);
- if (f == 0) {
- throw CreateFileError (_opt->multichannel_audio_out_path (i, true));
- }
- _sound_files.push_back (f);
- }
- }
+
}
/** Stop and join all encoder threads, then tell the writer (if any) to
 *  finish up.
 *  NOTE(review): Writer::finish() looks like it can throw (it writes
 *  files); an exception escaping a destructor terminates the program —
 *  confirm and guard with try/catch if so.
 */
Encoder::~Encoder ()
{
	terminate_threads ();
	if (_writer) {
		_writer->finish ();
	}
}
void
/* NOTE(review): incomplete diff hunk — the signature of this function
   (presumably Encoder::process_begin) and several interior context lines
   are elided, so the code below is left byte-for-byte untouched; only
   review comments are added. */
#ifdef HAVE_SWRESAMPLE
stringstream s;
-	s << "Will resample audio from " << _film->audio_stream()->sample_rate() << " to " << _film->target_audio_sample_rate();
+	s << String::compose (N_("Will resample audio from %1 to %2"), _film->audio_stream()->sample_rate(), _film->target_audio_sample_rate());
_film->log()->log (s.str ());
/* We will be using planar float data when we call the resampler */
swr_init (_swr_context);
#else
-	throw EncodeError ("Cannot resample audio as libswresample is not present");
+	throw EncodeError (_("Cannot resample audio as libswresample is not present"));
#endif
} else {
#ifdef HAVE_SWRESAMPLE
_swr_context = 0;
#endif
}
+
/* Start one local worker per configured local encoding thread, then one
   worker per thread offered by each configured remote server; a null
   ServerDescription means "encode locally". */
+	for (int i = 0; i < Config::instance()->num_local_encoding_threads (); ++i) {
+		_threads.push_back (new boost::thread (boost::bind (&Encoder::encoder_thread, this, (ServerDescription *) 0)));
+	}
+
+	vector<ServerDescription*> servers = Config::instance()->servers ();
+
+	for (vector<ServerDescription*>::iterator i = servers.begin(); i != servers.end(); ++i) {
+		for (int j = 0; j < (*i)->threads (); ++j) {
+			_threads.push_back (new boost::thread (boost::bind (&Encoder::encoder_thread, this, *i)));
+		}
+	}
+
+	_writer.reset (new Writer (_film));
}
Encoder::process_end ()
{
/* NOTE(review): incomplete diff hunk — lines inside the swresample flush
   loop (between `if (frames == 0) {` and its `}`) and the closing of the
   audio_stream() block are elided by diff context, so the code is left
   untouched; only review comments are added. */
#if HAVE_SWRESAMPLE
-	if (_film->audio_stream() && _swr_context) {
+	if (_film->audio_stream() && _film->audio_stream()->channels() && _swr_context) {
shared_ptr<AudioBuffers> out (new AudioBuffers (_film->audio_stream()->channels(), 256));
int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
if (frames < 0) {
-	throw EncodeError ("could not run sample-rate converter");
+	throw EncodeError (_("could not run sample-rate converter"));
}
if (frames == 0) {
}
#endif
-	if (_film->audio_stream()) {
-		close_sound_files ();
-
-		/* Rename .wav.tmp files to .wav */
-		for (int i = 0; i < _film->audio_channels(); ++i) {
-			if (boost::filesystem::exists (_opt->multichannel_audio_out_path (i, false))) {
-				boost::filesystem::remove (_opt->multichannel_audio_out_path (i, false));
-			}
-			boost::filesystem::rename (_opt->multichannel_audio_out_path (i, true), _opt->multichannel_audio_out_path (i, false));
+	boost::mutex::scoped_lock lock (_mutex);
+
+	_film->log()->log (String::compose (N_("Clearing queue of %1"), _queue.size ()));
+
+	/* Keep waking workers until the queue is empty */
+	while (!_queue.empty ()) {
+		_film->log()->log (String::compose (N_("Waking with %1"), _queue.size ()), Log::VERBOSE);
+		_condition.notify_all ();
+		_condition.wait (lock);
+	}
+
+	lock.unlock ();
+
+	terminate_threads ();
+
+	_film->log()->log (String::compose (N_("Mopping up %1"), _queue.size()));
+
+	/* The following sequence of events can occur in the above code:
+		1. a remote worker takes the last image off the queue
+		2. the loop above terminates
+		3. the remote worker fails to encode the image and puts it back on the queue
+		4. the remote worker is then terminated by terminate_threads
+
+		So just mop up anything left in the queue here.
+	*/
+
+	for (list<shared_ptr<DCPVideoFrame> >::iterator i = _queue.begin(); i != _queue.end(); ++i) {
+		_film->log()->log (String::compose (N_("Encode left-over frame %1"), (*i)->frame ()));
+		try {
+			_writer->write ((*i)->encode_locally(), (*i)->frame ());
+			frame_done ();
+		} catch (std::exception& e) {
+			_film->log()->log (String::compose (N_("Local encode failed (%1)"), e.what ()));
}
}
+
+	_writer->finish ();
+	_writer.reset ();
}
/* NOTE(review): incomplete fragment — the function's signature and earlier
   statements (including the end of the doc comment below) are elided by
   diff context; left untouched. Divides the fixed history length by the
   elapsed time spanned by the recorded timestamps. */
/** @return an estimate of the current number of frames we are encoding per second,
return _history_size / (seconds (now) - seconds (_time_history.back ()));
}
-/** @return true if the last frame to be processed was skipped as it already existed */
-bool
-Encoder::skipping () const
-{
- boost::mutex::scoped_lock (_history_mutex);
- return _just_skipped;
-}
-
-/** @return Number of video frames that have been received */
-SourceFrame
-Encoder::video_frame () const
+/** @return Number of video frames that have been sent out */
+int
+Encoder::video_frames_out () const
{
boost::mutex::scoped_lock (_history_mutex);
- return _video_frame;
+ return _video_frames_out;
}
/* NOTE(review): incomplete diff hunk — the statements between
   gettimeofday() and the closing braces (presumably pushing `tv` onto
   _time_history and trimming it to _history_size) are elided; the code is
   left untouched. */
/** Should be called when a frame has been encoded successfully.
Encoder::frame_done ()
{
boost::mutex::scoped_lock lock (_history_mutex);
-	_just_skipped = false;
struct timeval tv;
gettimeofday (&tv, 0);
}
}
-/** Called by a subclass when it has just skipped the processing
- of a frame because it has already been done.
-*/
-void
-Encoder::frame_skipped ()
-{
- boost::mutex::scoped_lock lock (_history_mutex);
- _just_skipped = true;
-}
-
void
-Encoder::process_video (shared_ptr<Image> i, boost::shared_ptr<Subtitle> s)
+Encoder::process_video (shared_ptr<Image> image, bool same, boost::shared_ptr<Subtitle> sub)
{
- if (_opt->video_skip != 0 && (_video_frame % _opt->video_skip) != 0) {
- ++_video_frame;
+ DCPFrameRate dfr (_film->frames_per_second ());
+
+ if (dfr.skip && (_video_frames_in % 2)) {
+ ++_video_frames_in;
return;
}
- if (_opt->video_range) {
- pair<SourceFrame, SourceFrame> const r = _opt->video_range.get();
- if (_video_frame < r.first || _video_frame >= r.second) {
- ++_video_frame;
- return;
- }
+ boost::mutex::scoped_lock lock (_mutex);
+
+ /* Wait until the queue has gone down a bit */
+ while (_queue.size() >= _threads.size() * 2 && !_terminate) {
+ TIMING (_("decoder sleeps with queue of %1"), _queue.size());
+ _condition.wait (lock);
+ TIMING (_("decoder wakes with queue of %1"), _queue.size());
}
- do_process_video (i, s);
- ++_video_frame;
-}
+ if (_terminate) {
+ return;
+ }
-void
-Encoder::process_audio (shared_ptr<AudioBuffers> data)
-{
- if (_opt->audio_range) {
+ if (_writer->thrown ()) {
+ _writer->rethrow ();
+ }
- shared_ptr<AudioBuffers> trimmed (new AudioBuffers (*data.get ()));
+ if (_writer->can_fake_write (_video_frames_out)) {
+ _writer->fake_write (_video_frames_out);
+ _have_a_real_frame = false;
+ frame_done ();
+ } else if (same && _have_a_real_frame) {
+ /* Use the last frame that we encoded. */
+ _writer->repeat (_video_frames_out);
+ frame_done ();
+ } else {
+ /* Queue this new frame for encoding */
+ pair<string, string> const s = Filter::ffmpeg_strings (_film->filters());
+ TIMING (_("adding to queue of %1"), _queue.size ());
+ _queue.push_back (boost::shared_ptr<DCPVideoFrame> (
+ new DCPVideoFrame (
+ image, sub, _film->format()->dcp_size(), _film->format()->dcp_padding (_film),
+ _film->subtitle_offset(), _film->subtitle_scale(),
+ _film->scaler(), _video_frames_out, _film->frames_per_second(), s.second,
+ _film->colour_lut(), _film->j2k_bandwidth(),
+ _film->log()
+ )
+ ));
- /* Range that we are encoding */
- pair<int64_t, int64_t> required_range = _opt->audio_range.get();
- /* Range of this block of data */
- pair<int64_t, int64_t> this_range (_audio_frame, _audio_frame + trimmed->frames());
+ _condition.notify_all ();
+ _have_a_real_frame = true;
+ }
- if (this_range.second < required_range.first || required_range.second < this_range.first) {
- /* No part of this audio is within the required range */
- return;
- } else if (required_range.first >= this_range.first && required_range.first < this_range.second) {
- /* Trim start */
- int64_t const shift = required_range.first - this_range.first;
- trimmed->move (shift, 0, trimmed->frames() - shift);
- trimmed->set_frames (trimmed->frames() - shift);
- } else if (required_range.second >= this_range.first && required_range.second < this_range.second) {
- /* Trim end */
- trimmed->set_frames (required_range.second - this_range.first);
- }
+ ++_video_frames_in;
+ ++_video_frames_out;
- data = trimmed;
+ if (dfr.repeat) {
+ _writer->repeat (_video_frames_out);
+ ++_video_frames_out;
+ frame_done ();
}
+}
+void
+Encoder::process_audio (shared_ptr<AudioBuffers> data)
+{
/* NOTE(review): incomplete diff hunk — the swr_convert() call and the
   creation of `resampled` are elided between the `if (_swr_context)` line
   and the orphan `);` below, as is (presumably) a `data = resampled;`
   assignment: as shown, write_audio() would receive the un-resampled
   buffer.  Confirm against the full file; code left untouched. */
#if HAVE_SWRESAMPLE
/* Maybe sample-rate convert */
if (_swr_context) {
);
if (resampled_frames < 0) {
-	throw EncodeError ("could not run sample-rate converter");
+	throw EncodeError (_("could not run sample-rate converter"));
}
resampled->set_frames (resampled_frames);
#endif
write_audio (data);
-
-	_audio_frame += data->frames ();
}
void
-Encoder::write_audio (shared_ptr<const AudioBuffers> audio)
+Encoder::terminate_threads ()
{
- for (int i = 0; i < _film->audio_channels(); ++i) {
- sf_write_float (_sound_files[i], audio->data(i), audio->frames());
+ boost::mutex::scoped_lock lock (_mutex);
+ _terminate = true;
+ _condition.notify_all ();
+ lock.unlock ();
+
+ for (list<boost::thread *>::iterator i = _threads.begin(); i != _threads.end(); ++i) {
+ (*i)->join ();
+ delete *i;
}
-
- _audio_frames_written += audio->frames ();
}
void
-Encoder::close_sound_files ()
+Encoder::encoder_thread (ServerDescription* server)
{
- for (vector<SNDFILE*>::iterator i = _sound_files.begin(); i != _sound_files.end(); ++i) {
- sf_close (*i);
+ /* Number of seconds that we currently wait between attempts
+ to connect to the server; not relevant for localhost
+ encodings.
+ */
+ int remote_backoff = 0;
+
+ while (1) {
+
+ TIMING (N_("encoder thread %1 sleeps"), boost::this_thread::get_id());
+ boost::mutex::scoped_lock lock (_mutex);
+ while (_queue.empty () && !_terminate) {
+ _condition.wait (lock);
+ }
+
+ if (_terminate) {
+ return;
+ }
+
+ TIMING (N_("encoder thread %1 wakes with queue of %2"), boost::this_thread::get_id(), _queue.size());
+ boost::shared_ptr<DCPVideoFrame> vf = _queue.front ();
+ _film->log()->log (String::compose (N_("Encoder thread %1 pops frame %2 from queue"), boost::this_thread::get_id(), vf->frame()), Log::VERBOSE);
+ _queue.pop_front ();
+
+ lock.unlock ();
+
+ shared_ptr<EncodedData> encoded;
+
+ if (server) {
+ try {
+ encoded = vf->encode_remotely (server);
+
+ if (remote_backoff > 0) {
+ _film->log()->log (String::compose (N_("%1 was lost, but now she is found; removing backoff"), server->host_name ()));
+ }
+
+ /* This job succeeded, so remove any backoff */
+ remote_backoff = 0;
+
+ } catch (std::exception& e) {
+ if (remote_backoff < 60) {
+ /* back off more */
+ remote_backoff += 10;
+ }
+ _film->log()->log (
+ String::compose (
+ N_("Remote encode of %1 on %2 failed (%3); thread sleeping for %4s"),
+ vf->frame(), server->host_name(), e.what(), remote_backoff)
+ );
+ }
+
+ } else {
+ try {
+ TIMING (N_("encoder thread %1 begins local encode of %2"), boost::this_thread::get_id(), vf->frame());
+ encoded = vf->encode_locally ();
+ TIMING (N_("encoder thread %1 finishes local encode of %2"), boost::this_thread::get_id(), vf->frame());
+ } catch (std::exception& e) {
+ _film->log()->log (String::compose (N_("Local encode failed (%1)"), e.what ()));
+ }
+ }
+
+ if (encoded) {
+ _writer->write (encoded, vf->frame ());
+ frame_done ();
+ } else {
+ lock.lock ();
+ _film->log()->log (
+ String::compose (N_("Encoder thread %1 pushes frame %2 back onto queue after failure"), boost::this_thread::get_id(), vf->frame())
+ );
+ _queue.push_front (vf);
+ lock.unlock ();
+ }
+
+ if (remote_backoff > 0) {
+ dvdomatic_sleep (remote_backoff);
+ }
+
+ lock.lock ();
+ _condition.notify_all ();
}
+}
- _sound_files.clear ();
-}
+void
+Encoder::write_audio (shared_ptr<const AudioBuffers> data)
+{
+ if (_film->audio_channels() == 1) {
+ /* We need to switch things around so that the mono channel is on
+ the centre channel of a 5.1 set (with other channels silent).
+ */
+
+ shared_ptr<AudioBuffers> b (new AudioBuffers (6, data->frames ()));
+ b->make_silent (libdcp::LEFT);
+ b->make_silent (libdcp::RIGHT);
+ memcpy (b->data()[libdcp::CENTRE], data->data()[0], data->frames() * sizeof(float));
+ b->make_silent (libdcp::LFE);
+ b->make_silent (libdcp::LS);
+ b->make_silent (libdcp::RS);
+ data = b;
+ }
+
+ _writer->write (data);
+}