Merge master and multifarious hackery.
diff --git a/src/lib/encoder.cc b/src/lib/encoder.cc
index f352f5a5246d4c48aa95fa6ba2b7233e4a46b449..270bf3d43b736fe888f23a537cdf1c1c68d0dfa5 100644
--- a/src/lib/encoder.cc
+++ b/src/lib/encoder.cc
  *  @brief Parent class for classes which can encode video and audio frames.
  */
 
+#include <iostream>
 #include <boost/filesystem.hpp>
+#include <boost/lexical_cast.hpp>
+#include <libdcp/picture_asset.h>
 #include "encoder.h"
 #include "util.h"
-#include "options.h"
 #include "film.h"
 #include "log.h"
 #include "exceptions.h"
+#include "filter.h"
+#include "config.h"
+#include "dcp_video_frame.h"
+#include "server.h"
+#include "format.h"
+#include "cross.h"
+#include "writer.h"
+#include "player.h"
+#include "audio_mapping.h"
+#include "container.h"
+
+#include "i18n.h"
 
 using std::pair;
+using std::string;
 using std::stringstream;
 using std::vector;
-using namespace boost;
+using std::list;
+using std::cout;
+using std::make_pair;
+using boost::shared_ptr;
+using boost::optional;
 
 int const Encoder::_history_size = 25;
 
-/** @param f Film that we are encoding.
- *  @param o Options.
- */
-Encoder::Encoder (shared_ptr<const Film> f, shared_ptr<const EncodeOptions> o)
+/** @param f Film that we are encoding.
+ *  @param j Job that this encode is being done as part of.
+ */
+Encoder::Encoder (shared_ptr<Film> f, shared_ptr<Job> j)
        : _film (f)
-       , _opt (o)
-       , _just_skipped (false)
-       , _video_frame (0)
-       , _audio_frame (0)
-#ifdef HAVE_SWRESAMPLE   
-       , _swr_context (0)
-#endif   
-       , _audio_frames_written (0)
+       , _job (j)
+       , _video_frames_out (0)
+       , _have_a_real_frame (false)
+       , _terminate (false)
 {
-       if (_film->audio_stream()) {
-               /* Create sound output files with .tmp suffixes; we will rename
-                  them if and when we complete.
-               */
-               for (int i = 0; i < _film->audio_channels(); ++i) {
-                       SF_INFO sf_info;
-                       sf_info.samplerate = dcp_audio_sample_rate (_film->audio_stream()->sample_rate());
-                       /* We write mono files */
-                       sf_info.channels = 1;
-                       sf_info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_24;
-                       SNDFILE* f = sf_open (_opt->multichannel_audio_out_path (i, true).c_str (), SFM_WRITE, &sf_info);
-                       if (f == 0) {
-                               throw CreateFileError (_opt->multichannel_audio_out_path (i, true));
-                       }
-                       _sound_files.push_back (f);
-               }
-       }
+       
 }
 
 Encoder::~Encoder ()
 {
-       close_sound_files ();
+       terminate_threads ();
+       if (_writer) {
+               _writer->finish ();
+       }
 }
 
 void
 Encoder::process_begin ()
 {
-       if (_film->audio_stream() && _film->audio_stream()->sample_rate() != _film->target_audio_sample_rate()) {
-#ifdef HAVE_SWRESAMPLE
-
-               stringstream s;
-               s << "Will resample audio from " << _film->audio_stream()->sample_rate() << " to " << _film->target_audio_sample_rate();
-               _film->log()->log (s.str ());
-
-               /* We will be using planar float data when we call the resampler */
-               _swr_context = swr_alloc_set_opts (
-                       0,
-                       _film->audio_stream()->channel_layout(),
-                       AV_SAMPLE_FMT_FLTP,
-                       _film->target_audio_sample_rate(),
-                       _film->audio_stream()->channel_layout(),
-                       AV_SAMPLE_FMT_FLTP,
-                       _film->audio_stream()->sample_rate(),
-                       0, 0
-                       );
-               
-               swr_init (_swr_context);
-#else
-               throw EncodeError ("Cannot resample audio as libswresample is not present");
-#endif
-       } else {
-#ifdef HAVE_SWRESAMPLE
-               _swr_context = 0;
-#endif         
+       for (int i = 0; i < Config::instance()->num_local_encoding_threads (); ++i) {
+               _threads.push_back (new boost::thread (boost::bind (&Encoder::encoder_thread, this, (ServerDescription *) 0)));
+       }
+
+       vector<ServerDescription*> servers = Config::instance()->servers ();
+
+       for (vector<ServerDescription*>::iterator i = servers.begin(); i != servers.end(); ++i) {
+               for (int j = 0; j < (*i)->threads (); ++j) {
+                       _threads.push_back (new boost::thread (boost::bind (&Encoder::encoder_thread, this, *i)));
+               }
        }
+
+       _writer.reset (new Writer (_film, _job));
 }
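
process_begin() builds a single pool of interchangeable workers: one encoder_thread() per configured local encoding thread, plus one per thread advertised by each remote encoding server, all pulling frames from the same queue. A minimal sketch of that fan-out, using standard C++ threads and hypothetical Pool/ServerInfo names rather than the boost types in this file:

	// Illustrative sketch only; Pool, ServerInfo and worker() are hypothetical
	// stand-ins for Encoder, ServerDescription and encoder_thread.
	#include <thread>
	#include <vector>

	struct ServerInfo {
		int threads;  // encoding threads offered by this remote server
	};

	struct Pool {
		std::vector<std::thread> workers;

		// server == 0 means "encode locally", as in encoder_thread()
		void worker (ServerInfo const * server) { /* pull frames from the shared queue */ }

		// (joining of the workers is omitted here; see terminate_threads() above)
		void start (int local_threads, std::vector<ServerInfo> const & servers) {
			for (int i = 0; i < local_threads; ++i) {
				workers.push_back (std::thread (&Pool::worker, this, (ServerInfo const *) 0));
			}
			for (size_t i = 0; i < servers.size(); ++i) {
				for (int j = 0; j < servers[i].threads; ++j) {
					workers.push_back (std::thread (&Pool::worker, this, &servers[i]));
				}
			}
		}
	};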
 
 
 void
 Encoder::process_end ()
 {
-#if HAVE_SWRESAMPLE    
-       if (_film->audio_stream() && _swr_context) {
-
-               shared_ptr<AudioBuffers> out (new AudioBuffers (_film->audio_stream()->channels(), 256));
-                       
-               while (1) {
-                       int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
-
-                       if (frames < 0) {
-                               throw EncodeError ("could not run sample-rate converter");
-                       }
-
-                       if (frames == 0) {
-                               break;
-                       }
+       boost::mutex::scoped_lock lock (_mutex);
 
-                       out->set_frames (frames);
-                       write_audio (out);
-               }
+       _film->log()->log (String::compose (N_("Clearing queue of %1"), _queue.size ()));
 
-               swr_free (&_swr_context);
+       /* Keep waking workers until the queue is empty */
+       while (!_queue.empty ()) {
+               _film->log()->log (String::compose (N_("Waking with %1"), _queue.size ()), Log::VERBOSE);
+               _condition.notify_all ();
+               _condition.wait (lock);
        }
-#endif
 
-       if (_film->audio_stream()) {
-               close_sound_files ();
-               
-               /* Rename .wav.tmp files to .wav */
-               for (int i = 0; i < _film->audio_channels(); ++i) {
-                       if (boost::filesystem::exists (_opt->multichannel_audio_out_path (i, false))) {
-                               boost::filesystem::remove (_opt->multichannel_audio_out_path (i, false));
-                       }
-                       boost::filesystem::rename (_opt->multichannel_audio_out_path (i, true), _opt->multichannel_audio_out_path (i, false));
+       lock.unlock ();
+       
+       terminate_threads ();
+
+       _film->log()->log (String::compose (N_("Mopping up %1"), _queue.size()));
+
+       /* The following sequence of events can occur in the above code:
+            1. a remote worker takes the last image off the queue
+            2. the loop above terminates
+            3. the remote worker fails to encode the image and puts it back on the queue
+            4. the remote worker is then terminated by terminate_threads
+
+            So just mop up anything left in the queue here.
+       */
+
+       for (list<shared_ptr<DCPVideoFrame> >::iterator i = _queue.begin(); i != _queue.end(); ++i) {
+               _film->log()->log (String::compose (N_("Encode left-over frame %1"), (*i)->frame ()));
+               try {
+                       _writer->write ((*i)->encode_locally(), (*i)->frame ());
+                       frame_done ();
+               } catch (std::exception& e) {
+                       _film->log()->log (String::compose (N_("Local encode failed (%1)"), e.what ()));
                }
        }
+
+       _writer->finish ();
+       _writer.reset ();
 }      
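
The shutdown sequence in process_end() is: keep waking workers until they have drained the queue, join and delete the worker threads, locally encode anything a failed remote worker may have pushed back after the drain loop exited, then let the writer finish. A condensed sketch of that drain-then-mop-up idea, with hypothetical names and a standard condition variable in place of the boost one:

	// Illustrative sketch only; queue_, mutex_, cond_ and encode_locally()
	// are hypothetical stand-ins for the members used above.
	#include <condition_variable>
	#include <deque>
	#include <mutex>

	std::deque<int> queue_;          // frame indices still waiting to be encoded
	std::mutex mutex_;
	std::condition_variable cond_;

	void encode_locally (int frame) { /* last-resort local encode of one frame */ }

	void drain_and_mop_up ()
	{
		std::unique_lock<std::mutex> lock (mutex_);

		/* Keep waking workers until the queue is empty; they notify
		   this condition as they pop frames off.
		*/
		while (!queue_.empty ()) {
			cond_.notify_all ();
			cond_.wait (lock);
		}
		lock.unlock ();

		/* ... join and delete the worker threads here ... */

		/* A remote worker may have pushed a failed frame back onto the
		   queue after the loop above finished, so mop up locally.
		*/
		for (std::deque<int>::iterator i = queue_.begin(); i != queue_.end(); ++i) {
			encode_locally (*i);
		}
	}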
 
 /** @return an estimate of the current number of frames we are encoding per second,
  *  or 0 if not known.
  */
 float
-Encoder::current_frames_per_second () const
+Encoder::current_encoding_rate () const
 {
        boost::mutex::scoped_lock lock (_history_mutex);
        if (int (_time_history.size()) < _history_size) {
@@ -165,20 +153,12 @@ Encoder::current_frames_per_second () const
        return _history_size / (seconds (now) - seconds (_time_history.back ()));
 }
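
current_encoding_rate() keeps the wall-clock times of the last _history_size completed frames and, once that window is full, reports _history_size divided by the time elapsed since the oldest entry. A self-contained sketch of the same moving-window estimate, using hypothetical names and std::chrono instead of gettimeofday():

	// Illustrative sketch only: a moving-window rate estimate like the one above.
	#include <chrono>
	#include <list>

	class RateEstimator
	{
	public:
		/* Call this every time a frame finishes encoding */
		void frame_done ()
		{
			_history.push_front (std::chrono::steady_clock::now ());
			if (_history.size() > _size) {
				_history.pop_back ();
			}
		}

		/* Frames per second over the last _size frames, or 0 if not yet known */
		float rate () const
		{
			if (_history.size() < _size) {
				return 0;
			}
			std::chrono::duration<float> const span = std::chrono::steady_clock::now() - _history.back ();
			return _size / span.count ();
		}

	private:
		static size_t const _size = 25;
		std::list<std::chrono::steady_clock::time_point> _history;
	};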
 
-/** @return true if the last frame to be processed was skipped as it already existed */
-bool
-Encoder::skipping () const
-{
-       boost::mutex::scoped_lock (_history_mutex);
-       return _just_skipped;
-}
-
-/** @return Number of video frames that have been received */
-SourceFrame
-Encoder::video_frame () const
+/** @return Number of video frames that have been sent out */
+int
+Encoder::video_frames_out () const
 {
 	boost::mutex::scoped_lock lock (_history_mutex);
-       return _video_frame;
+       return _video_frames_out;
 }
 
 /** Should be called when a frame has been encoded successfully.
@@ -188,7 +168,6 @@ void
 Encoder::frame_done ()
 {
        boost::mutex::scoped_lock lock (_history_mutex);
-       _just_skipped = false;
        
        struct timeval tv;
        gettimeofday (&tv, 0);
@@ -198,111 +177,157 @@ Encoder::frame_done ()
        }
 }
 
-/** Called by a subclass when it has just skipped the processing
-    of a frame because it has already been done.
-*/
 void
-Encoder::frame_skipped ()
+Encoder::process_video (shared_ptr<const Image> image, bool same, shared_ptr<Subtitle> sub, Time)
 {
-       boost::mutex::scoped_lock lock (_history_mutex);
-       _just_skipped = true;
-}
+       boost::mutex::scoped_lock lock (_mutex);
 
-void
-Encoder::process_video (shared_ptr<Image> i, boost::shared_ptr<Subtitle> s)
-{
-       if (_opt->video_skip != 0 && (_video_frame % _opt->video_skip) != 0) {
-               ++_video_frame;
+       /* Wait until the queue has gone down a bit */
+       while (_queue.size() >= _threads.size() * 2 && !_terminate) {
+               TIMING ("decoder sleeps with queue of %1", _queue.size());
+               _condition.wait (lock);
+               TIMING ("decoder wakes with queue of %1", _queue.size());
+       }
+
+       if (_terminate) {
                return;
        }
 
-       if (_opt->video_range) {
-               pair<SourceFrame, SourceFrame> const r = _opt->video_range.get();
-               if (_video_frame < r.first || _video_frame >= r.second) {
-                       ++_video_frame;
-                       return;
-               }
+       if (_writer->thrown ()) {
+               _writer->rethrow ();
        }
 
-       do_process_video (i, s);
-       ++_video_frame;
+       if (_writer->can_fake_write (_video_frames_out)) {
+               _writer->fake_write (_video_frames_out);
+               _have_a_real_frame = false;
+               frame_done ();
+       } else if (same && _have_a_real_frame) {
+               /* Use the last frame that we encoded. */
+               _writer->repeat (_video_frames_out);
+               frame_done ();
+       } else {
+               /* Queue this new frame for encoding */
+               TIMING ("adding to queue of %1", _queue.size ());
+               /* XXX: padding */
+               _queue.push_back (shared_ptr<DCPVideoFrame> (
+                                         new DCPVideoFrame (
+                                                 image, sub, _film->container()->dcp_size(), 0,
+                                                 _film->subtitle_offset(), _film->subtitle_scale(),
+                                                 _film->scaler(), _video_frames_out, _film->dcp_video_frame_rate(),
+                                                 _film->colour_lut(), _film->j2k_bandwidth(),
+                                                 _film->log()
+                                                 )
+                                         ));
+               
+               _condition.notify_all ();
+               _have_a_real_frame = true;
+       }
+
+       ++_video_frames_out;
 }
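
process_video() is the producer side of the queue. It applies back-pressure by blocking while the queue holds at least twice as many frames as there are worker threads, and then takes one of three paths: a fake write when the writer already has this frame from a previous run, a repeat of the last frame when the image is unchanged, or a freshly built DCPVideoFrame pushed onto the queue. A minimal sketch of the back-pressure part, with hypothetical names and a standard condition variable:

	// Illustrative sketch only: producer-side back-pressure as in process_video().
	#include <condition_variable>
	#include <deque>
	#include <mutex>

	std::deque<int> queue_;          // frame indices waiting to be encoded
	std::mutex mutex_;
	std::condition_variable cond_;
	size_t num_threads_ = 4;         // size of the worker pool
	bool terminate_ = false;

	void queue_frame (int frame)
	{
		std::unique_lock<std::mutex> lock (mutex_);

		/* Wait until the queue has gone down a bit; workers notify as they pop */
		while (queue_.size() >= num_threads_ * 2 && !terminate_) {
			cond_.wait (lock);
		}

		if (terminate_) {
			return;
		}

		queue_.push_back (frame);
		cond_.notify_all ();
	}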
 
 void
-Encoder::process_audio (shared_ptr<AudioBuffers> data)
+Encoder::process_audio (shared_ptr<const AudioBuffers> data, Time)
 {
-       if (_opt->audio_range) {
-
-               shared_ptr<AudioBuffers> trimmed (new AudioBuffers (*data.get ()));
-               
-               /* Range that we are encoding */
-               pair<int64_t, int64_t> required_range = _opt->audio_range.get();
-               /* Range of this block of data */
-               pair<int64_t, int64_t> this_range (_audio_frame, _audio_frame + trimmed->frames());
+       _writer->write (data);
+}
 
-               if (this_range.second < required_range.first || required_range.second < this_range.first) {
-                       /* No part of this audio is within the required range */
-                       return;
-               } else if (required_range.first >= this_range.first && required_range.first < this_range.second) {
-                       /* Trim start */
-                       int64_t const shift = required_range.first - this_range.first;
-                       trimmed->move (shift, 0, trimmed->frames() - shift);
-                       trimmed->set_frames (trimmed->frames() - shift);
-               } else if (required_range.second >= this_range.first && required_range.second < this_range.second) {
-                       /* Trim end */
-                       trimmed->set_frames (required_range.second - this_range.first);
+void
+Encoder::terminate_threads ()
+{
+       boost::mutex::scoped_lock lock (_mutex);
+       _terminate = true;
+       _condition.notify_all ();
+       lock.unlock ();
+
+       for (list<boost::thread *>::iterator i = _threads.begin(); i != _threads.end(); ++i) {
+               if ((*i)->joinable ()) {
+                       (*i)->join ();
                }
-
-               data = trimmed;
+               delete *i;
        }
+}
 
-#if HAVE_SWRESAMPLE
-       /* Maybe sample-rate convert */
-       if (_swr_context) {
+void
+Encoder::encoder_thread (ServerDescription* server)
+{
+       /* Number of seconds that we currently wait between attempts
+          to connect to the server; not relevant for localhost
+          encodings.
+       */
+       int remote_backoff = 0;
+       
+       while (1) {
 
-               /* Compute the resampled frames count and add 32 for luck */
-               int const max_resampled_frames = ceil ((int64_t) data->frames() * _film->target_audio_sample_rate() / _film->audio_stream()->sample_rate()) + 32;
+               TIMING ("encoder thread %1 sleeps", boost::this_thread::get_id());
+               boost::mutex::scoped_lock lock (_mutex);
+               while (_queue.empty () && !_terminate) {
+                       _condition.wait (lock);
+               }
 
-               shared_ptr<AudioBuffers> resampled (new AudioBuffers (_film->audio_stream()->channels(), max_resampled_frames));
+               if (_terminate) {
+                       return;
+               }
 
-               /* Resample audio */
-               int const resampled_frames = swr_convert (
-                       _swr_context, (uint8_t **) resampled->data(), max_resampled_frames, (uint8_t const **) data->data(), data->frames()
-                       );
+               TIMING ("encoder thread %1 wakes with queue of %2", boost::this_thread::get_id(), _queue.size());
+               shared_ptr<DCPVideoFrame> vf = _queue.front ();
+               _film->log()->log (String::compose (N_("Encoder thread %1 pops frame %2 from queue"), boost::this_thread::get_id(), vf->frame()), Log::VERBOSE);
+               _queue.pop_front ();
                
-               if (resampled_frames < 0) {
-                       throw EncodeError ("could not run sample-rate converter");
+               lock.unlock ();
+
+               shared_ptr<EncodedData> encoded;
+
+               if (server) {
+                       try {
+                               encoded = vf->encode_remotely (server);
+
+                               if (remote_backoff > 0) {
+                                       _film->log()->log (String::compose (N_("%1 was lost, but now she is found; removing backoff"), server->host_name ()));
+                               }
+                               
+                               /* This job succeeded, so remove any backoff */
+                               remote_backoff = 0;
+                               
+                       } catch (std::exception& e) {
+                               if (remote_backoff < 60) {
+                                       /* back off more */
+                                       remote_backoff += 10;
+                               }
+                               _film->log()->log (
+                                       String::compose (
+                                               N_("Remote encode of %1 on %2 failed (%3); thread sleeping for %4s"),
+                                               vf->frame(), server->host_name(), e.what(), remote_backoff)
+                                       );
+                       }
+                               
+               } else {
+                       try {
+                               TIMING ("encoder thread %1 begins local encode of %2", boost::this_thread::get_id(), vf->frame());
+                               encoded = vf->encode_locally ();
+                               TIMING ("encoder thread %1 finishes local encode of %2", boost::this_thread::get_id(), vf->frame());
+                       } catch (std::exception& e) {
+                               _film->log()->log (String::compose (N_("Local encode failed (%1)"), e.what ()));
+                       }
                }
 
-               resampled->set_frames (resampled_frames);
-               
-               /* And point our variables at the resampled audio */
-               data = resampled;
-       }
-#endif
+               if (encoded) {
+                       _writer->write (encoded, vf->frame ());
+                       frame_done ();
+               } else {
+                       lock.lock ();
+                       _film->log()->log (
+                               String::compose (N_("Encoder thread %1 pushes frame %2 back onto queue after failure"), boost::this_thread::get_id(), vf->frame())
+                               );
+                       _queue.push_front (vf);
+                       lock.unlock ();
+               }
 
-       write_audio (data);
-       
-       _audio_frame += data->frames ();
-}
+               if (remote_backoff > 0) {
+                       dcpomatic_sleep (remote_backoff);
+               }
 
-void
-Encoder::write_audio (shared_ptr<const AudioBuffers> audio)
-{
-       for (int i = 0; i < _film->audio_channels(); ++i) {
-               sf_write_float (_sound_files[i], audio->data(i), audio->frames());
+               lock.lock ();
+               _condition.notify_all ();
        }
-
-       _audio_frames_written += audio->frames ();
 }
-
-void
-Encoder::close_sound_files ()
-{
-       for (vector<SNDFILE*>::iterator i = _sound_files.begin(); i != _sound_files.end(); ++i) {
-               sf_close (*i);
-       }
-
-       _sound_files.clear ();
-}      
-
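
Each encoder_thread() is a consumer on the same queue: it sleeps on the condition variable, pops a frame, encodes it remotely (adding 10 seconds of backoff per failure, up to 60) or locally, hands the result to the writer, and pushes the frame back onto the front of the queue when encoding produced nothing, so that another worker, or the mop-up in process_end(), can retry it. A condensed sketch of that retry-and-backoff shape, with hypothetical encode and write functions:

	// Illustrative sketch only: the consumer loop with failure backoff and requeue.
	// encode() and write_out() are hypothetical stand-ins for encode_remotely()/
	// encode_locally() and the Writer.
	#include <chrono>
	#include <condition_variable>
	#include <deque>
	#include <mutex>
	#include <thread>

	std::deque<int> queue_;
	std::mutex mutex_;
	std::condition_variable cond_;
	bool terminate_ = false;

	bool encode (int frame, bool remote) { return true; }   // true on success
	void write_out (int frame) { }                          // hand off to the writer

	void worker (bool remote)
	{
		int backoff = 0;    // seconds to sleep after a remote failure

		while (true) {
			std::unique_lock<std::mutex> lock (mutex_);
			while (queue_.empty() && !terminate_) {
				cond_.wait (lock);
			}
			if (terminate_) {
				return;
			}

			int const frame = queue_.front ();
			queue_.pop_front ();
			lock.unlock ();

			if (encode (frame, remote)) {
				backoff = 0;
				write_out (frame);
			} else {
				if (remote && backoff < 60) {
					backoff += 10;
				}
				/* Put the frame back for another worker or the final mop-up */
				lock.lock ();
				queue_.push_front (frame);
				lock.unlock ();
			}

			if (backoff > 0) {
				std::this_thread::sleep_for (std::chrono::seconds (backoff));
			}

			lock.lock ();
			cond_.notify_all ();
		}
	}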