Merge master and multifarious hackery.
diff --git a/src/lib/encoder.cc b/src/lib/encoder.cc
index c1d1041ae539f9cdab1f087291eb2d8d7104abbb..270bf3d43b736fe888f23a537cdf1c1c68d0dfa5 100644
@@ -39,6 +39,7 @@
 #include "writer.h"
 #include "player.h"
 #include "audio_mapping.h"
+#include "container.h"
 
 #include "i18n.h"
 
@@ -55,13 +56,10 @@ using boost::optional;
 int const Encoder::_history_size = 25;
 
 /** @param f Film that we are encoding */
-Encoder::Encoder (shared_ptr<Film> f)
+Encoder::Encoder (shared_ptr<Film> f, shared_ptr<Job> j)
        : _film (f)
-       , _video_frames_in (0)
+       , _job (j)
        , _video_frames_out (0)
-#ifdef HAVE_SWRESAMPLE   
-       , _swr_context (0)
-#endif
        , _have_a_real_frame (false)
        , _terminate (false)
 {
@@ -79,42 +77,6 @@ Encoder::~Encoder ()
 void
 Encoder::process_begin ()
 {
-       if (_film->has_audio() && _film->audio_frame_rate() != _film->target_audio_sample_rate()) {
-#ifdef HAVE_SWRESAMPLE
-
-               stringstream s;
-               s << String::compose (N_("Will resample audio from %1 to %2"), _film->audio_frame_rate(), _film->target_audio_sample_rate());
-               _film->log()->log (s.str ());
-
-               /* We will be using planar float data when we call the
-                  resampler.  As far as I can see, the audio channel
-                  layout is not necessary for our purposes; it seems
-                  only to be used to get the number of channels and
-                  decide if rematrixing is needed.  It won't be, since
-                  input and output layouts are the same.
-               */
-
-               _swr_context = swr_alloc_set_opts (
-                       0,
-                       av_get_default_channel_layout (_film->audio_mapping().dcp_channels ()),
-                       AV_SAMPLE_FMT_FLTP,
-                       _film->target_audio_sample_rate(),
-                       av_get_default_channel_layout (_film->audio_mapping().dcp_channels ()),
-                       AV_SAMPLE_FMT_FLTP,
-                       _film->audio_frame_rate(),
-                       0, 0
-                       );
-               
-               swr_init (_swr_context);
-#else
-               throw EncodeError (_("Cannot resample audio as libswresample is not present"));
-#endif
-       } else {
-#ifdef HAVE_SWRESAMPLE
-               _swr_context = 0;
-#endif         
-       }
-
        for (int i = 0; i < Config::instance()->num_local_encoding_threads (); ++i) {
                _threads.push_back (new boost::thread (boost::bind (&Encoder::encoder_thread, this, (ServerDescription *) 0)));
        }
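
The block deleted above set up libswresample in process_begin() whenever the DCP audio rate differed from the source rate. For reference, a minimal sketch of that setup using the FFmpeg libswresample API the old code relied on; `channels', `in_rate' and `out_rate' are placeholders for the Film's audio settings, and EncodeError is the project's own exception type:

extern "C" {
#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
}

/* Sketch only: allocate and initialise a resampler for planar float audio.
   The channel layout is derived from the channel count alone; since input
   and output layouts are identical, no rematrixing will be done.
*/
SwrContext *
make_resampler (int channels, int in_rate, int out_rate)
{
	SwrContext* ctx = swr_alloc_set_opts (
		0,
		av_get_default_channel_layout (channels), AV_SAMPLE_FMT_FLTP, out_rate,
		av_get_default_channel_layout (channels), AV_SAMPLE_FMT_FLTP, in_rate,
		0, 0
		);

	if (!ctx || swr_init (ctx) < 0) {
		throw EncodeError ("could not set up sample-rate converter");
	}

	return ctx;
}
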
@@ -127,37 +89,13 @@ Encoder::process_begin ()
                }
        }
 
-       _writer.reset (new Writer (_film));
+       _writer.reset (new Writer (_film, _job));
 }
 
 
 void
 Encoder::process_end ()
 {
-#if HAVE_SWRESAMPLE    
-       if (_film->has_audio() && _swr_context) {
-
-               shared_ptr<AudioBuffers> out (new AudioBuffers (_film->audio_mapping().dcp_channels(), 256));
-                       
-               while (1) {
-                       int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
-
-                       if (frames < 0) {
-                               throw EncodeError (_("could not run sample-rate converter"));
-                       }
-
-                       if (frames == 0) {
-                               break;
-                       }
-
-                       out->set_frames (frames);
-                       _writer->write (out);
-               }
-
-               swr_free (&_swr_context);
-       }
-#endif
-
        boost::mutex::scoped_lock lock (_mutex);
 
        _film->log()->log (String::compose (N_("Clearing queue of %1"), _queue.size ()));
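
The flush loop deleted from the start of process_end() drained whatever samples libswresample still had buffered before the context was freed. A sketch of that end-of-stream drain, assuming a live SwrContext `ctx', a channel count `channels' and the Writer as the destination:

/* Sketch: ask swr_convert() for output with no new input until it returns
   no frames, writing each drained block, then free the context. */
shared_ptr<AudioBuffers> out (new AudioBuffers (channels, 256));
while (true) {
	int const frames = swr_convert (ctx, (uint8_t **) out->data(), 256, 0, 0);
	if (frames < 0) {
		throw EncodeError ("could not run sample-rate converter");
	}
	if (frames == 0) {
		break;
	}
	out->set_frames (frames);
	writer->write (out);
}
swr_free (&ctx);
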
@@ -240,15 +178,8 @@ Encoder::frame_done ()
 }
 
 void
-Encoder::process_video (shared_ptr<const Image> image, bool same, shared_ptr<Subtitle> sub)
+Encoder::process_video (shared_ptr<const Image> image, bool same, shared_ptr<Subtitle> sub, Time)
 {
-       FrameRateConversion frc (_film->video_frame_rate(), _film->dcp_frame_rate());
-       
-       if (frc.skip && (_video_frames_in % 2)) {
-               ++_video_frames_in;
-               return;
-       }
-
        boost::mutex::scoped_lock lock (_mutex);
 
        /* Wait until the queue has gone down a bit */
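
The lines deleted here were the `skip' half of the old frame-rate conversion; the matching `repeat' half disappears in the final hunk below. A sketch of the pattern, with `frames_in' and `frames_out' standing in for the encoder's counters:

/* Sketch: when converting, say, 50 fps source to 25 fps DCP, drop every
   other input frame; when the source rate needs doubling instead, ask the
   writer to duplicate the frame that was just queued. */
FrameRateConversion frc (source_rate, dcp_rate);

if (frc.skip && (frames_in % 2)) {
	++frames_in;
	return;
}

/* ... queue the frame for encoding ... */

if (frc.repeat) {
	writer->repeat (frames_out);
	++frames_out;
}
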
@@ -276,13 +207,13 @@ Encoder::process_video (shared_ptr<const Image> image, bool same, shared_ptr<Sub
                frame_done ();
        } else {
                /* Queue this new frame for encoding */
-               pair<string, string> const s = Filter::ffmpeg_strings (_film->filters());
                TIMING ("adding to queue of %1", _queue.size ());
+               /* XXX: padding */
                _queue.push_back (shared_ptr<DCPVideoFrame> (
                                          new DCPVideoFrame (
-                                                 image, sub, _film->format()->dcp_size(), _film->format()->dcp_padding (_film),
+                                                 image, sub, _film->container()->dcp_size(), 0,
                                                  _film->subtitle_offset(), _film->subtitle_scale(),
-                                                 _film->scaler(), _video_frames_out, _film->dcp_frame_rate(), s.second,
+                                                 _film->scaler(), _video_frames_out, _film->dcp_video_frame_rate(),
                                                  _film->colour_lut(), _film->j2k_bandwidth(),
                                                  _film->log()
                                                  )
@@ -292,44 +223,12 @@ Encoder::process_video (shared_ptr<const Image> image, bool same, shared_ptr<Sub
                _have_a_real_frame = true;
        }
 
-       ++_video_frames_in;
        ++_video_frames_out;
-
-       if (frc.repeat) {
-               _writer->repeat (_video_frames_out);
-               ++_video_frames_out;
-               frame_done ();
-       }
 }
 
 void
-Encoder::process_audio (shared_ptr<const AudioBuffers> data)
+Encoder::process_audio (shared_ptr<const AudioBuffers> data, Time)
 {
-#if HAVE_SWRESAMPLE
-       /* Maybe sample-rate convert */
-       if (_swr_context) {
-
-               /* Compute the resampled frames count and add 32 for luck */
-               int const max_resampled_frames = ceil ((int64_t) data->frames() * _film->target_audio_sample_rate() / _film->audio_frame_rate()) + 32;
-
-               shared_ptr<AudioBuffers> resampled (new AudioBuffers (_film->audio_mapping().dcp_channels(), max_resampled_frames));
-
-               /* Resample audio */
-               int const resampled_frames = swr_convert (
-                       _swr_context, (uint8_t **) resampled->data(), max_resampled_frames, (uint8_t const **) data->data(), data->frames()
-                       );
-               
-               if (resampled_frames < 0) {
-                       throw EncodeError (_("could not run sample-rate converter"));
-               }
-
-               resampled->set_frames (resampled_frames);
-               
-               /* And point our variables at the resampled audio */
-               data = resampled;
-       }
-#endif
-
        _writer->write (data);
 }