X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fencoder.cc;h=270bf3d43b736fe888f23a537cdf1c1c68d0dfa5;hb=996b0c06e23bcb6b300d7b8799df94993692e07d;hp=d25e0d0f8d7dde5a2870e39ba1719b0c5ec77084;hpb=fd040c2bd27fde35424a384174ecb56c643764cd;p=dcpomatic.git

diff --git a/src/lib/encoder.cc b/src/lib/encoder.cc
index d25e0d0f8..270bf3d43 100644
--- a/src/lib/encoder.cc
+++ b/src/lib/encoder.cc
@@ -39,6 +39,7 @@
 #include "writer.h"
 #include "player.h"
 #include "audio_mapping.h"
+#include "container.h"
 
 #include "i18n.h"
 
@@ -58,11 +59,7 @@ int const Encoder::_history_size = 25;
 Encoder::Encoder (shared_ptr<Film> f, shared_ptr<Job> j)
 	: _film (f)
 	, _job (j)
-	, _video_frames_in (0)
 	, _video_frames_out (0)
-#ifdef HAVE_SWRESAMPLE
-	, _swr_context (0)
-#endif
 	, _have_a_real_frame (false)
 	, _terminate (false)
 {
@@ -80,42 +77,6 @@ Encoder::~Encoder ()
 void
 Encoder::process_begin ()
 {
-	if (_film->has_audio() && _film->audio_frame_rate() != _film->target_audio_sample_rate()) {
-#ifdef HAVE_SWRESAMPLE
-
-		stringstream s;
-		s << String::compose (N_("Will resample audio from %1 to %2"), _film->audio_frame_rate(), _film->target_audio_sample_rate());
-		_film->log()->log (s.str ());
-
-		/* We will be using planar float data when we call the
-		   resampler.  As far as I can see, the audio channel
-		   layout is not necessary for our purposes; it seems
-		   only to be used get the number of channels and
-		   decide if rematrixing is needed.  It won't be, since
-		   input and output layouts are the same.
-		*/
-
-		_swr_context = swr_alloc_set_opts (
-			0,
-			av_get_default_channel_layout (_film->audio_mapping().dcp_channels ()),
-			AV_SAMPLE_FMT_FLTP,
-			_film->target_audio_sample_rate(),
-			av_get_default_channel_layout (_film->audio_mapping().dcp_channels ()),
-			AV_SAMPLE_FMT_FLTP,
-			_film->audio_frame_rate(),
-			0, 0
-			);
-
-		swr_init (_swr_context);
-#else
-		throw EncodeError (_("Cannot resample audio as libswresample is not present"));
-#endif
-	} else {
-#ifdef HAVE_SWRESAMPLE
-		_swr_context = 0;
-#endif
-	}
-
 	for (int i = 0; i < Config::instance()->num_local_encoding_threads (); ++i) {
 		_threads.push_back (new boost::thread (boost::bind (&Encoder::encoder_thread, this, (ServerDescription *) 0)));
 	}
@@ -135,30 +96,6 @@ Encoder::process_begin ()
 void
 Encoder::process_end ()
 {
-#if HAVE_SWRESAMPLE
-	if (_film->has_audio() && _swr_context) {
-
-		shared_ptr<AudioBuffers> out (new AudioBuffers (_film->audio_mapping().dcp_channels(), 256));
-
-		while (1) {
-			int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
-
-			if (frames < 0) {
-				throw EncodeError (_("could not run sample-rate converter"));
-			}
-
-			if (frames == 0) {
-				break;
-			}
-
-			out->set_frames (frames);
-			_writer->write (out);
-		}
-
-		swr_free (&_swr_context);
-	}
-#endif
-
 	boost::mutex::scoped_lock lock (_mutex);
 
 	_film->log()->log (String::compose (N_("Clearing queue of %1"), _queue.size ()));
@@ -241,15 +178,8 @@ Encoder::frame_done ()
 }
 
 void
-Encoder::process_video (shared_ptr<Image> image, bool same, shared_ptr<Subtitle> sub)
+Encoder::process_video (shared_ptr<Image> image, bool same, shared_ptr<Subtitle> sub, Time)
 {
-	FrameRateConversion frc (_film->video_frame_rate(), _film->dcp_frame_rate());
-
-	if (frc.skip && (_video_frames_in % 2)) {
-		++_video_frames_in;
-		return;
-	}
-
 	boost::mutex::scoped_lock lock (_mutex);
 
 	/* Wait until the queue has gone down a bit */
@@ -277,13 +207,13 @@ Encoder::process_video (shared_ptr<Image> image, bool same, shared_ptr
-	pair<string, string> const s = Filter::ffmpeg_strings (_film->filters());
 
 	TIMING ("adding to queue of %1", _queue.size ());
+	/* XXX: padding */
 	_queue.push_back (shared_ptr<DCPVideoFrame> (
 			  new DCPVideoFrame (
-				  image, sub, _film->format()->dcp_size(), _film->format()->dcp_padding (_film),
+				  image, sub, _film->container()->dcp_size(), 0,
 				  _film->subtitle_offset(), _film->subtitle_scale(),
-				  _film->scaler(), _video_frames_out, _film->dcp_frame_rate(), s.second,
+				  _film->scaler(), _video_frames_out, _film->dcp_video_frame_rate(),
 				  _film->colour_lut(), _film->j2k_bandwidth(),
 				  _film->log()
 				  )
@@ -293,44 +223,12 @@ Encoder::process_video (shared_ptr<Image> image, bool same, shared_ptr
 		_writer->repeat (_video_frames_out);
-		++_video_frames_out;
-		frame_done ();
-	}
 }
 
 void
-Encoder::process_audio (shared_ptr<AudioBuffers> data)
+Encoder::process_audio (shared_ptr<AudioBuffers> data, Time)
 {
-#if HAVE_SWRESAMPLE
-	/* Maybe sample-rate convert */
-	if (_swr_context) {
-
-		/* Compute the resampled frames count and add 32 for luck */
-		int const max_resampled_frames = ceil ((int64_t) data->frames() * _film->target_audio_sample_rate() / _film->audio_frame_rate()) + 32;
-
-		shared_ptr<AudioBuffers> resampled (new AudioBuffers (_film->audio_mapping().dcp_channels(), max_resampled_frames));
-
-		/* Resample audio */
-		int const resampled_frames = swr_convert (
-			_swr_context, (uint8_t **) resampled->data(), max_resampled_frames, (uint8_t const **) data->data(), data->frames()
-			);
-
-		if (resampled_frames < 0) {
-			throw EncodeError (_("could not run sample-rate converter"));
-		}
-
-		resampled->set_frames (resampled_frames);
-
-		/* And point our variables at the resampled audio */
-		data = resampled;
-	}
-#endif
-
 	_writer->write (data);
 }
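
The audio code removed above follows libswresample's usual shape: allocate a context with identical planar-float layouts on both sides (so no rematrixing happens and only the sample rate changes), size the output buffer from the rate ratio plus a small margin, convert block by block, then flush with a null input and free the context. Below is a minimal, self-contained sketch of that pattern, assuming the same (older) libswresample API the removed code used; it is illustrative only, not DCP-o-matic code, and the channel count, sample rates and block size are arbitrary. Newer FFmpeg releases deprecate swr_alloc_set_opts() and av_get_default_channel_layout() in favour of the AVChannelLayout API.

extern "C" {
#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
}

#include <cstdint>
#include <cstdio>
#include <vector>

int
main ()
{
	int const channels = 2;
	int const in_rate = 44100;
	int const out_rate = 48000;

	/* Identical layouts on both sides, so only the sample rate is converted;
	   AV_SAMPLE_FMT_FLTP is planar float, i.e. one buffer per channel.
	*/
	SwrContext* ctx = swr_alloc_set_opts (
		0,
		av_get_default_channel_layout (channels), AV_SAMPLE_FMT_FLTP, out_rate,
		av_get_default_channel_layout (channels), AV_SAMPLE_FMT_FLTP, in_rate,
		0, 0
		);

	if (!ctx || swr_init (ctx) < 0) {
		fprintf (stderr, "could not set up sample-rate converter\n");
		return 1;
	}

	/* One block of (silent) planar input */
	int const in_frames = 1024;
	std::vector<float> in_l (in_frames, 0);
	std::vector<float> in_r (in_frames, 0);
	float const* in[2] = { in_l.data(), in_r.data() };

	/* Size the output like the removed code did: scale by the rate ratio,
	   then add a small margin for frames the resampler may still be holding.
	*/
	int const max_out = (int) ((int64_t) in_frames * out_rate / in_rate) + 32;
	std::vector<float> out_l (max_out);
	std::vector<float> out_r (max_out);
	float* out[2] = { out_l.data(), out_r.data() };

	int frames = swr_convert (ctx, (uint8_t **) out, max_out, (uint8_t const **) in, in_frames);
	if (frames < 0) {
		fprintf (stderr, "could not run sample-rate converter\n");
		swr_free (&ctx);
		return 1;
	}
	printf ("converted %d frames\n", frames);

	/* Flush: a null input drains whatever the resampler still has buffered,
	   as the loop removed from process_end() did with 256-frame blocks.
	*/
	while ((frames = swr_convert (ctx, (uint8_t **) out, max_out, 0, 0)) > 0) {
		printf ("flushed %d frames\n", frames);
	}

	swr_free (&ctx);
	return 0;
}

The "+ 32" margin mirrors the removed code's "add 32 for luck": swr_convert() can retain a few frames of latency internally, which is also why the flush loop is needed before the context is freed.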
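
The diff also drops the frame-rate "skip" handling from Encoder::process_video(): when FrameRateConversion reported skip (a source running at roughly twice the DCP rate, e.g. 50 fps to 25 fps), every odd-numbered source frame was discarded, counted via _video_frames_in. A tiny standalone sketch of that keep/drop decision follows; the EveryOtherFrame class is a made-up illustration of the removed check, not DCP-o-matic's FrameRateConversion.

#include <cstdio>

/* Keep-or-drop decision for "skip" frame-rate conversion: when the source
   runs at twice the target rate, every other source frame is dropped.
*/
class EveryOtherFrame
{
public:
	EveryOtherFrame ()
		: _frames_in (0)
	{}

	/* Returns true if this source frame should be encoded */
	bool take (bool skip)
	{
		bool const drop = skip && (_frames_in % 2);
		++_frames_in;
		return !drop;
	}

private:
	int _frames_in;
};

int
main ()
{
	EveryOtherFrame e;
	for (int i = 0; i < 8; ++i) {
		printf ("source frame %d: %s\n", i, e.take (true) ? "encode" : "drop");
	}
	return 0;
}

Even-numbered frames are kept and odd-numbered ones dropped, so a 50 fps source collapses to 25 fps simply by alternating encode/drop, which is what the removed check did.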