*/
#include <iostream>
-#include <boost/filesystem.hpp>
-#include <boost/lexical_cast.hpp>
-#include <libdcp/picture_asset.h>
#include "encoder.h"
#include "util.h"
#include "film.h"
#include "log.h"
-#include "exceptions.h"
-#include "filter.h"
#include "config.h"
#include "dcp_video_frame.h"
#include "server.h"
-#include "format.h"
#include "cross.h"
#include "writer.h"
-#include "player.h"
-#include "audio_mapping.h"
#include "i18n.h"
using std::vector;
using std::list;
using std::cout;
+using std::min;
using std::make_pair;
using boost::shared_ptr;
using boost::optional;
int const Encoder::_history_size = 25;
/** @param f Film that we are encoding.
 *  @param j Job that this encode is being done as part of.
 */
-Encoder::Encoder (shared_ptr<Film> f)
+Encoder::Encoder (shared_ptr<const Film> f, shared_ptr<Job> j)
: _film (f)
- , _video_frames_in (0)
+ , _job (j)
, _video_frames_out (0)
-#ifdef HAVE_SWRESAMPLE
- , _swr_context (0)
-#endif
, _have_a_real_frame (false)
, _terminate (false)
{
void
Encoder::process_begin ()
{
- if (_film->has_audio() && _film->audio_frame_rate() != _film->target_audio_sample_rate()) {
-#ifdef HAVE_SWRESAMPLE
-
- stringstream s;
- s << String::compose (N_("Will resample audio from %1 to %2"), _film->audio_frame_rate(), _film->target_audio_sample_rate());
- _film->log()->log (s.str ());
-
- /* We will be using planar float data when we call the
- resampler. As far as I can see, the audio channel
- layout is not necessary for our purposes; it seems
- only to be used get the number of channels and
- decide if rematrixing is needed. It won't be, since
- input and output layouts are the same.
- */
-
- _swr_context = swr_alloc_set_opts (
- 0,
- av_get_default_channel_layout (_film->audio_channels ()),
- AV_SAMPLE_FMT_FLTP,
- _film->target_audio_sample_rate(),
- av_get_default_channel_layout (_film->audio_channels ()),
- AV_SAMPLE_FMT_FLTP,
- _film->audio_frame_rate(),
- 0, 0
- );
-
- swr_init (_swr_context);
-#else
- throw EncodeError (_("Cannot resample audio as libswresample is not present"));
-#endif
- } else {
-#ifdef HAVE_SWRESAMPLE
- _swr_context = 0;
-#endif
- }
-
for (int i = 0; i < Config::instance()->num_local_encoding_threads (); ++i) {
_threads.push_back (new boost::thread (boost::bind (&Encoder::encoder_thread, this, (ServerDescription *) 0)));
}
}
}
- _writer.reset (new Writer (_film));
+ _writer.reset (new Writer (_film, _job));
}
void
Encoder::process_end ()
{
-#if HAVE_SWRESAMPLE
- if (_film->has_audio() && _film->audio_channels() && _swr_context) {
-
- shared_ptr<AudioBuffers> out (new AudioBuffers (_film->audio_channels(), 256));
-
- while (1) {
- int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
-
- if (frames < 0) {
- throw EncodeError (_("could not run sample-rate converter"));
- }
-
- if (frames == 0) {
- break;
- }
-
- out->set_frames (frames);
- _writer->write (out);
- }
-
- swr_free (&_swr_context);
- }
-#endif
-
boost::mutex::scoped_lock lock (_mutex);
_film->log()->log (String::compose (N_("Clearing queue of %1"), _queue.size ()));
}
void
-Encoder::process_video (shared_ptr<Image> image, bool same, shared_ptr<Subtitle> sub)
+Encoder::process_video (shared_ptr<const Image> image, bool same)
{
- FrameRateConversion frc (_film->video_frame_rate(), _film->dcp_frame_rate());
-
- if (frc.skip && (_video_frames_in % 2)) {
- ++_video_frames_in;
- return;
- }
-
boost::mutex::scoped_lock lock (_mutex);
/* Wait until the queue has gone down a bit */
frame_done ();
} else {
/* Queue this new frame for encoding */
- pair<string, string> const s = Filter::ffmpeg_strings (_film->filters());
TIMING ("adding to queue of %1", _queue.size ());
+ /* XXX: padding */
_queue.push_back (shared_ptr<DCPVideoFrame> (
new DCPVideoFrame (
- image, sub, _film->format()->dcp_size(), _film->format()->dcp_padding (_film),
- _film->subtitle_offset(), _film->subtitle_scale(),
- _film->scaler(), _video_frames_out, _film->dcp_frame_rate(), s.second,
- _film->colour_lut(), _film->j2k_bandwidth(),
- _film->log()
+ image, _video_frames_out, _film->dcp_video_frame_rate(),
+ _film->colour_lut(), _film->j2k_bandwidth(), _film->log()
)
));
_have_a_real_frame = true;
}
- ++_video_frames_in;
++_video_frames_out;
-
- if (frc.repeat) {
- _writer->repeat (_video_frames_out);
- ++_video_frames_out;
- frame_done ();
- }
}
void
-Encoder::process_audio (shared_ptr<AudioBuffers> data)
+Encoder::process_audio (shared_ptr<const AudioBuffers> data)
{
-#if HAVE_SWRESAMPLE
- /* Maybe sample-rate convert */
- if (_swr_context) {
-
- /* Compute the resampled frames count and add 32 for luck */
- int const max_resampled_frames = ceil ((int64_t) data->frames() * _film->target_audio_sample_rate() / _film->audio_frame_rate()) + 32;
-
- shared_ptr<AudioBuffers> resampled (new AudioBuffers (_film->audio_channels(), max_resampled_frames));
-
- /* Resample audio */
- int const resampled_frames = swr_convert (
- _swr_context, (uint8_t **) resampled->data(), max_resampled_frames, (uint8_t const **) data->data(), data->frames()
- );
-
- if (resampled_frames < 0) {
- throw EncodeError (_("could not run sample-rate converter"));
- }
-
- resampled->set_frames (resampled_frames);
-
- /* And point our variables at the resampled audio */
- data = resampled;
- }
-#endif
-
_writer->write (data);
}
lock.unlock ();
for (list<boost::thread *>::iterator i = _threads.begin(); i != _threads.end(); ++i) {
- (*i)->join ();
+ if ((*i)->joinable ()) {
+ (*i)->join ();
+ }
delete *i;
}
+
+ _threads.clear ();
}
void
}
if (remote_backoff > 0) {
- dvdomatic_sleep (remote_backoff);
+ dcpomatic_sleep (remote_backoff);
}
lock.lock ();