X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fdecoder.cc;h=3f4cda6eb5a4345595410fe475314f3d24881bd7;hb=2d5b8cdde08044d323aa7193dfac6c9f8bca7131;hp=a904e085b173131b87090c031879416010a6ab2e;hpb=bb767c7e338414beee132af3e96829c1448e214b;p=dcpomatic.git

diff --git a/src/lib/decoder.cc b/src/lib/decoder.cc
index a904e085b..3f4cda6eb 100644
--- a/src/lib/decoder.cc
+++ b/src/lib/decoder.cc
@@ -21,280 +21,18 @@
  * @brief Parent class for decoders of content.
  */
 
-#include 
-#include 
-extern "C" {
-#include 
-#include 
-#include 
-}
 #include "film.h"
-#include "format.h"
-#include "job.h"
-#include "film_state.h"
-#include "options.h"
-#include "exceptions.h"
-#include "image.h"
-#include "util.h"
-#include "log.h"
 #include "decoder.h"
-#include "filter.h"
-#include "delay_line.h"
-
-using namespace std;
-using namespace boost;
-
-/** @param s FilmState of the Film.
- * @param o Options.
- * @param j Job that we are running within, or 0.
- * @param l Log to use.
- * @param minimal true to do the bare minimum of work; just run through the content. Useful for acquiring
- * accurate frame counts as quickly as possible. This generates no video or audio output.
- * @param ignore_length Ignore the content's claimed length when computing progress.
- */
-Decoder::Decoder (boost::shared_ptr<FilmState> s, boost::shared_ptr<Options> o, Job* j, Log* l, bool minimal, bool ignore_length)
-	: _fs (s)
-	, _opt (o)
-	, _job (j)
-	, _log (l)
-	, _minimal (minimal)
-	, _ignore_length (ignore_length)
-	, _video_frame (0)
-	, _buffer_src_context (0)
-	, _buffer_sink_context (0)
-	, _have_setup_video_filters (false)
-	, _delay_line (0)
-	, _delay_in_bytes (0)
-{
-	if (_opt->decode_video_frequency != 0 && _fs->length == 0) {
-		throw DecodeError ("cannot do a partial decode if length == 0");
-	}
-}
-
-Decoder::~Decoder ()
-{
-	delete _delay_line;
-}
-
-void
-Decoder::process_begin ()
-{
-	/* This assumes 2 bytes per sample */
-	_delay_in_bytes = _fs->audio_delay * _fs->audio_sample_rate * _fs->audio_channels * 2 / 1000;
-	delete _delay_line;
-	_delay_line = new DelayLine (_delay_in_bytes);
-}
-
-void
-Decoder::process_end ()
-{
-	if (_delay_in_bytes < 0) {
-		uint8_t remainder[-_delay_in_bytes];
-		_delay_line->get_remaining (remainder);
-		Audio (remainder, _delay_in_bytes);
-	}
-}
-
-/** Start decoding */
-void
-Decoder::go ()
-{
-	process_begin ();
-
-	if (_job && _ignore_length) {
-		_job->set_progress_unknown ();
-	}
-
-	while (pass () == false) {
-		if (_job && !_ignore_length) {
-			_job->set_progress (float (_video_frame) / decoding_frames ());
-		}
-	}
-
-	process_end ();
-}
-
-/** @return Number of frames that we will be decoding */
-int
-Decoder::decoding_frames () const
-{
-	if (_opt->num_frames > 0) {
-		return _opt->num_frames;
-	}
-
-	return _fs->length;
-}
-
-/** Run one pass. This may or may not generate any actual video / audio data;
- * some decoders may require several passes to generate a single frame.
- * @return true if we have finished processing all data; otherwise false.
- */
-bool
-Decoder::pass ()
-{
-	if (!_have_setup_video_filters) {
-		setup_video_filters ();
-		_have_setup_video_filters = true;
-	}
-
-	if (_opt->num_frames != 0 && _video_frame >= _opt->num_frames) {
-		return true;
-	}
-
-	return do_pass ();
-}
-
-/** Called by subclasses to tell the world that some audio data is ready */
-void
-Decoder::process_audio (uint8_t* data, int channels, int size)
-{
-	if (_fs->audio_gain != 0) {
-		float const linear_gain = pow (10, _fs->audio_gain / 20);
-		uint8_t* p = data;
-		int const samples = size / 2;
-		switch (_fs->audio_sample_format) {
-		case AV_SAMPLE_FMT_S16:
-			for (int i = 0; i < samples; ++i) {
-				/* XXX: assumes little-endian; also we should probably be dithering here */
-				int const ou = p[0] | (p[1] << 8);
-				int const os = ou >= 0x8000 ? (- 0x10000 + ou) : ou;
-				int const gs = int (os * linear_gain);
-				int const gu = gs > 0 ? gs : (0x10000 + gs);
-				p[0] = gu & 0xff;
-				p[1] = (gu & 0xff00) >> 8;
-				p += 2;
-			}
-			break;
-		default:
-			assert (false);
-		}
-	}
+#include "i18n.h"
 
-	int available = _delay_line->feed (data, size);
-	Audio (data, available);
-}
+using boost::shared_ptr;
 
-/** Called by subclasses to tell the world that some video data is ready.
- * We do some post-processing / filtering then emit it for listeners.
- * @param frame to decode; caller manages memory.
+/** @param f Film.
+ * @param o Decode options.
  */
-void
-Decoder::process_video (AVFrame* frame)
-{
-	if (_minimal) {
-		++_video_frame;
-		return;
-	}
-
-	/* Use FilmState::length here as our one may be wrong */
-
-	int gap = 0;
-	if (_opt->decode_video_frequency != 0) {
-		gap = _fs->length / _opt->decode_video_frequency;
-	}
-
-	if (_opt->decode_video_frequency != 0 && gap != 0 && (_video_frame % gap) != 0) {
-		++_video_frame;
-		return;
-	}
-
-	if (av_vsrc_buffer_add_frame (_buffer_src_context, frame, 0) < 0) {
-		throw DecodeError ("could not push buffer into filter chain.");
-	}
-
-	while (avfilter_poll_frame (_buffer_sink_context->inputs[0])) {
-		AVFilterBufferRef* filter_buffer;
-		if (av_buffersink_get_buffer_ref (_buffer_sink_context, &filter_buffer, 0) >= 0) {
-
-			/* This takes ownership of filter_buffer */
-			shared_ptr<Image> image (new FilterBufferImage ((PixelFormat) frame->format, filter_buffer));
-
-			if (_opt->black_after > 0 && _video_frame > _opt->black_after) {
-				image->make_black ();
-			}
-
-			Video (image, _video_frame);
-			++_video_frame;
-		}
-	}
-}
-
-void
-Decoder::setup_video_filters ()
+Decoder::Decoder (shared_ptr<Film> f)
+	: _film (f)
 {
-	stringstream fs;
-	Size size_after_crop;
-
-	if (_opt->apply_crop) {
-		size_after_crop = _fs->cropped_size (native_size ());
-		fs << crop_string (Position (_fs->left_crop, _fs->top_crop), size_after_crop);
-	} else {
-		size_after_crop = native_size ();
-		fs << crop_string (Position (0, 0), size_after_crop);
-	}
-
-	string filters = Filter::ffmpeg_strings (_fs->filters).first;
-	if (!filters.empty ()) {
-		filters += ",";
-	}
-
-	filters += fs.str ();
-
-	avfilter_register_all ();
-
-	AVFilterGraph* graph = avfilter_graph_alloc();
-	if (graph == 0) {
-		throw DecodeError ("Could not create filter graph.");
-	}
-
-	AVFilter* buffer_src = avfilter_get_by_name("buffer");
-	if (buffer_src == 0) {
-		throw DecodeError ("Could not create buffer src filter");
-	}
-
-	AVFilter* buffer_sink = avfilter_get_by_name("buffersink");
-	if (buffer_sink == 0) {
-		throw DecodeError ("Could not create buffer sink filter");
-	}
-
-	stringstream a;
-	a << native_size().width << ":"
-	  << native_size().height << ":"
-	  << pixel_format() << ":"
-	  << time_base_numerator() << ":"
-	  << time_base_denominator() << ":"
-	  << sample_aspect_ratio_numerator() << ":"
-	  << sample_aspect_ratio_denominator();
-
-	int r;
-	if ((r = avfilter_graph_create_filter (&_buffer_src_context, buffer_src, "in", a.str().c_str(), 0, graph)) < 0) {
-		throw DecodeError ("could not create buffer source");
-	}
-
-	enum PixelFormat pixel_formats[] = { pixel_format(), PIX_FMT_NONE };
-	if (avfilter_graph_create_filter (&_buffer_sink_context, buffer_sink, "out", 0, pixel_formats, graph) < 0) {
-		throw DecodeError ("could not create buffer sink.");
-	}
-
-	AVFilterInOut* outputs = avfilter_inout_alloc ();
-	outputs->name = av_strdup("in");
-	outputs->filter_ctx = _buffer_src_context;
-	outputs->pad_idx = 0;
-	outputs->next = 0;
-
-	AVFilterInOut* inputs = avfilter_inout_alloc ();
-	inputs->name = av_strdup("out");
-	inputs->filter_ctx = _buffer_sink_context;
-	inputs->pad_idx = 0;
-	inputs->next = 0;
-
-	_log->log ("Using filter chain `" + filters + "'");
-	if (avfilter_graph_parse (graph, filters.c_str(), &inputs, &outputs, 0) < 0) {
-		throw DecodeError ("could not set up filter graph.");
-	}
-
-	if (avfilter_graph_config (graph, 0) < 0) {
-		throw DecodeError ("could not configure filter graph.");
-	}
 }
-
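
A note on the removed code: the delay handling that process_begin() set up is just a unit conversion, as its comment hints. An audio delay in milliseconds becomes a byte count at the stream's sample rate and channel count, with 2 bytes per 16-bit sample; a negative delay produces a negative byte count, which process_end() flushed out of the delay line as a final block of audio. A minimal standalone sketch of that arithmetic (the function name is hypothetical, not from the patch; it assumes interleaved 16-bit samples, as the original comment does):

#include <cstdio>

/* Hypothetical helper: convert an audio delay in milliseconds to a byte
   count for a FIFO-style delay line, assuming interleaved 16-bit samples
   (2 bytes each). */
static long delay_in_bytes (int delay_ms, int sample_rate, int channels)
{
	return static_cast<long> (delay_ms) * sample_rate * channels * 2 / 1000;
}

int main ()
{
	/* e.g. a 96 ms delay on 48 kHz stereo comes out at 18432 bytes */
	std::printf ("%ld\n", delay_in_bytes (96, 48000, 2));
	return 0;
}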
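The sample loop in the removed process_audio() converts a gain in decibels to a linear factor (10^(dB/20)) and applies it to each little-endian signed 16-bit sample, reassembling and re-splitting the bytes by hand. Here is a sketch of the same math, with the clamping that the original's XXX comment suggests is missing (the function name is hypothetical; like the original, it assumes little-endian S16 data and an even byte count):

#include <algorithm>
#include <cmath>
#include <cstdint>

/* Hypothetical helper: apply a gain in dB to interleaved little-endian
   signed 16-bit PCM.  `size` is the buffer length in bytes, so there are
   size / 2 samples. */
static void apply_gain_s16le (std::uint8_t* data, int size, float gain_db)
{
	float const linear_gain = std::pow (10.0f, gain_db / 20);
	for (int i = 0; i < size; i += 2) {
		int const u = data[i] | (data[i + 1] << 8);    /* unsigned 0..65535 */
		int const s = u >= 0x8000 ? u - 0x10000 : u;   /* sign-extend to -32768..32767 */
		int const g = std::clamp (int (s * linear_gain), -32768, 32767);
		data[i] = g & 0xff;
		data[i + 1] = (g >> 8) & 0xff;
	}
}

std::clamp needs C++17; the removed code instead wrapped the signed result back into unsigned range by hand, without clamping, so large gains could wrap rather than saturate.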
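The stringstream in the removed setup_video_filters() builds the initialisation string that the old libavfilter "buffer" source expected: width, height, pixel format, then the time base and sample aspect ratio as numerator/denominator pairs, all colon-separated. A small sketch of just that string assembly (standalone, with plain int parameters standing in for the decoder's virtual accessors such as native_size() and pixel_format()):

#include <sstream>
#include <string>

/* Hypothetical helper: build the old-style libavfilter "buffer" source
   parameter string, width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den. */
static std::string buffer_src_args (int width, int height, int pix_fmt,
				    int tb_num, int tb_den,
				    int sar_num, int sar_den)
{
	std::ostringstream a;
	a << width << ":" << height << ":" << pix_fmt << ":"
	  << tb_num << ":" << tb_den << ":"
	  << sar_num << ":" << sar_den;
	return a.str ();
}

For example, 1920x1080 video in pixel format 0 (PIX_FMT_YUV420P) at 24 fps (time base 1/24) with square pixels gives "1920:1080:0:1:24:1:1".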