+/* -*- c-basic-offset: 8; default-tab-width: 8; -*- */
+
/*
    Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
*/
#include <stdint.h>
#include <boost/lexical_cast.hpp>
extern "C" {
-#include <tiffio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
#include <sndfile.h>
#include "film.h"
-#include "format.h"
-#include "transcoder.h"
-#include "job.h"
#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "ffmpeg_decoder.h"
#include "filter_graph.h"
#include "subtitle.h"
+#include "audio_buffers.h"
#include "i18n.h"
using std::string;
using std::vector;
using std::stringstream;
using std::list;
+using std::min;
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
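+/** A mutex used to serialise calls to those FFmpeg functions (avcodec_open,
+ *  avcodec_close and so on) which are not thread-safe.
+ */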
boost::mutex FFmpegDecoder::_mutex;
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> f, shared_ptr<const FFmpegContent> c, bool video, bool audio, bool subtitles, bool video_sync)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> f, shared_ptr<const FFmpegContent> c, bool video, bool audio, bool subtitles)
: Decoder (f)
- , VideoDecoder (f)
- , AudioDecoder (f)
+ , VideoDecoder (f, c)
+ , AudioDecoder (f, c)
, _ffmpeg_content (c)
, _format_context (0)
, _video_stream (-1)
, _decode_video (video)
, _decode_audio (audio)
, _decode_subtitles (subtitles)
- , _video_sync (video_sync)
{
setup_general ();
setup_video ();
setup_audio ();
setup_subtitle ();
-
- if (!video_sync) {
- _first_video = 0;
- }
}
FFmpegDecoder::~FFmpegDecoder ()
}
_audio_streams.push_back (
- FFmpegAudioStream (stream_name (s), i, s->codec->sample_rate, s->codec->channel_layout)
+ shared_ptr<FFmpegAudioStream> (
+ new FFmpegAudioStream (stream_name (s), i, s->codec->sample_rate, s->codec->channels)
+ )
);
-
+
} else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
- _subtitle_streams.push_back (FFmpegSubtitleStream (stream_name (s), i));
+ _subtitle_streams.push_back (shared_ptr<FFmpegSubtitleStream> (new FFmpegSubtitleStream (stream_name (s), i)));
}
}
}
-bool
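+/** Read one packet from our content and decode it, passing the result to the
+ *  appropriate video, audio or subtitle handler.  At end-of-file, flush out
+ *  any frames which are still buffered inside the codecs.
+ */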
+void
FFmpegDecoder::pass ()
{
int r = av_read_frame (_format_context, &_packet);
-
+
if (r < 0) {
if (r != AVERROR_EOF) {
/* Maybe we should fail here, but for now we'll just finish off instead */
char buf[256];
av_strerror (r, buf, sizeof(buf));
- _film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
}
-
+
/* Get any remaining frames */
_packet.data = 0;
_packet.size = 0;
-
+
/* XXX: should we reset _packet.data and size after each *_decode_* call? */
-
- int frame_finished;
-
+
if (_decode_video) {
- while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
- filter_and_emit_video (_frame);
- }
+ while (decode_video_packet ());
}
if (_ffmpeg_content->audio_stream() && _decode_audio) {
decode_audio_packet ();
}
-
- return true;
+
+ return;
}
avcodec_get_frame_defaults (_frame);
if (_packet.stream_index == _video_stream && _decode_video) {
-
- int frame_finished;
- int const r = avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet);
- if (r >= 0 && frame_finished) {
-
- if (r != _packet.size) {
- _film->log()->log (String::compose (N_("Used only %1 bytes of %2 in packet"), r, _packet.size));
- }
-
- if (_video_sync) {
- out_with_sync ();
- } else {
- filter_and_emit_video (_frame);
- }
- }
-
+ decode_video_packet ();
} else if (_ffmpeg_content->audio_stream() && _packet.stream_index == _ffmpeg_content->audio_stream()->id && _decode_audio) {
decode_audio_packet ();
- } else if (_ffmpeg_content->subtitle_stream() && _packet.stream_index == _ffmpeg_content->subtitle_stream()->id && _decode_subtitles && _first_video) {
+ } else if (_ffmpeg_content->subtitle_stream() && _packet.stream_index == _ffmpeg_content->subtitle_stream()->id && _decode_subtitles) {
		int got_subtitle;
		AVSubtitle sub;
		if (avcodec_decode_subtitle2 (_subtitle_codec_context, &sub, &got_subtitle, &_packet) >= 0 && got_subtitle) {
			/* Sometimes we get an empty AVSubtitle, which is used by some codecs
			   to indicate that the previous subtitle should stop.
			*/
			if (sub.num_rects > 0) {
try {
- emit_subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub)));
+ subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub)));
} catch (...) {
/* some problem with the subtitle; we probably didn't understand it */
}
} else {
- emit_subtitle (shared_ptr<TimedSubtitle> ());
+ subtitle (shared_ptr<TimedSubtitle> ());
}
avsubtitle_free (&sub);
}
}
-
+
av_free_packet (&_packet);
- return false;
}
/** @param data pointer to array of pointers to buffers.
 *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
 */
}
libdcp::Size
-FFmpegDecoder::native_size () const
+FFmpegDecoder::video_size () const
{
return libdcp::Size (_video_codec_context->width, _video_codec_context->height);
}
-PixelFormat
-FFmpegDecoder::pixel_format () const
-{
- return _video_codec_context->pix_fmt;
-}
-
-int
-FFmpegDecoder::time_base_numerator () const
-{
- return _video_codec_context->time_base.num;
-}
-
-int
-FFmpegDecoder::time_base_denominator () const
-{
- return _video_codec_context->time_base.den;
-}
-
-int
-FFmpegDecoder::sample_aspect_ratio_numerator () const
-{
- return _video_codec_context->sample_aspect_ratio.num;
-}
-
-int
-FFmpegDecoder::sample_aspect_ratio_denominator () const
-{
- return _video_codec_context->sample_aspect_ratio.den;
-}
-
string
FFmpegDecoder::stream_name (AVStream* s) const
{
stringstream n;
-
- AVDictionaryEntry const * lang = av_dict_get (s->metadata, N_("language"), 0, 0);
- if (lang) {
- n << lang->value;
- }
-
- AVDictionaryEntry const * title = av_dict_get (s->metadata, N_("title"), 0, 0);
- if (title) {
- if (!n.str().empty()) {
- n << N_(" ");
+
+ if (s->metadata) {
+ AVDictionaryEntry const * lang = av_dict_get (s->metadata, N_("language"), 0, 0);
+ if (lang) {
+ n << lang->value;
+ }
+
+ AVDictionaryEntry const * title = av_dict_get (s->metadata, N_("title"), 0, 0);
+ if (title) {
+ if (!n.str().empty()) {
+ n << N_(" ");
+ }
+ n << title->value;
}
- n << title->value;
}
if (n.str().empty()) {
}
void
-FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
+FFmpegDecoder::seek (Time t)
{
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
-
- shared_ptr<FilterGraph> graph;
-
- list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
- ++i;
- }
-
- if (i == _filter_graphs.end ()) {
- graph.reset (new FilterGraph (_film, this, libdcp::Size (frame->width, frame->height), (AVPixelFormat) frame->format));
- _filter_graphs.push_back (graph);
- _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), frame->width, frame->height, frame->format));
- } else {
- graph = *i;
- }
-
- list<shared_ptr<Image> > images = graph->process (frame);
-
- for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
- emit_video (*i, frame_time ());
- }
-}
-
-bool
-FFmpegDecoder::seek (double p)
-{
- return do_seek (p, false);
-}
-
-bool
-FFmpegDecoder::seek_to_last ()
-{
- /* This AVSEEK_FLAG_BACKWARD in do_seek is a bit of a hack; without it, if we ask for a seek to the same place as last time
- (used when we change decoder parameters and want to re-fetch the frame) we end up going forwards rather than
- staying in the same place.
- */
- return do_seek (last_source_time(), true);
+ do_seek (t, false, false);
}
-bool
-FFmpegDecoder::do_seek (double p, bool backwards)
+void
+FFmpegDecoder::seek_back ()
{
- int64_t const vt = p / av_q2d (_format_context->streams[_video_stream]->time_base);
-
- int const r = av_seek_frame (_format_context, _video_stream, vt, backwards ? AVSEEK_FLAG_BACKWARD : 0);
-
- avcodec_flush_buffers (_video_codec_context);
- if (_subtitle_codec_context) {
- avcodec_flush_buffers (_subtitle_codec_context);
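+	/* Seek back so that the next frame decoded will be the one before the frame
+	   most recently emitted: aim two-and-a-half frames back (to allow for rounding)
+	   and let the accurate pass in do_seek() run forwards from the keyframe.
+	*/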
+ if (next() < (2.5 * TIME_HZ / video_frame_rate())) {
+ return;
}
- return r < 0;
+ do_seek (next() - 2.5 * TIME_HZ / video_frame_rate(), true, true);
}
void
-FFmpegDecoder::out_with_sync ()
+FFmpegDecoder::seek_forward ()
{
- /* Where we are in the output, in seconds */
- double const out_pts_seconds = video_frame() / video_frame_rate();
-
- /* Where we are in the source, in seconds */
- double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base)
- * av_frame_get_best_effort_timestamp(_frame);
-
- _film->log()->log (
- String::compose (N_("Source video frame ready; source at %1, output at %2"), source_pts_seconds, out_pts_seconds),
- Log::VERBOSE
- );
-
- if (!_first_video) {
- _first_video = source_pts_seconds;
- }
-
- /* Difference between where we are and where we should be */
- double const delta = source_pts_seconds - _first_video.get() - out_pts_seconds;
- double const one_frame = 1 / video_frame_rate();
-
- /* Insert frames if required to get out_pts_seconds up to pts_seconds */
- if (delta > one_frame) {
- int const extra = rint (delta / one_frame);
- for (int i = 0; i < extra; ++i) {
- repeat_last_video (frame_time ());
- _film->log()->log (
- String::compose (
- N_("Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)"),
- out_pts_seconds, video_frame(), source_pts_seconds, video_frame_rate()
- )
- );
- }
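+	/* Aim half a frame before the next emission point; do_seek()'s accurate pass
+	   will then stop on the first frame past it, i.e. the next frame.
+	*/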
+ if (next() >= (_ffmpeg_content->length() - 0.5 * TIME_HZ / video_frame_rate())) {
+ return;
}
- if (delta > -one_frame) {
- /* Process this frame */
- filter_and_emit_video (_frame);
- } else {
- /* Otherwise we are omitting a frame to keep things right */
- _film->log()->log (String::compose (N_("Frame removed at %1s"), out_pts_seconds));
- }
+ do_seek (next() - 0.5 * TIME_HZ / video_frame_rate(), true, true);
}
void
-FFmpegDecoder::film_changed (Film::Property p)
+FFmpegDecoder::do_seek (Time t, bool backwards, bool accurate)
{
- switch (p) {
- case Film::CROP:
- case Film::FILTERS:
- {
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
- _filter_graphs.clear ();
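+	/* t is in our internal Time units (TIME_HZ per second); convert it to the
+	   video stream's time_base units, which is what av_seek_frame() wants.
+	*/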
+ int64_t const vt = t / (av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ);
+ av_seek_frame (_format_context, _video_stream, vt, backwards ? AVSEEK_FLAG_BACKWARD : 0);
+
+ avcodec_flush_buffers (_video_codec_context);
+ if (_subtitle_codec_context) {
+ avcodec_flush_buffers (_subtitle_codec_context);
}
- break;
- default:
- break;
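+	/* av_seek_frame() only gets us to the nearest keyframe, so for an accurate
+	   seek we must decode forwards until the frame timestamps pass the target.
+	*/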
+ if (accurate) {
+ while (1) {
+ int r = av_read_frame (_format_context, &_packet);
+ if (r < 0) {
+ return;
+ }
+
+ avcodec_get_frame_defaults (_frame);
+
+ if (_packet.stream_index == _video_stream) {
+ int finished = 0;
+ int const r = avcodec_decode_video2 (_video_codec_context, _frame, &finished, &_packet);
+ if (r >= 0 && finished) {
+ int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
+				if (bet > vt) {
+					/* We have decoded past the seek point, so stop; free
+					   the packet first, as the normal free at the bottom
+					   of the loop is skipped by the break.
+					*/
+					av_free_packet (&_packet);
+					break;
+				}
+ }
+ }
+
+ av_free_packet (&_packet);
+ }
}
}
/** @return Length (in video frames) according to our content's header */
return (double(_format_context->duration) / AV_TIME_BASE) * video_frame_rate();
}
-double
-FFmpegDecoder::frame_time () const
-{
- return av_frame_get_best_effort_timestamp(_frame) * av_q2d (_format_context->streams[_video_stream]->time_base);
-}
-
void
FFmpegDecoder::decode_audio_packet ()
{
	/* Audio packets can contain multiple frames, so we may have to call the
	   decoder several times, walking through the packet's data as frames are
	   consumed.
	*/
	AVPacket copy_packet = _packet;

	while (copy_packet.size > 0) {

		int frame_finished;
		int const decode_result = avcodec_decode_audio4 (_audio_codec_context, _frame, &frame_finished, &copy_packet);
+		if (decode_result < 0) {
+			/* The packet could not be decoded; give up rather than looping
+			   forever over the same bytes.
+			*/
+			return;
+		}
- if (decode_result >= 0 && frame_finished) {
-
- /* Where we are in the source, in seconds */
- double const source_pts_seconds = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base)
- * av_frame_get_best_effort_timestamp(_frame);
-
- /* We only decode audio if we've had our first video packet through, and if it
- was before this packet. Until then audio is thrown away.
- */
+ if (decode_result >= 0) {
+ if (frame_finished) {
- if ((_first_video && _first_video.get() <= source_pts_seconds) || !_decode_video) {
-
- if (!_first_audio && _decode_video) {
- _first_audio = source_pts_seconds;
-
- /* This is our first audio frame, and if we've arrived here we must have had our
- first video frame. Push some silence to make up any gap between our first
- video frame and our first audio.
- */
-
- /* frames of silence that we must push */
- int const s = rint ((_first_audio.get() - _first_video.get()) * _ffmpeg_content->audio_frame_rate ());
-
- _film->log()->log (
- String::compose (
- N_("First video at %1, first audio at %2, pushing %3 audio frames of silence for %4 channels (%5 bytes per sample)"),
- _first_video.get(), _first_audio.get(), s, _ffmpeg_content->audio_channels(), bytes_per_audio_sample()
- )
- );
-
- if (s) {
- shared_ptr<AudioBuffers> audio (new AudioBuffers (_ffmpeg_content->audio_channels(), s));
- audio->make_silent ();
- Audio (audio);
- }
- }
+ /* Where we are in the source, in seconds */
+ double const source_pts_seconds = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base)
+ * av_frame_get_best_effort_timestamp(_frame);
int const data_size = av_samples_get_buffer_size (
0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1
);
assert (_audio_codec_context->channels == _ffmpeg_content->audio_channels());
- Audio (deinterleave_audio (_frame->data, data_size));
+ audio (deinterleave_audio (_frame->data, data_size), source_pts_seconds);
}
- }
-
- if (decode_result >= 0) {
+
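+		/* Advance over the bytes that the decoder consumed; anything left in
+		   copy_packet is a further audio frame from the same packet.
+		*/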
copy_packet.data += decode_result;
copy_packet.size -= decode_result;
}
}
}
+
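+/** Decode the video packet in _packet, if it completes a frame, and emit the
+ *  resulting image(s).
+ *  @return true if a frame was decoded, false otherwise (and hence false when
+ *  the codec has been completely flushed).
+ */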
+bool
+FFmpegDecoder::decode_video_packet ()
+{
+ int frame_finished;
+ if (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
+ return false;
+ }
+
+ boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+
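+	/* Look for a FilterGraph which can process this frame's size and pixel format;
+	   a graph is fixed to one input format, so we keep one for each combination
+	   that we have seen.
+	*/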
+ shared_ptr<FilterGraph> graph;
+
+ list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
+ while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
+ ++i;
+ }
+
+ if (i == _filter_graphs.end ()) {
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+
+ graph.reset (new FilterGraph (_ffmpeg_content, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+ _filter_graphs.push_back (graph);
+
+ film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
+ } else {
+ graph = *i;
+ }
+
+ list<shared_ptr<Image> > images = graph->process (_frame);
+
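+	/* Filters which cannot run inside the FilterGraph come back from
+	   Filter::ffmpeg_strings() as a post-processing string to be applied
+	   to each image individually.
+	*/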
+ string post_process = Filter::ffmpeg_strings (_ffmpeg_content->filters()).second;
+
+ for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
+
+ shared_ptr<Image> image = *i;
+ if (!post_process.empty ()) {
+ image = image->post_process (post_process, true);
+ }
+
+ int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
+ if (bet != AV_NOPTS_VALUE) {
+ /* XXX: may need to insert extra frames / remove frames here ...
+ (as per old Matcher)
+ */
+ Time const t = bet * av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ;
+ video (image, false, t);
+ } else {
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ film->log()->log ("Dropping frame without PTS");
+ }
+ }
+
+ return true;
+}
+
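+/** @return the time of the next thing that this decoder will emit, taking into
+ *  account which of video and audio are being decoded.
+ */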
+Time
+FFmpegDecoder::next () const
+{
+ if (_decode_video && _decode_audio && _audio_codec_context) {
+ return min (_next_video, _next_audio);
+ }
+
+ if (_decode_audio && _audio_codec_context) {
+ return _next_audio;
+ }
+
+ return _next_video;
+}
+
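+/** @return true when everything that we are decoding has been fully decoded */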
+bool
+FFmpegDecoder::done () const
+{
+ return (!_decode_audio || !_audio_codec_context || audio_done()) && (!_decode_video || video_done());
+}
+