/*
    Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include "exceptions.h"
#include "image.h"
#include "film.h"
#include "log.h"
#include "ffmpeg_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"

#include "i18n.h"

#define LOG_GENERAL(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);
#define LOG_ERROR(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_ERROR);
#define LOG_WARNING_NC(...) _video_content->film()->log()->log (__VA_ARGS__, Log::TYPE_WARNING);
#define LOG_WARNING(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_WARNING);

using std::min;
using std::list;
using std::pair;
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
	: VideoDecoder (c)
	, AudioDecoder (c)
	, SubtitleDecoder (c)
	, FFmpeg (c)
	, _log (log)
{
	/* Audio and video frame PTS values may not start with 0.  We want
	   to fiddle them so that:

	   1. One of them starts at time 0.
	   2. The first video PTS value ends up on a frame boundary.

	   Then we remove big initial gaps in PTS and we allow our
	   insertion of black frames to work.

	   We will do pts_to_use = pts_from_ffmpeg + pts_offset;
	*/

	bool const have_video = c->first_video();
	bool const have_audio = c->audio_stream () && c->audio_stream()->first_audio;

	/* First, make one of them start at 0 */

	if (have_audio && have_video) {
		_pts_offset = - min (c->first_video().get(), c->audio_stream()->first_audio.get());
	} else if (have_video) {
		_pts_offset = - c->first_video().get();
	} else if (have_audio) {
		_pts_offset = - c->audio_stream()->first_audio.get();
	}

	/* If _pts_offset is positive we would be pushing things from a -ve PTS to be played.
	   I don't think we ever want to do that, as it seems things at -ve PTS are not meant
	   to be seen (use for alignment bars etc.); see mantis #418.
	*/
	if (_pts_offset > ContentTime ()) {
		_pts_offset = ContentTime ();
	}

	/* Now adjust both so that the video pts starts on a frame */
	if (have_video && have_audio) {
		ContentTime first_video = c->first_video().get() + _pts_offset;
		ContentTime const old_first_video = first_video;
		_pts_offset += first_video.round_up (c->video_frame_rate ()) - old_first_video;
	}
}

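/* Illustration of the offset calculation above, with made-up numbers: if the first video PTS
   is 0.10s, the first audio PTS is 0.06s and the video is 24fps, _pts_offset starts at -0.06s.
   Video would then start at 0.04s, which is not on a 1/24s frame boundary, so the offset is
   nudged by (1/24 - 0.04) to -0.058333s; video then lands exactly on a frame boundary
   (0.041667s) and audio starts at 0.001667s.
*/
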
void
FFmpegDecoder::flush ()
{
	/* Get any remaining frames */

	_packet.data = 0;
	_packet.size = 0;

	/* XXX: should we reset _packet.data and size after each *_decode_* call? */

	while (decode_video_packet ()) {}

	if (_ffmpeg_content->audio_stream()) {
		decode_audio_packet ();
		AudioDecoder::flush ();
	}
}

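/** Read one packet from the content and decode it with the appropriate decoder.
 *  @return true if the end of the content (or an unrecoverable read error) has been reached, otherwise false.
 */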
bool
FFmpegDecoder::pass ()
{
	int r = av_read_frame (_format_context, &_packet);

	/* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
	   has pretty-much succeeded (and hence generated data which should be processed).
	   Hence it makes sense to continue here in that case.
	*/
	if (r < 0 && r != AVERROR_INVALIDDATA) {
		if (r != AVERROR_EOF) {
			/* Maybe we should fail here, but for now we'll just finish off instead */
			char buf[256];
			av_strerror (r, buf, sizeof(buf));
			LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);
		}

		flush ();
		return true;
	}

	int const si = _packet.stream_index;

	if (si == _video_stream && !_ignore_video) {
		decode_video_packet ();
	} else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, si)) {
		decode_audio_packet ();
	} else if (_ffmpeg_content->subtitle_stream() && _ffmpeg_content->subtitle_stream()->uses_index (_format_context, si)) {
		decode_subtitle_packet ();
	}

	av_free_packet (&_packet);
	return false;
}

/** @param data pointer to array of pointers to buffers.
 *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (uint8_t** data, int size)
{
	DCPOMATIC_ASSERT (_ffmpeg_content->audio_channels());
	DCPOMATIC_ASSERT (bytes_per_audio_sample());

	/* Deinterleave and convert to float */

	/* total_samples and frames will be rounded down here, so if there are stray samples at the end
	   of the block that do not form a complete sample or frame they will be dropped.
	*/
	int const total_samples = size / bytes_per_audio_sample();
	int const frames = total_samples / _ffmpeg_content->audio_channels();
	shared_ptr<AudioBuffers> audio (new AudioBuffers (_ffmpeg_content->audio_channels(), frames));
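
	/* Interleaved formats (U8, S16, S32, FLT) arrive as one sample per channel, frame after
	   frame, so they are copied one sample at a time, advancing the channel index and wrapping
	   back to channel 0 at the end of each frame.  Planar formats (S16P, FLTP) already have one
	   buffer per channel, so each channel can be copied in one go.
	*/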
	switch (audio_sample_format()) {
	case AV_SAMPLE_FMT_U8:
	{
		uint8_t* p = reinterpret_cast<uint8_t *> (data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			audio->data(channel)[sample] = float(*p++) / (1 << 23);
			++channel;
			if (channel == _ffmpeg_content->audio_channels()) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S16:
	{
		int16_t* p = reinterpret_cast<int16_t *> (data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			audio->data(channel)[sample] = float(*p++) / (1 << 15);
			++channel;
			if (channel == _ffmpeg_content->audio_channels()) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S16P:
	{
		int16_t** p = reinterpret_cast<int16_t **> (data);
		for (int i = 0; i < _ffmpeg_content->audio_channels(); ++i) {
			for (int j = 0; j < frames; ++j) {
				audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S32:
	{
		int32_t* p = reinterpret_cast<int32_t *> (data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			audio->data(channel)[sample] = static_cast<float>(*p++) / (1 << 31);
			++channel;
			if (channel == _ffmpeg_content->audio_channels()) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_FLT:
	{
		float* p = reinterpret_cast<float*> (data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			audio->data(channel)[sample] = *p++;
			++channel;
			if (channel == _ffmpeg_content->audio_channels()) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_FLTP:
	{
		float** p = reinterpret_cast<float**> (data);
		for (int i = 0; i < _ffmpeg_content->audio_channels(); ++i) {
			memcpy (audio->data(i), p[i], frames * sizeof(float));
		}
	}
	break;

	default:
		throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format())));
	}

	return audio;
}

AVSampleFormat
FFmpegDecoder::audio_sample_format () const
{
	if (!_ffmpeg_content->audio_stream()) {
		return (AVSampleFormat) 0;
	}

	return audio_codec_context()->sample_fmt;
}

int
FFmpegDecoder::bytes_per_audio_sample () const
{
	return av_get_bytes_per_sample (audio_sample_format ());
}

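/** Seek to a position in the content.  The base-class decoders are seeked first, then the
 *  FFmpeg demuxer; finally the codec buffers are flushed so that no stale frames survive the seek.
 *  @param time Time to seek to.
 *  @param accurate true to seek accurately, in which case some pre-roll is applied before the target.
 */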
void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
	VideoDecoder::seek (time, accurate);
	AudioDecoder::seek (time, accurate);
	SubtitleDecoder::seek (time, accurate);

	/* If we are doing an `accurate' seek, we need to use pre-roll, as
	   we don't really know what the seek will give us.
	*/

	ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
	time -= pre_roll;

	/* XXX: it seems debatable whether PTS should be used here...
	   http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
	*/

	ContentTime const u = time - _pts_offset;
	av_seek_frame (_format_context, _video_stream, u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base), 0);

	avcodec_flush_buffers (video_codec_context());
	if (audio_codec_context ()) {
		avcodec_flush_buffers (audio_codec_context ());
	}
	if (subtitle_codec_context ()) {
		avcodec_flush_buffers (subtitle_codec_context ());
	}
}

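/** Decode the audio in the current _packet, passing any complete frames on to the audio decoder. */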
void
FFmpegDecoder::decode_audio_packet ()
{
	/* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
	   several times.
	*/

	AVPacket copy_packet = _packet;

	while (copy_packet.size > 0) {

		int frame_finished;
		int decode_result = avcodec_decode_audio4 (audio_codec_context(), _frame, &frame_finished, &copy_packet);
		if (decode_result < 0) {
			/* avcodec_decode_audio4 can sometimes return an error even though it has decoded
			   some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
			   if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
			   even in the face of such an error, so I think we should too.

			   Returning from the method here caused mantis #352.
			*/
			LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

			/* Fudge decode_result so that we come out of the while loop when
			   we've processed this data.
			*/
			decode_result = copy_packet.size;
		}

		if (frame_finished) {
			ContentTime const ct = ContentTime::from_seconds (
				av_frame_get_best_effort_timestamp (_frame) *
				av_q2d (_ffmpeg_content->audio_stream()->stream (_format_context)->time_base))
				+ _pts_offset;

			int const data_size = av_samples_get_buffer_size (
				0, audio_codec_context()->channels, _frame->nb_samples, audio_sample_format (), 1
				);

			audio (deinterleave_audio (_frame->data, data_size), ct);
		}

		copy_packet.data += decode_result;
		copy_packet.size -= decode_result;
	}
}

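/** Decode the video in the current _packet, passing any decoded images on to the video decoder.
 *  @return true if a complete frame was decoded, otherwise false.
 */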
bool
FFmpegDecoder::decode_video_packet ()
{
	int frame_finished;
	if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
		return false;
	}

	boost::mutex::scoped_lock lm (_filter_graphs_mutex);

	shared_ptr<FilterGraph> graph;

	list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
	while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
		++i;
	}

	if (i == _filter_graphs.end ()) {
		graph.reset (new FilterGraph (_ffmpeg_content, dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
		_filter_graphs.push_back (graph);
		LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
	} else {
		graph = *i;
	}

	list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

	for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

		shared_ptr<Image> image = i->first;

		if (i->second != AV_NOPTS_VALUE) {
			double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds ();
			video (
				shared_ptr<ImageProxy> (new RawImageProxy (image)),
				rint (pts * _ffmpeg_content->video_frame_rate ())
				);
		} else {
			LOG_WARNING_NC ("Dropping frame without PTS");
		}
	}

	return true;
}

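/** Decode the subtitle in the current _packet.  Only single-rect bitmap subtitles are supported;
 *  the palette-indexed image is converted to RGBA and passed on with its position expressed as a
 *  proportion of the video frame.
 */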
void
FFmpegDecoder::decode_subtitle_packet ()
{
	int got_subtitle;
	AVSubtitle sub;
	if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
		return;
	}

	/* Subtitle PTS (within the source, not taking into account any of the
	   source that we may have chopped off for the DCP)
	*/
	FFmpegSubtitlePeriod period = subtitle_period (sub);
	period.from += _pts_offset;
	if (period.to) {
		period.to = period.to.get() + _pts_offset;
	}

	if (sub.num_rects <= 0) {
		/* Sometimes we get an empty AVSubtitle, which is used by some codecs to
		   indicate that the previous subtitle should stop.  Emit the pending one.
		*/
		if (_pending_subtitle_from && _pending_subtitle_image && _pending_subtitle_rect) {
			image_subtitle (
				ContentTimePeriod (_pending_subtitle_from.get(), period.from),
				_pending_subtitle_image,
				_pending_subtitle_rect.get ()
				);
			_pending_subtitle_from = optional<ContentTime> ();
			_pending_subtitle_image.reset ();
			_pending_subtitle_rect = optional<dcpomatic::Rect<double> > ();
		}
		return;
	} else if (sub.num_rects > 1) {
		throw DecodeError (_("multi-part subtitles not yet supported"));
	}

	AVSubtitleRect const * rect = sub.rects[0];

	if (rect->type != SUBTITLE_BITMAP) {
		throw DecodeError (_("non-bitmap subtitles not yet supported"));
	}

	/* Note RGBA is expressed little-endian, so the first byte in the word is R, second
	   G, third B, fourth A.
	*/
	shared_ptr<Image> image (new Image (PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));

	/* Start of the first line in the subtitle */
	uint8_t* sub_p = rect->pict.data[0];
	/* sub_p looks up into a BGRA palette which is here
	   (i.e. first byte B, second G, third R, fourth A)
	*/
	uint32_t const * palette = (uint32_t *) rect->pict.data[1];
	/* Start of the output data */
	uint32_t* out_p = (uint32_t *) image->data()[0];
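
	/* Walk each line of the subtitle: each input byte is an index into the BGRA palette, and
	   the looked-up word has its blue and red bytes swapped to give RGBA (green and alpha stay
	   where they are).
	*/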
	for (int y = 0; y < rect->h; ++y) {
		uint8_t* sub_line_p = sub_p;
		uint32_t* out_line_p = out_p;
		for (int x = 0; x < rect->w; ++x) {
			uint32_t const p = palette[*sub_line_p++];
			*out_line_p++ = ((p & 0xff) << 16) | (p & 0xff00) | ((p & 0xff0000) >> 16) | (p & 0xff000000);
		}
		sub_p += rect->pict.linesize[0];
		out_p += image->stride()[0] / sizeof (uint32_t);
	}
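
	/* Describe the subtitle's position and size as fractions of the video frame size
	   rather than in pixels.
	*/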
	dcp::Size const vs = _ffmpeg_content->video_size ();
	dcpomatic::Rect<double> const scaled_rect (
		static_cast<double> (rect->x) / vs.width,
		static_cast<double> (rect->y) / vs.height,
		static_cast<double> (rect->w) / vs.width,
		static_cast<double> (rect->h) / vs.height
		);

	if (period.to) {
		image_subtitle (ContentTimePeriod (period.from, period.to.get()), image, scaled_rect);
	} else {
		/* We don't know when this subtitle stops, so store it until we find out */
		_pending_subtitle_from = period.from;
		_pending_subtitle_image = image;
		_pending_subtitle_rect = scaled_rect;
	}

	avsubtitle_free (&sub);
}

list<ContentTimePeriod>
FFmpegDecoder::image_subtitles_during (ContentTimePeriod p, bool starting) const
{
	return _ffmpeg_content->subtitles_during (p, starting);
}

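/** Text (non-bitmap) subtitles are not supported by this decoder, so this always returns an empty list. */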
list<ContentTimePeriod>
FFmpegDecoder::text_subtitles_during (ContentTimePeriod, bool) const
{
	return list<ContentTimePeriod> ();
}