/*
    Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/
/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */
#include "exceptions.h"
#include "ffmpeg_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
#include <vector>
#include <iostream>
#define LOG_GENERAL(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);
#define LOG_ERROR(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_ERROR);
#define LOG_WARNING_NC(...) _video_content->film()->log()->log (__VA_ARGS__, Log::TYPE_WARNING);
#define LOG_WARNING(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_WARNING);
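/* Typical use (argument names are illustrative): LOG_GENERAL (N_("New graph for %1x%2"), width, height)
   runs its arguments through String::compose, substituting %1, %2, ...; the _NC ("no compose") variant
   takes an already-formed string, e.g. LOG_WARNING_NC ("Dropping frame without PTS").
*/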
using std::cout;
using std::list;
using std::max;
using std::pair;
using std::vector;
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
	/* Audio and video frame PTS values may not start with 0.  We want
	   to fiddle them so that:

	   1.  One of them starts at time 0.
	   2.  The first video PTS value ends up on a frame boundary.

	   Then we remove big initial gaps in PTS and we allow our
	   insertion of black frames to work.

	   audio_pts_to_use = audio_pts_from_ffmpeg + pts_offset;
	   video_pts_to_use = video_pts_from_ffmpeg + pts_offset;
	*/

	/* First, make one of them start at 0 */
	vector<shared_ptr<FFmpegAudioStream> > streams = c->ffmpeg_audio_streams ();

	_pts_offset = ContentTime::min ();

	if (c->first_video ()) {
		_pts_offset = - c->first_video().get ();
	}

	BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, streams) {
		if (i->first_audio) {
			_pts_offset = max (_pts_offset, - i->first_audio.get ());
		}
	}

	/* If _pts_offset is positive we would be pushing things from a -ve PTS to be played.
	   I don't think we ever want to do that, as it seems things at -ve PTS are not meant
	   to be seen (use for alignment bars etc.); see mantis #418.
	*/
	if (_pts_offset > ContentTime ()) {
		_pts_offset = ContentTime ();
	}

	/* Now adjust so that the video pts starts on a frame */
	if (c->first_video ()) {
		ContentTime first_video = c->first_video().get() + _pts_offset;
		ContentTime const old_first_video = first_video;
		_pts_offset += first_video.round_up (c->video_frame_rate ()) - old_first_video;
	}
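	/* Worked example (illustrative numbers): if the first video PTS is 0.6s, the first audio
	   PTS 0.597s and the video frame rate 25fps, _pts_offset starts as -0.597s (the larger of
	   -0.6 and -0.597), is left alone by the clamp because it is negative, and is then nudged
	   so that 0.6s + _pts_offset lands on a multiple of 1/25s.
	*/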
FFmpegDecoder::flush ()
	/* Get any remaining frames */

	/* XXX: should we reset _packet.data and size after each *_decode_* call? */

	while (decode_video_packet ()) {}

	decode_audio_packet ();
	AudioDecoder::flush ();
FFmpegDecoder::pass ()
	int r = av_read_frame (_format_context, &_packet);

	/* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
	   has pretty-much succeeded (and hence generated data which should be processed).
	   Hence it makes sense to continue here in that case.
	*/
	if (r < 0 && r != AVERROR_INVALIDDATA) {
		if (r != AVERROR_EOF) {
			/* Maybe we should fail here, but for now we'll just finish off instead */
			char buf[256];
			av_strerror (r, buf, sizeof(buf));
			LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);
		}

		flush ();
		return true;
	}
	int const si = _packet.stream_index;
	shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

	if (si == _video_stream && !_ignore_video) {
		decode_video_packet ();
	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
		decode_subtitle_packet ();
	} else {
		decode_audio_packet ();
	}

	av_free_packet (&_packet);
	return false;
/** @param data pointer to array of pointers to buffers.
 *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t** data, int size)
	DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

	/* Deinterleave and convert to float */

	/* total_samples and frames will be rounded down here, so if there are stray samples at the end
	   of the block that do not form a complete sample or frame they will be dropped.
	*/
	int const total_samples = size / bytes_per_audio_sample (stream);
	int const frames = total_samples / stream->channels();
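	/* For example (illustrative numbers): a 4096-byte block of interleaved 16-bit stereo
	   (2 bytes per sample) gives total_samples = 2048 and frames = 1024.
	*/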
	shared_ptr<AudioBuffers> audio (new AudioBuffers (stream->channels(), frames));
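	/* Interleaved formats (e.g. AV_SAMPLE_FMT_S16) keep all channels in data[0] as
	   L0 R0 L1 R1 ..., so the interleaved cases below walk one pointer and move to the next
	   channel on every sample; planar formats (e.g. AV_SAMPLE_FMT_FLTP) keep each channel in
	   its own buffer (data[0] = L0 L1 ..., data[1] = R0 R1 ...), so those cases copy per channel.
	*/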
	switch (audio_sample_format (stream)) {
	case AV_SAMPLE_FMT_U8:
		uint8_t* p = reinterpret_cast<uint8_t *> (data[0]);
		for (int i = 0; i < total_samples; ++i) {
			/* unsigned 8-bit samples are centred on 128 */
			audio->data(channel)[sample] = (float(*p++) - 128.0f) / 128.0f;
			if (channel == stream->channels()) {

	case AV_SAMPLE_FMT_S16:
		int16_t* p = reinterpret_cast<int16_t *> (data[0]);
		for (int i = 0; i < total_samples; ++i) {
			audio->data(channel)[sample] = float(*p++) / (1 << 15);
			if (channel == stream->channels()) {

	case AV_SAMPLE_FMT_S16P:
		int16_t** p = reinterpret_cast<int16_t **> (data);
		for (int i = 0; i < stream->channels(); ++i) {
			for (int j = 0; j < frames; ++j) {
				audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);

	case AV_SAMPLE_FMT_S32:
		int32_t* p = reinterpret_cast<int32_t *> (data[0]);
		for (int i = 0; i < total_samples; ++i) {
			/* divide by 2^31 as a float; (1 << 31) overflows a 32-bit int */
			audio->data(channel)[sample] = static_cast<float>(*p++) / 2147483648.0f;
			if (channel == stream->channels()) {

	case AV_SAMPLE_FMT_FLT:
		float* p = reinterpret_cast<float*> (data[0]);
		for (int i = 0; i < total_samples; ++i) {
			audio->data(channel)[sample] = *p++;
			if (channel == stream->channels()) {

	case AV_SAMPLE_FMT_FLTP:
		float** p = reinterpret_cast<float**> (data);
		for (int i = 0; i < stream->channels(); ++i) {
			memcpy (audio->data(i), p[i], frames * sizeof(float));
		}

	default:
		throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
	}

	return audio;
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
	return stream->stream (_format_context)->codec->sample_fmt;

FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
	return av_get_bytes_per_sample (audio_sample_format (stream));
FFmpegDecoder::seek (ContentTime time, bool accurate)
	VideoDecoder::seek (time, accurate);
	AudioDecoder::seek (time, accurate);
	SubtitleDecoder::seek (time, accurate);

	/* If we are doing an `accurate' seek, we need to use pre-roll, as
	   we don't really know what the seek will give us.
	*/
	ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
	time -= pre_roll;

	/* XXX: it seems debatable whether PTS should be used here...
	   http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
	*/
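	/* When a stream index is passed, av_seek_frame takes its timestamp in that stream's
	   time_base units; e.g. with a time_base of 1/90000 (common for MPEG-TS) a target of
	   u = 2.5s becomes 2.5 / av_q2d(time_base) = 225000.
	*/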
	ContentTime u = time - _pts_offset;
	if (u < ContentTime ()) {
		u = ContentTime ();
	}
	av_seek_frame (_format_context, _video_stream, u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base), AVSEEK_FLAG_BACKWARD);

	avcodec_flush_buffers (video_codec_context());

	/* XXX: should we be flushing audio buffers here too? */

	if (subtitle_codec_context ()) {
		avcodec_flush_buffers (subtitle_codec_context ());
	}
FFmpegDecoder::decode_audio_packet ()
	/* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
	   several times.
	*/

	AVPacket copy_packet = _packet;

	/* XXX: inefficient */
	vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
	vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
	while (stream != streams.end () && !(*stream)->uses_index (_format_context, copy_packet.stream_index)) {
		++stream;
	}

	if (stream == streams.end ()) {
		/* The packet's stream may not be an audio one; just ignore it in this method if so */
		return;
	}
	while (copy_packet.size > 0) {

		int frame_finished;
		int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
		if (decode_result < 0) {
			/* avcodec_decode_audio4 can sometimes return an error even though it has decoded
			   some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
			   if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
			   even in the face of such an error, so I think we should too.

			   Returning from the method here caused mantis #352.
			*/
			LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

			/* Fudge decode_result so that we come out of the while loop when
			   we've processed this data.
			*/
			decode_result = copy_packet.size;
		}

		if (frame_finished) {
			ContentTime const ct = ContentTime::from_seconds (
				av_frame_get_best_effort_timestamp (_frame) *
				av_q2d ((*stream)->stream (_format_context)->time_base))
				+ _pts_offset;
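			/* av_samples_get_buffer_size gives the byte count of the decoded frame across all
			   channels, e.g. 1024 samples of 16-bit stereo -> 4096 bytes, which
			   deinterleave_audio converts back into per-channel floats.
			*/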
			int const data_size = av_samples_get_buffer_size (
				0, (*stream)->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (*stream), 1
				);

			audio (*stream, deinterleave_audio (*stream, _frame->data, data_size), ct);
		}

		copy_packet.data += decode_result;
		copy_packet.size -= decode_result;
	}
FFmpegDecoder::decode_video_packet ()
	int frame_finished;
	if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
		return false;
	}

	boost::mutex::scoped_lock lm (_filter_graphs_mutex);

	shared_ptr<FilterGraph> graph;

	list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
	while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
		++i;
	}

	if (i == _filter_graphs.end ()) {
		graph.reset (new FilterGraph (_ffmpeg_content, dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
		_filter_graphs.push_back (graph);
		LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
	} else {
		graph = *i;
	}
	list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

	for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

		shared_ptr<Image> image = i->first;

		if (i->second != AV_NOPTS_VALUE) {
			double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds ();
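			/* pts is now in seconds of content, so multiplying by the video frame rate gives
			   the frame index to emit; e.g. pts = 1.48s at 25fps is frame 37.
			*/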
			video (
				shared_ptr<ImageProxy> (new RawImageProxy (image)),
				rint (pts * _ffmpeg_content->video_frame_rate ())
				);
		} else {
			LOG_WARNING_NC ("Dropping frame without PTS");
		}
	}

	return true;
FFmpegDecoder::decode_subtitle_packet ()
	int got_subtitle;
	AVSubtitle sub;
	if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
		return;
	}

	if (sub.num_rects <= 0) {
		/* Sometimes we get an empty AVSubtitle, which is used by some codecs to
		   indicate that the previous subtitle should stop.  We can ignore it here.
		*/
		return;
	} else if (sub.num_rects > 1) {
		throw DecodeError (_("multi-part subtitles not yet supported"));
	}
	/* Subtitle PTS (within the source, not taking into account any of the
	   source that we may have chopped off for the DCP).
	*/
	FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
	ContentTimePeriod period;
	period.from = sub_period.from + _pts_offset;
	if (sub_period.to) {
		/* We already know the subtitle period `to' time */
		period.to = sub_period.to.get() + _pts_offset;
	} else {
		/* We have to look up the `to' time in the stream's records */
		period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (sub_period.from);
	}
	AVSubtitleRect const * rect = sub.rects[0];

	switch (rect->type) {
	case SUBTITLE_BITMAP:
		decode_bitmap_subtitle (rect, period);
		break;
	case SUBTITLE_TEXT:
		cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
		break;
	case SUBTITLE_ASS:
		cout << "XXX: SUBTITLE_ASS " << rect->ass << "\n";
		break;
	}

	avsubtitle_free (&sub);
list<ContentTimePeriod>
FFmpegDecoder::image_subtitles_during (ContentTimePeriod p, bool starting) const
	return _ffmpeg_content->subtitles_during (p, starting);

list<ContentTimePeriod>
FFmpegDecoder::text_subtitles_during (ContentTimePeriod, bool) const
	return list<ContentTimePeriod> ();
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimePeriod period)
	/* Note RGBA is expressed little-endian, so the first byte in the word is R, second
	   G, third B, fourth A.
	*/
	shared_ptr<Image> image (new Image (PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));

	/* Start of the first line in the subtitle */
	uint8_t* sub_p = rect->pict.data[0];
	/* sub_p looks up into a BGRA palette which is here
	   (i.e. first byte B, second G, third R, fourth A)
	*/
	uint32_t const * palette = (uint32_t *) rect->pict.data[1];
	/* Start of the output data */
	uint32_t* out_p = (uint32_t *) image->data()[0];
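	/* Each palette entry is BGRA in memory, so read as a little-endian uint32_t its bytes are
	   B (low), G, R, A (high).  The loop below swaps the B and R bytes to make an RGBA word:
	   e.g. palette bytes B=0x10 G=0x20 R=0x30 A=0xff (p = 0xff302010) become 0xff102030,
	   i.e. R, G, B, A in memory.
	*/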
	for (int y = 0; y < rect->h; ++y) {
		uint8_t* sub_line_p = sub_p;
		uint32_t* out_line_p = out_p;
		for (int x = 0; x < rect->w; ++x) {
			uint32_t const p = palette[*sub_line_p++];
			*out_line_p++ = ((p & 0xff) << 16) | (p & 0xff00) | ((p & 0xff0000) >> 16) | (p & 0xff000000);
		}
		sub_p += rect->pict.linesize[0];
		out_p += image->stride()[0] / sizeof (uint32_t);
	}
	dcp::Size const vs = _ffmpeg_content->video_size ();
	dcpomatic::Rect<double> const scaled_rect (
		static_cast<double> (rect->x) / vs.width,
		static_cast<double> (rect->y) / vs.height,
		static_cast<double> (rect->w) / vs.width,
		static_cast<double> (rect->h) / vs.height
		);

	image_subtitle (period, image, scaled_rect);