X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fffmpeg_decoder.cc;h=df6b7416b35a2fc521846ed91ec490e6faaef45d;hb=a306df9145d16046e51e8b7ff5222e341e98fdbd;hp=c3a42545a6ed695a97a06257971e9bd71fae6059;hpb=a9f8daff61e227f046f5b87830ce902fd9c3fed7;p=dcpomatic.git

diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index c3a42545a..df6b7416b 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -1,19 +1,20 @@
 /*
-    Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
 
-    This program is free software; you can redistribute it and/or modify
+    This file is part of DCP-o-matic.
+
+    DCP-o-matic is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
 
-    This program is distributed in the hope that it will be useful,
+    DCP-o-matic is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
 
 */
 
@@ -27,19 +28,28 @@
 #include "util.h"
 #include "log.h"
 #include "ffmpeg_decoder.h"
+#include "subtitle_decoder.h"
 #include "ffmpeg_audio_stream.h"
 #include "ffmpeg_subtitle_stream.h"
-#include "filter_graph.h"
+#include "video_filter_graph.h"
 #include "audio_buffers.h"
 #include "ffmpeg_content.h"
 #include "raw_image_proxy.h"
+#include "video_decoder.h"
 #include "film.h"
+#include "audio_decoder.h"
 #include "compose.hpp"
+#include "subtitle_content.h"
+#include
+#include
+#include
+#include
 extern "C" {
 #include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
 }
 #include
+#include
 #include
 #include
 #include
@@ -53,23 +63,43 @@ extern "C" {
 #define LOG_WARNING(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);
 
 using std::cout;
+using std::string;
 using std::vector;
 using std::list;
 using std::min;
 using std::pair;
 using std::max;
+using std::map;
 using boost::shared_ptr;
+using boost::is_any_of;
+using boost::split;
 using dcp::Size;
 
 FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
-	: VideoDecoder (c)
-	, AudioDecoder (c, fast)
-	, SubtitleDecoder (c)
-	, FFmpeg (c)
+	: FFmpeg (c)
 	, _log (log)
-	, _pts_offset (pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->video_frame_rate()))
 {
+	if (c->video) {
+		video.reset (new VideoDecoder (this, c, log));
+		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate());
+	} else {
+		_pts_offset = ContentTime ();
+	}
+
+	if (c->audio) {
+		audio.reset (new AudioDecoder (this, c->audio, fast, log));
+	}
+
+	if (c->subtitle) {
+		subtitle.reset (
+			new SubtitleDecoder (
+				this,
+				c->subtitle,
+				bind (&FFmpegDecoder::image_subtitles_during, this, _1, _2),
+				bind (&FFmpegDecoder::text_subtitles_during, this, _1, _2)
+				)
+			);
+	}
 }
 
 void
@@ -82,10 +112,12 @@ FFmpegDecoder::flush ()
 	/* XXX: should we reset _packet.data and size after each *_decode_* call?
 	*/
 
-	while (decode_video_packet ()) {}
+	while (video && decode_video_packet ()) {}
 
-	decode_audio_packet ();
-	AudioDecoder::flush ();
+	if (audio) {
+		decode_audio_packet ();
+		audio->flush ();
+	}
 }
 
 bool
@@ -112,7 +144,7 @@ FFmpegDecoder::pass (PassReason reason, bool accurate)
 	int const si = _packet.stream_index;
 	shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
 
-	if (si == _video_stream && !_ignore_video && (accurate || reason != PASS_REASON_SUBTITLE)) {
+	if (_video_stream && si == _video_stream.get() && !video->ignore() && (accurate || reason != PASS_REASON_SUBTITLE)) {
 		decode_video_packet ();
 	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
 		decode_subtitle_packet ();
@@ -120,7 +152,7 @@ FFmpegDecoder::pass (PassReason reason, bool accurate)
 		decode_audio_packet ();
 	}
 
-	av_free_packet (&_packet);
+	av_packet_unref (&_packet);
 	return false;
 }
 
@@ -128,10 +160,14 @@
  * Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
 */
 shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t** data, int size)
+FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
 {
 	DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
 
+	int const size = av_samples_get_buffer_size (
+		0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
+		);
+
 	/* Deinterleave and convert to float */
 
 	/* total_samples and frames will be rounded down here, so if there are stray samples at the end
@@ -144,7 +180,7 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
 	switch (audio_sample_format (stream)) {
 	case AV_SAMPLE_FMT_U8:
 	{
-		uint8_t* p = reinterpret_cast<uint8_t *> (data[0]);
+		uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
 		int sample = 0;
 		int channel = 0;
 		for (int i = 0; i < total_samples; ++i) {
@@ -161,7 +197,7 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
 
 	case AV_SAMPLE_FMT_S16:
 	{
-		int16_t* p = reinterpret_cast<int16_t *> (data[0]);
+		int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
 		int sample = 0;
 		int channel = 0;
 		for (int i = 0; i < total_samples; ++i) {
@@ -178,7 +214,7 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
 
 	case AV_SAMPLE_FMT_S16P:
 	{
-		int16_t** p = reinterpret_cast<int16_t **> (data);
+		int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
 		for (int i = 0; i < stream->channels(); ++i) {
 			for (int j = 0; j < frames; ++j) {
 				audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
@@ -189,11 +225,11 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
 
 	case AV_SAMPLE_FMT_S32:
 	{
-		int32_t* p = reinterpret_cast<int32_t *> (data[0]);
+		int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
 		int sample = 0;
 		int channel = 0;
 		for (int i = 0; i < total_samples; ++i) {
-			audio->data(channel)[sample] = static_cast<float>(*p++) / (1 << 31);
+			audio->data(channel)[sample] = static_cast<float>(*p++) / 2147483648;
 
 			++channel;
 			if (channel == stream->channels()) {
@@ -204,9 +240,20 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
 	}
 	break;
 
+	case AV_SAMPLE_FMT_S32P:
+	{
+		int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
+		for (int i = 0; i < stream->channels(); ++i) {
+			for (int j = 0; j < frames; ++j) {
+				audio->data(i)[j] = static_cast<float>(p[i][j]) / 2147483648;
+			}
+		}
+	}
+	break;
+
 	case AV_SAMPLE_FMT_FLT:
 	{
-		float* p = reinterpret_cast<float*> (data[0]);
+		float* p = reinterpret_cast<float*> (_frame->data[0]);
 		int sample = 0;
 		int channel = 0;
 		for (int i = 0; i < total_samples; ++i) {
@@ -223,10 +270,14 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
 	case AV_SAMPLE_FMT_FLTP:
 	{
-		float** p = reinterpret_cast<float**> (data);
-		for (int i = 0; i < stream->channels(); ++i) {
+		float** p = reinterpret_cast<float**> (_frame->data);
+		/* Sometimes there aren't as many channels in the _frame as in the stream */
+		for (int i = 0; i < _frame->channels; ++i) {
 			memcpy (audio->data(i), p[i], frames * sizeof(float));
 		}
+		for (int i = _frame->channels; i < stream->channels(); ++i) {
+			audio->make_silent (i);
+		}
 	}
 	break;
@@ -252,9 +303,17 @@ FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) con
 void
 FFmpegDecoder::seek (ContentTime time, bool accurate)
 {
-	VideoDecoder::seek (time, accurate);
-	AudioDecoder::seek (time, accurate);
-	SubtitleDecoder::seek (time, accurate);
+	if (video) {
+		video->seek (time, accurate);
+	}
+
+	if (audio) {
+		audio->seek (time, accurate);
+	}
+
+	if (subtitle) {
+		subtitle->seek (time, accurate);
+	}
 
 	/* If we are doing an `accurate' seek, we need to use pre-roll, as
 	   we don't really know what the seek will give us.
@@ -267,11 +326,18 @@ FFmpegDecoder::seek (ContentTime time, bool accurate)
 	   http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
 	*/
 
+	DCPOMATIC_ASSERT (_video_stream);
+
 	ContentTime u = time - _pts_offset;
 	if (u < ContentTime ()) {
 		u = ContentTime ();
 	}
-	av_seek_frame (_format_context, _video_stream, u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base), AVSEEK_FLAG_BACKWARD);
+	av_seek_frame (
+		_format_context,
+		_video_stream.get(),
+		u.seconds() / av_q2d (_format_context->streams[_video_stream.get()]->time_base),
+		AVSEEK_FLAG_BACKWARD
+		);
 
 	avcodec_flush_buffers (video_codec_context());
@@ -329,11 +395,7 @@ FFmpegDecoder::decode_audio_packet ()
 			av_q2d ((*stream)->stream (_format_context)->time_base))
 			+ _pts_offset;
 
-		int const data_size = av_samples_get_buffer_size (
-			0, (*stream)->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (*stream), 1
-			);
-
-		shared_ptr<AudioBuffers> data = deinterleave_audio (*stream, _frame->data, data_size);
+		shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);
 
 		if (ct < ContentTime ()) {
 			/* Discard audio data that comes before time 0 */
@@ -344,7 +406,7 @@ FFmpegDecoder::decode_audio_packet ()
 		}
 
 		if (data->frames() > 0) {
-			audio (*stream, data, ct);
+			audio->give (*stream, data, ct);
 		}
 	}
 
@@ -356,6 +418,8 @@
 bool
 FFmpegDecoder::decode_video_packet ()
 {
+	DCPOMATIC_ASSERT (_video_stream);
+
 	int frame_finished;
 	if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
 		return false;
@@ -363,15 +427,16 @@ FFmpegDecoder::decode_video_packet ()
 
 	boost::mutex::scoped_lock lm (_filter_graphs_mutex);
 
-	shared_ptr<FilterGraph> graph;
+	shared_ptr<VideoFilterGraph> graph;
 
-	list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
+	list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
 	while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
 		++i;
 	}
 
 	if (i == _filter_graphs.end ()) {
-		graph.reset (new FilterGraph (_ffmpeg_content, dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+		graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+		graph->setup (_ffmpeg_content->filters ());
 		_filter_graphs.push_back (graph);
 		LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
 	} else {
@@ -385,10 +450,10 @@ FFmpegDecoder::decode_video_packet ()
 		shared_ptr<Image> image = i->first;
 
 		if (i->second != AV_NOPTS_VALUE) {
-			double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds ();
-			video (
+			double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
+			video->give (
 				shared_ptr<ImageProxy> (new RawImageProxy (image)),
-				llrint (pts * _ffmpeg_content->video_frame_rate ())
+				llrint (pts * _ffmpeg_content->active_video_frame_rate ())
 				);
 		} else {
 			LOG_WARNING_NC ("Dropping frame without PTS");
@@ -425,7 +490,7 @@ FFmpegDecoder::decode_subtitle_packet ()
 		period.to = sub_period.to.get() + _pts_offset;
 	} else {
 		/* We have to look up the `to' time in the stream's records */
-		period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (period.from);
+		period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (subtitle_id (sub));
 	}
 
 	for (unsigned int i = 0; i < sub.num_rects; ++i) {
@@ -441,7 +506,7 @@ FFmpegDecoder::decode_subtitle_packet ()
 			cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
 			break;
 		case SUBTITLE_ASS:
-			cout << "XXX: SUBTITLE_ASS " << rect->ass << "\n";
+			decode_ass_subtitle (rect->ass, period);
 			break;
 		}
 	}
@@ -452,13 +517,13 @@
 list<ContentTimePeriod>
 FFmpegDecoder::image_subtitles_during (ContentTimePeriod p, bool starting) const
 {
-	return _ffmpeg_content->subtitles_during (p, starting);
+	return _ffmpeg_content->image_subtitles_during (p, starting);
 }
 
 list<ContentTimePeriod>
-FFmpegDecoder::text_subtitles_during (ContentTimePeriod, bool) const
+FFmpegDecoder::text_subtitles_during (ContentTimePeriod p, bool starting) const
 {
-	return list<ContentTimePeriod> ();
+	return _ffmpeg_content->text_subtitles_during (p, starting);
 }
 
 void
@@ -469,12 +534,40 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimeP
 	*/
 	shared_ptr<Image> image (new Image (AV_PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
 
+#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
 	/* Start of the first line in the subtitle */
 	uint8_t* sub_p = rect->pict.data[0];
 	/* sub_p looks up into a BGRA palette which is here
 	   (i.e. first byte B, second G, third R, fourth A)
 	*/
 	uint32_t const * palette = (uint32_t *) rect->pict.data[1];
+#else
+	/* Start of the first line in the subtitle */
+	uint8_t* sub_p = rect->data[0];
+	/* sub_p looks up into a BGRA palette which is here
+	   (i.e. first byte B, second G, third R, fourth A)
+	*/
+	uint32_t const * palette = (uint32_t *) rect->data[1];
+#endif
+
+	/* And the stream has a map of those palette colours to colours
+	   chosen by the user; created a `mapped' palette from those settings.
+	*/
+	map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
+	vector<RGBA> mapped_palette (rect->nb_colors);
+	for (int i = 0; i < rect->nb_colors; ++i) {
+		RGBA c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
+		map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
+		if (j != colour_map.end ()) {
+			mapped_palette[i] = j->second;
+		} else {
+			/* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
+			   it is from a project that was created before this stuff was added.  Just use the
+			   colour straight from the original palette.
+			*/
+			mapped_palette[i] = c;
+		}
+	}
+
 	/* Start of the output data */
 	uint32_t* out_p = (uint32_t *) image->data()[0];
@@ -482,14 +575,19 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimeP
 		uint8_t* sub_line_p = sub_p;
 		uint32_t* out_line_p = out_p;
 		for (int x = 0; x < rect->w; ++x) {
-			uint32_t const p = palette[*sub_line_p++];
-			*out_line_p++ = ((p & 0xff) << 16) | (p & 0xff00) | ((p & 0xff0000) >> 16) | (p & 0xff000000);
+			RGBA const p = mapped_palette[*sub_line_p++];
+			/* XXX: this seems to be wrong to me (isn't the output image RGBA?) but it looks right on screen */
+			*out_line_p++ = (p.a << 24) | (p.r << 16) | (p.g << 8) | p.b;
 		}
+#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
 		sub_p += rect->pict.linesize[0];
+#else
+		sub_p += rect->linesize[0];
+#endif
 		out_p += image->stride()[0] / sizeof (uint32_t);
 	}
 
-	dcp::Size const vs = _ffmpeg_content->video_size ();
+	dcp::Size const vs = _ffmpeg_content->video->size ();
 	dcpomatic::Rect<double> const scaled_rect (
 		static_cast<double> (rect->x) / vs.width,
 		static_cast<double> (rect->y) / vs.height,
@@ -497,5 +595,26 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimeP
 		static_cast<double> (rect->h) / vs.height
 		);
 
-	image_subtitle (period, image, scaled_rect);
+	subtitle->give_image (period, image, scaled_rect);
+}
+
+void
+FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period)
+{
+	/* We have no styles and no Format: line, so I'm assuming that FFmpeg
+	   produces a single format of Dialogue: lines...
+	*/
+
+	vector<string> bits;
+	split (bits, ass, is_any_of (","));
+	if (bits.size() < 10) {
+		return;
+	}
+
+	sub::RawSubtitle base;
+	list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (base, bits[9]);
+
+	BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
+		subtitle->give_text (period, i);
+	}
 }
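The deinterleave_audio() rework above keeps one conversion pattern for every sample format: samples are copied out of FFmpeg's _frame buffers into one float buffer per channel and scaled into the -1..1 range (16-bit values by 1/2^15, 32-bit values by 1/2147483648). Below is a small, self-contained sketch of that pattern for interleaved signed 16-bit audio. It is not part of the patch: the variable names and the use of plain std::vector instead of DCP-o-matic's AudioBuffers are illustrative assumptions only.

#include <cstdint>
#include <iostream>
#include <vector>

int main ()
{
	/* Two channels of interleaved 16-bit samples: L R L R L R */
	int16_t const interleaved[] = { 0, 16384, -16384, 32767, -32768, 0 };
	int const channels = 2;
	int const total_samples = sizeof(interleaved) / sizeof(interleaved[0]);
	int const frames = total_samples / channels;

	/* One float buffer per channel, as AudioBuffers holds them */
	std::vector<std::vector<float> > out (channels, std::vector<float> (frames));

	int sample = 0;
	int channel = 0;
	for (int i = 0; i < total_samples; ++i) {
		/* Scale into -1..1 and rotate through the channels, as the S16 case does */
		out[channel][sample] = static_cast<float> (interleaved[i]) / (1 << 15);
		++channel;
		if (channel == channels) {
			channel = 0;
			++sample;
		}
	}

	for (int c = 0; c < channels; ++c) {
		for (int f = 0; f < frames; ++f) {
			std::cout << out[c][f] << " ";
		}
		std::cout << "\n";
	}
	return 0;
}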
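decode_ass_subtitle() assumes, as its comment says, that FFmpeg hands over one fixed layout of Dialogue: fields, so it splits the payload on commas and passes the tenth field (bits[9]) to libsub's SSA parser. Here is a minimal sketch of just that split, using the same Boost calls as the patch; the sample payload string is invented for illustration and is not taken from real FFmpeg output.

#include <boost/algorithm/string.hpp>
#include <iostream>
#include <string>
#include <vector>

int main ()
{
	/* Hypothetical ASS Dialogue payload: nine bookkeeping fields, then the text */
	std::string const ass = "0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello world";

	std::vector<std::string> bits;
	boost::split (bits, ass, boost::is_any_of (","));
	if (bits.size() < 10) {
		return 1;
	}

	/* bits[9] is what FFmpegDecoder would hand to sub::SSAReader::parse_line() */
	std::cout << bits[9] << "\n";
	return 0;
}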