X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fffmpeg.cc;h=4b8d7b8ab1d89ab7b14b67dbd8da739a52abc144;hb=7dbcb7cb856f29b5d8fb9b16dc9f4a30898b8b40;hp=31aa182937d24512d3f48c543bf7d4329f4dcc20;hpb=60450bd93af0b331d7b98c88aa199366305f0721;p=dcpomatic.git diff --git a/src/lib/ffmpeg.cc b/src/lib/ffmpeg.cc index 31aa18293..4b8d7b8ab 100644 --- a/src/lib/ffmpeg.cc +++ b/src/lib/ffmpeg.cc @@ -19,18 +19,19 @@ */ +#include "compose.hpp" +#include "config.h" +#include "dcpomatic_log.h" +#include "digester.h" +#include "exceptions.h" #include "ffmpeg.h" +#include "ffmpeg_audio_stream.h" #include "ffmpeg_content.h" +#include "ffmpeg_subtitle_stream.h" #include "film.h" -#include "exceptions.h" -#include "util.h" #include "log.h" -#include "dcpomatic_log.h" -#include "ffmpeg_subtitle_stream.h" -#include "ffmpeg_audio_stream.h" -#include "digester.h" -#include "compose.hpp" -#include "config.h" +#include "memory_util.h" +#include "util.h" #include extern "C" { #include @@ -72,7 +73,11 @@ FFmpeg::~FFmpeg () avcodec_free_context (&i); } - av_frame_free (&_frame); + av_frame_free (&_video_frame); + for (auto& audio_frame: _audio_frame) { + av_frame_free (&audio_frame.second); + } + avformat_close_input (&_format_context); } @@ -91,30 +96,9 @@ avio_seek_wrapper (void* data, int64_t offset, int whence) } -void -FFmpeg::ffmpeg_log_callback (void* ptr, int level, const char* fmt, va_list vl) -{ - if (level > AV_LOG_WARNING) { - return; - } - - char line[1024]; - static int prefix = 0; - av_log_format_line (ptr, level, fmt, vl, line, sizeof (line), &prefix); - string str (line); - boost::algorithm::trim (str); - dcpomatic_log->log (String::compose ("FFmpeg: %1", str), LogEntry::TYPE_GENERAL); -} - - void FFmpeg::setup_general () { - /* This might not work too well in some cases of multiple FFmpeg decoders, - but it's probably good enough. - */ - av_log_set_callback (FFmpeg::ffmpeg_log_callback); - _file_group.set_paths (_ffmpeg_content->paths ()); _avio_buffer = static_cast (wrapped_av_malloc(_avio_buffer_size)); _avio_context = avio_alloc_context (_avio_buffer, _avio_buffer_size, 0, this, avio_read_wrapper, 0, avio_seek_wrapper); @@ -144,6 +128,11 @@ FFmpeg::setup_general () for (uint32_t i = 0; i < _format_context->nb_streams; ++i) { auto s = _format_context->streams[i]; if (s->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codecpar->codec_id)) { + auto const frame_rate = av_q2d(s->avg_frame_rate); + if (frame_rate < 1 || frame_rate > 1000) { + /* Ignore video streams with crazy frame rates. These are usually things like album art on MP3s. */ + continue; + } if (s->avg_frame_rate.num > 0 && s->avg_frame_rate.den > 0) { /* This is definitely our video stream */ _video_stream = i; @@ -161,11 +150,6 @@ FFmpeg::setup_general () _video_stream = video_stream_undefined_frame_rate.get(); } - /* Ignore video streams with crazy frame rates. These are usually things like album art on MP3s. */ - if (_video_stream && av_q2d(av_guess_frame_rate(_format_context, _format_context->streams[_video_stream.get()], 0)) > 1000) { - _video_stream = optional(); - } - /* Hack: if the AVStreams have duplicate IDs, replace them with our own. 
We use the IDs so that we can cope with VOBs, in which streams move about in index but remain with the same ID in different @@ -188,8 +172,8 @@ FFmpeg::setup_general () } } - _frame = av_frame_alloc (); - if (_frame == 0) { + _video_frame = av_frame_alloc (); + if (_video_frame == nullptr) { throw std::bad_alloc (); } } @@ -268,7 +252,11 @@ FFmpeg::subtitle_codec_context () const int FFmpeg::avio_read (uint8_t* buffer, int const amount) { - return _file_group.read (buffer, amount); + auto result = _file_group.read(buffer, amount); + if (result.eof && result.bytes_read == 0) { + return AVERROR_EOF; + } + return result.bytes_read; } @@ -287,16 +275,18 @@ FFmpegSubtitlePeriod FFmpeg::subtitle_period (AVPacket const* packet, AVStream const* stream, AVSubtitle const & sub) { auto const packet_time = ContentTime::from_seconds (packet->pts * av_q2d(stream->time_base)); + auto const start = packet_time + ContentTime::from_seconds(sub.start_display_time / 1e3); - if (sub.end_display_time == static_cast (-1)) { - /* End time is not known */ - return FFmpegSubtitlePeriod (packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3)); + if (sub.end_display_time == 0 || sub.end_display_time == static_cast(-1)) { + /* End time is not in the AVSubtitle; perhaps we can use the AVPacket's duration */ + if (packet->duration) { + return FFmpegSubtitlePeriod(start, start + ContentTime::from_seconds(packet->duration * av_q2d(stream->time_base))); + } else { + return FFmpegSubtitlePeriod(start); + } } - return FFmpegSubtitlePeriod ( - packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3), - packet_time + ContentTime::from_seconds (sub.end_display_time / 1e3) - ); + return FFmpegSubtitlePeriod (start, packet_time + ContentTime::from_seconds(sub.end_display_time / 1e3)); } @@ -352,3 +342,22 @@ FFmpeg::pts_offset (vector> audio_streams, optiona return po; } + + +AVFrame * +FFmpeg::audio_frame (shared_ptr stream) +{ + auto iter = _audio_frame.find(stream); + if (iter != _audio_frame.end()) { + return iter->second; + } + + auto frame = av_frame_alloc (); + if (frame == nullptr) { + throw std::bad_alloc(); + } + + _audio_frame[stream] = frame; + return frame; +} +
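
A few notes on the less obvious parts of this patch, with stand-alone sketches for illustration; none of the code below is taken from DCP-o-matic.

The stream-selection change in setup_general() amounts to the heuristic sketched here. It is simplified: unlike the patch, it does not keep streams with an unreported average frame rate as a last-resort fallback, and pick_video_stream is a hypothetical name.

#include <optional>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

/* Pick the first decodable video stream with a plausible average frame rate,
   skipping things like album art attached to MP3s. */
std::optional<int>
pick_video_stream (AVFormatContext* format_context)
{
	for (unsigned int i = 0; i < format_context->nb_streams; ++i) {
		auto s = format_context->streams[i];
		if (s->codecpar->codec_type != AVMEDIA_TYPE_VIDEO || !avcodec_find_decoder(s->codecpar->codec_id)) {
			continue;
		}
		if (s->avg_frame_rate.num <= 0 || s->avg_frame_rate.den <= 0) {
			/* Frame rate unknown; the patch keeps such streams as a last-resort fallback, this sketch does not */
			continue;
		}
		auto const frame_rate = av_q2d (s->avg_frame_rate);
		if (frame_rate < 1 || frame_rate > 1000) {
			/* Implausible rate: usually a still image (e.g. cover art) muxed as a video stream */
			continue;
		}
		return static_cast<int>(i);
	}
	return {};
}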
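
The new avio_read() follows FFmpeg's convention for custom I/O: a read callback installed with avio_alloc_context() should return AVERROR_EOF at end of stream rather than 0, which recent FFmpeg releases treat as deprecated. A minimal sketch of that convention, independent of DCP-o-matic's FileGroup and using an illustrative input file name:

#include <cstdio>
extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>
}

static int
read_callback (void* data, uint8_t* buffer, int amount)
{
	auto file = static_cast<std::FILE*>(data);
	auto const bytes = std::fread (buffer, 1, amount, file);
	if (bytes == 0 && std::feof(file)) {
		/* Signal end of stream explicitly; returning 0 here is deprecated */
		return AVERROR_EOF;
	}
	return static_cast<int>(bytes);
}

int
main ()
{
	auto file = std::fopen ("input.mp3", "rb");  /* illustrative input file */
	if (!file) {
		return 1;
	}

	int const buffer_size = 4096;
	auto buffer = static_cast<uint8_t*>(av_malloc(buffer_size));
	auto avio = avio_alloc_context (buffer, buffer_size, 0, file, read_callback, nullptr, nullptr);

	auto format_context = avformat_alloc_context ();
	format_context->pb = avio;
	if (avformat_open_input (&format_context, nullptr, nullptr, nullptr) == 0) {
		avformat_close_input (&format_context);
	}

	/* With caller-supplied I/O the AVIOContext is not freed by avformat_close_input(),
	   so release its (possibly reallocated) buffer and the context ourselves. */
	av_freep (&avio->buffer);
	avio_context_free (&avio);
	std::fclose (file);
	return 0;
}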
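
Finally, the revised subtitle_period() reduces to arithmetic on the packet and AVSubtitle timing fields: the start is the packet PTS plus start_display_time, and the end falls back to the packet's duration when end_display_time is 0 or unset. The helper below is hypothetical (SubtitleTimes, subtitle_times() and the parameter names are mine, not DCP-o-matic's API); times are in seconds and time_base is av_q2d(stream->time_base).

#include <cstdint>
#include <optional>

struct SubtitleTimes
{
	double start;                // seconds from the start of the content
	std::optional<double> end;   // seconds, if an end time could be determined
};

SubtitleTimes
subtitle_times (int64_t packet_pts, int64_t packet_duration, double time_base, uint32_t start_display_ms, uint32_t end_display_ms)
{
	auto const packet_time = packet_pts * time_base;
	auto const start = packet_time + start_display_ms / 1e3;

	if (end_display_ms == 0 || end_display_ms == static_cast<uint32_t>(-1)) {
		/* No usable end time in the AVSubtitle; fall back to the packet's duration if it has one */
		if (packet_duration) {
			return { start, start + packet_duration * time_base };
		}
		return { start, std::nullopt };
	}

	return { start, packet_time + end_display_ms / 1e3 };
}

In the patch itself the same arithmetic is expressed with ContentTime and FFmpegSubtitlePeriod.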