*/
+#include "compose.hpp"
+#include "config.h"
+#include "dcpomatic_log.h"
+#include "digester.h"
+#include "exceptions.h"
#include "ffmpeg.h"
+#include "ffmpeg_audio_stream.h"
#include "ffmpeg_content.h"
+#include "ffmpeg_subtitle_stream.h"
#include "film.h"
-#include "exceptions.h"
-#include "util.h"
#include "log.h"
-#include "dcpomatic_log.h"
-#include "ffmpeg_subtitle_stream.h"
-#include "ffmpeg_audio_stream.h"
-#include "digester.h"
-#include "compose.hpp"
-#include "config.h"
+#include "memory_util.h"
+#include "util.h"
#include <dcp/raw_convert.h>
extern "C" {
#include <libavcodec/avcodec.h>
avcodec_free_context (&i);
}
- av_frame_free (&_frame);
+ av_frame_free (&_video_frame);
+ for (auto& audio_frame: _audio_frame) {
+ av_frame_free (&audio_frame.second);
+ }
+
avformat_close_input (&_format_context);
}
for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
auto s = _format_context->streams[i];
if (s->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codecpar->codec_id)) {
+ auto const frame_rate = av_q2d(s->avg_frame_rate);
+ if (frame_rate < 1 || frame_rate > 1000) {
+ /* Ignore video streams with crazy frame rates. These are usually things like album art on MP3s. */
+ continue;
+ }
if (s->avg_frame_rate.num > 0 && s->avg_frame_rate.den > 0) {
/* This is definitely our video stream */
_video_stream = i;
_video_stream = video_stream_undefined_frame_rate.get();
}
- /* Ignore video streams with crazy frame rates. These are usually things like album art on MP3s. */
- if (_video_stream && av_q2d(av_guess_frame_rate(_format_context, _format_context->streams[_video_stream.get()], 0)) > 1000) {
- _video_stream = optional<int>();
- }
-
/* Hack: if the AVStreams have duplicate IDs, replace them with our
own. We use the IDs so that we can cope with VOBs, in which streams
move about in index but remain with the same ID in different
}
}
- _frame = av_frame_alloc ();
- if (_frame == 0) {
+ _video_frame = av_frame_alloc ();
+ if (_video_frame == nullptr) {
throw std::bad_alloc ();
}
}
FFmpeg::subtitle_period (AVPacket const* packet, AVStream const* stream, AVSubtitle const & sub)
{
+	/* AVSubtitle display times are in milliseconds, relative to the packet's PTS
+	   (which is in the stream's time_base units).
+	*/
	auto const packet_time = ContentTime::from_seconds (packet->pts * av_q2d(stream->time_base));
+	auto const start = packet_time + ContentTime::from_seconds(sub.start_display_time / 1e3);
	if (sub.end_display_time == 0 || sub.end_display_time == static_cast<uint32_t>(-1)) {
-		/* End time is not known */
-		return FFmpegSubtitlePeriod (packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3));
+		/* End time is not in the AVSubtitle; perhaps we can use the AVPacket's duration */
+		if (packet->duration) {
+			/* duration is in the stream's time_base units, like pts */
+			return FFmpegSubtitlePeriod(start, start + ContentTime::from_seconds(packet->duration * av_q2d(stream->time_base)));
+		} else {
+			/* No usable duration either: return an open-ended period */
+			return FFmpegSubtitlePeriod(start);
+		}
	}
-	return FFmpegSubtitlePeriod (
-		packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3),
-		packet_time + ContentTime::from_seconds (sub.end_display_time / 1e3)
-	);
+	/* end_display_time is valid, so we know both ends of the period */
+	return FFmpegSubtitlePeriod (start, packet_time + ContentTime::from_seconds(sub.end_display_time / 1e3));
}
return po;
}
+
+
+/** Fetch (allocating and caching on first use) the AVFrame into which
+ *  decoded audio for @p stream should be received.
+ *
+ *  @param stream Audio stream whose frame is wanted.
+ *  @return Frame for this stream; it is owned by this FFmpeg object and
+ *  released (av_frame_free) in the destructor.
+ *  Throws std::bad_alloc if av_frame_alloc() fails.
+ */
+AVFrame *
+FFmpeg::audio_frame (shared_ptr<const FFmpegAudioStream> stream)
+{
+	/* Re-use an already-allocated frame for this stream if we have one */
+	auto iter = _audio_frame.find(stream);
+	if (iter != _audio_frame.end()) {
+		return iter->second;
+	}
+
+	auto frame = av_frame_alloc ();
+	if (frame == nullptr) {
+		throw std::bad_alloc();
+	}
+
+	/* Cache it so subsequent calls for this stream get the same frame */
+	_audio_frame[stream] = frame;
+	return frame;
+
+}
+
+