C++11 and whitespace cleanups.
diff --git a/src/lib/ffmpeg.cc b/src/lib/ffmpeg.cc
index 7bb17f9bf0f1ff238be0bd04d002cabe79946eff..77717a38f3c1dcf8af1cf367ba59e3e7211d7b29 100644
 */
 
 
+#include "compose.hpp"
+#include "config.h"
+#include "dcpomatic_log.h"
+#include "digester.h"
+#include "exceptions.h"
 #include "ffmpeg.h"
+#include "ffmpeg_audio_stream.h"
 #include "ffmpeg_content.h"
+#include "ffmpeg_subtitle_stream.h"
 #include "film.h"
-#include "exceptions.h"
-#include "util.h"
 #include "log.h"
-#include "dcpomatic_log.h"
-#include "ffmpeg_subtitle_stream.h"
-#include "ffmpeg_audio_stream.h"
-#include "digester.h"
-#include "compose.hpp"
-#include "config.h"
+#include "util.h"
 #include <dcp/raw_convert.h>
 extern "C" {
 #include <libavcodec/avcodec.h>
@@ -68,13 +68,15 @@ FFmpeg::~FFmpeg ()
 {
        boost::mutex::scoped_lock lm (_mutex);
 
-DCPOMATIC_DISABLE_WARNINGS
-       for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-               avcodec_close (_format_context->streams[i]->codec);
+       for (auto& i: _codec_context) {
+               avcodec_free_context (&i);
+       }
+
+       av_frame_free (&_video_frame);
+       for (auto& audio_frame: _audio_frame) {
+               av_frame_free (&audio_frame.second);
        }
-DCPOMATIC_ENABLE_WARNINGS
 
-       av_frame_free (&_frame);
        avformat_close_input (&_format_context);
 }
 
@@ -190,9 +192,9 @@ FFmpeg::setup_general ()
                }
        }
 
-       _frame = av_frame_alloc ();
-       if (_frame == 0) {
-               throw DecodeError (N_("could not allocate frame"));
+       _video_frame = av_frame_alloc ();
+       if (_video_frame == nullptr) {
+               throw std::bad_alloc ();
        }
 }
 
@@ -202,15 +204,23 @@ FFmpeg::setup_decoders ()
 {
        boost::mutex::scoped_lock lm (_mutex);
 
-DCPOMATIC_DISABLE_WARNINGS
+       _codec_context.resize (_format_context->nb_streams);
        for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-               auto context = _format_context->streams[i]->codec;
+               auto codec = avcodec_find_decoder (_format_context->streams[i]->codecpar->codec_id);
+               if (codec) {
+                       auto context = avcodec_alloc_context3 (codec);
+                       if (!context) {
+                               throw std::bad_alloc ();
+                       }
+                       _codec_context[i] = context;
 
-               context->thread_count = 8;
-               context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
+                       int r = avcodec_parameters_to_context (context, _format_context->streams[i]->codecpar);
+                       if (r < 0) {
+                               throw DecodeError ("avcodec_parameters_to_context", "FFmpeg::setup_decoders", r);
+                       }
 
-               AVCodec* codec = avcodec_find_decoder (context->codec_id);
-               if (codec) {
+                       context->thread_count = 8;
+                       context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
 
                        AVDictionary* options = nullptr;
                        /* This option disables decoding of DCA frame footers in our patched version
@@ -225,18 +235,17 @@ DCPOMATIC_DISABLE_WARNINGS
                        /* Enable following of links in files */
                        av_dict_set_int (&options, "enable_drefs", 1, 0);
 
-                       if (avcodec_open2 (context, codec, &options) < 0) {
-                               throw DecodeError (N_("could not open decoder"));
+                       r = avcodec_open2 (context, codec, &options);
+                       if (r < 0) {
+                               throw DecodeError (N_("avcodec_open2"), N_("FFmpeg::setup_decoders"), r);
                        }
                } else {
                        dcpomatic_log->log (String::compose ("No codec found for stream %1", i), LogEntry::TYPE_WARNING);
                }
        }
-DCPOMATIC_ENABLE_WARNINGS
 }
 
 
-DCPOMATIC_DISABLE_WARNINGS
 AVCodecContext *
 FFmpeg::video_codec_context () const
 {
@@ -244,20 +253,20 @@ FFmpeg::video_codec_context () const
                return nullptr;
        }
 
-       return _format_context->streams[_video_stream.get()]->codec;
+       return _codec_context[_video_stream.get()];
 }
 
 
 AVCodecContext *
 FFmpeg::subtitle_codec_context () const
 {
-       if (!_ffmpeg_content->subtitle_stream()) {
+       auto str = _ffmpeg_content->subtitle_stream();
+       if (!str) {
                return nullptr;
        }
 
-       return _ffmpeg_content->subtitle_stream()->stream(_format_context)->codec;
+       return _codec_context[str->index(_format_context)];
 }
-DCPOMATIC_ENABLE_WARNINGS
 
 
 int
@@ -279,19 +288,21 @@ FFmpeg::avio_seek (int64_t const pos, int whence)
 
 
 FFmpegSubtitlePeriod
-FFmpeg::subtitle_period (AVSubtitle const & sub)
+FFmpeg::subtitle_period (AVPacket const* packet, AVStream const* stream, AVSubtitle const & sub)
 {
-       auto const packet_time = ContentTime::from_seconds (static_cast<double> (sub.pts) / AV_TIME_BASE);
+       auto const packet_time = ContentTime::from_seconds (packet->pts * av_q2d(stream->time_base));
+       auto const start = packet_time + ContentTime::from_seconds(sub.start_display_time / 1e3);
 
-       if (sub.end_display_time == static_cast<uint32_t> (-1)) {
-               /* End time is not known */
-               return FFmpegSubtitlePeriod (packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3));
+       if (sub.end_display_time == 0 || sub.end_display_time == static_cast<uint32_t>(-1)) {
+               /* End time is not in the AVSubtitle; perhaps we can use the AVPacket's duration */
+               if (packet->duration) {
+                       return FFmpegSubtitlePeriod(start, start + ContentTime::from_seconds(packet->duration * av_q2d(stream->time_base)));
+               } else {
+                       return FFmpegSubtitlePeriod(start);
+               }
        }
 
-       return FFmpegSubtitlePeriod (
-               packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3),
-               packet_time + ContentTime::from_seconds (sub.end_display_time / 1e3)
-               );
+       return FFmpegSubtitlePeriod (start, packet_time + ContentTime::from_seconds(sub.end_display_time / 1e3));
 }
 
 
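A quick worked example of the new timing logic in subtitle_period() above (illustrative numbers, not taken from the patch): with stream->time_base = 1/90000 and packet->pts = 900000, packet_time is 10 s. sub.start_display_time and sub.end_display_time are millisecond offsets from that, so values of 500 and 3500 give a period from 10.5 s to 13.5 s. When end_display_time is 0 or unset, a packet->duration of 270000 (3 s in the same time base) yields the same 13.5 s end; with no duration either, the period is returned with no end time.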
@@ -347,3 +358,22 @@ FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream>> audio_streams, optiona
 
        return po;
 }
+
+
+AVFrame *
+FFmpeg::audio_frame (shared_ptr<const FFmpegAudioStream> stream)
+{
+       auto iter = _audio_frame.find(stream);
+       if (iter != _audio_frame.end()) {
+               return iter->second;
+       }
+
+       auto frame = av_frame_alloc ();
+       if (frame == nullptr) {
+               throw std::bad_alloc();
+       }
+
+       _audio_frame[stream] = frame;
+       return frame;
+}
+
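
Not part of the patch: a minimal sketch, assuming the usual FFmpeg send/receive decoding pattern, of how the per-stream contexts and the dedicated _video_frame set up above might be driven for the video stream. handle_video() is a hypothetical consumer and error handling is omitted:

	auto packet = av_packet_alloc ();
	while (av_read_frame (_format_context, packet) >= 0) {
		if (_video_stream && packet->stream_index == _video_stream.get()) {
			/* Per-stream context allocated in setup_decoders(); may be null if no codec was found */
			auto context = _codec_context[packet->stream_index];
			if (context && avcodec_send_packet (context, packet) >= 0) {
				while (avcodec_receive_frame (context, _video_frame) >= 0) {
					handle_video (_video_frame);	/* hypothetical consumer */
				}
			}
		}
		av_packet_unref (packet);
	}
	av_packet_free (&packet);

Audio streams would go through the new audio_frame(stream) accessor instead, so that each FFmpegAudioStream keeps its own AVFrame.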