{
boost::mutex::scoped_lock lm (_mutex);
-DCPOMATIC_DISABLE_WARNINGS
- for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
- avcodec_close (_format_context->streams[i]->codec);
+ for (auto& i: _codec_context) {
+ avcodec_free_context (&i);
}
-DCPOMATIC_ENABLE_WARNINGS
av_frame_free (&_frame);
avformat_close_input (&_format_context);
{
boost::mutex::scoped_lock lm (_mutex);
-DCPOMATIC_DISABLE_WARNINGS
+ _codec_context.resize (_format_context->nb_streams);
for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
- auto context = _format_context->streams[i]->codec;
+ auto codec = avcodec_find_decoder (_format_context->streams[i]->codecpar->codec_id);
+ if (codec) {
+ auto context = avcodec_alloc_context3 (codec);
+ if (!context) {
+ throw std::bad_alloc ();
+ }
+ _codec_context[i] = context;
- context->thread_count = 8;
- context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
+ int r = avcodec_parameters_to_context (context, _format_context->streams[i]->codecpar);
+ if (r < 0) {
+ throw DecodeError ("avcodec_parameters_to_context", "FFmpeg::setup_decoders", r);
+ }
- AVCodec* codec = avcodec_find_decoder (context->codec_id);
- if (codec) {
+ context->thread_count = 8;
+ context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
AVDictionary* options = nullptr;
/* This option disables decoding of DCA frame footers in our patched version
/* Enable following of links in files */
av_dict_set_int (&options, "enable_drefs", 1, 0);
- int r = avcodec_open2 (context, codec, &options);
+ r = avcodec_open2 (context, codec, &options);
if (r < 0) {
throw DecodeError (N_("avcodec_open2"), N_("FFmpeg::setup_decoders"), r);
}
dcpomatic_log->log (String::compose ("No codec found for stream %1", i), LogEntry::TYPE_WARNING);
}
}
-DCPOMATIC_ENABLE_WARNINGS
}
-DCPOMATIC_DISABLE_WARNINGS
AVCodecContext *
FFmpeg::video_codec_context () const
{
return nullptr;
}
- return _format_context->streams[_video_stream.get()]->codec;
+ return _codec_context[_video_stream.get()];
}
AVCodecContext *
FFmpeg::subtitle_codec_context () const
{
- if (!_ffmpeg_content->subtitle_stream()) {
+ auto str = _ffmpeg_content->subtitle_stream();
+ if (!str) {
return nullptr;
}
- return _ffmpeg_content->subtitle_stream()->stream(_format_context)->codec;
+ return _codec_context[str->index(_format_context)];
}
-DCPOMATIC_ENABLE_WARNINGS
int
FileGroup _file_group;
AVFormatContext* _format_context = nullptr;
+ std::vector<AVCodecContext*> _codec_context;
AVFrame* _frame = nullptr;
/** Index of video stream within AVFormatContext */
bool did_something = false;
if (video) {
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = nullptr;
- packet.size = 0;
- if (decode_and_process_video_packet(&packet)) {
+ if (decode_and_process_video_packet(nullptr)) {
did_something = true;
}
}
for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = nullptr;
- packet.size = 0;
- auto result = decode_audio_packet (i, &packet);
- if (result.second) {
+ auto context = _codec_context[i->index(_format_context)];
+ int r = avcodec_send_packet (context, nullptr);
+ if (r < 0 && r != AVERROR_EOF) {
+ /* EOF can happen if we've already sent a flush packet */
+ throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::flush"), r);
+ }
+ r = avcodec_receive_frame (context, _frame);
+ if (r >= 0) {
process_audio_frame (i);
did_something = true;
}
* Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
*/
shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
+FFmpegDecoder::deinterleave_audio (AVFrame* frame)
{
- DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
-
- int const size = av_samples_get_buffer_size (
- 0, stream->stream(_format_context)->codecpar->channels, _frame->nb_samples, audio_sample_format (stream), 1
- );
- DCPOMATIC_ASSERT (size >= 0);
+ auto format = static_cast<AVSampleFormat>(frame->format);
- /* XXX: can't we just use _frame->nb_samples directly here? */
/* XXX: can't we use swr_convert() to do the format conversion? */
- /* Deinterleave and convert to float */
-
- /* total_samples and frames will be rounded down here, so if there are stray samples at the end
- of the block that do not form a complete sample or frame they will be dropped.
- */
- int const total_samples = size / bytes_per_audio_sample (stream);
- int const channels = stream->channels();
- int const frames = total_samples / channels;
+ int const channels = frame->channels;
+ int const frames = frame->nb_samples;
+ int const total_samples = frames * channels;
auto audio = make_shared<AudioBuffers>(channels, frames);
auto data = audio->data();
- switch (audio_sample_format (stream)) {
+ switch (format) {
case AV_SAMPLE_FMT_U8:
{
- uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
+ auto p = reinterpret_cast<uint8_t *> (frame->data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_S16:
{
- int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
+ auto p = reinterpret_cast<int16_t *> (frame->data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_S16P:
{
- int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
+ auto p = reinterpret_cast<int16_t **> (frame->data);
for (int i = 0; i < channels; ++i) {
for (int j = 0; j < frames; ++j) {
data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
case AV_SAMPLE_FMT_S32:
{
- int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
+ auto p = reinterpret_cast<int32_t *> (frame->data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_S32P:
{
- int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
+ auto p = reinterpret_cast<int32_t **> (frame->data);
for (int i = 0; i < channels; ++i) {
for (int j = 0; j < frames; ++j) {
data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
case AV_SAMPLE_FMT_FLT:
{
- float* p = reinterpret_cast<float*> (_frame->data[0]);
+ auto p = reinterpret_cast<float*> (frame->data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_FLTP:
{
- float** p = reinterpret_cast<float**> (_frame->data);
- DCPOMATIC_ASSERT (_frame->channels <= channels);
- /* Sometimes there aren't as many channels in the _frame as in the stream */
- for (int i = 0; i < _frame->channels; ++i) {
+ auto p = reinterpret_cast<float**> (frame->data);
+ DCPOMATIC_ASSERT (frame->channels <= channels);
+ /* Sometimes there aren't as many channels in the frame as in the stream */
+ for (int i = 0; i < frame->channels; ++i) {
memcpy (data[i], p[i], frames * sizeof(float));
}
- for (int i = _frame->channels; i < channels; ++i) {
+ for (int i = frame->channels; i < channels; ++i) {
audio->make_silent (i);
}
}
break;
default:
- throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
+ throw DecodeError (String::compose(_("Unrecognised audio sample format (%1)"), static_cast<int>(format)));
}
return audio;
avcodec_flush_buffers (video_codec_context());
}
-DCPOMATIC_DISABLE_WARNINGS
for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
- avcodec_flush_buffers (i->stream(_format_context)->codec);
+ avcodec_flush_buffers (_codec_context[i->index(_format_context)]);
}
-DCPOMATIC_ENABLE_WARNINGS
if (subtitle_codec_context ()) {
avcodec_flush_buffers (subtitle_codec_context ());
void
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
- auto data = deinterleave_audio (stream);
+ auto data = deinterleave_audio (_frame);
ContentTime ct;
if (_frame->pts == AV_NOPTS_VALUE) {
}
-pair<int, bool>
-FFmpegDecoder::decode_audio_packet (shared_ptr<FFmpegAudioStream> stream, AVPacket* packet)
-{
- int frame_finished;
- DCPOMATIC_DISABLE_WARNINGS
- int decode_result = avcodec_decode_audio4 (stream->stream(_format_context)->codec, _frame, &frame_finished, packet);
- DCPOMATIC_ENABLE_WARNINGS
- if (decode_result < 0) {
- /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
- some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
- if it overreads the auxiliary data. ffplay carries on if frame_finished is true,
- even in the face of such an error, so I think we should too.
-
- Returning from the method here caused mantis #352.
- */
- LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);
- }
- return make_pair(decode_result, frame_finished);
-}
-
-
void
FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
{
return;
}
- /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
- several times. Make a simple copy so we can alter data and size.
- */
- AVPacket copy_packet = *packet;
+ auto context = _codec_context[stream->index(_format_context)];
- while (copy_packet.size > 0) {
- auto result = decode_audio_packet (stream, &copy_packet);
- if (result.first < 0) {
- /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
- some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
- if it overreads the auxiliary data. ffplay carries on if frame_finished is true,
- even in the face of such an error, so I think we should too.
-
- Returning from the method here caused mantis #352.
- */
- }
+ int r = avcodec_send_packet (context, packet);
+ if (r < 0) {
+ /* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
+ * Likewise I think AVERROR_EOF should not happen.
+ */
+ throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::decode_and_process_audio_packet"), r);
+ }
- if (result.second) {
- process_audio_frame (stream);
+ while (r >= 0) {
+ r = avcodec_receive_frame (context, _frame);
+ if (r == AVERROR(EAGAIN)) {
+ /* More input is required */
+ return;
}
- if (result.first) {
- break;
- }
-
- copy_packet.data += result.first;
- copy_packet.size -= result.first;
+ /* We choose to be relaxed here about other errors; it seems that there may be valid
+ * data to decode even if an error occurred. #352 may be related (though this was
+ * when we were using an old version of the FFmpeg API).
+ */
+ process_audio_frame (stream);
}
}
{
DCPOMATIC_ASSERT (_video_stream);
- int frame_finished;
-DCPOMATIC_DISABLE_WARNINGS
- if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, packet) < 0 || !frame_finished) {
+ auto context = video_codec_context();
+
+ int r = avcodec_send_packet (context, packet);
+ if (r < 0 && !(r == AVERROR_EOF && !packet)) {
+ /* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
+ * AVERROR_EOF can happen during flush if we've already sent a flush packet.
+ */
+ throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::decode_and_process_video_packet"), r);
+ }
+
+ r = avcodec_receive_frame (context, _frame);
+ if (r == AVERROR(EAGAIN) || r == AVERROR_EOF) {
+ /* More input is required, or no more frames are coming */
return false;
}
-DCPOMATIC_ENABLE_WARNINGS
+
+ /* We assume we'll only get one frame here, which I think is safe */
boost::mutex::scoped_lock lm (_filter_graphs_mutex);
bool flush ();
+ static std::shared_ptr<AudioBuffers> deinterleave_audio (AVFrame* frame);
+
AVSampleFormat audio_sample_format (std::shared_ptr<FFmpegAudioStream> stream) const;
int bytes_per_audio_sample (std::shared_ptr<FFmpegAudioStream> stream) const;
std::shared_ptr<FFmpegAudioStream> audio_stream_from_index (int index) const;
- std::pair<int, bool> decode_audio_packet (std::shared_ptr<FFmpegAudioStream> stream, AVPacket* packet);
void process_audio_frame (std::shared_ptr<FFmpegAudioStream> stream);
bool decode_and_process_video_packet (AVPacket* packet);
void process_ass_subtitle (std::string ass, dcpomatic::ContentTime from);
void maybe_add_subtitle ();
- std::shared_ptr<AudioBuffers> deinterleave_audio (std::shared_ptr<FFmpegAudioStream> stream) const;
std::list<std::shared_ptr<VideoFilterGraph> > _filter_graphs;
boost::mutex _filter_graphs_mutex;
for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
auto s = _format_context->streams[i];
-DCPOMATIC_DISABLE_WARNINGS
- if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+ if (s->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
/* This is a hack; sometimes it seems that _audio_codec_context->channel_layout isn't set up,
so bodge it here. No idea why we should have to do this.
*/
- if (s->codec->channel_layout == 0) {
- s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels);
+ if (s->codecpar->channel_layout == 0) {
+ s->codecpar->channel_layout = av_get_default_channel_layout (s->codecpar->channels);
}
+ auto codec = _codec_context[i]->codec;
+
DCPOMATIC_ASSERT (_format_context->duration != AV_NOPTS_VALUE);
- DCPOMATIC_ASSERT (s->codec->codec);
- DCPOMATIC_ASSERT (s->codec->codec->name);
+ DCPOMATIC_ASSERT (codec);
+ DCPOMATIC_ASSERT (codec->name);
_audio_streams.push_back (
make_shared<FFmpegAudioStream>(
stream_name (s),
- s->codec->codec->name,
+ codec->name,
s->id,
- s->codec->sample_rate,
- llrint ((double(_format_context->duration) / AV_TIME_BASE) * s->codec->sample_rate),
- s->codec->channels
+ s->codecpar->sample_rate,
+ llrint ((double(_format_context->duration) / AV_TIME_BASE) * s->codecpar->sample_rate),
+ s->codecpar->channels
)
);
- } else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
- _subtitle_streams.push_back (shared_ptr<FFmpegSubtitleStream> (new FFmpegSubtitleStream (subtitle_stream_name (s), s->id)));
+ } else if (s->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+ _subtitle_streams.push_back (make_shared<FFmpegSubtitleStream>(subtitle_stream_name (s), s->id));
}
}
}
}
- auto context = _format_context->streams[packet->stream_index]->codec;
-DCPOMATIC_ENABLE_WARNINGS
+ auto context = _codec_context[packet->stream_index];
if (_video_stream && packet->stream_index == _video_stream.get()) {
video_packet (context, temporal_reference, packet);
}
if (_video_stream) {
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = nullptr;
- packet.size = 0;
-DCPOMATIC_DISABLE_WARNINGS
- auto context = _format_context->streams[*_video_stream]->codec;
-DCPOMATIC_ENABLE_WARNINGS
- while (video_packet(context, temporal_reference, &packet)) {}
+ auto context = _codec_context[_video_stream.get()];
+ while (video_packet(context, temporal_reference, nullptr)) {}
}
for (auto i: _audio_streams) {
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = nullptr;
- packet.size = 0;
-DCPOMATIC_DISABLE_WARNINGS
- audio_packet (i->stream(_format_context)->codec, i, &packet);
-DCPOMATIC_ENABLE_WARNINGS
+ auto context = _codec_context[i->index(_format_context)];
+ audio_packet(context, i, nullptr);
}
if (_video_stream) {
return false;
}
- int frame_finished;
-DCPOMATIC_DISABLE_WARNINGS
- if (avcodec_decode_video2 (context, _frame, &frame_finished, packet) < 0 || !frame_finished) {
+ int r = avcodec_send_packet (context, packet);
+ if (r < 0 && !(r == AVERROR_EOF && !packet)) {
+ /* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
+ * AVERROR_EOF can happen during flush if we've already sent a flush packet.
+ */
+ throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegExaminer::video_packet"), r);
+ }
+
+ r = avcodec_receive_frame (context, _frame);
+ if (r == AVERROR(EAGAIN)) {
+ /* More input is required */
+ return true;
+ } else if (r == AVERROR_EOF) {
+ /* No more output is coming */
return false;
}
-DCPOMATIC_ENABLE_WARNINGS
if (!_first_video) {
_first_video = frame_time (_format_context->streams[_video_stream.get()]);
return;
}
- int frame_finished;
-DCPOMATIC_DISABLE_WARNINGS
- if (avcodec_decode_audio4 (context, _frame, &frame_finished, packet) >= 0 && frame_finished) {
-DCPOMATIC_ENABLE_WARNINGS
- stream->first_audio = frame_time (stream->stream (_format_context));
+ int r = avcodec_send_packet (context, packet);
+ if (r < 0 && !(r == AVERROR_EOF && !packet) && r != AVERROR(EAGAIN)) {
+ /* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
+ * AVERROR_EOF can happen during flush if we've already sent a flush packet.
+ * EAGAIN means we need to do avcodec_receive_frame, so just carry on and do that.
+ */
+ throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegExaminer::audio_packet"), r);
}
+
+ if (avcodec_receive_frame (context, _frame) < 0) {
+ return;
+ }
+
+ stream->first_audio = frame_time (stream->stream(_format_context));
}
*/
-#include "ffmpeg_image_proxy.h"
+
+#include "compose.hpp"
#include "cross.h"
-#include "exceptions.h"
#include "dcpomatic_socket.h"
+#include "exceptions.h"
+#include "ffmpeg_image_proxy.h"
#include "image.h"
-#include "compose.hpp"
#include "util.h"
#include "warnings.h"
#include <dcp/raw_convert.h>
#include "i18n.h"
-using std::string;
+
using std::cout;
-using std::pair;
-using std::min;
using std::make_pair;
+using std::make_shared;
+using std::min;
+using std::pair;
using std::shared_ptr;
+using std::string;
using boost::optional;
using std::dynamic_pointer_cast;
using dcp::raw_convert;
+
FFmpegImageProxy::FFmpegImageProxy (boost::filesystem::path path, VideoRange video_range)
: _data (path)
, _video_range (video_range)
return _pos;
}
-DCPOMATIC_DISABLE_WARNINGS
ImageProxy::Result
FFmpegImageProxy::image (optional<dcp::Size>) const
std::bad_alloc ();
}
- AVCodecContext* codec_context = format_context->streams[0]->codec;
- AVCodec* codec = avcodec_find_decoder (codec_context->codec_id);
+ auto codec = avcodec_find_decoder (format_context->streams[0]->codecpar->codec_id);
DCPOMATIC_ASSERT (codec);
- r = avcodec_open2 (codec_context, codec, 0);
+ auto context = avcodec_alloc_context3 (codec);
+ if (!context) {
+ throw DecodeError (N_("avcodec_alloc_context3"), name_for_errors);
+ }
+
+ r = avcodec_open2 (context, codec, 0);
if (r < 0) {
throw DecodeError (N_("avcodec_open2"), name_for_errors, r);
}
throw DecodeError (N_("av_read_frame"), name_for_errors, r);
}
- int frame_finished;
- if (avcodec_decode_video2(codec_context, frame, &frame_finished, &packet) < 0 || !frame_finished) {
- throw DecodeError (N_("avcodec_decode_video2"), name_for_errors, r);
+ r = avcodec_send_packet (context, &packet);
+ if (r < 0) {
+ throw DecodeError (N_("avcodec_send_packet"), name_for_errors, r);
}
- AVPixelFormat const pix_fmt = static_cast<AVPixelFormat>(frame->format);
+ r = avcodec_receive_frame (context, frame);
+ if (r < 0) {
+ throw DecodeError (N_("avcodec_receive_frame"), name_for_errors, r);
+ }
- _image.reset (new Image(frame));
+ auto const pix_fmt = static_cast<AVPixelFormat>(frame->format);
+
+ _image = make_shared<Image>(frame);
if (_video_range == VideoRange::VIDEO && av_pix_fmt_desc_get(pix_fmt)->flags & AV_PIX_FMT_FLAG_RGB) {
/* Asking for the video range to be converted by libswscale (in Image) will not work for
* RGB sources since that method only processes video range in YUV and greyscale. So we have
av_packet_unref (&packet);
av_frame_free (&frame);
- avcodec_close (codec_context);
+ avcodec_free_context (&context);
avformat_close_input (&format_context);
av_free (avio_context->buffer);
av_free (avio_context);
return Result (_image, 0);
}
-DCPOMATIC_ENABLE_WARNINGS
void
FFmpegImageProxy::add_metadata (xmlpp::Node* node) const
Image::Image (AVFrame* frame)
: _size (frame->width, frame->height)
- , _pixel_format (static_cast<AVPixelFormat> (frame->format))
+ , _pixel_format (static_cast<AVPixelFormat>(frame->format))
, _aligned (true)
{
+ DCPOMATIC_ASSERT (_pixel_format != AV_PIX_FMT_NONE);
+
allocate ();
for (int i = 0; i < planes(); ++i) {