ffmpeg_options = {}
if target.platform != 'linux' or target.distro != 'arch':
- deps = [('ffmpeg-cdist', 'e89aa4a', ffmpeg_options)]
+ deps = [('ffmpeg-cdist', '107f9af8', ffmpeg_options)]
else:
# Use distro-provided FFmpeg on Arch
deps = []
deps.append(('openssl', '7f29dd5'))
if can_build_disk(target):
deps.append(('lwext4', '9d20ec5'))
- deps.append(('ffcmp', 'a915540'))
+ deps.append(('ffcmp', '6259cd4'))
return deps
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/channel_layout.h>
+#include <libavutil/opt.h>
}
#include <iostream>
return buffer;
}
-void *
-AudioFilterGraph::sink_parameters () const
-{
- AVABufferSinkParams* sink_params = av_abuffersink_params_alloc ();
-
- AVSampleFormat* sample_fmts = new AVSampleFormat[2];
- sample_fmts[0] = AV_SAMPLE_FMT_FLTP;
- sample_fmts[1] = AV_SAMPLE_FMT_NONE;
- sink_params->sample_fmts = sample_fmts;
- int64_t* channel_layouts = new int64_t[2];
- channel_layouts[0] = _channel_layout;
- channel_layouts[1] = -1;
- sink_params->channel_layouts = channel_layouts;
+void
+AudioFilterGraph::set_parameters (AVFilterContext* context) const
+{
+ AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
+ int r = av_opt_set_int_list (context, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ DCPOMATIC_ASSERT (r >= 0);
- sink_params->sample_rates = new int[2];
- sink_params->sample_rates[0] = _sample_rate;
- sink_params->sample_rates[1] = -1;
+ int64_t channel_layouts[] = { _channel_layout, -1 };
+ r = av_opt_set_int_list (context, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN);
+ DCPOMATIC_ASSERT (r >= 0);
- return sink_params;
+ int sample_rates[] = { _sample_rate, -1 };
+ r = av_opt_set_int_list (context, "sample_rates", sample_rates, -1, AV_OPT_SEARCH_CHILDREN);
+ DCPOMATIC_ASSERT (r >= 0);
}
+
string
AudioFilterGraph::src_name () const
{
void process (std::shared_ptr<const AudioBuffers> audio);
protected:
- std::string src_parameters () const;
- std::string src_name () const;
- void* sink_parameters () const;
- std::string sink_name () const;
+ std::string src_parameters () const override;
+ std::string src_name () const override;
+ void set_parameters (AVFilterContext* context) const override;
+ std::string sink_name () const override;
private:
int _sample_rate;
explicit DecodeError (std::string s)
: std::runtime_error (s)
{}
+
+ explicit DecodeError (std::string function, std::string caller)
+ : std::runtime_error (String::compose("%1 failed [%2]", function, caller))
+ {}
+
+ explicit DecodeError (std::string function, std::string caller, int error)
+ : std::runtime_error (String::compose("%1 failed [%2] (%3)", function, caller, error))
+ {}
};
class CryptoError : public std::runtime_error
{}
};
+
/** @class EncodeError
* @brief A low-level problem with an encoder.
*/
explicit EncodeError (std::string s)
: std::runtime_error (s)
{}
+
+ explicit EncodeError (std::string function, std::string caller)
+ : std::runtime_error (String::compose("%1 failed [%2]", function, caller))
+ {}
+
+ explicit EncodeError (std::string function, std::string caller, int error)
+ : std::runtime_error (String::compose("%1 failed [%2] (%3)", function, caller, error))
+ {}
};
+
/** @class FileError.
* @brief Parent class for file-related errors.
*/
{
boost::mutex::scoped_lock lm (_mutex);
-DCPOMATIC_DISABLE_WARNINGS
- for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
- avcodec_close (_format_context->streams[i]->codec);
+ for (auto& i: _codec_context) {
+ avcodec_free_context (&i);
}
-DCPOMATIC_ENABLE_WARNINGS
av_frame_free (&_frame);
avformat_close_input (&_format_context);
optional<int> video_stream_undefined_frame_rate;
-DCPOMATIC_DISABLE_WARNINGS
for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
auto s = _format_context->streams[i];
- if (s->codec->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codec->codec_id)) {
+ if (s->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codecpar->codec_id)) {
if (s->avg_frame_rate.num > 0 && s->avg_frame_rate.den > 0) {
/* This is definitely our video stream */
_video_stream = i;
}
}
}
-DCPOMATIC_ENABLE_WARNINGS
/* Files from iTunes sometimes have two video streams, one with the avg_frame_rate.num and .den set
to zero. Only use such a stream if there is no alternative.
_frame = av_frame_alloc ();
if (_frame == 0) {
- throw DecodeError (N_("could not allocate frame"));
+ throw std::bad_alloc ();
}
}
{
boost::mutex::scoped_lock lm (_mutex);
-DCPOMATIC_DISABLE_WARNINGS
+ _codec_context.resize (_format_context->nb_streams);
for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
- auto context = _format_context->streams[i]->codec;
+ auto codec = avcodec_find_decoder (_format_context->streams[i]->codecpar->codec_id);
+ if (codec) {
+ auto context = avcodec_alloc_context3 (codec);
+ if (!context) {
+ throw std::bad_alloc ();
+ }
+ _codec_context[i] = context;
- context->thread_count = 8;
- context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
+ int r = avcodec_parameters_to_context (context, _format_context->streams[i]->codecpar);
+ if (r < 0) {
+ throw DecodeError ("avcodec_parameters_to_context", "FFmpeg::setup_decoders", r);
+ }
- AVCodec* codec = avcodec_find_decoder (context->codec_id);
- if (codec) {
+ context->thread_count = 8;
+ context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
AVDictionary* options = nullptr;
/* This option disables decoding of DCA frame footers in our patched version
/* Enable following of links in files */
av_dict_set_int (&options, "enable_drefs", 1, 0);
- if (avcodec_open2 (context, codec, &options) < 0) {
- throw DecodeError (N_("could not open decoder"));
+ r = avcodec_open2 (context, codec, &options);
+ if (r < 0) {
+ throw DecodeError (N_("avcodec_open2"), N_("FFmpeg::setup_decoders"), r);
}
} else {
dcpomatic_log->log (String::compose ("No codec found for stream %1", i), LogEntry::TYPE_WARNING);
}
}
-DCPOMATIC_ENABLE_WARNINGS
}
-DCPOMATIC_DISABLE_WARNINGS
AVCodecContext *
FFmpeg::video_codec_context () const
{
return nullptr;
}
- return _format_context->streams[_video_stream.get()]->codec;
+ return _codec_context[_video_stream.get()];
}
AVCodecContext *
FFmpeg::subtitle_codec_context () const
{
- if (!_ffmpeg_content->subtitle_stream()) {
+ auto str = _ffmpeg_content->subtitle_stream();
+ if (!str) {
return nullptr;
}
- return _ffmpeg_content->subtitle_stream()->stream(_format_context)->codec;
+ return _codec_context[str->index(_format_context)];
}
-DCPOMATIC_ENABLE_WARNINGS
int
FileGroup _file_group;
AVFormatContext* _format_context = nullptr;
+ std::vector<AVCodecContext*> _codec_context;
AVFrame* _frame = nullptr;
/** Index of video stream within AVFormatContext */
bool did_something = false;
if (video) {
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = nullptr;
- packet.size = 0;
- if (decode_and_process_video_packet(&packet)) {
+ if (decode_and_process_video_packet(nullptr)) {
did_something = true;
}
}
for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = nullptr;
- packet.size = 0;
- auto result = decode_audio_packet (i, &packet);
- if (result.second) {
+ auto context = _codec_context[i->index(_format_context)];
+ int r = avcodec_send_packet (context, nullptr);
+ if (r < 0 && r != AVERROR_EOF) {
+ /* EOF can happen if we've already sent a flush packet */
+ throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::flush"), r);
+ }
+ r = avcodec_receive_frame (context, _frame);
+ if (r >= 0) {
process_audio_frame (i);
did_something = true;
}
* Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
*/
shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
+FFmpegDecoder::deinterleave_audio (AVFrame* frame)
{
- DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
-
-DCPOMATIC_DISABLE_WARNINGS
- int const size = av_samples_get_buffer_size (
- 0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
- );
-DCPOMATIC_ENABLE_WARNINGS
- DCPOMATIC_ASSERT (size >= 0);
+ auto format = static_cast<AVSampleFormat>(frame->format);
- /* XXX: can't we just use _frame->nb_samples directly here? */
/* XXX: can't we use swr_convert() to do the format conversion? */
- /* Deinterleave and convert to float */
-
- /* total_samples and frames will be rounded down here, so if there are stray samples at the end
- of the block that do not form a complete sample or frame they will be dropped.
- */
- int const total_samples = size / bytes_per_audio_sample (stream);
- int const channels = stream->channels();
- int const frames = total_samples / channels;
+ int const channels = frame->channels;
+ int const frames = frame->nb_samples;
+ int const total_samples = frames * channels;
auto audio = make_shared<AudioBuffers>(channels, frames);
auto data = audio->data();
- switch (audio_sample_format (stream)) {
+ switch (format) {
case AV_SAMPLE_FMT_U8:
{
- uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
+ auto p = reinterpret_cast<uint8_t *> (frame->data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_S16:
{
- int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
+ auto p = reinterpret_cast<int16_t *> (frame->data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_S16P:
{
- int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
+ auto p = reinterpret_cast<int16_t **> (frame->data);
for (int i = 0; i < channels; ++i) {
for (int j = 0; j < frames; ++j) {
data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
case AV_SAMPLE_FMT_S32:
{
- int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
+ auto p = reinterpret_cast<int32_t *> (frame->data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_S32P:
{
- int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
+ auto p = reinterpret_cast<int32_t **> (frame->data);
for (int i = 0; i < channels; ++i) {
for (int j = 0; j < frames; ++j) {
data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
case AV_SAMPLE_FMT_FLT:
{
- float* p = reinterpret_cast<float*> (_frame->data[0]);
+ auto p = reinterpret_cast<float*> (frame->data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_FLTP:
{
- float** p = reinterpret_cast<float**> (_frame->data);
- DCPOMATIC_ASSERT (_frame->channels <= channels);
- /* Sometimes there aren't as many channels in the _frame as in the stream */
- for (int i = 0; i < _frame->channels; ++i) {
+ auto p = reinterpret_cast<float**> (frame->data);
+ DCPOMATIC_ASSERT (frame->channels <= channels);
+ /* Sometimes there aren't as many channels in the frame as in the stream */
+ for (int i = 0; i < frame->channels; ++i) {
memcpy (data[i], p[i], frames * sizeof(float));
}
- for (int i = _frame->channels; i < channels; ++i) {
+ for (int i = frame->channels; i < channels; ++i) {
audio->make_silent (i);
}
}
break;
default:
- throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
+ throw DecodeError (String::compose(_("Unrecognised audio sample format (%1)"), static_cast<int>(format)));
}
return audio;
AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
-DCPOMATIC_DISABLE_WARNINGS
- return stream->stream (_format_context)->codec->sample_fmt;
-DCPOMATIC_ENABLE_WARNINGS
+ return static_cast<AVSampleFormat>(stream->stream(_format_context)->codecpar->format);
}
avcodec_flush_buffers (video_codec_context());
}
-DCPOMATIC_DISABLE_WARNINGS
for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
- avcodec_flush_buffers (i->stream(_format_context)->codec);
+ avcodec_flush_buffers (_codec_context[i->index(_format_context)]);
}
-DCPOMATIC_ENABLE_WARNINGS
if (subtitle_codec_context ()) {
avcodec_flush_buffers (subtitle_codec_context ());
void
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
- auto data = deinterleave_audio (stream);
+ auto data = deinterleave_audio (_frame);
ContentTime ct;
if (_frame->pts == AV_NOPTS_VALUE) {
}
-pair<int, bool>
-FFmpegDecoder::decode_audio_packet (shared_ptr<FFmpegAudioStream> stream, AVPacket* packet)
-{
- int frame_finished;
- DCPOMATIC_DISABLE_WARNINGS
- int decode_result = avcodec_decode_audio4 (stream->stream(_format_context)->codec, _frame, &frame_finished, packet);
- DCPOMATIC_ENABLE_WARNINGS
- if (decode_result < 0) {
- /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
- some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
- if it overreads the auxiliary data. ffplay carries on if frame_finished is true,
- even in the face of such an error, so I think we should too.
-
- Returning from the method here caused mantis #352.
- */
- LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);
- }
- return make_pair(decode_result, frame_finished);
-}
-
-
void
FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
{
return;
}
- /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
- several times. Make a simple copy so we can alter data and size.
- */
- AVPacket copy_packet = *packet;
+ auto context = _codec_context[stream->index(_format_context)];
- while (copy_packet.size > 0) {
- auto result = decode_audio_packet (stream, ©_packet);
- if (result.first < 0) {
- /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
- some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
- if it overreads the auxiliary data. ffplay carries on if frame_finished is true,
- even in the face of such an error, so I think we should too.
-
- Returning from the method here caused mantis #352.
- */
- }
+ int r = avcodec_send_packet (context, packet);
+ if (r < 0) {
+ /* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
+ * Likewise I think AVERROR_EOF should not happen.
+ */
+ throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::decode_and_process_audio_packet"), r);
+ }
- if (result.second) {
- process_audio_frame (stream);
+ while (r >= 0) {
+ r = avcodec_receive_frame (context, _frame);
+ if (r == AVERROR(EAGAIN)) {
+ /* More input is required */
+ return;
}
- if (result.first) {
- break;
- }
-
- copy_packet.data += result.first;
- copy_packet.size -= result.first;
+ /* We choose to be relaxed here about other errors; it seems that there may be valid
+ * data to decode even if an error occurred. #352 may be related (though this was
+ * when we were using an old version of the FFmpeg API).
+ */
+ process_audio_frame (stream);
}
}
{
DCPOMATIC_ASSERT (_video_stream);
- int frame_finished;
-DCPOMATIC_DISABLE_WARNINGS
- if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, packet) < 0 || !frame_finished) {
+ auto context = video_codec_context();
+
+ int r = avcodec_send_packet (context, packet);
+ if (r < 0 && !(r == AVERROR_EOF && !packet)) {
+ /* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
+ * AVERROR_EOF can happen during flush if we've already sent a flush packet.
+ */
+ throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::decode_and_process_video_packet"), r);
+ }
+
+ r = avcodec_receive_frame (context, _frame);
+ if (r == AVERROR(EAGAIN) || r == AVERROR_EOF) {
+ /* More input is required, or no more frames are coming */
return false;
}
-DCPOMATIC_ENABLE_WARNINGS
+
+ /* We assume we'll only get one frame here, which I think is safe */
boost::mutex::scoped_lock lm (_filter_graphs_mutex);
bool flush ();
+ static std::shared_ptr<AudioBuffers> deinterleave_audio (AVFrame* frame);
+
AVSampleFormat audio_sample_format (std::shared_ptr<FFmpegAudioStream> stream) const;
int bytes_per_audio_sample (std::shared_ptr<FFmpegAudioStream> stream) const;
std::shared_ptr<FFmpegAudioStream> audio_stream_from_index (int index) const;
- std::pair<int, bool> decode_audio_packet (std::shared_ptr<FFmpegAudioStream> stream, AVPacket* packet);
void process_audio_frame (std::shared_ptr<FFmpegAudioStream> stream);
bool decode_and_process_video_packet (AVPacket* packet);
void process_ass_subtitle (std::string ass, dcpomatic::ContentTime from);
void maybe_add_subtitle ();
- std::shared_ptr<AudioBuffers> deinterleave_audio (std::shared_ptr<FFmpegAudioStream> stream) const;
std::list<std::shared_ptr<VideoFilterGraph> > _filter_graphs;
boost::mutex _filter_graphs_mutex;
for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
auto s = _format_context->streams[i];
-DCPOMATIC_DISABLE_WARNINGS
- if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+ if (s->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
/* This is a hack; sometimes it seems that _audio_codec_context->channel_layout isn't set up,
so bodge it here. No idea why we should have to do this.
*/
- if (s->codec->channel_layout == 0) {
- s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels);
+ if (s->codecpar->channel_layout == 0) {
+ s->codecpar->channel_layout = av_get_default_channel_layout (s->codecpar->channels);
}
+ auto codec = _codec_context[i]->codec;
+
DCPOMATIC_ASSERT (_format_context->duration != AV_NOPTS_VALUE);
- DCPOMATIC_ASSERT (s->codec->codec);
- DCPOMATIC_ASSERT (s->codec->codec->name);
+ DCPOMATIC_ASSERT (codec);
+ DCPOMATIC_ASSERT (codec->name);
_audio_streams.push_back (
make_shared<FFmpegAudioStream>(
stream_name (s),
- s->codec->codec->name,
+ codec->name,
s->id,
- s->codec->sample_rate,
- llrint ((double(_format_context->duration) / AV_TIME_BASE) * s->codec->sample_rate),
- s->codec->channels
+ s->codecpar->sample_rate,
+ llrint ((double(_format_context->duration) / AV_TIME_BASE) * s->codecpar->sample_rate),
+ s->codecpar->channels
)
);
- } else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
- _subtitle_streams.push_back (shared_ptr<FFmpegSubtitleStream> (new FFmpegSubtitleStream (subtitle_stream_name (s), s->id)));
+ } else if (s->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+ _subtitle_streams.push_back (make_shared<FFmpegSubtitleStream>(subtitle_stream_name (s), s->id));
}
}
}
}
- auto context = _format_context->streams[packet->stream_index]->codec;
-DCPOMATIC_ENABLE_WARNINGS
+ auto context = _codec_context[packet->stream_index];
if (_video_stream && packet->stream_index == _video_stream.get()) {
video_packet (context, temporal_reference, packet);
}
if (_video_stream) {
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = nullptr;
- packet.size = 0;
-DCPOMATIC_DISABLE_WARNINGS
- auto context = _format_context->streams[*_video_stream]->codec;
-DCPOMATIC_ENABLE_WARNINGS
- while (video_packet(context, temporal_reference, &packet)) {}
+ auto context = _codec_context[_video_stream.get()];
+ while (video_packet(context, temporal_reference, nullptr)) {}
}
for (auto i: _audio_streams) {
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = nullptr;
- packet.size = 0;
-DCPOMATIC_DISABLE_WARNINGS
- audio_packet (i->stream(_format_context)->codec, i, &packet);
-DCPOMATIC_ENABLE_WARNINGS
+ auto context = _codec_context[i->index(_format_context)];
+ audio_packet(context, i, nullptr);
}
if (_video_stream) {
return false;
}
- int frame_finished;
-DCPOMATIC_DISABLE_WARNINGS
- if (avcodec_decode_video2 (context, _frame, &frame_finished, packet) < 0 || !frame_finished) {
+ int r = avcodec_send_packet (context, packet);
+ if (r < 0 && !(r == AVERROR_EOF && !packet)) {
+ /* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
+ * AVERROR_EOF can happen during flush if we've already sent a flush packet.
+ */
+ throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegExaminer::video_packet"), r);
+ }
+
+ r = avcodec_receive_frame (context, _frame);
+ if (r == AVERROR(EAGAIN)) {
+ /* More input is required */
+ return true;
+ } else if (r == AVERROR_EOF) {
+ /* No more output is coming */
return false;
}
-DCPOMATIC_ENABLE_WARNINGS
if (!_first_video) {
_first_video = frame_time (_format_context->streams[_video_stream.get()]);
return;
}
- int frame_finished;
-DCPOMATIC_DISABLE_WARNINGS
- if (avcodec_decode_audio4 (context, _frame, &frame_finished, packet) >= 0 && frame_finished) {
-DCPOMATIC_ENABLE_WARNINGS
- stream->first_audio = frame_time (stream->stream (_format_context));
+ int r = avcodec_send_packet (context, packet);
+ if (r < 0 && !(r == AVERROR_EOF && !packet) && r != AVERROR(EAGAIN)) {
+ /* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
+ * AVERROR_EOF can happen during flush if we've already sent a flush packet.
+ * EAGAIN means we need to do avcodec_receive_frame, so just carry on and do that.
+ */
+ throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegExaminer::audio_packet"), r);
+ }
+
+ if (avcodec_receive_frame (context, _frame) < 0) {
+ return;
}
+
+ stream->first_audio = frame_time (stream->stream(_format_context));
}
{
optional<ContentTime> t;
-DCPOMATIC_DISABLE_WARNINGS
- int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
-DCPOMATIC_ENABLE_WARNINGS
+ int64_t const bet = _frame->best_effort_timestamp;
if (bet != AV_NOPTS_VALUE) {
t = ContentTime::from_seconds (bet * av_q2d (s->time_base));
}
#include "ffmpeg_encoder.h"
+#include "ffmpeg_wrapper.h"
#include "film.h"
#include "job.h"
#include "player.h"
{
_codec = avcodec_find_encoder_by_name (codec_name.c_str());
if (!_codec) {
- throw runtime_error (String::compose("could not find FFmpeg encoder %1", codec_name));
+ throw EncodeError (String::compose("avcodec_find_encoder_by_name failed for %1", codec_name));
}
_codec_context = avcodec_alloc_context3 (_codec);
if (!_codec_context) {
- throw runtime_error ("could not allocate FFmpeg audio context");
+ throw std::bad_alloc ();
}
- avcodec_get_context_defaults3 (_codec_context, _codec);
-
/* XXX: configurable */
_codec_context->bit_rate = channels * 128 * 1024;
_codec_context->sample_fmt = sample_format;
_codec_context->channel_layout = av_get_default_channel_layout (channels);
_codec_context->channels = channels;
+ int r = avcodec_open2 (_codec_context, _codec, 0);
+ if (r < 0) {
+ throw EncodeError (N_("avcodec_open2"), N_("ExportAudioStream::ExportAudioStream"), r);
+ }
+
_stream = avformat_new_stream (format_context, _codec);
if (!_stream) {
- throw runtime_error ("could not create FFmpeg output audio stream");
+ throw EncodeError (N_("avformat_new_stream"), N_("ExportAudioStream::ExportAudioStream"));
}
_stream->id = stream_index;
_stream->disposition |= AV_DISPOSITION_DEFAULT;
-DCPOMATIC_DISABLE_WARNINGS
- _stream->codec = _codec_context;
-DCPOMATIC_ENABLE_WARNINGS
-
- int r = avcodec_open2 (_codec_context, _codec, 0);
+ r = avcodec_parameters_from_context (_stream->codecpar, _codec_context);
if (r < 0) {
- char buffer[256];
- av_strerror (r, buffer, sizeof(buffer));
- throw runtime_error (String::compose("could not open FFmpeg audio codec (%1)", buffer));
+ throw EncodeError (N_("avcodec_parameters_from_context"), N_("ExportAudioStream::ExportAudioStream"), r);
}
}
bool flush ()
{
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = 0;
- packet.size = 0;
- bool flushed = false;
-
- int got_packet;
-DCPOMATIC_DISABLE_WARNINGS
- avcodec_encode_audio2 (_codec_context, &packet, 0, &got_packet);
-DCPOMATIC_ENABLE_WARNINGS
- if (got_packet) {
- packet.stream_index = 0;
- av_interleaved_write_frame (_format_context, &packet);
- } else {
- flushed = true;
+ int r = avcodec_send_frame (_codec_context, nullptr);
+ if (r < 0 && r != AVERROR_EOF) {
+ /* We get EOF if we've already flushed the stream once */
+ throw EncodeError (N_("avcodec_send_frame"), N_("ExportAudioStream::flush"), r);
+ }
+
+ ffmpeg::Packet packet;
+ r = avcodec_receive_packet (_codec_context, packet.get());
+ if (r == AVERROR_EOF) {
+ return true;
+ } else if (r < 0) {
+ throw EncodeError (N_("avcodec_receive_packet"), N_("ExportAudioStream::flush"), r);
}
- av_packet_unref (&packet);
- return flushed;
+
+ packet->stream_index = _stream_index;
+ av_interleaved_write_frame (_format_context, packet.get());
+ return false;
}
void write (int size, int channel_offset, int channels, float** data, int64_t sample_offset)
DCPOMATIC_ASSERT (samples);
frame->nb_samples = size;
+ frame->format = _codec_context->sample_fmt;
+ frame->channels = channels;
int r = avcodec_fill_audio_frame (frame, channels, _codec_context->sample_fmt, (const uint8_t *) samples, buffer_size, 0);
DCPOMATIC_ASSERT (r >= 0);
DCPOMATIC_ASSERT (_codec_context->time_base.num == 1);
frame->pts = sample_offset * _codec_context->time_base.den / _codec_context->sample_rate;
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = 0;
- packet.size = 0;
- int got_packet;
-
- DCPOMATIC_DISABLE_WARNINGS
- if (avcodec_encode_audio2 (_codec_context, &packet, frame, &got_packet) < 0) {
- throw EncodeError ("FFmpeg audio encode failed");
+ r = avcodec_send_frame (_codec_context, frame);
+ av_free (samples);
+ av_frame_free (&frame);
+ if (r < 0) {
+ throw EncodeError (N_("avcodec_send_frame"), N_("ExportAudioStream::write"), r);
}
- DCPOMATIC_ENABLE_WARNINGS
- if (got_packet && packet.size) {
- packet.stream_index = _stream_index;
- av_interleaved_write_frame (_format_context, &packet);
- av_packet_unref (&packet);
+ ffmpeg::Packet packet;
+ r = avcodec_receive_packet (_codec_context, packet.get());
+ if (r < 0 && r != AVERROR(EAGAIN)) {
+ throw EncodeError (N_("avcodec_receive_packet"), N_("ExportAudioStream::write"), r);
+ } else if (r >= 0) {
+ packet->stream_index = _stream_index;
+ av_interleaved_write_frame (_format_context, packet.get());
}
-
- av_free (samples);
- av_frame_free (&frame);
}
private:
int r = avformat_alloc_output_context2 (&_format_context, 0, 0, _output.string().c_str());
if (!_format_context) {
- throw runtime_error (String::compose("could not allocate FFmpeg format context (%1)", r));
+ throw EncodeError (N_("avformat_alloc_output_context2"), "FFmpegFileEncoder::FFmpegFileEncoder", r);
}
setup_video ();
r = avio_open_boost (&_format_context->pb, _output, AVIO_FLAG_WRITE);
if (r < 0) {
- throw runtime_error (String::compose("could not open FFmpeg output file %1 (%2)", _output.string(), r));
+ throw EncodeError (String::compose(_("Could not open output file %1 (%2)"), _output.string(), r));
}
AVDictionary* options = nullptr;
- if (avformat_write_header (_format_context, &options) < 0) {
- throw runtime_error ("could not write header to FFmpeg output file");
+ r = avformat_write_header (_format_context, &options);
+ if (r < 0) {
+ throw EncodeError (N_("avformat_write_header"), N_("FFmpegFileEncoder::FFmpegFileEncoder"), r);
}
_pending_audio.reset (new AudioBuffers(channels, 0));
{
_video_codec = avcodec_find_encoder_by_name (_video_codec_name.c_str());
if (!_video_codec) {
- throw runtime_error (String::compose ("could not find FFmpeg encoder %1", _video_codec_name));
+ throw EncodeError (String::compose("avcodec_find_encoder_by_name failed for %1", _video_codec_name));
}
_video_codec_context = avcodec_alloc_context3 (_video_codec);
if (!_video_codec_context) {
- throw runtime_error ("could not allocate FFmpeg video context");
+ throw std::bad_alloc ();
}
- avcodec_get_context_defaults3 (_video_codec_context, _video_codec);
-
/* Variable quantisation */
_video_codec_context->global_quality = 0;
_video_codec_context->width = _video_frame_size.width;
_video_codec_context->pix_fmt = _pixel_format;
_video_codec_context->flags |= AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_GLOBAL_HEADER;
+ if (avcodec_open2 (_video_codec_context, _video_codec, &_video_options) < 0) {
+ throw EncodeError (N_("avcodec_open2"), N_("FFmpegFileEncoder::setup_video"));
+ }
+
_video_stream = avformat_new_stream (_format_context, _video_codec);
if (!_video_stream) {
- throw runtime_error ("could not create FFmpeg output video stream");
+ throw EncodeError (N_("avformat_new_stream"), N_("FFmpegFileEncoder::setup_video"));
}
-DCPOMATIC_DISABLE_WARNINGS
_video_stream->id = _video_stream_index;
- _video_stream->codec = _video_codec_context;
-DCPOMATIC_ENABLE_WARNINGS
-
- if (avcodec_open2 (_video_codec_context, _video_codec, &_video_options) < 0) {
- throw runtime_error ("could not open FFmpeg video codec");
+ int r = avcodec_parameters_from_context (_video_stream->codecpar, _video_codec_context);
+ if (r < 0) {
+ throw EncodeError (N_("avcodec_parameters_from_context"), N_("FFmpegFileEncoder::setup_video"), r);
}
}
bool flushed_audio = false;
while (!flushed_video || !flushed_audio) {
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = 0;
- packet.size = 0;
-
- int got_packet;
-DCPOMATIC_DISABLE_WARNINGS
- avcodec_encode_video2 (_video_codec_context, &packet, 0, &got_packet);
-DCPOMATIC_ENABLE_WARNINGS
- if (got_packet) {
- packet.stream_index = 0;
- av_interleaved_write_frame (_format_context, &packet);
- } else {
+ int r = avcodec_send_frame (_video_codec_context, nullptr);
+ if (r < 0 && r != AVERROR_EOF) {
+ /* We get EOF if we've already flushed the stream once */
+ throw EncodeError (N_("avcodec_send_frame"), N_("FFmpegFileEncoder::flush"), r);
+ }
+
+ ffmpeg::Packet packet;
+ r = avcodec_receive_packet (_video_codec_context, packet.get());
+ if (r == AVERROR_EOF) {
flushed_video = true;
+ } else if (r < 0) {
+ throw EncodeError (N_("avcodec_receive_packet"), N_("FFmpegFileEncoder::flush"), r);
+ } else {
+ packet->stream_index = _video_stream_index;
+ av_interleaved_write_frame (_format_context, packet.get());
}
- av_packet_unref (&packet);
flushed_audio = true;
for (auto i: _audio_streams) {
DCPOMATIC_ASSERT (_video_stream->time_base.num == 1);
frame->pts = time.get() * _video_stream->time_base.den / DCPTime::HZ;
- AVPacket packet;
- av_init_packet (&packet);
- packet.data = 0;
- packet.size = 0;
-
- int got_packet;
-DCPOMATIC_DISABLE_WARNINGS
- if (avcodec_encode_video2 (_video_codec_context, &packet, frame, &got_packet) < 0) {
- throw EncodeError ("FFmpeg video encode failed");
+ int r = avcodec_send_frame (_video_codec_context, frame);
+ av_frame_free (&frame);
+ if (r < 0) {
+ throw EncodeError (N_("avcodec_send_frame"), N_("FFmpegFileEncoder::video"), r);
}
-DCPOMATIC_ENABLE_WARNINGS
- if (got_packet && packet.size) {
- packet.stream_index = _video_stream_index;
- av_interleaved_write_frame (_format_context, &packet);
- av_packet_unref (&packet);
+ ffmpeg::Packet packet;
+ r = avcodec_receive_packet (_video_codec_context, packet.get());
+ if (r < 0 && r != AVERROR(EAGAIN)) {
+ throw EncodeError (N_("avcodec_receive_packet"), N_("FFmpegFileEncoder::video"), r);
+ } else if (r >= 0) {
+ packet->stream_index = _video_stream_index;
+ av_interleaved_write_frame (_format_context, packet.get());
}
-
- av_frame_free (&frame);
-
}
*/
-#include "ffmpeg_image_proxy.h"
+
+#include "compose.hpp"
#include "cross.h"
-#include "exceptions.h"
#include "dcpomatic_socket.h"
+#include "exceptions.h"
+#include "ffmpeg_image_proxy.h"
#include "image.h"
-#include "compose.hpp"
#include "util.h"
#include "warnings.h"
#include <dcp/raw_convert.h>
#include "i18n.h"
-using std::string;
+
using std::cout;
-using std::pair;
-using std::min;
using std::make_pair;
+using std::make_shared;
+using std::min;
+using std::pair;
using std::shared_ptr;
+using std::string;
using boost::optional;
using std::dynamic_pointer_cast;
using dcp::raw_convert;
+
FFmpegImageProxy::FFmpegImageProxy (boost::filesystem::path path, VideoRange video_range)
: _data (path)
, _video_range (video_range)
return _pos;
}
-DCPOMATIC_DISABLE_WARNINGS
ImageProxy::Result
FFmpegImageProxy::image (optional<dcp::Size>) const
{
+ auto constexpr name_for_errors = "FFmpegImageProxy::image";
+
boost::mutex::scoped_lock lm (_mutex);
if (_image) {
}
}
- if (avformat_find_stream_info(format_context, 0) < 0) {
- throw DecodeError (_("could not find stream information"));
+ int r = avformat_find_stream_info(format_context, 0);
+ if (r < 0) {
+		throw DecodeError (N_("avformat_find_stream_info"), name_for_errors, r);
}
DCPOMATIC_ASSERT (format_context->nb_streams == 1);
AVFrame* frame = av_frame_alloc ();
if (!frame) {
- throw DecodeError (N_("could not allocate frame"));
+		throw std::bad_alloc ();
}
- AVCodecContext* codec_context = format_context->streams[0]->codec;
- AVCodec* codec = avcodec_find_decoder (codec_context->codec_id);
+ auto codec = avcodec_find_decoder (format_context->streams[0]->codecpar->codec_id);
DCPOMATIC_ASSERT (codec);
- if (avcodec_open2 (codec_context, codec, 0) < 0) {
- throw DecodeError (N_("could not open decoder"));
+ auto context = avcodec_alloc_context3 (codec);
+ if (!context) {
+ throw DecodeError (N_("avcodec_alloc_context3"), name_for_errors);
+ }
+
+ r = avcodec_open2 (context, codec, 0);
+ if (r < 0) {
+ throw DecodeError (N_("avcodec_open2"), name_for_errors, r);
}
AVPacket packet;
- int r = av_read_frame (format_context, &packet);
+ r = av_read_frame (format_context, &packet);
+ if (r < 0) {
+ throw DecodeError (N_("av_read_frame"), name_for_errors, r);
+ }
+
+ r = avcodec_send_packet (context, &packet);
if (r < 0) {
- throw DecodeError (N_("could not read frame"));
+ throw DecodeError (N_("avcodec_send_packet"), name_for_errors, r);
}
- int frame_finished;
- if (avcodec_decode_video2(codec_context, frame, &frame_finished, &packet) < 0 || !frame_finished) {
- throw DecodeError (N_("could not decode video"));
+ r = avcodec_receive_frame (context, frame);
+ if (r < 0) {
+ throw DecodeError (N_("avcodec_receive_frame"), name_for_errors, r);
}
- AVPixelFormat const pix_fmt = static_cast<AVPixelFormat>(frame->format);
+ auto const pix_fmt = static_cast<AVPixelFormat>(frame->format);
- _image.reset (new Image(frame));
+ _image = make_shared<Image>(frame);
if (_video_range == VideoRange::VIDEO && av_pix_fmt_desc_get(pix_fmt)->flags & AV_PIX_FMT_FLAG_RGB) {
/* Asking for the video range to be converted by libswscale (in Image) will not work for
* RGB sources since that method only processes video range in YUV and greyscale. So we have
av_packet_unref (&packet);
av_frame_free (&frame);
- avcodec_close (codec_context);
+ avcodec_free_context (&context);
avformat_close_input (&format_context);
av_free (avio_context->buffer);
av_free (avio_context);
return Result (_image, 0);
}
-DCPOMATIC_ENABLE_WARNINGS
void
FFmpegImageProxy::add_metadata (xmlpp::Node* node) const
--- /dev/null
+/*
+ Copyright (C) 2021 Carl Hetherington <cth@carlh.net>
+
+ This file is part of DCP-o-matic.
+
+ DCP-o-matic is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ DCP-o-matic is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+
+extern "C" {
+#include <libavformat/avformat.h>
+}
+#include "ffmpeg_wrapper.h"
+#include <new>
+
+
+using namespace ffmpeg;
+
+
+Packet::Packet ()
+{
+ _packet = av_packet_alloc ();
+ if (!_packet) {
+ throw std::bad_alloc ();
+ }
+}
+
+
+Packet::~Packet ()
+{
+ av_packet_free (&_packet);
+}
+
--- /dev/null
+/*
+ Copyright (C) 2021 Carl Hetherington <cth@carlh.net>
+
+ This file is part of DCP-o-matic.
+
+ DCP-o-matic is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ DCP-o-matic is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+
+struct AVPacket;
+
+
+namespace ffmpeg {
+
+class Packet
+{
+public:
+ Packet ();
+ ~Packet ();
+
+ Packet (Packet const&) = delete;
+ Packet& operator= (Packet const&) = delete;
+
+ AVPacket* operator->() const {
+ return _packet;
+ }
+
+ AVPacket* get () const {
+ return _packet;
+ }
+
+private:
+ AVPacket* _packet = nullptr;
+};
+
+}
+
*/
+
/** @file src/lib/filter_graph.cc
* @brief A graph of FFmpeg filters.
*/
+
#include "filter_graph.h"
#include "filter.h"
#include "exceptions.h"
#include "i18n.h"
+
using std::string;
using std::list;
using std::pair;
using std::weak_ptr;
using dcp::Size;
+
/** Construct a FilterGraph for the settings in a piece of content */
FilterGraph::FilterGraph ()
: _graph (0)
throw DecodeError (N_("could not create filter graph."));
}
- AVFilter const * buffer_src = avfilter_get_by_name (src_name().c_str());
+ auto const buffer_src = avfilter_get_by_name (src_name().c_str());
if (!buffer_src) {
throw DecodeError (N_("could not find buffer src filter"));
}
- AVFilter const * buffer_sink = avfilter_get_by_name (sink_name().c_str());
+ auto const buffer_sink = avfilter_get_by_name (sink_name().c_str());
if (!buffer_sink) {
throw DecodeError (N_("Could not create buffer sink filter"));
}
throw DecodeError (N_("could not create buffer source"));
}
- void* sink_params = sink_parameters ();
-
- if (avfilter_graph_create_filter (&_buffer_sink_context, buffer_sink, N_("out"), 0, sink_params, _graph) < 0) {
+ if (avfilter_graph_create_filter (&_buffer_sink_context, buffer_sink, N_("out"), nullptr, nullptr, _graph) < 0) {
throw DecodeError (N_("could not create buffer sink."));
}
- av_free (sink_params);
+ set_parameters (_buffer_sink_context);
- AVFilterInOut* outputs = avfilter_inout_alloc ();
+ auto outputs = avfilter_inout_alloc ();
outputs->name = av_strdup(N_("in"));
outputs->filter_ctx = _buffer_src_context;
outputs->pad_idx = 0;
outputs->next = 0;
- AVFilterInOut* inputs = avfilter_inout_alloc ();
+ auto inputs = avfilter_inout_alloc ();
inputs->name = av_strdup(N_("out"));
inputs->filter_ctx = _buffer_sink_context;
inputs->pad_idx = 0;
int e = avfilter_graph_config (_graph, 0);
if (e < 0) {
- throw DecodeError (String::compose (N_("could not configure filter graph (%1)"), e));
+ throw DecodeError (String::compose(N_("could not configure filter graph (%1)"), e));
}
}
+
FilterGraph::~FilterGraph ()
{
if (_frame) {
}
}
+
AVFilterContext *
FilterGraph::get (string name)
{
- return avfilter_graph_get_filter (_graph, name.c_str ());
+ return avfilter_graph_get_filter (_graph, name.c_str());
}
protected:
virtual std::string src_parameters () const = 0;
virtual std::string src_name () const = 0;
- virtual void* sink_parameters () const = 0;
+ virtual void set_parameters (AVFilterContext* context) const = 0;
virtual std::string sink_name () const = 0;
AVFilterGraph* _graph;
Image::Image (AVFrame* frame)
: _size (frame->width, frame->height)
- , _pixel_format (static_cast<AVPixelFormat> (frame->format))
+ , _pixel_format (static_cast<AVPixelFormat>(frame->format))
, _aligned (true)
{
+ DCPOMATIC_ASSERT (_pixel_format != AV_PIX_FMT_NONE);
+
allocate ();
for (int i = 0; i < planes(); ++i) {
extern "C" {
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
+#include <libavutil/opt.h>
}
#include "i18n.h"
{
list<pair<shared_ptr<Image>, int64_t>> images;
-DCPOMATIC_DISABLE_WARNINGS
if (_copy) {
- images.push_back (make_pair(make_shared<Image>(frame), av_frame_get_best_effort_timestamp (frame)));
+ images.push_back (make_pair(make_shared<Image>(frame), frame->best_effort_timestamp));
} else {
int r = av_buffersrc_write_frame (_buffer_src_context, frame);
if (r < 0) {
break;
}
- images.push_back (make_pair(make_shared<Image>(_frame), av_frame_get_best_effort_timestamp (_frame)));
+ images.push_back (make_pair(make_shared<Image>(_frame), frame->best_effort_timestamp));
av_frame_unref (_frame);
}
}
-DCPOMATIC_ENABLE_WARNINGS
return images;
}
}
-void *
-VideoFilterGraph::sink_parameters () const
+void
+VideoFilterGraph::set_parameters (AVFilterContext* context) const
{
- auto sink_params = av_buffersink_params_alloc ();
- auto pixel_fmts = new AVPixelFormat[2];
- pixel_fmts[0] = _pixel_format;
- pixel_fmts[1] = AV_PIX_FMT_NONE;
- sink_params->pixel_fmts = pixel_fmts;
- return sink_params;
+ AVPixelFormat pix_fmts[] = { _pixel_format, AV_PIX_FMT_NONE };
+ int r = av_opt_set_int_list (context, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ DCPOMATIC_ASSERT (r >= 0);
}
std::list<std::pair<std::shared_ptr<Image>, int64_t>> process (AVFrame * frame);
protected:
- std::string src_parameters () const;
- std::string src_name () const;
- void* sink_parameters () const;
- std::string sink_name () const;
+ std::string src_parameters () const override;
+ std::string src_name () const override;
+ void set_parameters (AVFilterContext* context) const override;
+ std::string sink_name () const override;
private:
dcp::Size _size; ///< size of the images that this chain can process
ffmpeg_content.cc
ffmpeg_decoder.cc
ffmpeg_encoder.cc
- ffmpeg_file_encoder.cc
ffmpeg_examiner.cc
+ ffmpeg_file_encoder.cc
+ ffmpeg_image_proxy.cc
ffmpeg_stream.cc
ffmpeg_subtitle_stream.cc
+ ffmpeg_wrapper.cc
film.cc
filter.cc
- ffmpeg_image_proxy.cc
font.cc
font_data.cc
frame_interval_checker.cc
BOOST_CHECK_EQUAL (examiner->audio_streams()[0]->frame_rate(), 48000);
BOOST_CHECK_EQUAL (examiner->audio_streams()[0]->channels(), 2);
BOOST_CHECK_EQUAL (examiner->audio_streams()[1]->frame_rate(), 48000);
- BOOST_CHECK_EQUAL (examiner->audio_streams()[1]->channels(), 6);
+ BOOST_CHECK_EQUAL (examiner->audio_streams()[1]->channels(), 5);
}