using boost::dynamic_pointer_cast;
-using libdcp::Size;
-FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, DecodeOptions o, Job* j)
- : Decoder (f, o, j)
- , VideoDecoder (f, o, j)
- , AudioDecoder (f, o, j)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, DecodeOptions o)
+ : Decoder (f, o)
+ , VideoDecoder (f, o)
+ , AudioDecoder (f, o)
, _format_context (0)
, _video_stream (-1)
, _frame (0)
/* This is a hack; sometimes it seems that _audio_codec_context->channel_layout isn't set up,
so bodge it here. No idea why we should have to do this.
*/
if (s->codec->channel_layout == 0) {
s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels);
}
void
FFmpegDecoder::setup_subtitle ()
{
- if (!_subtitle_stream) {
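+ /* Do nothing if there is no subtitle stream, or if its id does not
+    correspond to a stream in this format context.
+ */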
+ if (!_subtitle_stream || _subtitle_stream->id() >= _format_context->nb_streams) {
return;
}
int frame_finished;
- while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
- filter_and_emit_video (_frame);
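+ /* Skip decoding of the remaining video frames when video is not wanted. */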
+ if (_opt.decode_video) {
+ while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
+ filter_and_emit_video (_frame);
+ }
}
if (_audio_stream && _opt.decode_audio) {
);
assert (_audio_codec_context->channels == _film->audio_channels());
- Audio (deinterleave_audio (_frame->data[0], data_size));
+ Audio (deinterleave_audio (_frame->data, data_size));
}
}
shared_ptr<FFmpegAudioStream> ffa = dynamic_pointer_cast<FFmpegAudioStream> (_audio_stream);
- if (_packet.stream_index == _video_stream) {
+ if (_packet.stream_index == _video_stream && _opt.decode_video) {
int frame_finished;
int const r = avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet);
was before this packet. Until then audio is thrown away.
*/
- if (_first_video && _first_video.get() <= source_pts_seconds) {
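+ /* If we are not decoding video there is no first video frame to wait for,
+    so use all the audio that we see.
+ */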
+ if ((_first_video && _first_video.get() <= source_pts_seconds) || !_opt.decode_video) {
- if (!_first_audio) {
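+ /* Only record the first audio PTS when video is also being decoded; it is
+    only needed to synchronise audio with the first video frame.
+ */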
+ if (!_first_audio && _opt.decode_video) {
_first_audio = source_pts_seconds;
/* This is our first audio frame, and if we've arrived here we must have had our
);
assert (_audio_codec_context->channels == _film->audio_channels());
- Audio (deinterleave_audio (_frame->data[0], data_size));
+ Audio (deinterleave_audio (_frame->data, data_size));
}
}
return false;
}
+/** @param data pointer to an array of pointers to sample buffers.
+ *  Only the first buffer is used for non-planar (interleaved) data; planar data has one buffer per channel.
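+ *
+ *  For example, interleaved 16-bit stereo is laid out L0 R0 L1 R1 ... in
+ *  data[0], while the planar equivalent has L0 L1 ... in data[0] and
+ *  R0 R1 ... in data[1].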
+ */
shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (uint8_t* data, int size)
+FFmpegDecoder::deinterleave_audio (uint8_t** data, int size)
{
assert (_film->audio_channels());
assert (bytes_per_audio_sample());
switch (audio_sample_format()) {
case AV_SAMPLE_FMT_S16:
{
- int16_t* p = reinterpret_cast<int16_t *> (data);
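+ /* Interleaved: samples for all channels alternate within data[0], so walk
+    the buffer giving one sample to each channel in turn.
+ */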
+ int16_t* p = reinterpret_cast<int16_t *> (data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_S16P:
{
- int16_t* p = reinterpret_cast<int16_t *> (data);
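+ /* Planar: one buffer per channel, so convert each plane separately. */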
+ int16_t** p = reinterpret_cast<int16_t **> (data);
for (int i = 0; i < _film->audio_channels(); ++i) {
for (int j = 0; j < frames; ++j) {
- audio->data(i)[j] = static_cast<float>(*p++) / (1 << 15);
+ audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
}
}
}
case AV_SAMPLE_FMT_S32:
{
- int32_t* p = reinterpret_cast<int32_t *> (data);
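+ /* Interleaved, as for S16 but with 32-bit samples. */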
+ int32_t* p = reinterpret_cast<int32_t *> (data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_FLT:
{
- float* p = reinterpret_cast<float*> (data);
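+ /* Interleaved float: samples are already in our target range, so just
+    distribute them across the channels.
+ */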
+ float* p = reinterpret_cast<float*> (data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
case AV_SAMPLE_FMT_FLTP:
{
- float* p = reinterpret_cast<float*> (data);
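+ /* Planar float: each channel's plane is already in our output format,
+    so it can be copied straight over.
+ */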
+ float** p = reinterpret_cast<float**> (data);
for (int i = 0; i < _film->audio_channels(); ++i) {
- memcpy (audio->data(i), p, frames * sizeof(float));
- p += frames;
+ memcpy (audio->data(i), p[i], frames * sizeof(float));
}
}
break;
return _audio_codec_context->sample_fmt;
}
-Size
+libdcp::Size
FFmpegDecoder::native_size () const
{
- return Size (_video_codec_context->width, _video_codec_context->height);
+ return libdcp::Size (_video_codec_context->width, _video_codec_context->height);
}
PixelFormat
shared_ptr<FilterGraph> graph;
list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
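+ /* Look for an existing filter graph which can process this frame's size
+    and pixel format; make a new one if none is found.
+ */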
- while (i != _filter_graphs.end() && !(*i)->can_process (Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
+ while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
++i;
}
if (i == _filter_graphs.end ()) {
- graph.reset (new FilterGraph (_film, this, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
+ graph.reset (new FilterGraph (_film, this, libdcp::Size (frame->width, frame->height), (AVPixelFormat) frame->format));
_filter_graphs.push_back (graph);
_film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format));
} else {