using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
-using libdcp::Size;
-FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, shared_ptr<const DecodeOptions> o, Job* j)
- : Decoder (f, o, j)
- , VideoDecoder (f, o, j)
- , AudioDecoder (f, o, j)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, DecodeOptions o)
+ : Decoder (f, o)
+ , VideoDecoder (f, o)
+ , AudioDecoder (f, o)
, _format_context (0)
, _video_stream (-1)
, _frame (0)
setup_audio ();
setup_subtitle ();
- if (!o->video_sync) {
+ if (!o.video_sync) {
_first_video = 0;
}
}
/* This is a hack; sometimes it seems that _audio_codec_context->channel_layout isn't set up,
so bodge it here. No idea why we should have to do this.
*/
-
+
if (s->codec->channel_layout == 0) {
s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels);
}
void
FFmpegDecoder::setup_subtitle ()
{
- if (!_subtitle_stream) {
+ if (!_subtitle_stream || _subtitle_stream->id() >= int (_format_context->nb_streams)) {
return;
}
int frame_finished;
- while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
- filter_and_emit_video (_frame);
+ if (_opt.decode_video) {
+ while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
+ filter_and_emit_video (_frame);
+ }
}
- if (_audio_stream && _opt->decode_audio) {
+ if (_audio_stream && _opt.decode_audio) {
while (avcodec_decode_audio4 (_audio_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
int const data_size = av_samples_get_buffer_size (
0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1
);
assert (_audio_codec_context->channels == _film->audio_channels());
- Audio (deinterleave_audio (_frame->data[0], data_size));
+ Audio (deinterleave_audio (_frame->data, data_size));
}
}
shared_ptr<FFmpegAudioStream> ffa = dynamic_pointer_cast<FFmpegAudioStream> (_audio_stream);
- if (_packet.stream_index == _video_stream) {
+ if (_packet.stream_index == _video_stream && _opt.decode_video) {
int frame_finished;
int const r = avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet);
_film->log()->log (String::compose ("Used only %1 bytes of %2 in packet", r, _packet.size));
}
- if (_opt->video_sync) {
+ if (_opt.video_sync) {
out_with_sync ();
} else {
filter_and_emit_video (_frame);
}
}
- } else if (ffa && _packet.stream_index == ffa->id() && _opt->decode_audio) {
+ } else if (ffa && _packet.stream_index == ffa->id() && _opt.decode_audio) {
int frame_finished;
if (avcodec_decode_audio4 (_audio_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
was before this packet. Until then audio is thrown away.
*/
- if (_first_video && _first_video.get() <= source_pts_seconds) {
+ if ((_first_video && _first_video.get() <= source_pts_seconds) || !_opt.decode_video) {
- if (!_first_audio) {
+ if (!_first_audio && _opt.decode_video) {
_first_audio = source_pts_seconds;
/* This is our first audio frame, and if we've arrived here we must have had our
);
assert (_audio_codec_context->channels == _film->audio_channels());
- Audio (deinterleave_audio (_frame->data[0], data_size));
+ Audio (deinterleave_audio (_frame->data, data_size));
}
}
- } else if (_subtitle_stream && _packet.stream_index == _subtitle_stream->id() && _opt->decode_subtitles && _first_video) {
+ } else if (_subtitle_stream && _packet.stream_index == _subtitle_stream->id() && _opt.decode_subtitles && _first_video) {
int got_subtitle;
AVSubtitle sub;
return false;
}
+/** @param data pointer to an array of pointers to sample buffers; only the first
+ * buffer is used for packed (non-planar) data, otherwise there is one buffer per channel.
+ * @param size total number of bytes of sample data, summed over all channels.
+ */
shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (uint8_t* data, int size)
+FFmpegDecoder::deinterleave_audio (uint8_t** data, int size)
{
assert (_film->audio_channels());
assert (bytes_per_audio_sample());
switch (audio_sample_format()) {
case AV_SAMPLE_FMT_S16:
{
- int16_t* p = (int16_t *) data;
+ int16_t* p = reinterpret_cast<int16_t *> (data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
}
break;
+ case AV_SAMPLE_FMT_S16P:
+ {
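+ /* Planar 16-bit signed data: one buffer per channel, scaled into floats in [-1, 1) */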
+ int16_t** p = reinterpret_cast<int16_t **> (data);
+ for (int i = 0; i < _film->audio_channels(); ++i) {
+ for (int j = 0; j < frames; ++j) {
+ audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
+ }
+ }
+ }
+ break;
+
case AV_SAMPLE_FMT_S32:
{
- int32_t* p = (int32_t *) data;
+ int32_t* p = reinterpret_cast<int32_t *> (data[0]);
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
- audio->data(channel)[sample] = float(*p++) / (1 << 31);
+ /* (1 << 31) overflows a 32-bit int, so write the scale factor 2^31 out as a literal */
+ audio->data(channel)[sample] = static_cast<float>(*p++) / 2147483648.0f;
++channel;
if (channel == _film->audio_channels()) {
}
}
}
+ break;
+ case AV_SAMPLE_FMT_FLT:
+ {
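+ /* Packed (interleaved) floats: de-interleave into one buffer per channel; no scaling is needed */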
+ float* p = reinterpret_cast<float*> (data[0]);
+ int sample = 0;
+ int channel = 0;
+ for (int i = 0; i < total_samples; ++i) {
+ audio->data(channel)[sample] = *p++;
+
+ ++channel;
+ if (channel == _film->audio_channels()) {
+ channel = 0;
+ ++sample;
+ }
+ }
+ }
+ break;
+
case AV_SAMPLE_FMT_FLTP:
{
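+ /* Planar floats: one buffer per channel, so each plane can be copied straight into the output */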
- float* p = reinterpret_cast<float*> (data);
+ float** p = reinterpret_cast<float**> (data);
for (int i = 0; i < _film->audio_channels(); ++i) {
- memcpy (audio->data(i), p, frames * sizeof(float));
- p += frames;
+ memcpy (audio->data(i), p[i], frames * sizeof(float));
}
}
break;
return _audio_codec_context->sample_fmt;
}
-Size
+libdcp::Size
FFmpegDecoder::native_size () const
{
- return Size (_video_codec_context->width, _video_codec_context->height);
+ return libdcp::Size (_video_codec_context->width, _video_codec_context->height);
}
PixelFormat
shared_ptr<FilterGraph> graph;
list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process (Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
+ while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
++i;
}
if (i == _filter_graphs.end ()) {
- graph.reset (new FilterGraph (_film, this, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
+ graph.reset (new FilterGraph (_film, this, libdcp::Size (frame->width, frame->height), (AVPixelFormat) frame->format));
_filter_graphs.push_back (graph);
_film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format));
} else {
list<shared_ptr<Image> > images = graph->process (frame);
- double const st = av_frame_get_best_effort_timestamp(_frame) * av_q2d (_format_context->streams[_video_stream]->time_base);
-
for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
- emit_video (*i, st);
+ emit_video (*i, frame_time ());
}
}
String::compose (
"Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)",
out_pts_seconds, video_frame(), source_pts_seconds, frames_per_second()
- )
+ )
);
}
}
return (double(_format_context->duration) / AV_TIME_BASE) * frames_per_second();
}
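+/** @return the best-effort timestamp of the most recently decoded video frame,
+ * in seconds from the start of its stream.
+ */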
+double
+FFmpegDecoder::frame_time () const
+{
+ return av_frame_get_best_effort_timestamp(_frame) * av_q2d (_format_context->streams[_video_stream]->time_base);
+}
+