#include <stdint.h>
#include <boost/lexical_cast.hpp>
extern "C" {
-#include <tiffio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
using boost::dynamic_pointer_cast;
using libdcp::Size;
+boost::mutex FFmpegDecoder::_mutex;
+
FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, DecodeOptions o)
: Decoder (f, o)
, VideoDecoder (f, o)
FFmpegDecoder::~FFmpegDecoder ()
{
+ boost::mutex::scoped_lock lm (_mutex);
+
if (_audio_codec_context) {
avcodec_close (_audio_codec_context);
}
void
FFmpegDecoder::setup_video ()
{
+ boost::mutex::scoped_lock lm (_mutex);
+
_video_codec_context = _format_context->streams[_video_stream]->codec;
_video_codec = avcodec_find_decoder (_video_codec_context->codec_id);
void
FFmpegDecoder::setup_audio ()
{
+ boost::mutex::scoped_lock lm (_mutex);
+
if (!_audio_stream) {
return;
}
void
FFmpegDecoder::setup_subtitle ()
{
+ boost::mutex::scoped_lock lm (_mutex);
+
if (!_subtitle_stream || _subtitle_stream->id() >= int (_format_context->nb_streams)) {
return;
}
FFmpegDecoder::stream_name (AVStream* s) const
{
stringstream n;
-
- AVDictionaryEntry const * lang = av_dict_get (s->metadata, N_("language"), 0, 0);
- if (lang) {
- n << lang->value;
- }
-
- AVDictionaryEntry const * title = av_dict_get (s->metadata, N_("title"), 0, 0);
- if (title) {
- if (!n.str().empty()) {
- n << N_(" ");
+
+ if (s->metadata) {
+ AVDictionaryEntry const * lang = av_dict_get (s->metadata, N_("language"), 0, 0);
+ if (lang) {
+ n << lang->value;
+ }
+
+ AVDictionaryEntry const * title = av_dict_get (s->metadata, N_("title"), 0, 0);
+ if (title) {
+ if (!n.str().empty()) {
+ n << N_(" ");
+ }
+ n << title->value;
}
- n << title->value;
}
if (n.str().empty()) {
void
FFmpegDecoder::filter_and_emit_video ()
{
+	/* Fetch the frame's best-effort timestamp up front: a frame with no
+	   usable PTS cannot be given a time when emitted, so drop it before
+	   doing any (possibly expensive) filtering work.
+	*/
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+ int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
+ if (bet == AV_NOPTS_VALUE) {
+ _film->log()->log ("Dropping frame without PTS");
+ return;
+ }
shared_ptr<FilterGraph> graph;
- list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
- ++i;
- }
-
- if (i == _filter_graphs.end ()) {
- graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
- _filter_graphs.push_back (graph);
- _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
- } else {
- graph = *i;
+	/* Scope _filter_graphs_mutex to just the lookup/creation of the graph:
+	   graph->process() below can be slow and must not run under the lock.
+	*/
+ {
+ boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+
+	/* Find an existing FilterGraph that can handle this frame's
+	   size and pixel format.
+	*/
+ list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
+ while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
+ ++i;
+ }
+
+ if (i == _filter_graphs.end ()) {
+	/* None found: make a new one via the factory and remember it
+	   for subsequent frames of the same size/format.
+	*/
+ graph = filter_graph_factory (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format);
+ _filter_graphs.push_back (graph);
+ _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
+ } else {
+ graph = *i;
+ }
}
list<shared_ptr<Image> > images = graph->process (_frame);
for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
- int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
- if (bet != AV_NOPTS_VALUE) {
- emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base));
- } else {
- _film->log()->log ("Dropping frame without PTS");
- }
+	/* All images produced by the graph for this frame share the source
+	   timestamp, converted to seconds via the stream's time base.
+	*/
+ emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base));
}
}
int frame_finished;
int const decode_result = avcodec_decode_audio4 (_audio_codec_context, _frame, &frame_finished, ©_packet);
- if (decode_result >= 0) {
- if (frame_finished) {
+ if (decode_result < 0) {
+ /* error */
+ break;
+ }
+
+ if (frame_finished) {
- /* Where we are in the source, in seconds */
- double const source_pts_seconds = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base)
- * av_frame_get_best_effort_timestamp(_frame);
-
- int const data_size = av_samples_get_buffer_size (
- 0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1
- );
-
- assert (_audio_codec_context->channels == _film->audio_channels());
- Audio (deinterleave_audio (_frame->data, data_size), source_pts_seconds);
- }
+ /* Where we are in the source, in seconds */
+ double const source_pts_seconds = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base)
+ * av_frame_get_best_effort_timestamp(_frame);
+
+ int const data_size = av_samples_get_buffer_size (
+ 0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1
+ );
- copy_packet.data += decode_result;
- copy_packet.size -= decode_result;
+ assert (_audio_codec_context->channels == _film->audio_channels());
+ Audio (deinterleave_audio (_frame->data, data_size), source_pts_seconds);
}
+
+ copy_packet.data += decode_result;
+ copy_packet.size -= decode_result;
}
}