Rename PRORES -> PRORES_HQ
diff --git a/src/lib/ffmpeg_file_encoder.cc b/src/lib/ffmpeg_file_encoder.cc
index 99c974d3ea049dbcdd841e38a0407c6cb9d49e75..17f6f55cb01413ddebdda3b52df944ce95f8c092 100644
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2017-2018 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2017-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
 
 */
 
+
+#include "compose.hpp"
+#include "cross.h"
 #include "ffmpeg_encoder.h"
+#include "ffmpeg_wrapper.h"
 #include "film.h"
+#include "image.h"
 #include "job.h"
+#include "log.h"
 #include "player.h"
 #include "player_video.h"
-#include "log.h"
-#include "image.h"
-#include "cross.h"
-#include "butler.h"
-#include "compose.hpp"
+extern "C" {
+#include <libavutil/channel_layout.h>
+}
 #include <iostream>
 
 #include "i18n.h"
 
-using std::string;
-using std::runtime_error;
+
 using std::cout;
-using std::pair;
-using boost::shared_ptr;
+using std::make_shared;
+using std::shared_ptr;
+using std::string;
 using boost::bind;
-using boost::weak_ptr;
+using namespace dcpomatic;
+#if BOOST_VERSION >= 106100
+using namespace boost::placeholders;
+#endif
+
 
 int FFmpegFileEncoder::_video_stream_index = 0;
-int FFmpegFileEncoder::_audio_stream_index = 1;
+int FFmpegFileEncoder::_audio_stream_index_base = 1;
+
 
-static AVPixelFormat
-force_pixel_format (AVPixelFormat, AVPixelFormat out)
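+/** State for one audio stream in the output file: its codec context and the corresponding AVStream */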
+class ExportAudioStream
 {
-       return out;
-}
+public:
+       ExportAudioStream (string codec_name, int channels, int frame_rate, AVSampleFormat sample_format, AVFormatContext* format_context, int stream_index)
+               : _format_context (format_context)
+               , _stream_index (stream_index)
+       {
+               _codec = avcodec_find_encoder_by_name (codec_name.c_str());
+               if (!_codec) {
+                       throw EncodeError (String::compose("avcodec_find_encoder_by_name failed for %1", codec_name));
+               }
+
+               _codec_context = avcodec_alloc_context3 (_codec);
+               if (!_codec_context) {
+                       throw std::bad_alloc ();
+               }
+
+               /* XXX: configurable */
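+               /* i.e. roughly 128 kbit/s for each channel */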
+               _codec_context->bit_rate = channels * 128 * 1024;
+               _codec_context->sample_fmt = sample_format;
+               _codec_context->sample_rate = frame_rate;
+               _codec_context->channel_layout = av_get_default_channel_layout (channels);
+               _codec_context->channels = channels;
+
+               int r = avcodec_open2 (_codec_context, _codec, 0);
+               if (r < 0) {
+                       throw EncodeError (N_("avcodec_open2"), N_("ExportAudioStream::ExportAudioStream"), r);
+               }
+
+               _stream = avformat_new_stream (format_context, _codec);
+               if (!_stream) {
+                       throw EncodeError (N_("avformat_new_stream"), N_("ExportAudioStream::ExportAudioStream"));
+               }
+
+               _stream->id = stream_index;
+               _stream->disposition |= AV_DISPOSITION_DEFAULT;
+               r = avcodec_parameters_from_context (_stream->codecpar, _codec_context);
+               if (r < 0) {
+                       throw EncodeError (N_("avcodec_parameters_from_context"), N_("ExportAudioStream::ExportAudioStream"), r);
+               }
+       }
+
+       ~ExportAudioStream ()
+       {
+               avcodec_close (_codec_context);
+       }
+
+       ExportAudioStream (ExportAudioStream const&) = delete;
+       ExportAudioStream& operator= (ExportAudioStream const&) = delete;
+
+       int frame_size () const {
+               return _codec_context->frame_size;
+       }
+
+       bool flush ()
+       {
+               int r = avcodec_send_frame (_codec_context, nullptr);
+               if (r < 0 && r != AVERROR_EOF) {
+                       /* We get EOF if we've already flushed the stream once */
+                       throw EncodeError (N_("avcodec_send_frame"), N_("ExportAudioStream::flush"), r);
+               }
+
+               ffmpeg::Packet packet;
+               r = avcodec_receive_packet (_codec_context, packet.get());
+               if (r == AVERROR_EOF) {
+                       return true;
+               } else if (r < 0) {
+                       throw EncodeError (N_("avcodec_receive_packet"), N_("ExportAudioStream::flush"), r);
+               }
+
+               packet->stream_index = _stream_index;
+               av_interleaved_write_frame (_format_context, packet.get());
+               return false;
+       }
+
+       void write (int size, int channel_offset, int channels, float* const* data, int64_t sample_offset)
+       {
+               DCPOMATIC_ASSERT (size);
+
+               auto frame = av_frame_alloc ();
+               DCPOMATIC_ASSERT (frame);
+
+               int line_size;
+               int const buffer_size = av_samples_get_buffer_size (&line_size, channels, size, _codec_context->sample_fmt, 0);
+               DCPOMATIC_ASSERT (buffer_size >= 0);
+
+               auto samples = av_malloc (buffer_size);
+               DCPOMATIC_ASSERT (samples);
+
+               frame->nb_samples = size;
+               frame->format = _codec_context->sample_fmt;
+               frame->channels = channels;
+               int r = avcodec_fill_audio_frame (frame, channels, _codec_context->sample_fmt, (const uint8_t *) samples, buffer_size, 0);
+               DCPOMATIC_ASSERT (r >= 0);
+
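+               /* Copy the float input into the frame, converting (and, for the packed formats, interleaving) to the codec's sample format */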
+               switch (_codec_context->sample_fmt) {
+               case AV_SAMPLE_FMT_S16:
+               {
+                       int16_t* q = reinterpret_cast<int16_t*> (samples);
+                       for (int i = 0; i < size; ++i) {
+                               for (int j = 0; j < channels; ++j) {
+                                       *q++ = data[j + channel_offset][i] * 32767;
+                               }
+                       }
+                       break;
+               }
+               case AV_SAMPLE_FMT_S32:
+               {
+                       int32_t* q = reinterpret_cast<int32_t*> (samples);
+                       for (int i = 0; i < size; ++i) {
+                               for (int j = 0; j < channels; ++j) {
+                                       *q++ = data[j + channel_offset][i] * 2147483647;
+                               }
+                       }
+                       break;
+               }
+               case AV_SAMPLE_FMT_FLTP:
+               {
+                       for (int i = 0; i < channels; ++i) {
+                               memcpy (reinterpret_cast<float*>(static_cast<uint8_t*>(samples) + i * line_size), data[i + channel_offset], sizeof(float) * size);
+                       }
+                       break;
+               }
+               default:
+                       DCPOMATIC_ASSERT (false);
+               }
+
+               DCPOMATIC_ASSERT (_codec_context->time_base.num == 1);
+               frame->pts = sample_offset * _codec_context->time_base.den / _codec_context->sample_rate;
+
+               r = avcodec_send_frame (_codec_context, frame);
+               av_free (samples);
+               av_frame_free (&frame);
+               if (r < 0) {
+                       throw EncodeError (N_("avcodec_send_frame"), N_("ExportAudioStream::write"), r);
+               }
+
+               ffmpeg::Packet packet;
+               r = avcodec_receive_packet (_codec_context, packet.get());
+               if (r < 0 && r != AVERROR(EAGAIN)) {
+                       throw EncodeError (N_("avcodec_receive_packet"), N_("ExportAudioStream::write"), r);
+               } else if (r >= 0) {
+                       packet->stream_index = _stream_index;
+                       av_interleaved_write_frame (_format_context, packet.get());
+               }
+       }
+
+private:
+       AVFormatContext* _format_context;
+       AVCodec const * _codec;
+       AVCodecContext* _codec_context;
+       AVStream* _stream;
+       int _stream_index;
+};
+
 
 FFmpegFileEncoder::FFmpegFileEncoder (
        dcp::Size video_frame_size,
        int video_frame_rate,
        int audio_frame_rate,
        int channels,
-       shared_ptr<Log> log,
        ExportFormat format,
+       bool audio_stream_per_channel,
        int x264_crf,
        boost::filesystem::path output
        )
-       : _video_options (0)
+       : _audio_stream_per_channel (audio_stream_per_channel)
        , _audio_channels (channels)
        , _output (output)
        , _video_frame_size (video_frame_size)
        , _video_frame_rate (video_frame_rate)
        , _audio_frame_rate (audio_frame_rate)
-       , _log (log)
 {
+       _pixel_format = pixel_format (format);
+
        switch (format) {
-       case EXPORT_FORMAT_PRORES:
-               _pixel_format = AV_PIX_FMT_YUV422P10;
+       case ExportFormat::PRORES_HQ:
                _sample_format = AV_SAMPLE_FMT_S16;
                _video_codec_name = "prores_ks";
                _audio_codec_name = "pcm_s16le";
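+               /* prores_ks profile 3 is ProRes 422 HQ */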
                av_dict_set (&_video_options, "profile", "3", 0);
                av_dict_set (&_video_options, "threads", "auto", 0);
                break;
-       case EXPORT_FORMAT_H264:
-               _pixel_format = AV_PIX_FMT_YUV420P;
+       case ExportFormat::H264_AAC:
                _sample_format = AV_SAMPLE_FMT_FLTP;
                _video_codec_name = "libx264";
                _audio_codec_name = "aac";
                av_dict_set_int (&_video_options, "crf", x264_crf, 0);
                break;
+       default:
+               DCPOMATIC_ASSERT (false);
+       }
+
+       int r = avformat_alloc_output_context2 (&_format_context, 0, 0, _output.string().c_str());
+       if (!_format_context) {
+               throw EncodeError (N_("avformat_alloc_output_context2"), "FFmpegFileEncoder::FFmpegFileEncoder", r);
        }
 
        setup_video ();
        setup_audio ();
 
-       avformat_alloc_output_context2 (&_format_context, 0, 0, _output.string().c_str());
-       if (!_format_context) {
-               throw runtime_error ("could not allocate FFmpeg format context");
+       r = avio_open_boost (&_format_context->pb, _output, AVIO_FLAG_WRITE);
+       if (r < 0) {
+               throw EncodeError (String::compose(_("Could not open output file %1 (%2)"), _output.string(), r));
        }
 
-       _video_stream = avformat_new_stream (_format_context, _video_codec);
-       if (!_video_stream) {
-               throw runtime_error ("could not create FFmpeg output video stream");
-       }
+       AVDictionary* options = nullptr;
 
-       _audio_stream = avformat_new_stream (_format_context, _audio_codec);
-       if (!_audio_stream) {
-               throw runtime_error ("could not create FFmpeg output audio stream");
+       r = avformat_write_header (_format_context, &options);
+       if (r < 0) {
+               throw EncodeError (N_("avformat_write_header"), N_("FFmpegFileEncoder::FFmpegFileEncoder"), r);
        }
 
-       _video_stream->id = _video_stream_index;
-       _video_stream->codec = _video_codec_context;
+       _pending_audio = make_shared<AudioBuffers>(channels, 0);
+}
 
-       _audio_stream->id = _audio_stream_index;
-       _audio_stream->codec = _audio_codec_context;
 
-       if (avcodec_open2 (_video_codec_context, _video_codec, &_video_options) < 0) {
-               throw runtime_error ("could not open FFmpeg video codec");
-       }
-
-       int r = avcodec_open2 (_audio_codec_context, _audio_codec, 0);
-       if (r < 0) {
-               char buffer[256];
-               av_strerror (r, buffer, sizeof(buffer));
-               throw runtime_error (String::compose ("could not open FFmpeg audio codec (%1)", buffer));
-       }
+FFmpegFileEncoder::~FFmpegFileEncoder ()
+{
+       _audio_streams.clear ();
+       avcodec_close (_video_codec_context);
+       avformat_free_context (_format_context);
+}
 
-       if (avio_open_boost (&_format_context->pb, _output, AVIO_FLAG_WRITE) < 0) {
-               throw runtime_error ("could not open FFmpeg output file");
-       }
 
-       if (avformat_write_header (_format_context, 0) < 0) {
-               throw runtime_error ("could not write header to FFmpeg output file");
+AVPixelFormat
+FFmpegFileEncoder::pixel_format (ExportFormat format)
+{
+       switch (format) {
+       case ExportFormat::PRORES_HQ:
+               return AV_PIX_FMT_YUV422P10;
+       case ExportFormat::H264_AAC:
+               return AV_PIX_FMT_YUV420P;
+       default:
+               DCPOMATIC_ASSERT (false);
        }
 
-       _pending_audio.reset (new AudioBuffers(channels, 0));
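+       /* Not reached; this just keeps the compiler quiet about the return type */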
+       return AV_PIX_FMT_YUV422P10;
 }
 
+
 void
 FFmpegFileEncoder::setup_video ()
 {
        _video_codec = avcodec_find_encoder_by_name (_video_codec_name.c_str());
        if (!_video_codec) {
-               throw runtime_error (String::compose ("could not find FFmpeg encoder %1", _video_codec_name));
+               throw EncodeError (String::compose("avcodec_find_encoder_by_name failed for %1", _video_codec_name));
        }
 
        _video_codec_context = avcodec_alloc_context3 (_video_codec);
        if (!_video_codec_context) {
-               throw runtime_error ("could not allocate FFmpeg video context");
+               throw std::bad_alloc ();
        }
 
-       avcodec_get_context_defaults3 (_video_codec_context, _video_codec);
-
        /* Variable quantisation */
        _video_codec_context->global_quality = 0;
        _video_codec_context->width = _video_frame_size.width;
@@ -153,31 +313,40 @@ FFmpegFileEncoder::setup_video ()
        _video_codec_context->time_base = (AVRational) { 1, _video_frame_rate };
        _video_codec_context->pix_fmt = _pixel_format;
        _video_codec_context->flags |= AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_GLOBAL_HEADER;
-}
 
-void
-FFmpegFileEncoder::setup_audio ()
-{
-       _audio_codec = avcodec_find_encoder_by_name (_audio_codec_name.c_str());
-       if (!_audio_codec) {
-               throw runtime_error (String::compose ("could not find FFmpeg encoder %1", _audio_codec_name));
+       if (avcodec_open2 (_video_codec_context, _video_codec, &_video_options) < 0) {
+               throw EncodeError (N_("avcodec_open"), N_("FFmpegFileEncoder::setup_video"));
        }
 
-       _audio_codec_context = avcodec_alloc_context3 (_audio_codec);
-       if (!_audio_codec_context) {
-               throw runtime_error ("could not allocate FFmpeg audio context");
+       _video_stream = avformat_new_stream (_format_context, _video_codec);
+       if (!_video_stream) {
+               throw EncodeError (N_("avformat_new_stream"), N_("FFmpegFileEncoder::setup_video"));
        }
 
-       avcodec_get_context_defaults3 (_audio_codec_context, _audio_codec);
+       _video_stream->id = _video_stream_index;
+       int r = avcodec_parameters_from_context (_video_stream->codecpar, _video_codec_context);
+       if (r < 0) {
+               throw EncodeError (N_("avcodec_parameters_from_context"), N_("FFmpegFileEncoder::setup_video"), r);
+       }
+}
+
 
-       /* XXX: configurable */
-       _audio_codec_context->bit_rate = 256 * 1024;
-       _audio_codec_context->sample_fmt = _sample_format;
-       _audio_codec_context->sample_rate = _audio_frame_rate;
-       _audio_codec_context->channel_layout = av_get_default_channel_layout (_audio_channels);
-       _audio_codec_context->channels = _audio_channels;
+void
+FFmpegFileEncoder::setup_audio ()
+{
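+       /* Either one stream carrying all the channels, or a mono stream for each channel */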
+       int const streams = _audio_stream_per_channel ? _audio_channels : 1;
+       int const channels_per_stream = _audio_stream_per_channel ? 1 : _audio_channels;
+
+       for (int i = 0; i < streams; ++i) {
+               _audio_streams.push_back(
+                       make_shared<ExportAudioStream>(
+                               _audio_codec_name, channels_per_stream, _audio_frame_rate, _sample_format, _format_context, _audio_stream_index_base + i
+                               )
+                       );
+       }
 }
 
+
 void
 FFmpegFileEncoder::flush ()
 {
@@ -189,59 +358,55 @@ FFmpegFileEncoder::flush ()
        bool flushed_audio = false;
 
        while (!flushed_video || !flushed_audio) {
-               AVPacket packet;
-               av_init_packet (&packet);
-               packet.data = 0;
-               packet.size = 0;
-
-               int got_packet;
-               avcodec_encode_video2 (_video_codec_context, &packet, 0, &got_packet);
-               if (got_packet) {
-                       packet.stream_index = 0;
-                       av_interleaved_write_frame (_format_context, &packet);
-               } else {
-                       flushed_video = true;
+               int r = avcodec_send_frame (_video_codec_context, nullptr);
+               if (r < 0 && r != AVERROR_EOF) {
+                       /* We get EOF if we've already flushed the stream once */
+                       throw EncodeError (N_("avcodec_send_frame"), N_("FFmpegFileEncoder::flush"), r);
                }
-               av_packet_unref (&packet);
-
-               av_init_packet (&packet);
-               packet.data = 0;
-               packet.size = 0;
 
-               avcodec_encode_audio2 (_audio_codec_context, &packet, 0, &got_packet);
-               if (got_packet) {
-                       packet.stream_index = 0;
-                       av_interleaved_write_frame (_format_context, &packet);
+               ffmpeg::Packet packet;
+               r = avcodec_receive_packet (_video_codec_context, packet.get());
+               if (r == AVERROR_EOF) {
+                       flushed_video = true;
+               } else if (r < 0) {
+                       throw EncodeError (N_("avcodec_receive_packet"), N_("FFmpegFileEncoder::flush"), r);
                } else {
-                       flushed_audio = true;
+                       packet->stream_index = _video_stream_index;
+                       av_interleaved_write_frame (_format_context, packet.get());
+               }
+
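+               /* Only consider the audio flushed once every stream has returned EOF */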
+               flushed_audio = true;
+               for (auto i: _audio_streams) {
+                       if (!i->flush()) {
+                               flushed_audio = false;
+                       }
                }
-               av_packet_unref (&packet);
        }
 
        av_write_trailer (_format_context);
-
-       avcodec_close (_video_codec_context);
-       avcodec_close (_audio_codec_context);
-       avio_close (_format_context->pb);
-       avformat_free_context (_format_context);
 }
 
+
 void
 FFmpegFileEncoder::video (shared_ptr<PlayerVideo> video, DCPTime time)
 {
-       shared_ptr<Image> image = video->image (
-               boost::optional<dcp::NoteHandler>(bind(&Log::dcp_log, _log.get(), _1, _2)),
-               bind (&force_pixel_format, _1, _pixel_format),
-               true,
+       /* All our output formats are video range at the moment */
+       auto image = video->image (
+               bind (&PlayerVideo::force, _pixel_format),
+               VideoRange::VIDEO,
                false
                );
 
-       AVFrame* frame = av_frame_alloc ();
+       auto frame = av_frame_alloc ();
        DCPOMATIC_ASSERT (frame);
 
-       _pending_images[image->data()[0]] = image;
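+       /* Keep a reference to the Image so that its data stays valid until FFmpeg has finished with it (see buffer_free()) */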
+       {
+               boost::mutex::scoped_lock lm (_pending_images_mutex);
+               _pending_images[image->data()[0]] = image;
+       }
+
        for (int i = 0; i < 3; ++i) {
-               AVBufferRef* buffer = av_buffer_create(image->data()[i], image->stride()[i] * image->size().height, &buffer_free, this, 0);
+               auto buffer = av_buffer_create(image->data()[i], image->stride()[i] * image->size().height, &buffer_free, this, 0);
                frame->buf[i] = av_buffer_ref (buffer);
                frame->data[i] = buffer->data;
                frame->linesize[i] = image->stride()[i];
@@ -251,35 +416,34 @@ FFmpegFileEncoder::video (shared_ptr<PlayerVideo> video, DCPTime time)
        frame->width = image->size().width;
        frame->height = image->size().height;
        frame->format = _pixel_format;
-       frame->pts = time.seconds() / av_q2d (_video_stream->time_base);
-
-       AVPacket packet;
-       av_init_packet (&packet);
-       packet.data = 0;
-       packet.size = 0;
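+       /* time is in DCPTime::HZ units; convert it to the video stream's time base */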
+       DCPOMATIC_ASSERT (_video_stream->time_base.num == 1);
+       frame->pts = time.get() * _video_stream->time_base.den / DCPTime::HZ;
 
-       int got_packet;
-       if (avcodec_encode_video2 (_video_codec_context, &packet, frame, &got_packet) < 0) {
-               throw EncodeError ("FFmpeg video encode failed");
+       int r = avcodec_send_frame (_video_codec_context, frame);
+       av_frame_free (&frame);
+       if (r < 0) {
+               throw EncodeError (N_("avcodec_send_frame"), N_("FFmpegFileEncoder::video"), r);
        }
 
-       if (got_packet && packet.size) {
-               packet.stream_index = _video_stream_index;
-               av_interleaved_write_frame (_format_context, &packet);
-               av_packet_unref (&packet);
+       ffmpeg::Packet packet;
+       r = avcodec_receive_packet (_video_codec_context, packet.get());
+       if (r < 0 && r != AVERROR(EAGAIN)) {
+               throw EncodeError (N_("avcodec_receive_packet"), N_("FFmpegFileEncoder::video"), r);
+       } else if (r >= 0) {
+               packet->stream_index = _video_stream_index;
+               av_interleaved_write_frame (_format_context, packet.get());
        }
-
-       av_frame_free (&frame);
-
 }
 
+
 /** Called when the player gives us some audio */
 void
 FFmpegFileEncoder::audio (shared_ptr<AudioBuffers> audio)
 {
        _pending_audio->append (audio);
 
-       int frame_size = _audio_codec_context->frame_size;
+       DCPOMATIC_ASSERT (!_audio_streams.empty());
+       int frame_size = _audio_streams[0]->frame_size();
        if (frame_size == 0) {
                /* codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE */
                frame_size = _audio_frame_rate / _video_frame_rate;
@@ -290,88 +454,46 @@ FFmpegFileEncoder::audio (shared_ptr<AudioBuffers> audio)
        }
 }
 
+
 void
 FFmpegFileEncoder::audio_frame (int size)
 {
-       DCPOMATIC_ASSERT (size);
-
-       AVFrame* frame = av_frame_alloc ();
-       DCPOMATIC_ASSERT (frame);
-
-       int const channels = _pending_audio->channels();
-       DCPOMATIC_ASSERT (channels);
-
-       int const buffer_size = av_samples_get_buffer_size (0, channels, size, _audio_codec_context->sample_fmt, 0);
-       DCPOMATIC_ASSERT (buffer_size >= 0);
-
-       void* samples = av_malloc (buffer_size);
-       DCPOMATIC_ASSERT (samples);
-
-       frame->nb_samples = size;
-       int r = avcodec_fill_audio_frame (frame, channels, _audio_codec_context->sample_fmt, (const uint8_t *) samples, buffer_size, 0);
-       DCPOMATIC_ASSERT (r >= 0);
-
-       float** p = _pending_audio->data ();
-       switch (_audio_codec_context->sample_fmt) {
-       case AV_SAMPLE_FMT_S16:
-       {
-               int16_t* q = reinterpret_cast<int16_t*> (samples);
-               for (int i = 0; i < size; ++i) {
-                       for (int j = 0; j < channels; ++j) {
-                               *q++ = p[j][i] * 32767;
-                       }
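+       /* Either write one channel to each stream, or all channels to the single stream */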
+       if (_audio_stream_per_channel) {
+               int offset = 0;
+               for (auto i: _audio_streams) {
+                       i->write (size, offset, 1, _pending_audio->data(), _audio_frames);
+                       ++offset;
                }
-               break;
-       }
-       case AV_SAMPLE_FMT_FLTP:
-       {
-               float* q = reinterpret_cast<float*> (samples);
-               for (int i = 0; i < channels; ++i) {
-                       memcpy (q, p[i], sizeof(float) * size);
-                       q += size;
-               }
-               break;
-       }
-       default:
-               DCPOMATIC_ASSERT (false);
+       } else {
+               DCPOMATIC_ASSERT (!_audio_streams.empty());
+               DCPOMATIC_ASSERT (_pending_audio->channels());
+               _audio_streams[0]->write (size, 0, _pending_audio->channels(), _pending_audio->data(), _audio_frames);
        }
 
-       AVPacket packet;
-       av_init_packet (&packet);
-       packet.data = 0;
-       packet.size = 0;
-
-       int got_packet;
-       if (avcodec_encode_audio2 (_audio_codec_context, &packet, frame, &got_packet) < 0) {
-               throw EncodeError ("FFmpeg audio encode failed");
-       }
-
-       if (got_packet && packet.size) {
-               packet.stream_index = _audio_stream_index;
-               av_interleaved_write_frame (_format_context, &packet);
-               av_packet_unref (&packet);
-       }
-
-       av_free (samples);
-       av_frame_free (&frame);
-
        _pending_audio->trim_start (size);
+       _audio_frames += size;
 }
 
+
 void
 FFmpegFileEncoder::subtitle (PlayerText, DCPTimePeriod)
 {
 
 }
 
+
 void
 FFmpegFileEncoder::buffer_free (void* opaque, uint8_t* data)
 {
        reinterpret_cast<FFmpegFileEncoder*>(opaque)->buffer_free2(data);
 }
 
+
 void
 FFmpegFileEncoder::buffer_free2 (uint8_t* data)
 {
-       _pending_images.erase (data);
+       boost::mutex::scoped_lock lm (_pending_images_mutex);
+       if (_pending_images.find(data) != _pending_images.end()) {
+               _pending_images.erase (data);
+       }
 }