Strictly speaking, I think we should be putting each component of an image into _pending_images, so that the image is only dropped once every one of its planes has been freed.
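
Before this change only image->data()[0] was held in _pending_images, so the whole image could be released as soon as that first plane's buffer was freed, even though FFmpeg might still hold references to planes 1 and 2. The patch below keys _pending_images on each plane's data pointer and pairs the shared_ptr<Image> with a count: video() adds or bumps an entry for each of the three planes it hands to av_buffer_create(), and buffer_free2() decrements the entry and erases it only when the count reaches zero (the count guards against the same pointer being registered more than once).

Here is a minimal, self-contained sketch of that bookkeeping. It is an illustration only, not dcpomatic code: FakeImage and PendingImages are stand-ins, and std::mutex replaces boost::mutex.

	// Sketch of the per-plane reference counting this commit adds to
	// _pending_images: one map entry per plane data pointer, each holding a
	// shared_ptr to the image plus a count of how many times that pointer
	// has been handed out.
	#include <cstddef>
	#include <cstdint>
	#include <iostream>
	#include <map>
	#include <memory>
	#include <mutex>
	#include <utility>
	#include <vector>

	struct FakeImage {
		// Three planes (e.g. Y, U, V), each with its own data pointer.
		std::vector<std::vector<uint8_t>> planes{
			std::vector<uint8_t>(16), std::vector<uint8_t>(8), std::vector<uint8_t>(8)
		};
		uint8_t* data (int i) { return planes[i].data(); }
	};

	class PendingImages
	{
	public:
		// Called once per plane when the plane's data is handed to an AVBufferRef,
		// mirroring the loop in FFmpegFileEncoder::video().
		void retain (std::shared_ptr<FakeImage> image, uint8_t* plane_data)
		{
			std::lock_guard<std::mutex> lm (_mutex);
			auto iter = _images.find (plane_data);
			if (iter != _images.end()) {
				iter->second.second++;
			} else {
				_images[plane_data] = { image, 1 };
			}
		}

		// Called from the buffer-free callback, mirroring buffer_free2(): the entry
		// (and its shared_ptr to the image) is only dropped when the count for that
		// data pointer reaches zero.
		void release (uint8_t* plane_data)
		{
			std::lock_guard<std::mutex> lm (_mutex);
			auto iter = _images.find (plane_data);
			if (iter != _images.end() && --iter->second.second == 0) {
				_images.erase (iter);
			}
		}

		std::size_t size () const
		{
			std::lock_guard<std::mutex> lm (_mutex);
			return _images.size ();
		}

	private:
		mutable std::mutex _mutex;
		std::map<uint8_t*, std::pair<std::shared_ptr<FakeImage>, int>> _images;
	};

	int main ()
	{
		auto image = std::make_shared<FakeImage> ();
		PendingImages pending;

		// One retain per plane, as in the loop over frame->buf[0..2].
		for (int i = 0; i < 3; ++i) {
			pending.retain (image, image->data(i));
		}

		// One release per plane, as in buffer_free2(); the map only empties
		// (and the image is only released) after the last plane has gone.
		for (int i = 0; i < 3; ++i) {
			pending.release (image->data(i));
			std::cout << "pending entries: " << pending.size() << "\n";
		}
	}
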
diff --git a/src/lib/ffmpeg_file_encoder.cc b/src/lib/ffmpeg_file_encoder.cc
index 29ee7455b5d4ed680ff93c0791946cbb1fb9b917..791edb9bcfff2a0c2995b561876098c5d6c8ab70 100644
--- a/src/lib/ffmpeg_file_encoder.cc
+++ b/src/lib/ffmpeg_file_encoder.cc
 */
 
 
+#include "compose.hpp"
+#include "cross.h"
 #include "ffmpeg_encoder.h"
 #include "ffmpeg_wrapper.h"
 #include "film.h"
+#include "image.h"
 #include "job.h"
+#include "log.h"
 #include "player.h"
 #include "player_video.h"
-#include "log.h"
-#include "image.h"
-#include "cross.h"
-#include "compose.hpp"
+extern "C" {
+#include <libavutil/channel_layout.h>
+}
 #include <iostream>
 
 #include "i18n.h"
 
 using std::cout;
 using std::make_shared;
-using std::pair;
-using std::runtime_error;
 using std::shared_ptr;
 using std::string;
-using std::weak_ptr;
 using boost::bind;
-using boost::optional;
 using namespace dcpomatic;
 #if BOOST_VERSION >= 106100
 using namespace boost::placeholders;
@@ -128,14 +127,15 @@ public:
                return false;
        }
 
-       void write (int size, int channel_offset, int channels, float** data, int64_t sample_offset)
+       void write (int size, int channel_offset, int channels, float* const* data, int64_t sample_offset)
        {
                DCPOMATIC_ASSERT (size);
 
                auto frame = av_frame_alloc ();
                DCPOMATIC_ASSERT (frame);
 
-               int const buffer_size = av_samples_get_buffer_size (0, channels, size, _codec_context->sample_fmt, 0);
+               int line_size;
+               int const buffer_size = av_samples_get_buffer_size (&line_size, channels, size, _codec_context->sample_fmt, 0);
                DCPOMATIC_ASSERT (buffer_size >= 0);
 
                auto samples = av_malloc (buffer_size);
@@ -170,10 +170,8 @@ public:
                }
                case AV_SAMPLE_FMT_FLTP:
                {
-                       float* q = reinterpret_cast<float*> (samples);
                        for (int i = 0; i < channels; ++i) {
-                               memcpy (q, data[i + channel_offset], sizeof(float) * size);
-                               q += size;
+                               memcpy (reinterpret_cast<float*>(static_cast<uint8_t*>(samples) + i * line_size), data[i + channel_offset], sizeof(float) * size);
                        }
                        break;
                }
@@ -203,7 +201,7 @@ public:
 
 private:
        AVFormatContext* _format_context;
-       AVCodec* _codec;
+       AVCodec const * _codec;
        AVCodecContext* _codec_context;
        AVStream* _stream;
        int _stream_index;
@@ -230,7 +228,14 @@ FFmpegFileEncoder::FFmpegFileEncoder (
        _pixel_format = pixel_format (format);
 
        switch (format) {
-       case ExportFormat::PRORES:
+       case ExportFormat::PRORES_4444:
+               _sample_format = AV_SAMPLE_FMT_S16;
+               _video_codec_name = "prores_ks";
+               _audio_codec_name = "pcm_s16le";
+               av_dict_set(&_video_options, "profile", "4", 0);
+               av_dict_set(&_video_options, "threads", "auto", 0);
+               break;
+       case ExportFormat::PRORES_HQ:
                _sample_format = AV_SAMPLE_FMT_S16;
                _video_codec_name = "prores_ks";
                _audio_codec_name = "pcm_s16le";
@@ -243,12 +248,6 @@ FFmpegFileEncoder::FFmpegFileEncoder (
                _audio_codec_name = "aac";
                av_dict_set_int (&_video_options, "crf", x264_crf, 0);
                break;
-       case ExportFormat::H264_PCM:
-               _sample_format = AV_SAMPLE_FMT_S32;
-               _video_codec_name = "libx264";
-               _audio_codec_name = "pcm_s24le";
-               av_dict_set_int (&_video_options, "crf", x264_crf, 0);
-               break;
        default:
                DCPOMATIC_ASSERT (false);
        }
@@ -273,7 +272,7 @@ FFmpegFileEncoder::FFmpegFileEncoder (
                throw EncodeError (N_("avformat_write_header"), N_("FFmpegFileEncoder::FFmpegFileEncoder"), r);
        }
 
-       _pending_audio.reset (new AudioBuffers(channels, 0));
+       _pending_audio = make_shared<AudioBuffers>(channels, 0);
 }
 
 
@@ -281,6 +280,7 @@ FFmpegFileEncoder::~FFmpegFileEncoder ()
 {
        _audio_streams.clear ();
        avcodec_close (_video_codec_context);
+       avio_close (_format_context->pb);
        avformat_free_context (_format_context);
 }
 
@@ -289,10 +289,11 @@ AVPixelFormat
 FFmpegFileEncoder::pixel_format (ExportFormat format)
 {
        switch (format) {
-       case ExportFormat::PRORES:
+       case ExportFormat::PRORES_4444:
+               return AV_PIX_FMT_YUV444P10;
+       case ExportFormat::PRORES_HQ:
                return AV_PIX_FMT_YUV422P10;
        case ExportFormat::H264_AAC:
-       case ExportFormat::H264_PCM:
                return AV_PIX_FMT_YUV420P;
        default:
                DCPOMATIC_ASSERT (false);
@@ -401,22 +402,27 @@ FFmpegFileEncoder::video (shared_ptr<PlayerVideo> video, DCPTime time)
 {
        /* All our output formats are video range at the moment */
        auto image = video->image (
-               bind (&PlayerVideo::force, _1, _pixel_format),
+               bind (&PlayerVideo::force, _pixel_format),
                VideoRange::VIDEO,
-               true,
                false
                );
 
        auto frame = av_frame_alloc ();
        DCPOMATIC_ASSERT (frame);
 
-       {
-               boost::mutex::scoped_lock lm (_pending_images_mutex);
-               _pending_images[image->data()[0]] = image;
-       }
-
        for (int i = 0; i < 3; ++i) {
-               AVBufferRef* buffer = av_buffer_create(image->data()[i], image->stride()[i] * image->size().height, &buffer_free, this, 0);
+               {
+                       boost::mutex::scoped_lock lm (_pending_images_mutex);
+                       auto key = image->data()[i];
+                       auto iter = _pending_images.find(key);
+                       if (iter != _pending_images.end()) {
+                               iter->second.second++;
+                       } else {
+                               _pending_images[key] = { image, 1 };
+                       }
+               }
+
+               auto buffer = av_buffer_create(image->data()[i], image->stride()[i] * image->size().height, &buffer_free, this, 0);
                frame->buf[i] = av_buffer_ref (buffer);
                frame->data[i] = buffer->data;
                frame->linesize[i] = image->stride()[i];
@@ -503,7 +509,11 @@ void
 FFmpegFileEncoder::buffer_free2 (uint8_t* data)
 {
        boost::mutex::scoped_lock lm (_pending_images_mutex);
-       if (_pending_images.find(data) != _pending_images.end()) {
-               _pending_images.erase (data);
+       auto iter = _pending_images.find(data);
+       if (iter != _pending_images.end()) {
+               iter->second.second--;
+               if (iter->second.second == 0) {
+                       _pending_images.erase(data);
+               }
        }
 }
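
For reference, the buffer_free callback registered with av_buffer_create() in video() is what eventually drives buffer_free2(): FFmpeg remembers the opaque pointer (this) and the plane's data pointer, and calls the callback with both once the last AVBufferRef to that plane is unreferenced. That is why the raw data pointer can serve as the key into _pending_images. A small stand-alone sketch of that mechanism, assuming a stand-in Encoder class rather than the real FFmpegFileEncoder:

	// Sketch of the av_buffer_create() callback pattern used by the encoder:
	// wrap memory we own in an AVBufferRef, and get told via a static
	// trampoline when FFmpeg has dropped its last reference to it.
	extern "C" {
	#include <libavutil/buffer.h>
	}
	#include <cstdint>
	#include <iostream>

	class Encoder
	{
	public:
		void plane_freed (uint8_t* data)
		{
			// In the real encoder this is where the _pending_images entry for
			// `data` would be decremented and possibly erased.
			std::cout << "plane at " << static_cast<void*>(data) << " released\n";
		}

		// Static trampoline matching av_buffer_create()'s callback signature
		// void (*free)(void* opaque, uint8_t* data).
		static void buffer_free (void* opaque, uint8_t* data)
		{
			reinterpret_cast<Encoder*>(opaque)->plane_freed (data);
		}
	};

	int main ()
	{
		Encoder encoder;
		static uint8_t plane[64];

		// Wrap our own memory; FFmpeg will not free it itself, it will call
		// Encoder::buffer_free with (&encoder, plane) when the refcount hits zero.
		AVBufferRef* buffer = av_buffer_create (plane, sizeof(plane), &Encoder::buffer_free, &encoder, 0);
		AVBufferRef* extra = av_buffer_ref (buffer);

		av_buffer_unref (&buffer);   // one reference left; callback not yet called
		av_buffer_unref (&extra);    // last reference gone; Encoder::buffer_free runs
	}
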