X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fimage.cc;h=dffc00e0ea2ed6d1e101f1711035dd22bef9d67a;hb=ad1ef39eda58b3a919ea3b7084401a0439409ec6;hp=fb72d1aee90315c59fbc4010785d4b926f37ce6f;hpb=4e79cb88c22a7b2d52381f0a1a1ffdb5015fa617;p=dcpomatic.git diff --git a/src/lib/image.cc b/src/lib/image.cc index fb72d1aee..dffc00e0e 100644 --- a/src/lib/image.cc +++ b/src/lib/image.cc @@ -1,237 +1,459 @@ /* - Copyright (C) 2012 Carl Hetherington + Copyright (C) 2012-2016 Carl Hetherington - This program is free software; you can redistribute it and/or modify + This file is part of DCP-o-matic. + + DCP-o-matic is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - This program is distributed in the hope that it will be useful, + DCP-o-matic is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + along with DCP-o-matic. If not, see . */ /** @file src/image.cc - * @brief A set of classes to describe video images. + * @brief A class to describe a video image. */ -#include -#include -#include -#include -#include -#include -#include +#include "image.h" +#include "exceptions.h" +#include "timer.h" +#include "rect.h" +#include "util.h" +#include "compose.hpp" +#include "dcpomatic_socket.h" +#include +#include extern "C" { -#include -#include #include -#include -#include #include +#include +#include } -#include "image.h" -#include "exceptions.h" -#include "scaler.h" +#include +#if HAVE_VALGRIND_MEMCHECK_H +#include +#endif +#include -using namespace std; -using namespace boost; +#include "i18n.h" + +using std::string; +using std::min; +using std::max; +using std::cout; +using std::cerr; +using std::list; +using std::runtime_error; +using boost::shared_ptr; +using dcp::Size; -/** @param n Component index. - * @return Number of lines in the image for the given component. - */ int -Image::lines (int n) const +Image::vertical_factor (int n) const { - switch (_pixel_format) { - case PIX_FMT_YUV420P: - if (n == 0) { - return size().height; - } else { - return size().height / 2; - } - break; - case PIX_FMT_RGB24: - case PIX_FMT_RGBA: - case PIX_FMT_YUV422P10LE: - case PIX_FMT_YUV422P: - return size().height; - default: - assert (false); + if (n == 0) { + return 1; } - return 0; + AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format); + if (!d) { + throw PixelFormatError ("line_factor()", _pixel_format); + } + + return pow (2.0f, d->log2_chroma_h); } -/** @return Number of components */ int -Image::components () const +Image::horizontal_factor (int n) const { - switch (_pixel_format) { - case PIX_FMT_YUV420P: - case PIX_FMT_YUV422P10LE: - case PIX_FMT_YUV422P: - return 3; - case PIX_FMT_RGB24: - case PIX_FMT_RGBA: + if (n == 0) { return 1; - default: - assert (false); } - return 0; + AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format); + if (!d) { + throw PixelFormatError ("sample_size()", _pixel_format); + } + + return pow (2.0f, d->log2_chroma_w); +} + +/** @param n Component index. + * @return Number of samples (i.e. 
pixels, unless sub-sampled) in each direction for this component. + */ +dcp::Size +Image::sample_size (int n) const +{ + return dcp::Size ( + lrint (ceil (static_cast(size().width) / horizontal_factor (n))), + lrint (ceil (static_cast(size().height) / vertical_factor (n))) + ); +} + +/** @return Number of planes */ +int +Image::planes () const +{ + AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format); + if (!d) { + throw PixelFormatError ("planes()", _pixel_format); + } + + if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) { + return 1; + } + + return d->nb_components; } +/** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size'. + * @param crop Amount to crop by. + * @param inter_size Size to scale the cropped image to. + * @param out_size Size of output frame; if this is larger than inter_size there will be black padding. + * @param yuv_to_rgb YUV to RGB transformation to use, if required. + * @param out_format Output pixel format. + * @param out_aligned true to make the output image aligned. + * @param fast Try to be fast at the possible expense of quality; at present this means using + * fast bilinear rather than bicubic scaling. + */ shared_ptr -Image::scale (Size out_size, Scaler const * scaler, bool aligned) const +Image::crop_scale_window ( + Crop crop, dcp::Size inter_size, dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast + ) const { - assert (scaler); + /* Empirical testing suggests that sws_scale() will crash if + the input image is not aligned. + */ + DCPOMATIC_ASSERT (aligned ()); + + DCPOMATIC_ASSERT (out_size.width >= inter_size.width); + DCPOMATIC_ASSERT (out_size.height >= inter_size.height); + + /* Here's an image of out_size. Below we may write to it starting at an offset so we get some padding. + Hence we want to write in the following pattern: + + block start write start line end + |..(padding)..|<------line-size------------->|..(padding)..| + |..(padding)..|<------line-size------------->|..(padding)..| + |..(padding)..|<------line-size------------->|..(padding)..| + + where line-size is of the smaller (inter_size) image and the full padded line length is that of + out_size. To get things to work we have to tell FFmpeg that the stride is that of out_size. + However some parts of FFmpeg (notably rgb48Toxyz12 in swscale.c) process data for the full + specified *stride*. This does not matter until we get to the last line: - shared_ptr scaled (new SimpleImage (pixel_format(), out_size, aligned)); + block start write start line end + |..(padding)..|<------line-size------------->|XXXwrittenXXX| + |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXX| + |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXXXXXwrittenXXX + ^^^^ out of bounds + To get around this, we ask Image to overallocate its buffers by the overrun. + */ + + shared_ptr out (new Image (out_format, out_size, out_aligned, (out_size.width - inter_size.width) / 2)); + out->make_black (); + + /* Size of the image after any crop */ + dcp::Size const cropped_size = crop.apply (size ()); + + /* Scale context for a scale from cropped_size to inter_size */ struct SwsContext* scale_context = sws_getContext ( - size().width, size().height, pixel_format(), - out_size.width, out_size.height, pixel_format(), - scaler->ffmpeg_id (), 0, 0, 0 + cropped_size.width, cropped_size.height, pixel_format(), + inter_size.width, inter_size.height, out_format, + fast ? 
SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0 ); + if (!scale_context) { + throw runtime_error (N_("Could not allocate SwsContext")); + } + + DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT); + int const lut[dcp::YUV_TO_RGB_COUNT] = { + SWS_CS_ITU601, + SWS_CS_ITU709 + }; + + sws_setColorspaceDetails ( + scale_context, + sws_getCoefficients (lut[yuv_to_rgb]), 0, + sws_getCoefficients (lut[yuv_to_rgb]), 0, + 0, 1 << 16, 1 << 16 + ); + + AVPixFmtDescriptor const * in_desc = av_pix_fmt_desc_get (_pixel_format); + if (!in_desc) { + throw PixelFormatError ("crop_scale_window()", _pixel_format); + } + + /* Prepare input data pointers with crop */ + uint8_t* scale_in_data[planes()]; + for (int c = 0; c < planes(); ++c) { + /* To work out the crop in bytes, start by multiplying + the crop by the (average) bytes per pixel. Then + round down so that we don't crop a subsampled pixel until + we've cropped all of its Y-channel pixels. + */ + int const x = lrintf (bytes_per_pixel(c) * crop.left) & ~ ((int) in_desc->log2_chroma_w); + scale_in_data[c] = data()[c] + x + stride()[c] * (crop.top / vertical_factor(c)); + } + + /* Corner of the image within out_size */ + Position const corner ((out_size.width - inter_size.width) / 2, (out_size.height - inter_size.height) / 2); + + AVPixFmtDescriptor const * out_desc = av_pix_fmt_desc_get (out_format); + if (!out_desc) { + throw PixelFormatError ("crop_scale_window()", out_format); + } + + uint8_t* scale_out_data[out->planes()]; + for (int c = 0; c < out->planes(); ++c) { + /* See the note in the crop loop above */ + int const x = lrintf (out->bytes_per_pixel(c) * corner.x) & ~ ((int) out_desc->log2_chroma_w); + scale_out_data[c] = out->data()[c] + x + out->stride()[c] * (corner.y / out->vertical_factor(c)); + } + sws_scale ( scale_context, - data(), stride(), - 0, size().height, - scaled->data(), scaled->stride() + scale_in_data, stride(), + 0, cropped_size.height, + scale_out_data, out->stride() ); sws_freeContext (scale_context); - return scaled; + return out; } -/** Scale this image to a given size and convert it to RGB. - * @param out_size Output image size in pixels. - * @param scaler Scaler to use. - */ shared_ptr -Image::scale_and_convert_to_rgb (Size out_size, int padding, Scaler const * scaler, bool aligned) const +Image::convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast) const { - assert (scaler); + return scale(size(), yuv_to_rgb, out_format, out_aligned, fast); +} - Size content_size = out_size; - content_size.width -= (padding * 2); +/** @param out_size Size to scale to. + * @param yuv_to_rgb YUVToRGB transform transform to use, if required. + * @param out_format Output pixel format. + * @param out_aligned true to make an aligned output image. + * @param fast Try to be fast at the possible expense of quality; at present this means using + * fast bilinear rather than bicubic scaling. + */ +shared_ptr +Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast) const +{ + /* Empirical testing suggests that sws_scale() will crash if + the input image is not aligned. 
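+	   ("Aligned" here means that each row is padded so that its stride is a multiple of 32 bytes, as set up in Image::allocate().)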
+ */ + DCPOMATIC_ASSERT (aligned ()); - shared_ptr rgb (new SimpleImage (PIX_FMT_RGB24, content_size, aligned)); + shared_ptr scaled (new Image (out_format, out_size, out_aligned)); struct SwsContext* scale_context = sws_getContext ( size().width, size().height, pixel_format(), - content_size.width, content_size.height, PIX_FMT_RGB24, - scaler->ffmpeg_id (), 0, 0, 0 + out_size.width, out_size.height, out_format, + (fast ? SWS_FAST_BILINEAR : SWS_BICUBIC) | SWS_ACCURATE_RND, 0, 0, 0 + ); + + DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT); + int const lut[dcp::YUV_TO_RGB_COUNT] = { + SWS_CS_ITU601, + SWS_CS_ITU709 + }; + + sws_setColorspaceDetails ( + scale_context, + sws_getCoefficients (lut[yuv_to_rgb]), 0, + sws_getCoefficients (lut[yuv_to_rgb]), 0, + 0, 1 << 16, 1 << 16 ); - /* Scale and convert to RGB from whatever its currently in (which may be RGB) */ sws_scale ( scale_context, data(), stride(), 0, size().height, - rgb->data(), rgb->stride() + scaled->data(), scaled->stride() ); - /* Put the image in the right place in a black frame if are padding; this is - a bit grubby and expensive, but probably inconsequential in the great - scheme of things. - */ - if (padding > 0) { - shared_ptr padded_rgb (new SimpleImage (PIX_FMT_RGB24, out_size, aligned)); - padded_rgb->make_black (); - - /* XXX: we are cheating a bit here; we know the frame is RGB so we can - make assumptions about its composition. - */ - uint8_t* p = padded_rgb->data()[0] + padding * 3; - uint8_t* q = rgb->data()[0]; - for (int j = 0; j < rgb->lines(0); ++j) { - memcpy (p, q, rgb->line_size()[0]); - p += padded_rgb->stride()[0]; - q += rgb->stride()[0]; - } - - rgb = padded_rgb; - } - sws_freeContext (scale_context); - return rgb; + return scaled; } -/** Run a FFmpeg post-process on this image and return the processed version. - * @param pp Flags for the required set of post processes. - * @return Post-processed image. 
- */ -shared_ptr -Image::post_process (string pp, bool aligned) const +/** Blacken a YUV image whose bits per pixel is rounded up to 16 */ +void +Image::yuv_16_black (uint16_t v, bool alpha) { - shared_ptr out (new SimpleImage (pixel_format(), size (), aligned)); - - int pp_format = 0; - switch (pixel_format()) { - case PIX_FMT_YUV420P: - pp_format = PP_FORMAT_420; - break; - case PIX_FMT_YUV422P10LE: - case PIX_FMT_YUV422P: - pp_format = PP_FORMAT_422; - break; - default: - assert (false); + memset (data()[0], 0, sample_size(0).height * stride()[0]); + for (int i = 1; i < 3; ++i) { + int16_t* p = reinterpret_cast (data()[i]); + int const lines = sample_size(i).height; + for (int y = 0; y < lines; ++y) { + /* We divide by 2 here because we are writing 2 bytes at a time */ + for (int x = 0; x < line_size()[i] / 2; ++x) { + p[x] = v; + } + p += stride()[i] / 2; + } } - - pp_mode* mode = pp_get_mode_by_name_and_quality (pp.c_str (), PP_QUALITY_MAX); - pp_context* context = pp_get_context (size().width, size().height, pp_format | PP_CPU_CAPS_MMX2); - pp_postprocess ( - (const uint8_t **) data(), stride(), - out->data(), out->stride(), - size().width, size().height, - 0, 0, mode, context, 0 - ); - - pp_free_mode (mode); - pp_free_context (context); + if (alpha) { + memset (data()[3], 0, sample_size(3).height * stride()[3]); + } +} - return out; +uint16_t +Image::swap_16 (uint16_t v) +{ + return ((v >> 8) & 0xff) | ((v & 0xff) << 8); } void Image::make_black () { + /* U/V black value for 8-bit colour */ + static uint8_t const eight_bit_uv = (1 << 7) - 1; + /* U/V black value for 9-bit colour */ + static uint16_t const nine_bit_uv = (1 << 8) - 1; + /* U/V black value for 10-bit colour */ + static uint16_t const ten_bit_uv = (1 << 9) - 1; + /* U/V black value for 16-bit colour */ + static uint16_t const sixteen_bit_uv = (1 << 15) - 1; + switch (_pixel_format) { - case PIX_FMT_YUV420P: - case PIX_FMT_YUV422P10LE: - case PIX_FMT_YUV422P: - memset (data()[0], 0, lines(0) * stride()[0]); - memset (data()[1], 0x80, lines(1) * stride()[1]); - memset (data()[2], 0x80, lines(2) * stride()[2]); + case AV_PIX_FMT_YUV420P: + case AV_PIX_FMT_YUV422P: + case AV_PIX_FMT_YUV444P: + case AV_PIX_FMT_YUV411P: + memset (data()[0], 0, sample_size(0).height * stride()[0]); + memset (data()[1], eight_bit_uv, sample_size(1).height * stride()[1]); + memset (data()[2], eight_bit_uv, sample_size(2).height * stride()[2]); + break; + + case AV_PIX_FMT_YUVJ420P: + case AV_PIX_FMT_YUVJ422P: + case AV_PIX_FMT_YUVJ444P: + memset (data()[0], 0, sample_size(0).height * stride()[0]); + memset (data()[1], eight_bit_uv + 1, sample_size(1).height * stride()[1]); + memset (data()[2], eight_bit_uv + 1, sample_size(2).height * stride()[2]); + break; + + case AV_PIX_FMT_YUV422P9LE: + case AV_PIX_FMT_YUV444P9LE: + yuv_16_black (nine_bit_uv, false); + break; + + case AV_PIX_FMT_YUV422P9BE: + case AV_PIX_FMT_YUV444P9BE: + yuv_16_black (swap_16 (nine_bit_uv), false); + break; + + case AV_PIX_FMT_YUV422P10LE: + case AV_PIX_FMT_YUV444P10LE: + yuv_16_black (ten_bit_uv, false); + break; + + case AV_PIX_FMT_YUV422P16LE: + case AV_PIX_FMT_YUV444P16LE: + yuv_16_black (sixteen_bit_uv, false); break; - case PIX_FMT_RGB24: - memset (data()[0], 0, lines(0) * stride()[0]); + case AV_PIX_FMT_YUV444P10BE: + case AV_PIX_FMT_YUV422P10BE: + yuv_16_black (swap_16 (ten_bit_uv), false); break; + case AV_PIX_FMT_YUVA420P9BE: + case AV_PIX_FMT_YUVA422P9BE: + case AV_PIX_FMT_YUVA444P9BE: + yuv_16_black (swap_16 (nine_bit_uv), true); + break; + + case 
AV_PIX_FMT_YUVA420P9LE: + case AV_PIX_FMT_YUVA422P9LE: + case AV_PIX_FMT_YUVA444P9LE: + yuv_16_black (nine_bit_uv, true); + break; + + case AV_PIX_FMT_YUVA420P10BE: + case AV_PIX_FMT_YUVA422P10BE: + case AV_PIX_FMT_YUVA444P10BE: + yuv_16_black (swap_16 (ten_bit_uv), true); + break; + + case AV_PIX_FMT_YUVA420P10LE: + case AV_PIX_FMT_YUVA422P10LE: + case AV_PIX_FMT_YUVA444P10LE: + yuv_16_black (ten_bit_uv, true); + break; + + case AV_PIX_FMT_YUVA420P16BE: + case AV_PIX_FMT_YUVA422P16BE: + case AV_PIX_FMT_YUVA444P16BE: + yuv_16_black (swap_16 (sixteen_bit_uv), true); + break; + + case AV_PIX_FMT_YUVA420P16LE: + case AV_PIX_FMT_YUVA422P16LE: + case AV_PIX_FMT_YUVA444P16LE: + yuv_16_black (sixteen_bit_uv, true); + break; + + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_ARGB: + case AV_PIX_FMT_RGBA: + case AV_PIX_FMT_ABGR: + case AV_PIX_FMT_BGRA: + case AV_PIX_FMT_RGB555LE: + case AV_PIX_FMT_RGB48LE: + case AV_PIX_FMT_RGB48BE: + case AV_PIX_FMT_XYZ12LE: + memset (data()[0], 0, sample_size(0).height * stride()[0]); + break; + + case AV_PIX_FMT_UYVY422: + { + int const Y = sample_size(0).height; + int const X = line_size()[0]; + uint8_t* p = data()[0]; + for (int y = 0; y < Y; ++y) { + for (int x = 0; x < X / 4; ++x) { + *p++ = eight_bit_uv; // Cb + *p++ = 0; // Y0 + *p++ = eight_bit_uv; // Cr + *p++ = 0; // Y1 + } + } + break; + } + default: - assert (false); + throw PixelFormatError ("make_black()", _pixel_format); } } void -Image::alpha_blend (shared_ptr other, Position position) +Image::make_transparent () { - /* Only implemented for RGBA onto RGB24 so far */ - assert (_pixel_format == PIX_FMT_RGB24 && other->pixel_format() == PIX_FMT_RGBA); + if (_pixel_format != AV_PIX_FMT_BGRA) { + throw PixelFormatError ("make_transparent()", _pixel_format); + } + + memset (data()[0], 0, sample_size(0).height * stride()[0]); +} + +void +Image::alpha_blend (shared_ptr other, Position position) +{ + /* We're blending BGRA images; first byte is blue, second byte is green, third byte red, fourth byte alpha */ + DCPOMATIC_ASSERT (other->pixel_format() == AV_PIX_FMT_BGRA); + int const other_bpp = 4; int start_tx = position.x; int start_ox = 0; @@ -249,27 +471,249 @@ Image::alpha_blend (shared_ptr other, Position position) start_ty = 0; } - for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) { - uint8_t* tp = data()[0] + ty * stride()[0] + position.x * 3; - uint8_t* op = other->data()[0] + oy * other->stride()[0]; - for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) { - float const alpha = float (op[3]) / 255; - tp[0] = (tp[0] * (1 - alpha)) + op[0] * alpha; - tp[1] = (tp[1] * (1 - alpha)) + op[1] * alpha; - tp[2] = (tp[2] * (1 - alpha)) + op[2] * alpha; - tp += 3; - op += 4; + switch (_pixel_format) { + case AV_PIX_FMT_RGB24: + { + /* Going onto RGB24. 
First byte is red, second green, third blue */ + int const this_bpp = 3; + for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) { + uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp; + uint8_t* op = other->data()[0] + oy * other->stride()[0]; + for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) { + float const alpha = float (op[3]) / 255; + tp[0] = op[2] * alpha + tp[0] * (1 - alpha); + tp[1] = op[1] * alpha + tp[1] * (1 - alpha); + tp[2] = op[0] * alpha + tp[2] * (1 - alpha); + + tp += this_bpp; + op += other_bpp; + } + } + break; + } + case AV_PIX_FMT_BGRA: + { + int const this_bpp = 4; + for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) { + uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp; + uint8_t* op = other->data()[0] + oy * other->stride()[0]; + for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) { + float const alpha = float (op[3]) / 255; + tp[0] = op[0] * alpha + tp[0] * (1 - alpha); + tp[1] = op[1] * alpha + tp[1] * (1 - alpha); + tp[2] = op[2] * alpha + tp[2] * (1 - alpha); + tp[3] = op[3] * alpha + tp[3] * (1 - alpha); + + tp += this_bpp; + op += other_bpp; + } } + break; + } + case AV_PIX_FMT_RGBA: + { + int const this_bpp = 4; + for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) { + uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp; + uint8_t* op = other->data()[0] + oy * other->stride()[0]; + for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) { + float const alpha = float (op[3]) / 255; + tp[0] = op[2] * alpha + tp[0] * (1 - alpha); + tp[1] = op[1] * alpha + tp[1] * (1 - alpha); + tp[2] = op[0] * alpha + tp[2] * (1 - alpha); + tp[3] = op[3] * alpha + tp[3] * (1 - alpha); + + tp += this_bpp; + op += other_bpp; + } + } + break; + } + case AV_PIX_FMT_RGB48LE: + { + int const this_bpp = 6; + for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) { + uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp; + uint8_t* op = other->data()[0] + oy * other->stride()[0]; + for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) { + float const alpha = float (op[3]) / 255; + /* Blend high bytes */ + tp[1] = op[2] * alpha + tp[1] * (1 - alpha); + tp[3] = op[1] * alpha + tp[3] * (1 - alpha); + tp[5] = op[0] * alpha + tp[5] * (1 - alpha); + + tp += this_bpp; + op += other_bpp; + } + } + break; + } + case AV_PIX_FMT_XYZ12LE: + { + dcp::ColourConversion conv = dcp::ColourConversion::srgb_to_xyz(); + double fast_matrix[9]; + dcp::combined_rgb_to_xyz (conv, fast_matrix); + double const * lut_in = conv.in()->lut (8, false); + double const * lut_out = conv.out()->lut (16, true); + int const this_bpp = 6; + for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) { + uint16_t* tp = reinterpret_cast (data()[0] + ty * stride()[0] + start_tx * this_bpp); + uint8_t* op = other->data()[0] + oy * other->stride()[0]; + for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) { + float const alpha = float (op[3]) / 255; + + /* Convert sRGB to XYZ; op is BGRA. 
First, input gamma LUT */ + double const r = lut_in[op[2]]; + double const g = lut_in[op[1]]; + double const b = lut_in[op[0]]; + + /* RGB to XYZ, including Bradford transform and DCI companding */ + double const x = max (0.0, min (65535.0, r * fast_matrix[0] + g * fast_matrix[1] + b * fast_matrix[2])); + double const y = max (0.0, min (65535.0, r * fast_matrix[3] + g * fast_matrix[4] + b * fast_matrix[5])); + double const z = max (0.0, min (65535.0, r * fast_matrix[6] + g * fast_matrix[7] + b * fast_matrix[8])); + + /* Out gamma LUT and blend */ + tp[0] = lrint(lut_out[lrint(x)] * 65535) * alpha + tp[0] * (1 - alpha); + tp[1] = lrint(lut_out[lrint(y)] * 65535) * alpha + tp[1] * (1 - alpha); + tp[2] = lrint(lut_out[lrint(z)] * 65535) * alpha + tp[2] * (1 - alpha); + + tp += this_bpp / 2; + op += other_bpp; + } + } + break; + } + case AV_PIX_FMT_YUV420P: + { + shared_ptr yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false); + dcp::Size const ts = size(); + dcp::Size const os = yuv->size(); + for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) { + int const hty = ty / 2; + int const hoy = oy / 2; + uint8_t* tY = data()[0] + (ty * stride()[0]) + start_tx; + uint8_t* tU = data()[1] + (hty * stride()[1]) + start_tx / 2; + uint8_t* tV = data()[2] + (hty * stride()[2]) + start_tx / 2; + uint8_t* oY = yuv->data()[0] + (oy * yuv->stride()[0]) + start_ox; + uint8_t* oU = yuv->data()[1] + (hoy * yuv->stride()[1]) + start_ox / 2; + uint8_t* oV = yuv->data()[2] + (hoy * yuv->stride()[2]) + start_ox / 2; + uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4; + for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) { + float const a = float(alpha[3]) / 255; + *tY = *oY * a + *tY * (1 - a); + *tU = *oU * a + *tU * (1 - a); + *tV = *oV * a + *tV * (1 - a); + ++tY; + ++oY; + if (tx % 2) { + ++tU; + ++tV; + } + if (ox % 2) { + ++oU; + ++oV; + } + alpha += 4; + } + } + break; + } + case AV_PIX_FMT_YUV420P10: + { + shared_ptr yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false); + dcp::Size const ts = size(); + dcp::Size const os = yuv->size(); + for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) { + int const hty = ty / 2; + int const hoy = oy / 2; + uint16_t* tY = ((uint16_t *) (data()[0] + (ty * stride()[0]))) + start_tx; + uint16_t* tU = ((uint16_t *) (data()[1] + (hty * stride()[1]))) + start_tx / 2; + uint16_t* tV = ((uint16_t *) (data()[2] + (hty * stride()[2]))) + start_tx / 2; + uint16_t* oY = ((uint16_t *) (yuv->data()[0] + (oy * yuv->stride()[0]))) + start_ox; + uint16_t* oU = ((uint16_t *) (yuv->data()[1] + (hoy * yuv->stride()[1]))) + start_ox / 2; + uint16_t* oV = ((uint16_t *) (yuv->data()[2] + (hoy * yuv->stride()[2]))) + start_ox / 2; + uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4; + for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) { + float const a = float(alpha[3]) / 255; + *tY = *oY * a + *tY * (1 - a); + *tU = *oU * a + *tU * (1 - a); + *tV = *oV * a + *tV * (1 - a); + ++tY; + ++oY; + if (tx % 2) { + ++tU; + ++tV; + } + if (ox % 2) { + ++oU; + ++oV; + } + alpha += 4; + } + } + break; + } + case AV_PIX_FMT_YUV422P10LE: + { + shared_ptr yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false); + dcp::Size const ts = size(); + dcp::Size const os = yuv->size(); + for (int ty = start_ty, oy = start_oy; ty < 
ts.height && oy < os.height; ++ty, ++oy) { + uint16_t* tY = ((uint16_t *) (data()[0] + (ty * stride()[0]))) + start_tx; + uint16_t* tU = ((uint16_t *) (data()[1] + (ty * stride()[1]))) + start_tx / 2; + uint16_t* tV = ((uint16_t *) (data()[2] + (ty * stride()[2]))) + start_tx / 2; + uint16_t* oY = ((uint16_t *) (yuv->data()[0] + (oy * yuv->stride()[0]))) + start_ox; + uint16_t* oU = ((uint16_t *) (yuv->data()[1] + (oy * yuv->stride()[1]))) + start_ox / 2; + uint16_t* oV = ((uint16_t *) (yuv->data()[2] + (oy * yuv->stride()[2]))) + start_ox / 2; + uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4; + for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) { + float const a = float(alpha[3]) / 255; + *tY = *oY * a + *tY * (1 - a); + *tU = *oU * a + *tU * (1 - a); + *tV = *oV * a + *tV * (1 - a); + ++tY; + ++oY; + if (tx % 2) { + ++tU; + ++tV; + } + if (ox % 2) { + ++oU; + ++oV; + } + alpha += 4; + } + } + break; + } + default: + throw PixelFormatError ("alpha_blend()", _pixel_format); + } +} + +void +Image::copy (shared_ptr other, Position position) +{ + /* Only implemented for RGB24 onto RGB24 so far */ + DCPOMATIC_ASSERT (_pixel_format == AV_PIX_FMT_RGB24 && other->pixel_format() == AV_PIX_FMT_RGB24); + DCPOMATIC_ASSERT (position.x >= 0 && position.y >= 0); + + int const N = min (position.x + other->size().width, size().width) - position.x; + for (int ty = position.y, oy = 0; ty < size().height && oy < other->size().height; ++ty, ++oy) { + uint8_t * const tp = data()[0] + ty * stride()[0] + position.x * 3; + uint8_t * const op = other->data()[0] + oy * other->stride()[0]; + memcpy (tp, op, N * 3); } } void Image::read_from_socket (shared_ptr socket) { - for (int i = 0; i < components(); ++i) { + for (int i = 0; i < planes(); ++i) { uint8_t* p = data()[i]; - for (int y = 0; y < lines(i); ++y) { - socket->read_definite_and_consume (p, line_size()[i], 30); + int const lines = sample_size(i).height; + for (int y = 0; y < lines; ++y) { + socket->read (p, line_size()[i]); p += stride()[i]; } } @@ -278,180 +722,532 @@ Image::read_from_socket (shared_ptr socket) void Image::write_to_socket (shared_ptr socket) const { - for (int i = 0; i < components(); ++i) { + for (int i = 0; i < planes(); ++i) { uint8_t* p = data()[i]; - for (int y = 0; y < lines(i); ++y) { - socket->write (p, line_size()[i], 30); + int const lines = sample_size(i).height; + for (int y = 0; y < lines; ++y) { + socket->write (p, line_size()[i]); p += stride()[i]; } } } -/** Construct a SimpleImage of a given size and format, allocating memory +float +Image::bytes_per_pixel (int c) const +{ + AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format); + if (!d) { + throw PixelFormatError ("bytes_per_pixel()", _pixel_format); + } + + if (c >= planes()) { + return 0; + } + + float bpp[4] = { 0, 0, 0, 0 }; + +#ifdef DCPOMATIC_HAVE_AVCOMPONENTDESCRIPTOR_DEPTH_MINUS1 + bpp[0] = floor ((d->comp[0].depth_minus1 + 8) / 8); + if (d->nb_components > 1) { + bpp[1] = floor ((d->comp[1].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w); + } + if (d->nb_components > 2) { + bpp[2] = floor ((d->comp[2].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w); + } + if (d->nb_components > 3) { + bpp[3] = floor ((d->comp[3].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w); + } +#else + bpp[0] = floor ((d->comp[0].depth + 7) / 8); + if (d->nb_components > 1) { + bpp[1] = floor ((d->comp[1].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w); + } + if (d->nb_components > 2) { + bpp[2] = 
floor ((d->comp[2].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w); + } + if (d->nb_components > 3) { + bpp[3] = floor ((d->comp[3].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w); + } +#endif + + if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) { + /* Not planar; sum them up */ + return bpp[0] + bpp[1] + bpp[2] + bpp[3]; + } + + return bpp[c]; +} + +/** Construct a Image of a given size and format, allocating memory * as required. * * @param p Pixel format. * @param s Size in pixels. + * @param aligned true to make each row of this image aligned to a 32-byte boundary. + * @param extra_pixels Amount of extra "run-off" memory to allocate at the end of each plane in pixels. */ -SimpleImage::SimpleImage (AVPixelFormat p, Size s, bool aligned) - : Image (p) - , _size (s) +Image::Image (AVPixelFormat p, dcp::Size s, bool aligned, int extra_pixels) + : _size (s) + , _pixel_format (p) , _aligned (aligned) + , _extra_pixels (extra_pixels) +{ + allocate (); +} + +void +Image::allocate () { - _data = (uint8_t **) av_malloc (4 * sizeof (uint8_t *)); + _data = (uint8_t **) wrapped_av_malloc (4 * sizeof (uint8_t *)); _data[0] = _data[1] = _data[2] = _data[3] = 0; - - _line_size = (int *) av_malloc (4 * sizeof (int)); + + _line_size = (int *) wrapped_av_malloc (4 * sizeof (int)); _line_size[0] = _line_size[1] = _line_size[2] = _line_size[3] = 0; - - _stride = (int *) av_malloc (4 * sizeof (int)); + + _stride = (int *) wrapped_av_malloc (4 * sizeof (int)); _stride[0] = _stride[1] = _stride[2] = _stride[3] = 0; - switch (p) { - case PIX_FMT_RGB24: - _line_size[0] = s.width * 3; - break; - case PIX_FMT_RGBA: - _line_size[0] = s.width * 4; - break; - case PIX_FMT_YUV420P: - case PIX_FMT_YUV422P: - _line_size[0] = s.width; - _line_size[1] = s.width / 2; - _line_size[2] = s.width / 2; - break; - case PIX_FMT_YUV422P10LE: - _line_size[0] = s.width * 2; - _line_size[1] = s.width; - _line_size[2] = s.width; - break; - default: - assert (false); + for (int i = 0; i < planes(); ++i) { + _line_size[i] = ceil (_size.width * bytes_per_pixel(i)); + _stride[i] = stride_round_up (i, _line_size, _aligned ? 32 : 1); + + /* The assembler function ff_rgb24ToY_avx (in libswscale/x86/input.asm) + uses a 16-byte fetch to read three bytes (R/G/B) of image data. + Hence on the last pixel of the last line it reads over the end of + the actual data by 1 byte. If the width of an image is a multiple + of the stride alignment there will be no padding at the end of image lines. + OS X crashes on this illegal read, though other operating systems don't + seem to mind. The nasty + 1 in this malloc makes sure there is always a byte + for that instruction to read safely. + + Further to the above, valgrind is now telling me that ff_rgb24ToY_ssse3 + over-reads by more then _avx. I can't follow the code to work out how much, + so I'll just over-allocate by 32 bytes and have done with it. Empirical + testing suggests that it works. + */ + _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * sample_size(i).height + _extra_pixels * bytes_per_pixel(i) + 32); +#if HAVE_VALGRIND_MEMCHECK_H + /* The data between the end of the line size and the stride is undefined but processed by + libswscale, causing lots of valgrind errors. Mark it all defined to quell these errors. + */ + VALGRIND_MAKE_MEM_DEFINED (_data[i], _stride[i] * sample_size(i).height + _extra_pixels * bytes_per_pixel(i) + 32); +#endif } +} - for (int i = 0; i < components(); ++i) { - _stride[i] = stride_round_up (i, _line_size, _aligned ? 
32 : 1); - _data[i] = (uint8_t *) av_malloc (_stride[i] * lines (i)); +Image::Image (Image const & other) + : boost::enable_shared_from_this(other) + , _size (other._size) + , _pixel_format (other._pixel_format) + , _aligned (other._aligned) + , _extra_pixels (other._extra_pixels) +{ + allocate (); + + for (int i = 0; i < planes(); ++i) { + uint8_t* p = _data[i]; + uint8_t* q = other._data[i]; + int const lines = sample_size(i).height; + for (int j = 0; j < lines; ++j) { + memcpy (p, q, _line_size[i]); + p += stride()[i]; + q += other.stride()[i]; + } } } -/** Destroy a SimpleImage */ -SimpleImage::~SimpleImage () +Image::Image (AVFrame* frame) + : _size (frame->width, frame->height) + , _pixel_format (static_cast (frame->format)) + , _aligned (true) + , _extra_pixels (0) { - for (int i = 0; i < components(); ++i) { - av_free (_data[i]); + allocate (); + + for (int i = 0; i < planes(); ++i) { + uint8_t* p = _data[i]; + uint8_t* q = frame->data[i]; + int const lines = sample_size(i).height; + for (int j = 0; j < lines; ++j) { + memcpy (p, q, _line_size[i]); + p += stride()[i]; + /* AVFrame's linesize is what we call `stride' */ + q += frame->linesize[i]; + } } +} - av_free (_data); - av_free (_line_size); - av_free (_stride); +Image::Image (shared_ptr other, bool aligned) + : _size (other->_size) + , _pixel_format (other->_pixel_format) + , _aligned (aligned) + , _extra_pixels (other->_extra_pixels) +{ + allocate (); + + for (int i = 0; i < planes(); ++i) { + DCPOMATIC_ASSERT (line_size()[i] == other->line_size()[i]); + uint8_t* p = _data[i]; + uint8_t* q = other->data()[i]; + int const lines = sample_size(i).height; + for (int j = 0; j < lines; ++j) { + memcpy (p, q, line_size()[i]); + p += stride()[i]; + q += other->stride()[i]; + } + } } -SimpleImage::SimpleImage (shared_ptr im, bool aligned) - : Image (im->pixel_format()) +Image& +Image::operator= (Image const & other) { - assert (components() == im->components()); + if (this == &other) { + return *this; + } - for (int c = 0; c < components(); ++c) { + Image tmp (other); + swap (tmp); + return *this; +} - assert (line_size()[c] == im->line_size()[c]); +void +Image::swap (Image & other) +{ + std::swap (_size, other._size); + std::swap (_pixel_format, other._pixel_format); - uint8_t* t = data()[c]; - uint8_t* o = im->data()[c]; - - for (int y = 0; y < lines(c); ++y) { - memcpy (t, o, line_size()[c]); - t += stride()[c]; - o += im->stride()[c]; - } + for (int i = 0; i < 4; ++i) { + std::swap (_data[i], other._data[i]); + std::swap (_line_size[i], other._line_size[i]); + std::swap (_stride[i], other._stride[i]); } + + std::swap (_aligned, other._aligned); + std::swap (_extra_pixels, other._extra_pixels); } -uint8_t ** -SimpleImage::data () const +/** Destroy a Image */ +Image::~Image () +{ + for (int i = 0; i < planes(); ++i) { + av_free (_data[i]); + } + + av_free (_data); + av_free (_line_size); + av_free (_stride); +} + +uint8_t * const * +Image::data () const { return _data; } -int * -SimpleImage::line_size () const +int const * +Image::line_size () const { return _line_size; } -int * -SimpleImage::stride () const +int const * +Image::stride () const { return _stride; } -Size -SimpleImage::size () const +dcp::Size +Image::size () const { return _size; } -FilterBufferImage::FilterBufferImage (AVPixelFormat p, AVFilterBufferRef* b) - : Image (p) - , _buffer (b) +bool +Image::aligned () const { - + return _aligned; } -FilterBufferImage::~FilterBufferImage () +PositionImage +merge (list images) { - avfilter_unref_buffer (_buffer); + if 
(images.empty ()) { + return PositionImage (); + } + + if (images.size() == 1) { + return images.front (); + } + + dcpomatic::Rect all (images.front().position, images.front().image->size().width, images.front().image->size().height); + for (list::const_iterator i = images.begin(); i != images.end(); ++i) { + all.extend (dcpomatic::Rect (i->position, i->image->size().width, i->image->size().height)); + } + + shared_ptr merged (new Image (images.front().image->pixel_format (), dcp::Size (all.width, all.height), true)); + merged->make_transparent (); + for (list::const_iterator i = images.begin(); i != images.end(); ++i) { + merged->alpha_blend (i->image, i->position - all.position()); + } + + return PositionImage (merged, all.position ()); } -uint8_t ** -FilterBufferImage::data () const +bool +operator== (Image const & a, Image const & b) { - return _buffer->data; + if (a.planes() != b.planes() || a.pixel_format() != b.pixel_format() || a.aligned() != b.aligned()) { + return false; + } + + for (int c = 0; c < a.planes(); ++c) { + if (a.sample_size(c).height != b.sample_size(c).height || a.line_size()[c] != b.line_size()[c] || a.stride()[c] != b.stride()[c]) { + return false; + } + + uint8_t* p = a.data()[c]; + uint8_t* q = b.data()[c]; + int const lines = a.sample_size(c).height; + for (int y = 0; y < lines; ++y) { + if (memcmp (p, q, a.line_size()[c]) != 0) { + return false; + } + + p += a.stride()[c]; + q += b.stride()[c]; + } + } + + return true; } -int * -FilterBufferImage::line_size () const +/** Fade the image. + * @param f Amount to fade by; 0 is black, 1 is no fade. + */ +void +Image::fade (float f) { - return _buffer->linesize; + switch (_pixel_format) { + case AV_PIX_FMT_YUV420P: + case AV_PIX_FMT_YUV422P: + case AV_PIX_FMT_YUV444P: + case AV_PIX_FMT_YUV411P: + case AV_PIX_FMT_YUVJ420P: + case AV_PIX_FMT_YUVJ422P: + case AV_PIX_FMT_YUVJ444P: + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_ARGB: + case AV_PIX_FMT_RGBA: + case AV_PIX_FMT_ABGR: + case AV_PIX_FMT_BGRA: + case AV_PIX_FMT_RGB555LE: + /* 8-bit */ + for (int c = 0; c < 3; ++c) { + uint8_t* p = data()[c]; + int const lines = sample_size(c).height; + for (int y = 0; y < lines; ++y) { + uint8_t* q = p; + for (int x = 0; x < line_size()[c]; ++x) { + *q = int (float (*q) * f); + ++q; + } + p += stride()[c]; + } + } + break; + + case AV_PIX_FMT_YUV422P9LE: + case AV_PIX_FMT_YUV444P9LE: + case AV_PIX_FMT_YUV422P10LE: + case AV_PIX_FMT_YUV444P10LE: + case AV_PIX_FMT_YUV422P16LE: + case AV_PIX_FMT_YUV444P16LE: + case AV_PIX_FMT_YUVA420P9LE: + case AV_PIX_FMT_YUVA422P9LE: + case AV_PIX_FMT_YUVA444P9LE: + case AV_PIX_FMT_YUVA420P10LE: + case AV_PIX_FMT_YUVA422P10LE: + case AV_PIX_FMT_YUVA444P10LE: + case AV_PIX_FMT_RGB48LE: + case AV_PIX_FMT_XYZ12LE: + /* 16-bit little-endian */ + for (int c = 0; c < 3; ++c) { + int const stride_pixels = stride()[c] / 2; + int const line_size_pixels = line_size()[c] / 2; + uint16_t* p = reinterpret_cast (data()[c]); + int const lines = sample_size(c).height; + for (int y = 0; y < lines; ++y) { + uint16_t* q = p; + for (int x = 0; x < line_size_pixels; ++x) { + *q = int (float (*q) * f); + ++q; + } + p += stride_pixels; + } + } + break; + + case AV_PIX_FMT_YUV422P9BE: + case AV_PIX_FMT_YUV444P9BE: + case AV_PIX_FMT_YUV444P10BE: + case AV_PIX_FMT_YUV422P10BE: + case AV_PIX_FMT_YUVA420P9BE: + case AV_PIX_FMT_YUVA422P9BE: + case AV_PIX_FMT_YUVA444P9BE: + case AV_PIX_FMT_YUVA420P10BE: + case AV_PIX_FMT_YUVA422P10BE: + case AV_PIX_FMT_YUVA444P10BE: + case AV_PIX_FMT_YUVA420P16BE: + case 
AV_PIX_FMT_YUVA422P16BE: + case AV_PIX_FMT_YUVA444P16BE: + case AV_PIX_FMT_RGB48BE: + /* 16-bit big-endian */ + for (int c = 0; c < 3; ++c) { + int const stride_pixels = stride()[c] / 2; + int const line_size_pixels = line_size()[c] / 2; + uint16_t* p = reinterpret_cast (data()[c]); + int const lines = sample_size(c).height; + for (int y = 0; y < lines; ++y) { + uint16_t* q = p; + for (int x = 0; x < line_size_pixels; ++x) { + *q = swap_16 (int (float (swap_16 (*q)) * f)); + ++q; + } + p += stride_pixels; + } + } + break; + + case AV_PIX_FMT_UYVY422: + { + int const Y = sample_size(0).height; + int const X = line_size()[0]; + uint8_t* p = data()[0]; + for (int y = 0; y < Y; ++y) { + for (int x = 0; x < X; ++x) { + *p = int (float (*p) * f); + ++p; + } + } + break; + } + + default: + throw PixelFormatError ("fade()", _pixel_format); + } } -int * -FilterBufferImage::stride () const +shared_ptr +Image::ensure_aligned (shared_ptr image) { - /* XXX? */ - return _buffer->linesize; + if (image->aligned()) { + return image; + } + + return shared_ptr (new Image (image, true)); } -Size -FilterBufferImage::size () const +size_t +Image::memory_used () const { - return Size (_buffer->video->w, _buffer->video->h); + size_t m = 0; + for (int i = 0; i < planes(); ++i) { + m += _stride[i] * sample_size(i).height; + } + return m; } -RGBPlusAlphaImage::RGBPlusAlphaImage (shared_ptr im) - : SimpleImage (im->pixel_format(), im->size(), false) +class Memory { - assert (im->pixel_format() == PIX_FMT_RGBA); +public: + Memory () + : data(0) + , size(0) + {} + + ~Memory () + { + free (data); + } - _alpha = (uint8_t *) av_malloc (im->size().width * im->size().height); + uint8_t* data; + size_t size; +}; - uint8_t* in = im->data()[0]; - uint8_t* out = data()[0]; - uint8_t* out_alpha = _alpha; - for (int y = 0; y < im->size().height; ++y) { - uint8_t* in_r = in; - for (int x = 0; x < im->size().width; ++x) { - *out++ = *in_r++; - *out++ = *in_r++; - *out++ = *in_r++; - *out_alpha++ = *in_r++; - } +static void +png_write_data (png_structp png_ptr, png_bytep data, png_size_t length) +{ + Memory* mem = reinterpret_cast(png_get_io_ptr(png_ptr)); + size_t size = mem->size + length; - in += im->stride()[0]; + if (mem->data) { + mem->data = reinterpret_cast(realloc(mem->data, size)); + } else { + mem->data = reinterpret_cast(malloc(size)); } + + if (!mem->data) { + throw EncodeError (N_("could not allocate memory for PNG")); + } + + memcpy (mem->data + mem->size, data, length); + mem->size += length; +} + +static void +png_flush (png_structp) +{ + } -RGBPlusAlphaImage::~RGBPlusAlphaImage () +static void +png_error_fn (png_structp png_ptr, char const * message) { - av_free (_alpha); + reinterpret_cast(png_get_error_ptr(png_ptr))->png_error (message); +} + +void +Image::png_error (char const * message) +{ + throw EncodeError (String::compose ("Error during PNG write: %1", message)); +} + +dcp::Data +Image::as_png () const +{ + DCPOMATIC_ASSERT (bytes_per_pixel(0) == 4); + DCPOMATIC_ASSERT (planes() == 1); + DCPOMATIC_ASSERT (pixel_format() == AV_PIX_FMT_BGRA); + + /* error handling? 
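+	   libpng errors are reported via png_error_fn (registered below), which forwards to Image::png_error and throws an EncodeError.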
*/
+	png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, reinterpret_cast<void*>(const_cast<Image*>(this)), png_error_fn, 0);
+	if (!png_ptr) {
+		throw EncodeError (N_("could not create PNG write struct"));
+	}
+
+	Memory state;
+
+	png_set_write_fn (png_ptr, &state, png_write_data, png_flush);
+
+	png_infop info_ptr = png_create_info_struct(png_ptr);
+	if (!info_ptr) {
+		png_destroy_write_struct (&png_ptr, &info_ptr);
+		throw EncodeError (N_("could not create PNG info struct"));
+	}
+
+	png_set_IHDR (png_ptr, info_ptr, size().width, size().height, 8, PNG_COLOR_TYPE_RGBA, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
+
+	png_byte ** row_pointers = reinterpret_cast<png_byte **>(png_malloc(png_ptr, size().height * sizeof(png_byte *)));
+	for (int i = 0; i < size().height; ++i) {
+		row_pointers[i] = (png_byte *) (data()[0] + i * stride()[0]);
+	}
+
+	png_write_info (png_ptr, info_ptr);
+	png_write_image (png_ptr, row_pointers);
+	png_write_end (png_ptr, info_ptr);
+
+	png_destroy_write_struct (&png_ptr, &info_ptr);
+	png_free (png_ptr, row_pointers);
+
+	return dcp::Data (state.data, state.size);
+}
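
Example (illustrative sketch, not part of the commit above): one way the Image API added in this diff might be driven to crop, scale and letterbox a decoded frame and then export it as a PNG. The AVFrame source, the include used for Crop and the Crop constructor argument order are assumptions; everything else relies only on functions shown in the diff (the Image(AVFrame*) constructor, crop_scale_window() and as_png(), which requires a BGRA image).

#include "image.h"
#include "types.h"   /* assumed location of Crop */
#include <boost/shared_ptr.hpp>
extern "C" {
#include <libavutil/frame.h>
}

using boost::shared_ptr;

static dcp::Data
frame_to_png (AVFrame* frame)
{
	/* Copy the decoded frame into our own (aligned) buffers */
	shared_ptr<Image> decoded (new Image (frame));

	/* Crop 16 pixels off each side, scale what is left into a black
	   1998x1080 frame and convert to BGRA, which is what as_png() expects.
	*/
	shared_ptr<Image> rgba = decoded->crop_scale_window (
		Crop (16, 16, 0, 0),     /* left, right, top, bottom (assumed order) */
		dcp::Size (1966, 1080),  /* inter_size: the scaled picture */
		dcp::Size (1998, 1080),  /* out_size: inter_size plus black padding */
		dcp::YUV_TO_RGB_REC709,
		AV_PIX_FMT_BGRA,
		true,                    /* aligned output */
		false                    /* bicubic rather than fast bilinear scaling */
		);

	return rgba->as_png ();
}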