X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fimage.cc;h=71a3a5bccd9b174b5ffea94f7eecbac250265952;hb=a306df9145d16046e51e8b7ff5222e341e98fdbd;hp=0c7a0ef0da26bfad6ea20f96ec2a5316af678a9a;hpb=a978f3ac575f1af017002c861480d5203cf0a34e;p=dcpomatic.git diff --git a/src/lib/image.cc b/src/lib/image.cc index 0c7a0ef0d..71a3a5bcc 100644 --- a/src/lib/image.cc +++ b/src/lib/image.cc @@ -1,19 +1,20 @@ /* - Copyright (C) 2012-2015 Carl Hetherington + Copyright (C) 2012-2016 Carl Hetherington - This program is free software; you can redistribute it and/or modify + This file is part of DCP-o-matic. + + DCP-o-matic is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - This program is distributed in the hope that it will be useful, + DCP-o-matic is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + along with DCP-o-matic. If not, see . */ @@ -26,12 +27,12 @@ #include "timer.h" #include "rect.h" #include "util.h" -#include "md5_digester.h" #include "dcpomatic_socket.h" extern "C" { #include #include #include +#include } #include @@ -42,6 +43,7 @@ using std::min; using std::cout; using std::cerr; using std::list; +using std::runtime_error; using boost::shared_ptr; using dcp::Size; @@ -76,8 +78,8 @@ Image::sample_size (int n) const } return dcp::Size ( - rint (ceil (static_cast(size().width) / horizontal_factor)), - rint (ceil (static_cast(size().height) / line_factor (n))) + lrint (ceil (static_cast(size().width) / horizontal_factor)), + lrint (ceil (static_cast(size().height) / line_factor (n))) ); } @@ -101,17 +103,20 @@ Image::planes () const throw PixelFormatError ("planes()", _pixel_format); } - if ((d->flags & PIX_FMT_PLANAR) == 0) { + if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) { return 1; } return d->nb_components; } -/** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size' */ +/** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size'. + * @param fast Try to be fast at the possible expense of quality; at present this means using + * fast bilinear rather than bicubic scaling. + */ shared_ptr Image::crop_scale_window ( - Crop crop, dcp::Size inter_size, dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned + Crop crop, dcp::Size inter_size, dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast ) const { /* Empirical testing suggests that sws_scale() will crash if @@ -122,8 +127,29 @@ Image::crop_scale_window ( DCPOMATIC_ASSERT (out_size.width >= inter_size.width); DCPOMATIC_ASSERT (out_size.height >= inter_size.height); - /* Here's an image of out_size */ - shared_ptr out (new Image (out_format, out_size, out_aligned)); + /* Here's an image of out_size. Below we may write to it starting at an offset so we get some padding. 
+ Hence we want to write in the following pattern: + + block start write start line end + |..(padding)..|<------line-size------------->|..(padding)..| + |..(padding)..|<------line-size------------->|..(padding)..| + |..(padding)..|<------line-size------------->|..(padding)..| + + where line-size is of the smaller (inter_size) image and the full padded line length is that of + out_size. To get things to work we have to tell FFmpeg that the stride is that of out_size. + However some parts of FFmpeg (notably rgb48Toxyz12 in swscale.c) process data for the full + specified *stride*. This does not matter until we get to the last line: + + block start write start line end + |..(padding)..|<------line-size------------->|XXXwrittenXXX| + |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXX| + |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXXXXXwrittenXXX + ^^^^ out of bounds + + To get around this, we ask Image to overallocate its buffers by the overrun. + */ + + shared_ptr out (new Image (out_format, out_size, out_aligned, (out_size.width - inter_size.width) / 2)); out->make_black (); /* Size of the image after any crop */ @@ -133,11 +159,11 @@ Image::crop_scale_window ( struct SwsContext* scale_context = sws_getContext ( cropped_size.width, cropped_size.height, pixel_format(), inter_size.width, inter_size.height, out_format, - SWS_BICUBIC, 0, 0, 0 + fast ? SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0 ); if (!scale_context) { - throw StringError (N_("Could not allocate SwsContext")); + throw runtime_error (N_("Could not allocate SwsContext")); } DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT); @@ -166,7 +192,7 @@ Image::crop_scale_window ( round down so that we don't crop a subsampled pixel until we've cropped all of its Y-channel pixels. */ - int const x = int (rint (bytes_per_pixel(c) * crop.left)) & ~ ((int) desc->log2_chroma_w); + int const x = lrintf (bytes_per_pixel(c) * crop.left) & ~ ((int) desc->log2_chroma_w); scale_in_data[c] = data()[c] + x + stride()[c] * (crop.top / line_factor(c)); } @@ -175,7 +201,7 @@ Image::crop_scale_window ( uint8_t* scale_out_data[out->planes()]; for (int c = 0; c < out->planes(); ++c) { - scale_out_data[c] = out->data()[c] + int (rint (out->bytes_per_pixel(c) * corner.x)) + out->stride()[c] * corner.y; + scale_out_data[c] = out->data()[c] + lrintf (out->bytes_per_pixel(c) * corner.x) + out->stride()[c] * corner.y; } sws_scale ( @@ -190,8 +216,11 @@ Image::crop_scale_window ( return out; } +/** @param fast Try to be fast at the possible expense of quality; at present this means using + * fast bilinear rather than bicubic scaling. + */ shared_ptr -Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned) const +Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast) const { /* Empirical testing suggests that sws_scale() will crash if the input image is not aligned. @@ -203,7 +232,7 @@ Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_fo struct SwsContext* scale_context = sws_getContext ( size().width, size().height, pixel_format(), out_size.width, out_size.height, out_format, - SWS_BICUBIC, 0, 0, 0 + fast ? 
SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0
 		);
 
 	DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
@@ -272,45 +301,45 @@ Image::make_black ()
 	static uint16_t const sixteen_bit_uv =	(1 << 15) - 1;
 
 	switch (_pixel_format) {
-	case PIX_FMT_YUV420P:
-	case PIX_FMT_YUV422P:
-	case PIX_FMT_YUV444P:
-	case PIX_FMT_YUV411P:
+	case AV_PIX_FMT_YUV420P:
+	case AV_PIX_FMT_YUV422P:
+	case AV_PIX_FMT_YUV444P:
+	case AV_PIX_FMT_YUV411P:
 		memset (data()[0], 0, sample_size(0).height * stride()[0]);
 		memset (data()[1], eight_bit_uv, sample_size(1).height * stride()[1]);
 		memset (data()[2], eight_bit_uv, sample_size(2).height * stride()[2]);
 		break;
 
-	case PIX_FMT_YUVJ420P:
-	case PIX_FMT_YUVJ422P:
-	case PIX_FMT_YUVJ444P:
+	case AV_PIX_FMT_YUVJ420P:
+	case AV_PIX_FMT_YUVJ422P:
+	case AV_PIX_FMT_YUVJ444P:
 		memset (data()[0], 0, sample_size(0).height * stride()[0]);
 		memset (data()[1], eight_bit_uv + 1, sample_size(1).height * stride()[1]);
 		memset (data()[2], eight_bit_uv + 1, sample_size(2).height * stride()[2]);
 		break;
 
-	case PIX_FMT_YUV422P9LE:
-	case PIX_FMT_YUV444P9LE:
+	case AV_PIX_FMT_YUV422P9LE:
+	case AV_PIX_FMT_YUV444P9LE:
 		yuv_16_black (nine_bit_uv, false);
 		break;
 
-	case PIX_FMT_YUV422P9BE:
-	case PIX_FMT_YUV444P9BE:
+	case AV_PIX_FMT_YUV422P9BE:
+	case AV_PIX_FMT_YUV444P9BE:
 		yuv_16_black (swap_16 (nine_bit_uv), false);
 		break;
 
-	case PIX_FMT_YUV422P10LE:
-	case PIX_FMT_YUV444P10LE:
+	case AV_PIX_FMT_YUV422P10LE:
+	case AV_PIX_FMT_YUV444P10LE:
 		yuv_16_black (ten_bit_uv, false);
 		break;
 
-	case PIX_FMT_YUV422P16LE:
-	case PIX_FMT_YUV444P16LE:
+	case AV_PIX_FMT_YUV422P16LE:
+	case AV_PIX_FMT_YUV444P16LE:
 		yuv_16_black (sixteen_bit_uv, false);
 		break;
 
-	case PIX_FMT_YUV444P10BE:
-	case PIX_FMT_YUV422P10BE:
+	case AV_PIX_FMT_YUV444P10BE:
+	case AV_PIX_FMT_YUV422P10BE:
 		yuv_16_black (swap_16 (ten_bit_uv), false);
 		break;
 
@@ -350,18 +379,19 @@ Image::make_black ()
 		yuv_16_black (sixteen_bit_uv, true);
 		break;
 
-	case PIX_FMT_RGB24:
-	case PIX_FMT_ARGB:
-	case PIX_FMT_RGBA:
-	case PIX_FMT_ABGR:
-	case PIX_FMT_BGRA:
-	case PIX_FMT_RGB555LE:
-	case PIX_FMT_RGB48LE:
-	case PIX_FMT_RGB48BE:
+	case AV_PIX_FMT_RGB24:
+	case AV_PIX_FMT_ARGB:
+	case AV_PIX_FMT_RGBA:
+	case AV_PIX_FMT_ABGR:
+	case AV_PIX_FMT_BGRA:
+	case AV_PIX_FMT_RGB555LE:
+	case AV_PIX_FMT_RGB48LE:
+	case AV_PIX_FMT_RGB48BE:
+	case AV_PIX_FMT_XYZ12LE:
 		memset (data()[0], 0, sample_size(0).height * stride()[0]);
 		break;
 
-	case PIX_FMT_UYVY422:
+	case AV_PIX_FMT_UYVY422:
 	{
 		int const Y = sample_size(0).height;
 		int const X = line_size()[0];
@@ -385,7 +415,7 @@ Image::make_black ()
 void
 Image::make_transparent ()
 {
-	if (_pixel_format != PIX_FMT_RGBA) {
+	if (_pixel_format != AV_PIX_FMT_RGBA) {
 		throw PixelFormatError ("make_transparent()", _pixel_format);
 	}
 
@@ -395,7 +425,8 @@ Image::make_transparent ()
 void
 Image::alpha_blend (shared_ptr other, Position position)
 {
-	DCPOMATIC_ASSERT (other->pixel_format() == PIX_FMT_RGBA);
+	/* We're blending RGBA images; first byte is blue, second byte is green, third byte red, fourth byte alpha */
+	DCPOMATIC_ASSERT (other->pixel_format() == AV_PIX_FMT_RGBA);
 	int const other_bpp = 4;
 
 	int start_tx = position.x;
@@ -415,17 +446,18 @@ Image::alpha_blend (shared_ptr other, Position position)
 	}
 
 	switch (_pixel_format) {
-	case PIX_FMT_RGB24:
+	case AV_PIX_FMT_RGB24:
 	{
+		/* Going onto RGB24.
First byte is red, second green, third blue */ int const this_bpp = 3; for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) { uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp; uint8_t* op = other->data()[0] + oy * other->stride()[0]; for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) { float const alpha = float (op[3]) / 255; - tp[0] = op[0] * alpha + tp[0] * (1 - alpha); + tp[0] = op[2] * alpha + tp[0] * (1 - alpha); tp[1] = op[1] * alpha + tp[1] * (1 - alpha); - tp[2] = op[2] * alpha + tp[2] * (1 - alpha); + tp[2] = op[0] * alpha + tp[2] * (1 - alpha); tp += this_bpp; op += other_bpp; @@ -433,8 +465,8 @@ Image::alpha_blend (shared_ptr other, Position position) } break; } - case PIX_FMT_BGRA: - case PIX_FMT_RGBA: + case AV_PIX_FMT_BGRA: + case AV_PIX_FMT_RGBA: { int const this_bpp = 4; for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) { @@ -453,7 +485,7 @@ Image::alpha_blend (shared_ptr other, Position position) } break; } - case PIX_FMT_RGB48LE: + case AV_PIX_FMT_RGB48LE: { int const this_bpp = 6; for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) { @@ -461,10 +493,36 @@ Image::alpha_blend (shared_ptr other, Position position) uint8_t* op = other->data()[0] + oy * other->stride()[0]; for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) { float const alpha = float (op[3]) / 255; - /* Blend high bytes */ - tp[1] = op[0] * alpha + tp[1] * (1 - alpha); + /* Blend high bytes; the RGBA in op appears to be BGRA */ + tp[1] = op[2] * alpha + tp[1] * (1 - alpha); tp[3] = op[1] * alpha + tp[3] * (1 - alpha); - tp[5] = op[2] * alpha + tp[5] * (1 - alpha); + tp[5] = op[0] * alpha + tp[5] * (1 - alpha); + + tp += this_bpp; + op += other_bpp; + } + } + break; + } + case AV_PIX_FMT_XYZ12LE: + { + boost::numeric::ublas::matrix matrix = dcp::ColourConversion::srgb_to_xyz().rgb_to_xyz(); + int const this_bpp = 6; + for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) { + uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp; + uint8_t* op = other->data()[0] + oy * other->stride()[0]; + for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) { + float const alpha = float (op[3]) / 255; + + /* Convert sRGB to XYZ; op is BGRA */ + int const x = matrix(0, 0) * op[2] + matrix(0, 1) * op[1] + matrix(0, 2) * op[0]; + int const y = matrix(1, 0) * op[2] + matrix(1, 1) * op[1] + matrix(1, 2) * op[0]; + int const z = matrix(2, 0) * op[2] + matrix(2, 1) * op[1] + matrix(2, 2) * op[0]; + + /* Blend high bytes */ + tp[1] = min (x, 255) * alpha + tp[1] * (1 - alpha); + tp[3] = min (y, 255) * alpha + tp[3] * (1 - alpha); + tp[5] = min (z, 255) * alpha + tp[5] * (1 - alpha); tp += this_bpp; op += other_bpp; @@ -481,7 +539,7 @@ void Image::copy (shared_ptr other, Position position) { /* Only implemented for RGB24 onto RGB24 so far */ - DCPOMATIC_ASSERT (_pixel_format == PIX_FMT_RGB24 && other->pixel_format() == PIX_FMT_RGB24); + DCPOMATIC_ASSERT (_pixel_format == AV_PIX_FMT_RGB24 && other->pixel_format() == AV_PIX_FMT_RGB24); DCPOMATIC_ASSERT (position.x >= 0 && position.y >= 0); int const N = min (position.x + other->size().width, size().width) - position.x; @@ -532,18 +590,31 @@ Image::bytes_per_pixel (int c) const float bpp[4] = { 0, 0, 0, 0 }; - bpp[0] = floor 
((d->comp[0].depth_minus1 + 1 + 7) / 8); +#ifdef DCPOMATIC_HAVE_AVCOMPONENTDESCRIPTOR_DEPTH_MINUS1 + bpp[0] = floor ((d->comp[0].depth_minus1 + 8) / 8); + if (d->nb_components > 1) { + bpp[1] = floor ((d->comp[1].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w); + } + if (d->nb_components > 2) { + bpp[2] = floor ((d->comp[2].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w); + } + if (d->nb_components > 3) { + bpp[3] = floor ((d->comp[3].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w); + } +#else + bpp[0] = floor ((d->comp[0].depth + 7) / 8); if (d->nb_components > 1) { - bpp[1] = floor ((d->comp[1].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w); + bpp[1] = floor ((d->comp[1].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w); } if (d->nb_components > 2) { - bpp[2] = floor ((d->comp[2].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w); + bpp[2] = floor ((d->comp[2].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w); } if (d->nb_components > 3) { - bpp[3] = floor ((d->comp[3].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w); + bpp[3] = floor ((d->comp[3].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w); } +#endif - if ((d->flags & PIX_FMT_PLANAR) == 0) { + if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) { /* Not planar; sum them up */ return bpp[0] + bpp[1] + bpp[2] + bpp[3]; } @@ -556,11 +627,13 @@ Image::bytes_per_pixel (int c) const * * @param p Pixel format. * @param s Size in pixels. + * @param extra_pixels Amount of extra "run-off" memory to allocate at the end of each plane in pixels. */ -Image::Image (AVPixelFormat p, dcp::Size s, bool aligned) +Image::Image (AVPixelFormat p, dcp::Size s, bool aligned, int extra_pixels) : _size (s) , _pixel_format (p) , _aligned (aligned) + , _extra_pixels (extra_pixels) { allocate (); } @@ -595,7 +668,7 @@ Image::allocate () so I'll just over-allocate by 32 bytes and have done with it. Empirical testing suggests that it works. 
*/ - _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * sample_size(i).height + 32); + _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * sample_size(i).height + _extra_pixels * bytes_per_pixel(i) + 32); } } @@ -603,6 +676,7 @@ Image::Image (Image const & other) : _size (other._size) , _pixel_format (other._pixel_format) , _aligned (other._aligned) + , _extra_pixels (other._extra_pixels) { allocate (); @@ -622,6 +696,7 @@ Image::Image (AVFrame* frame) : _size (frame->width, frame->height) , _pixel_format (static_cast (frame->format)) , _aligned (true) + , _extra_pixels (0) { allocate (); @@ -642,6 +717,7 @@ Image::Image (shared_ptr other, bool aligned) : _size (other->_size) , _pixel_format (other->_pixel_format) , _aligned (aligned) + , _extra_pixels (other->_extra_pixels) { allocate (); @@ -787,19 +863,19 @@ void Image::fade (float f) { switch (_pixel_format) { - case PIX_FMT_YUV420P: - case PIX_FMT_YUV422P: - case PIX_FMT_YUV444P: - case PIX_FMT_YUV411P: - case PIX_FMT_YUVJ420P: - case PIX_FMT_YUVJ422P: - case PIX_FMT_YUVJ444P: - case PIX_FMT_RGB24: - case PIX_FMT_ARGB: - case PIX_FMT_RGBA: - case PIX_FMT_ABGR: - case PIX_FMT_BGRA: - case PIX_FMT_RGB555LE: + case AV_PIX_FMT_YUV420P: + case AV_PIX_FMT_YUV422P: + case AV_PIX_FMT_YUV444P: + case AV_PIX_FMT_YUV411P: + case AV_PIX_FMT_YUVJ420P: + case AV_PIX_FMT_YUVJ422P: + case AV_PIX_FMT_YUVJ444P: + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_ARGB: + case AV_PIX_FMT_RGBA: + case AV_PIX_FMT_ABGR: + case AV_PIX_FMT_BGRA: + case AV_PIX_FMT_RGB555LE: /* 8-bit */ for (int c = 0; c < 3; ++c) { uint8_t* p = data()[c]; @@ -815,12 +891,12 @@ Image::fade (float f) } break; - case PIX_FMT_YUV422P9LE: - case PIX_FMT_YUV444P9LE: - case PIX_FMT_YUV422P10LE: - case PIX_FMT_YUV444P10LE: - case PIX_FMT_YUV422P16LE: - case PIX_FMT_YUV444P16LE: + case AV_PIX_FMT_YUV422P9LE: + case AV_PIX_FMT_YUV444P9LE: + case AV_PIX_FMT_YUV422P10LE: + case AV_PIX_FMT_YUV444P10LE: + case AV_PIX_FMT_YUV422P16LE: + case AV_PIX_FMT_YUV444P16LE: case AV_PIX_FMT_YUVA420P9LE: case AV_PIX_FMT_YUVA422P9LE: case AV_PIX_FMT_YUVA444P9LE: @@ -828,6 +904,7 @@ Image::fade (float f) case AV_PIX_FMT_YUVA422P10LE: case AV_PIX_FMT_YUVA444P10LE: case AV_PIX_FMT_RGB48LE: + case AV_PIX_FMT_XYZ12LE: /* 16-bit little-endian */ for (int c = 0; c < 3; ++c) { int const stride_pixels = stride()[c] / 2; @@ -845,10 +922,10 @@ Image::fade (float f) } break; - case PIX_FMT_YUV422P9BE: - case PIX_FMT_YUV444P9BE: - case PIX_FMT_YUV444P10BE: - case PIX_FMT_YUV422P10BE: + case AV_PIX_FMT_YUV422P9BE: + case AV_PIX_FMT_YUV444P9BE: + case AV_PIX_FMT_YUV444P10BE: + case AV_PIX_FMT_YUV422P10BE: case AV_PIX_FMT_YUVA420P9BE: case AV_PIX_FMT_YUVA422P9BE: case AV_PIX_FMT_YUVA444P9BE: @@ -876,7 +953,7 @@ Image::fade (float f) } break; - case PIX_FMT_UYVY422: + case AV_PIX_FMT_UYVY422: { int const Y = sample_size(0).height; int const X = line_size()[0];
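
A note on the over-allocation introduced in crop_scale_window() above: the scaled inter_size picture is written at an offset inside the out_size frame so that it ends up centred, but sws_scale() is told the out_size stride, and some swscale paths (such as rgb48Toxyz12) write a full stride for the last row, running past the end of the buffer by roughly the width of the right-hand padding. The sketch below only mirrors that arithmetic for a single-plane format; the names are illustrative and are not taken from image.cc.

#include <cstdint>

/* Sketch of the padding arithmetic behind the extra_pixels over-allocation.
   Illustrative only; assumes a single-plane format such as RGB48LE. */
struct ScalePlan
{
	int stride;        /* bytes per row of the out_size frame */
	int write_offset;  /* byte offset of the first pixel sws_scale() writes */
	int extra_bytes;   /* over-allocation needed if the last row is written to a full stride */
	int alloc_bytes;   /* total bytes to allocate for the plane */
};

ScalePlan
plan_centred_scale (int out_width, int out_height, int inter_width, int inter_height, int bytes_per_pixel)
{
	ScalePlan p;
	p.stride = out_width * bytes_per_pixel;
	int const corner_x = (out_width - inter_width) / 2;
	int const corner_y = (out_height - inter_height) / 2;
	p.write_offset = corner_y * p.stride + corner_x * bytes_per_pixel;
	/* If the scaler writes a whole stride for the last of the inter_height rows it can run up to
	   corner_x pixels past an out_width x out_height buffer, hence the
	   extra_pixels = (out_size.width - inter_size.width) / 2 passed to Image's constructor above. */
	p.extra_bytes = corner_x * bytes_per_pixel;
	p.alloc_bytes = p.stride * out_height + p.extra_bytes;
	return p;
}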
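
In make_black() above, "black" for the YUV formats means a zero luma plane with both chroma planes held at mid-range (no colour); the eight/nine/ten/sixteen-bit constants are those mid-range values, and the *J (full-range) formats use 128 rather than 127. A minimal illustration for 8-bit 4:2:0, using hypothetical plane pointers rather than Image's data()/stride():

#include <cstdint>
#include <cstring>

/* Fill a planar 8-bit 4:2:0 frame with black: luma 0, chroma at mid-range.
   Plane pointers and strides here are placeholders for illustration. */
void
make_yuv420p_black (uint8_t* y, uint8_t* u, uint8_t* v, int height, int y_stride, int uv_stride)
{
	uint8_t const mid_uv = (1 << 7) - 1;            /* 127, as eight_bit_uv above */
	memset (y, 0, y_stride * height);               /* luma plane: black */
	memset (u, mid_uv, uv_stride * (height / 2));   /* 4:2:0 chroma planes are half height */
	memset (v, mid_uv, uv_stride * (height / 2));
}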
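
The alpha_blend() changes above are the standard straight-alpha "source over" operation, out = overlay * alpha + background * (1 - alpha), applied per channel; the swap of op[0] and op[2] reflects the overlay bytes arriving in B, G, R, A order while the RGB24 destination is R, G, B. A per-pixel sketch (a standalone helper, not a function from image.cc):

#include <cstdint>

/* Source-over blend of one straight-alpha B,G,R,A overlay pixel onto one R,G,B destination pixel,
   mirroring the per-channel arithmetic in the RGB24 case of the diff. */
void
blend_bgra_onto_rgb24 (uint8_t* tp, uint8_t const * op)
{
	float const alpha = op[3] / 255.0f;
	tp[0] = static_cast<uint8_t> (op[2] * alpha + tp[0] * (1 - alpha));  /* red   <- op[2] */
	tp[1] = static_cast<uint8_t> (op[1] * alpha + tp[1] * (1 - alpha));  /* green <- op[1] */
	tp[2] = static_cast<uint8_t> (op[0] * alpha + tp[2] * (1 - alpha));  /* blue  <- op[0] */
}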
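
The new AV_PIX_FMT_XYZ12LE case does the same blend but first pushes the overlay's 8-bit RGB through a 3x3 RGB-to-XYZ matrix (obtained in the diff from dcp::ColourConversion::srgb_to_xyz().rgb_to_xyz()) and then blends only the high byte of each 16-bit little-endian component, clamping to 255. The sketch below takes the matrix as a plain array rather than reproducing the libdcp call, and, like the diff, it ignores gamma and the low bytes:

#include <algorithm>
#include <cstdint>

/* Blend one B,G,R,A overlay pixel onto one XYZ12LE pixel, touching only the high bytes.
   'm' stands in for the RGB-to-XYZ matrix that the real code gets from libdcp. */
void
blend_bgra_onto_xyz12le (uint8_t* tp, uint8_t const * op, double const m[3][3])
{
	float const alpha = op[3] / 255.0f;
	int const r = op[2], g = op[1], b = op[0];
	int const x = static_cast<int> (m[0][0] * r + m[0][1] * g + m[0][2] * b);
	int const y = static_cast<int> (m[1][0] * r + m[1][1] * g + m[1][2] * b);
	int const z = static_cast<int> (m[2][0] * r + m[2][1] * g + m[2][2] * b);
	/* Components are 16-bit little-endian, so bytes 1, 3 and 5 are the high (most significant) bytes */
	tp[1] = static_cast<uint8_t> (std::min (x, 255) * alpha + tp[1] * (1 - alpha));
	tp[3] = static_cast<uint8_t> (std::min (y, 255) * alpha + tp[3] * (1 - alpha));
	tp[5] = static_cast<uint8_t> (std::min (z, 255) * alpha + tp[5] * (1 - alpha));
}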
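
The #ifdef block in bytes_per_pixel() handles an FFmpeg API change: newer libavutil exposes AVComponentDescriptor::depth while older releases had depth_minus1, so (depth_minus1 + 8) / 8 and (depth + 7) / 8 round the bit depth up to whole bytes in the same way. A condensed sketch for a single plane; the DCPOMATIC_HAVE_AVCOMPONENTDESCRIPTOR_DEPTH_MINUS1 macro is the one the diff relies on, presumably defined by the build system's configure check:

extern "C" {
#include <libavutil/pixdesc.h>
}
#include <cmath>

/* Bytes per image pixel carried by one plane of the given format, derived from FFmpeg's
   pixel format descriptor; chroma planes of subsampled formats carry proportionally less. */
float
plane_bytes_per_pixel (AVPixelFormat format, int plane)
{
	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get (format);
#ifdef DCPOMATIC_HAVE_AVCOMPONENTDESCRIPTOR_DEPTH_MINUS1
	float bpp = floor ((d->comp[plane].depth_minus1 + 8) / 8);
#else
	float bpp = floor ((d->comp[plane].depth + 7) / 8);
#endif
	if (plane > 0) {
		/* Horizontally subsampled planes hold fewer bytes per image pixel */
		bpp /= pow (2.0f, d->log2_chroma_w);
	}
	return bpp;
}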
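
fade() above simply multiplies every sample by f (0 gives black, 1 leaves the image alone). 8-bit formats can be scaled a byte at a time, while 16-bit formats must be handled a word at a time, with the big-endian ones swapped, which is why the function has separate loops. A sketch of the 8-bit and 16-bit little-endian cases for one plane, assuming (as the LE branch in the diff does) a little-endian host; pointers and sizes are illustrative:

#include <cstdint>

/* Scale every 8-bit sample of a plane by f. */
void
fade_plane_8 (uint8_t* data, int line_size, int stride, int height, float f)
{
	for (int y = 0; y < height; ++y) {
		uint8_t* p = data + y * stride;
		for (int x = 0; x < line_size; ++x) {
			p[x] = static_cast<uint8_t> (p[x] * f);
		}
	}
}

/* Scale every 16-bit little-endian sample of a plane by f (little-endian host assumed). */
void
fade_plane_16le (uint8_t* data, int line_size, int stride, int height, float f)
{
	for (int y = 0; y < height; ++y) {
		uint16_t* p = reinterpret_cast<uint16_t*> (data + y * stride);
		for (int x = 0; x < line_size / 2; ++x) {
			p[x] = static_cast<uint16_t> (p[x] * f);
		}
	}
}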