X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fimage.cc;h=c403b61abfe9cee0daea7d74cdccd49820275b60;hb=e60bb3e51bd1508b149e6b8f6608f09b5196ae26;hp=2eb2dbe28b013363e0852684761202ec550613d1;hpb=5e4f001bf32e3cdf65efa34803d70e6c1c00c66b;p=dcpomatic.git

diff --git a/src/lib/image.cc b/src/lib/image.cc
index 2eb2dbe28..c403b61ab 100644
--- a/src/lib/image.cc
+++ b/src/lib/image.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2014 Carl Hetherington
+    Copyright (C) 2012-2015 Carl Hetherington
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -21,18 +21,19 @@
  * @brief A class to describe a video image.
  */
 
-#include 
-extern "C" {
-#include <libswscale/swscale.h>
-#include <libavutil/pixfmt.h>
-#include <libavutil/pixdesc.h>
-}
 #include "image.h"
 #include "exceptions.h"
-#include "scaler.h"
 #include "timer.h"
 #include "rect.h"
+#include "util.h"
 #include "md5_digester.h"
+#include "dcpomatic_socket.h"
+extern "C" {
+#include <libswscale/swscale.h>
+#include <libavutil/pixfmt.h>
+#include <libavutil/pixdesc.h>
+}
+#include 
 
 #include "i18n.h"
 
@@ -55,7 +56,7 @@ Image::line_factor (int n) const
 	if (!d) {
 		throw PixelFormatError ("lines()", _pixel_format);
 	}
-	
+
 	return pow (2.0f, d->log2_chroma_h);
 }
 
@@ -80,22 +81,23 @@ Image::components () const
 	if ((d->flags & PIX_FMT_PLANAR) == 0) {
 		return 1;
 	}
-	
+
 	return d->nb_components;
 }
 
 /** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size' */
 shared_ptr<Image>
-Image::crop_scale_window (Crop crop, dcp::Size inter_size, dcp::Size out_size, Scaler const * scaler, AVPixelFormat out_format, bool out_aligned) const
+Image::crop_scale_window (
+	Crop crop, dcp::Size inter_size, dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned
+	) const
 {
-	assert (scaler);
 	/* Empirical testing suggests that sws_scale() will crash if
 	   the input image is not aligned.
 	*/
-	assert (aligned ());
+	DCPOMATIC_ASSERT (aligned ());
 
-	assert (out_size.width >= inter_size.width);
-	assert (out_size.height >= inter_size.height);
+	DCPOMATIC_ASSERT (out_size.width >= inter_size.width);
+	DCPOMATIC_ASSERT (out_size.height >= inter_size.height);
 
 	/* Here's an image of out_size */
 	shared_ptr<Image> out (new Image (out_format, out_size, out_aligned));
@@ -108,13 +110,26 @@ Image::crop_scale_window (Crop crop, dcp::Size inter_size, dcp::Size out_size, S
 	struct SwsContext* scale_context = sws_getContext (
 		cropped_size.width, cropped_size.height, pixel_format(),
 		inter_size.width, inter_size.height, out_format,
-		scaler->ffmpeg_id (), 0, 0, 0
+		SWS_BICUBIC, 0, 0, 0
 		);
 
 	if (!scale_context) {
 		throw StringError (N_("Could not allocate SwsContext"));
 	}
 
+	DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
+	int const lut[dcp::YUV_TO_RGB_COUNT] = {
+		SWS_CS_ITU601,
+		SWS_CS_ITU709
+	};
+
+	sws_setColorspaceDetails (
+		scale_context,
+		sws_getCoefficients (lut[yuv_to_rgb]), 0,
+		sws_getCoefficients (lut[yuv_to_rgb]), 0,
+		0, 1 << 16, 1 << 16
+		);
+
 	/* Prepare input data pointers with crop */
 	uint8_t* scale_in_data[components()];
 	for (int c = 0; c < components(); ++c) {
@@ -138,24 +153,36 @@ Image::crop_scale_window (Crop crop, dcp::Size inter_size, dcp::Size out_size, S
 
 	sws_freeContext (scale_context);
 
-	return out;	
+	return out;
 }
 
 shared_ptr<Image>
-Image::scale (dcp::Size out_size, Scaler const * scaler, AVPixelFormat out_format, bool out_aligned) const
+Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned) const
 {
-	assert (scaler);
 	/* Empirical testing suggests that sws_scale() will crash if
 	   the input image is not aligned.
 	*/
-	assert (aligned ());
+	DCPOMATIC_ASSERT (aligned ());
 
 	shared_ptr<Image> scaled (new Image (out_format, out_size, out_aligned));
 
 	struct SwsContext* scale_context = sws_getContext (
 		size().width, size().height, pixel_format(),
 		out_size.width, out_size.height, out_format,
-		scaler->ffmpeg_id (), 0, 0, 0
+		SWS_BICUBIC, 0, 0, 0
+		);
+
+	DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
+	int const lut[dcp::YUV_TO_RGB_COUNT] = {
+		SWS_CS_ITU601,
+		SWS_CS_ITU709
+	};
+
+	sws_setColorspaceDetails (
+		scale_context,
+		sws_getCoefficients (lut[yuv_to_rgb]), 0,
+		sws_getCoefficients (lut[yuv_to_rgb]), 0,
+		0, 1 << 16, 1 << 16
 		);
 
 	sws_scale (
@@ -236,7 +263,7 @@ Image::make_black ()
 	static uint16_t const ten_bit_uv = (1 << 9) - 1;
 	/* U/V black value for 16-bit colour */
 	static uint16_t const sixteen_bit_uv = (1 << 15) - 1;
-	
+
 	switch (_pixel_format) {
 	case PIX_FMT_YUV420P:
 	case PIX_FMT_YUV422P:
@@ -264,7 +291,7 @@ Image::make_black ()
 	case PIX_FMT_YUV444P9BE:
 		yuv_16_black (swap_16 (nine_bit_uv), false);
 		break;
-	
+
 	case PIX_FMT_YUV422P10LE:
 	case PIX_FMT_YUV444P10LE:
 		yuv_16_black (ten_bit_uv, false);
@@ -274,7 +301,7 @@ Image::make_black ()
 	case PIX_FMT_YUV422P16LE:
 	case PIX_FMT_YUV444P16LE:
 		yuv_16_black (sixteen_bit_uv, false);
 		break;
-	
+
 	case PIX_FMT_YUV444P10BE:
 	case PIX_FMT_YUV422P10BE:
 		yuv_16_black (swap_16 (ten_bit_uv), false);
@@ -285,31 +312,31 @@ Image::make_black ()
 	case AV_PIX_FMT_YUVA444P9BE:
 		yuv_16_black (swap_16 (nine_bit_uv), true);
 		break;
-	
+
 	case AV_PIX_FMT_YUVA420P9LE:
 	case AV_PIX_FMT_YUVA422P9LE:
 	case AV_PIX_FMT_YUVA444P9LE:
 		yuv_16_black (nine_bit_uv, true);
 		break;
-	
+
 	case AV_PIX_FMT_YUVA420P10BE:
 	case AV_PIX_FMT_YUVA422P10BE:
 	case AV_PIX_FMT_YUVA444P10BE:
 		yuv_16_black (swap_16 (ten_bit_uv), true);
 		break;
-	
+
 	case AV_PIX_FMT_YUVA420P10LE:
 	case AV_PIX_FMT_YUVA422P10LE:
 	case AV_PIX_FMT_YUVA444P10LE:
 		yuv_16_black (ten_bit_uv, true);
 		break;
-	
+
 	case AV_PIX_FMT_YUVA420P16BE:
 	case AV_PIX_FMT_YUVA422P16BE:
 	case AV_PIX_FMT_YUVA444P16BE:
 		yuv_16_black (swap_16 (sixteen_bit_uv), true);
 		break;
-	
+
 	case AV_PIX_FMT_YUVA420P16LE:
 	case AV_PIX_FMT_YUVA422P16LE:
 	case AV_PIX_FMT_YUVA444P16LE:
@@ -322,6 +349,8 @@ Image::make_black ()
 	case PIX_FMT_ABGR:
 	case PIX_FMT_BGRA:
 	case PIX_FMT_RGB555LE:
+	case PIX_FMT_RGB48LE:
+	case PIX_FMT_RGB48BE:
 		memset (data()[0], 0, lines(0) * stride()[0]);
 		break;
 
@@ -359,22 +388,9 @@ Image::make_transparent ()
 void
 Image::alpha_blend (shared_ptr other, Position<int> position)
 {
-	assert (other->pixel_format() == PIX_FMT_RGBA);
+	DCPOMATIC_ASSERT (other->pixel_format() == PIX_FMT_RGBA);
 	int const other_bpp = 4;
 
-	int this_bpp = 0;
-	switch (_pixel_format) {
-	case PIX_FMT_BGRA:
-	case PIX_FMT_RGBA:
-		this_bpp = 4;
-		break;
-	case PIX_FMT_RGB24:
-		this_bpp = 3;
-		break;
-	default:
-		assert (false);
-	}
-
 	int start_tx = position.x;
 	int start_ox = 0;
 
@@ -391,19 +407,66 @@ Image::alpha_blend (shared_ptr other, Position<int> position)
 		start_ty = 0;
 	}
 
-	for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
-		uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
-		uint8_t* op = other->data()[0] + oy * other->stride()[0];
-		for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
-			float const alpha = float (op[3]) / 255;
-			tp[0] = op[0] + (tp[0] * (1 - alpha));
-			tp[1] = op[1] + (tp[1] * (1 - alpha));
-			tp[2] = op[2] + (tp[2] * (1 - alpha));
-			tp[3] = op[3] + (tp[3] * (1 - alpha));
-
-			tp += this_bpp;
-			op += other_bpp;
+	switch (_pixel_format) {
+	case PIX_FMT_RGB24:
+	{
+		int const this_bpp = 3;
+		for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
+			uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
+			uint8_t* op = other->data()[0] + oy * other->stride()[0];
+			for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
+				float const alpha = float (op[3]) / 255;
+				tp[0] = op[0] * alpha + tp[0] * (1 - alpha);
+				tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
+				tp[2] = op[2] * alpha + tp[2] * (1 - alpha);
+
+				tp += this_bpp;
+				op += other_bpp;
+			}
+		}
+		break;
+	}
+	case PIX_FMT_BGRA:
+	case PIX_FMT_RGBA:
+	{
+		int const this_bpp = 4;
+		for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
+			uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
+			uint8_t* op = other->data()[0] + oy * other->stride()[0];
+			for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
+				float const alpha = float (op[3]) / 255;
+				tp[0] = op[0] * alpha + tp[0] * (1 - alpha);
+				tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
+				tp[2] = op[2] * alpha + tp[2] * (1 - alpha);
+				tp[3] = op[3] * alpha + tp[3] * (1 - alpha);
+
+				tp += this_bpp;
+				op += other_bpp;
+			}
 		}
+		break;
+	}
+	case PIX_FMT_RGB48LE:
+	{
+		int const this_bpp = 6;
+		for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
+			uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
+			uint8_t* op = other->data()[0] + oy * other->stride()[0];
+			for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
+				float const alpha = float (op[3]) / 255;
+				/* Blend high bytes */
+				tp[1] = op[0] * alpha + tp[1] * (1 - alpha);
+				tp[3] = op[1] * alpha + tp[3] * (1 - alpha);
+				tp[5] = op[2] * alpha + tp[5] * (1 - alpha);
+
+				tp += this_bpp;
+				op += other_bpp;
+			}
+		}
+		break;
+	}
+	default:
+		DCPOMATIC_ASSERT (false);
 	}
 }
 
@@ -411,8 +474,8 @@ void
 Image::copy (shared_ptr other, Position<int> position)
 {
 	/* Only implemented for RGB24 onto RGB24 so far */
-	assert (_pixel_format == PIX_FMT_RGB24 && other->pixel_format() == PIX_FMT_RGB24);
-	assert (position.x >= 0 && position.y >= 0);
+	DCPOMATIC_ASSERT (_pixel_format == PIX_FMT_RGB24 && other->pixel_format() == PIX_FMT_RGB24);
+	DCPOMATIC_ASSERT (position.x >= 0 && position.y >= 0);
 
 	int const N = min (position.x + other->size().width, size().width) - position.x;
 	for (int ty = position.y, oy = 0; ty < size().height && oy < other->size().height; ++ty, ++oy) {
@@ -420,7 +483,7 @@ Image::copy (shared_ptr other, Position<int> position)
 		uint8_t * const op = other->data()[0] + oy * other->stride()[0];
 		memcpy (tp, op, N * 3);
 	}
-}	
+}
 
 void
 Image::read_from_socket (shared_ptr<Socket> socket)
@@ -471,7 +534,7 @@ Image::bytes_per_pixel (int c) const
 	if (d->nb_components > 3) {
 		bpp[3] = floor ((d->comp[3].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w);
 	}
-	
+
 	if ((d->flags & PIX_FMT_PLANAR) == 0) {
 		/* Not planar; sum them up */
 		return bpp[0] + bpp[1] + bpp[2] + bpp[3];
@@ -487,7 +550,7 @@ Image::bytes_per_pixel (int c) const
  *  @param s Size in pixels.
  */
 Image::Image (AVPixelFormat p, dcp::Size s, bool aligned)
-	: dcp::Image (s)
+	: _size (s)
 	, _pixel_format (p)
 	, _aligned (aligned)
 {
@@ -499,10 +562,10 @@ Image::allocate ()
 {
 	_data = (uint8_t **) wrapped_av_malloc (4 * sizeof (uint8_t *));
 	_data[0] = _data[1] = _data[2] = _data[3] = 0;
-	
+
 	_line_size = (int *) wrapped_av_malloc (4 * sizeof (int));
 	_line_size[0] = _line_size[1] = _line_size[2] = _line_size[3] = 0;
-	
+
 	_stride = (int *) wrapped_av_malloc (4 * sizeof (int));
 	_stride[0] = _stride[1] = _stride[2] = _stride[3] = 0;
 
@@ -529,8 +592,8 @@ Image::allocate ()
 }
 
 Image::Image (Image const & other)
-	: dcp::Image (other)
-	, _pixel_format (other._pixel_format)
+	: _size (other._size)
+	, _pixel_format (other._pixel_format)
 	, _aligned (other._aligned)
 {
 	allocate ();
@@ -547,7 +610,7 @@ Image::Image (Image const & other)
 }
 
 Image::Image (AVFrame* frame)
-	: dcp::Image (dcp::Size (frame->width, frame->height))
+	: _size (frame->width, frame->height)
 	, _pixel_format (static_cast<AVPixelFormat> (frame->format))
 	, _aligned (true)
 {
@@ -566,14 +629,14 @@ Image::Image (AVFrame* frame)
 }
 
 Image::Image (shared_ptr other, bool aligned)
-	: dcp::Image (other)
+	: _size (other->_size)
 	, _pixel_format (other->_pixel_format)
 	, _aligned (aligned)
 {
 	allocate ();
 
 	for (int i = 0; i < components(); ++i) {
-		assert(line_size()[i] == other->line_size()[i]);
+		DCPOMATIC_ASSERT (line_size()[i] == other->line_size()[i]);
 		uint8_t* p = _data[i];
 		uint8_t* q = other->data()[i];
 		for (int j = 0; j < lines(i); ++j) {
@@ -599,8 +662,7 @@ Image::operator= (Image const & other)
 void
 Image::swap (Image & other)
 {
-	dcp::Image::swap (other);
-	
+	std::swap (_size, other._size);
 	std::swap (_pixel_format, other._pixel_format);
 
 	for (int i = 0; i < 4; ++i) {
@@ -624,7 +686,7 @@ Image::~Image ()
 	av_free (_stride);
 }
 
-uint8_t **
+uint8_t * const *
 Image::data () const
 {
 	return _data;
@@ -636,7 +698,7 @@ Image::line_size () const
 	return _line_size;
 }
 
-int *
+int const *
 Image::stride () const
 {
 	return _stride;
@@ -679,14 +741,141 @@ merge (list<PositionImage> images)
 	return PositionImage (merged, all.position ());
 }
 
-string
-Image::digest () const
+bool
+operator== (Image const & a, Image const & b)
 {
-	MD5Digester digester;
+	if (a.components() != b.components() || a.pixel_format() != b.pixel_format() || a.aligned() != b.aligned()) {
+		return false;
+	}
 
-	for (int i = 0; i < components(); ++i) {
-		digester.add (data()[i], line_size()[i]);
+	for (int c = 0; c < a.components(); ++c) {
+		if (a.lines(c) != b.lines(c) || a.line_size()[c] != b.line_size()[c] || a.stride()[c] != b.stride()[c]) {
+			return false;
+		}
+
+		uint8_t* p = a.data()[c];
+		uint8_t* q = b.data()[c];
+		for (int y = 0; y < a.lines(c); ++y) {
+			if (memcmp (p, q, a.line_size()[c]) != 0) {
+				return false;
+			}
+
+			p += a.stride()[c];
+			q += b.stride()[c];
+		}
+	}
+
+	return true;
+}
+
+/** Fade the image.
+ *  @param f Amount to fade by; 0 is black, 1 is no fade.
+ */
+void
+Image::fade (float f)
+{
+	switch (_pixel_format) {
+	case PIX_FMT_YUV420P:
+	case PIX_FMT_YUV422P:
+	case PIX_FMT_YUV444P:
+	case PIX_FMT_YUV411P:
+	case PIX_FMT_YUVJ420P:
+	case PIX_FMT_YUVJ422P:
+	case PIX_FMT_YUVJ444P:
+	case PIX_FMT_RGB24:
+	case PIX_FMT_ARGB:
+	case PIX_FMT_RGBA:
+	case PIX_FMT_ABGR:
+	case PIX_FMT_BGRA:
+	case PIX_FMT_RGB555LE:
+		/* 8-bit */
+		for (int c = 0; c < 3; ++c) {
+			uint8_t* p = data()[c];
+			for (int y = 0; y < lines(c); ++y) {
+				uint8_t* q = p;
+				for (int x = 0; x < line_size()[c]; ++x) {
+					*q = int (float (*q) * f);
+					++q;
+				}
+				p += stride()[c];
+			}
+		}
+		break;
+
+	case PIX_FMT_YUV422P9LE:
+	case PIX_FMT_YUV444P9LE:
+	case PIX_FMT_YUV422P10LE:
+	case PIX_FMT_YUV444P10LE:
+	case PIX_FMT_YUV422P16LE:
+	case PIX_FMT_YUV444P16LE:
+	case AV_PIX_FMT_YUVA420P9LE:
+	case AV_PIX_FMT_YUVA422P9LE:
+	case AV_PIX_FMT_YUVA444P9LE:
+	case AV_PIX_FMT_YUVA420P10LE:
+	case AV_PIX_FMT_YUVA422P10LE:
+	case AV_PIX_FMT_YUVA444P10LE:
+	case AV_PIX_FMT_RGB48LE:
+		/* 16-bit little-endian */
+		for (int c = 0; c < 3; ++c) {
+			int const stride_pixels = stride()[c] / 2;
+			int const line_size_pixels = line_size()[c] / 2;
+			uint16_t* p = reinterpret_cast<uint16_t*> (data()[c]);
+			for (int y = 0; y < lines(c); ++y) {
+				uint16_t* q = p;
+				for (int x = 0; x < line_size_pixels; ++x) {
+					*q = int (float (*q) * f);
+					++q;
+				}
+				p += stride_pixels;
+			}
+		}
+		break;
+
+	case PIX_FMT_YUV422P9BE:
+	case PIX_FMT_YUV444P9BE:
+	case PIX_FMT_YUV444P10BE:
+	case PIX_FMT_YUV422P10BE:
+	case AV_PIX_FMT_YUVA420P9BE:
+	case AV_PIX_FMT_YUVA422P9BE:
+	case AV_PIX_FMT_YUVA444P9BE:
+	case AV_PIX_FMT_YUVA420P10BE:
+	case AV_PIX_FMT_YUVA422P10BE:
+	case AV_PIX_FMT_YUVA444P10BE:
+	case AV_PIX_FMT_YUVA420P16BE:
+	case AV_PIX_FMT_YUVA422P16BE:
+	case AV_PIX_FMT_YUVA444P16BE:
+	case AV_PIX_FMT_RGB48BE:
+		/* 16-bit big-endian */
+		for (int c = 0; c < 3; ++c) {
+			int const stride_pixels = stride()[c] / 2;
+			int const line_size_pixels = line_size()[c] / 2;
+			uint16_t* p = reinterpret_cast<uint16_t*> (data()[c]);
+			for (int y = 0; y < lines(c); ++y) {
+				uint16_t* q = p;
+				for (int x = 0; x < line_size_pixels; ++x) {
+					*q = swap_16 (int (float (swap_16 (*q)) * f));
+					++q;
+				}
+				p += stride_pixels;
+			}
+		}
+		break;
+
+	case PIX_FMT_UYVY422:
+	{
+		int const Y = lines(0);
+		int const X = line_size()[0];
+		uint8_t* p = data()[0];
+		for (int y = 0; y < Y; ++y) {
+			for (int x = 0; x < X; ++x) {
+				*p = int (float (*p) * f);
+				++p;
+			}
+		}
+		break;
 	}
 
-	return digester.get ();
+	default:
+		throw PixelFormatError ("fade()", _pixel_format);
+	}
 }
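
For reference, the colourspace handling that this patch adds to crop_scale_window() and scale() can be exercised in isolation. The sketch below is illustrative only and is not part of the patch: the function name, the choice of planar YUV 4:2:2 input, RGB24 output and the rec709 flag are assumptions made for the example. Only the sws_setColorspaceDetails() pattern mirrors the patch (the same coefficient table for input and output, video-range flags, brightness 0, contrast and saturation at 1.0 in 16.16 fixed point).

extern "C" {
#include <libswscale/swscale.h>
#include <libavutil/pixfmt.h>
}
#include <stdexcept>
#include <stdint.h>

/* Hypothetical helper: scale a planar YUV 4:2:2 image to RGB24, telling
 * libswscale explicitly whether to use BT.601 or BT.709 YUV->RGB coefficients
 * rather than letting it guess.
 */
static void
scale_yuv_to_rgb_example (
	uint8_t* const in_data[], int const in_stride[], int in_width, int in_height,
	uint8_t* const out_data[], int const out_stride[], int out_width, int out_height,
	bool rec709
	)
{
	struct SwsContext* context = sws_getContext (
		in_width, in_height, AV_PIX_FMT_YUV422P,
		out_width, out_height, AV_PIX_FMT_RGB24,
		SWS_BICUBIC, 0, 0, 0
		);

	if (!context) {
		throw std::runtime_error ("Could not allocate SwsContext");
	}

	/* Same pattern as the patch: pick the coefficient table from the source's
	   colourspace and apply it to both sides of the conversion.
	*/
	int const cs = rec709 ? SWS_CS_ITU709 : SWS_CS_ITU601;
	sws_setColorspaceDetails (
		context,
		sws_getCoefficients (cs), 0,
		sws_getCoefficients (cs), 0,
		0, 1 << 16, 1 << 16
		);

	sws_scale (context, in_data, in_stride, 0, in_height, out_data, out_stride);

	sws_freeContext (context);
}

The design point the sketch illustrates is the one the patch makes: the configurable Scaler (and its ffmpeg_id()) is dropped in favour of a fixed SWS_BICUBIC filter, and the caller instead supplies the thing that visibly affects the output, the YUV-to-RGB matrix, via dcp::YUVToRGB.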