X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fimage.cc;h=6835d0c26d967557df34fadfa06ba1e9680da53f;hb=71d56fbe3ba5974505469d2c8b7efcdef4eb8adc;hp=c403b61abfe9cee0daea7d74cdccd49820275b60;hpb=e60bb3e51bd1508b149e6b8f6608f09b5196ae26;p=dcpomatic.git

diff --git a/src/lib/image.cc b/src/lib/image.cc
index c403b61ab..6835d0c26 100644
--- a/src/lib/image.cc
+++ b/src/lib/image.cc
@@ -32,6 +32,7 @@ extern "C" {
 #include
 #include
 #include
+#include
 }
 #include
@@ -42,6 +43,7 @@
 using std::min;
 using std::cout;
 using std::cerr;
 using std::list;
+using std::runtime_error;
 using boost::shared_ptr;
 using dcp::Size;
@@ -54,22 +56,33 @@ Image::line_factor (int n) const
 	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
 	if (!d) {
-		throw PixelFormatError ("lines()", _pixel_format);
+		throw PixelFormatError ("line_factor()", _pixel_format);
 	}
 	return pow (2.0f, d->log2_chroma_h);
 }
 /** @param n Component index.
- *  @return Number of lines in the image for the given component.
+ *  @return Number of samples (i.e. pixels, unless sub-sampled) in each direction for this component.
  */
-int
-Image::lines (int n) const
+dcp::Size
+Image::sample_size (int n) const
 {
-	return rint (ceil (static_cast(size().height) / line_factor (n)));
+	int horizontal_factor = 1;
+	if (n > 0) {
+		AVPixFmtDescriptor const * d = av_pix_fmt_desc_get (_pixel_format);
+		if (!d) {
+			throw PixelFormatError ("sample_size()", _pixel_format);
+		}
+		horizontal_factor = pow (2.0f, d->log2_chroma_w);
+	}
+
+	return dcp::Size (
+		lrint (ceil (static_cast(size().width) / horizontal_factor)),
+		lrint (ceil (static_cast(size().height) / line_factor (n)))
+		);
 }
-/** @return Number of components */
 int
 Image::components () const
@@ -78,7 +91,19 @@ Image::components () const
 		throw PixelFormatError ("components()", _pixel_format);
 	}
-	if ((d->flags & PIX_FMT_PLANAR) == 0) {
+	return d->nb_components;
+}
+
+/** @return Number of planes */
+int
+Image::planes () const
+{
+	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+	if (!d) {
+		throw PixelFormatError ("planes()", _pixel_format);
+	}
+
+	if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
 		return 1;
 	}
@@ -114,7 +139,7 @@ Image::crop_scale_window (
 		);
 	if (!scale_context) {
-		throw StringError (N_("Could not allocate SwsContext"));
+		throw runtime_error (N_("Could not allocate SwsContext"));
 	}
 	DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
@@ -130,18 +155,29 @@ Image::crop_scale_window (
 		0, 1 << 16, 1 << 16
 		);
+	AVPixFmtDescriptor const * desc = av_pix_fmt_desc_get (_pixel_format);
+	if (!desc) {
+		throw PixelFormatError ("crop_scale_window()", _pixel_format);
+	}
+
 	/* Prepare input data pointers with crop */
-	uint8_t* scale_in_data[components()];
-	for (int c = 0; c < components(); ++c) {
-		scale_in_data[c] = data()[c] + int (rint (bytes_per_pixel(c) * crop.left)) + stride()[c] * (crop.top / line_factor(c));
+	uint8_t* scale_in_data[planes()];
+	for (int c = 0; c < planes(); ++c) {
+		/* To work out the crop in bytes, start by multiplying
+		   the crop by the (average) bytes per pixel.  Then
+		   round down so that we don't crop a subsampled pixel until
+		   we've cropped all of its Y-channel pixels.
+		*/
+		int const x = lrintf (bytes_per_pixel(c) * crop.left) & ~ ((int) desc->log2_chroma_w);
+		scale_in_data[c] = data()[c] + x + stride()[c] * (crop.top / line_factor(c));
 	}
 	/* Corner of the image within out_size */
 	Position const corner ((out_size.width - inter_size.width) / 2, (out_size.height - inter_size.height) / 2);
-	uint8_t* scale_out_data[out->components()];
-	for (int c = 0; c < out->components(); ++c) {
-		scale_out_data[c] = out->data()[c] + int (rint (out->bytes_per_pixel(c) * corner.x)) + out->stride()[c] * corner.y;
+	uint8_t* scale_out_data[out->planes()];
+	for (int c = 0; c < out->planes(); ++c) {
+		scale_out_data[c] = out->data()[c] + lrintf (out->bytes_per_pixel(c) * corner.x) + out->stride()[c] * corner.y;
 	}
 	sws_scale (
@@ -197,42 +233,15 @@ Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_fo
 	return scaled;
 }
-shared_ptr
-Image::crop (Crop crop, bool aligned) const
-{
-	dcp::Size cropped_size = crop.apply (size ());
-	shared_ptr out (new Image (pixel_format(), cropped_size, aligned));
-
-	for (int c = 0; c < components(); ++c) {
-		int const crop_left_in_bytes = bytes_per_pixel(c) * crop.left;
-		/* bytes_per_pixel() could be a fraction; in this case the stride will be rounded
-		   up, and we need to make sure that we copy over the width (up to the stride)
-		   rather than short of the width; hence the ceil() here.
-		*/
-		int const cropped_width_in_bytes = ceil (bytes_per_pixel(c) * cropped_size.width);
-
-		/* Start of the source line, cropped from the top but not the left */
-		uint8_t* in_p = data()[c] + (crop.top / out->line_factor(c)) * stride()[c];
-		uint8_t* out_p = out->data()[c];
-
-		for (int y = 0; y < out->lines(c); ++y) {
-			memcpy (out_p, in_p + crop_left_in_bytes, cropped_width_in_bytes);
-			in_p += stride()[c];
-			out_p += out->stride()[c];
-		}
-	}
-
-	return out;
-}
-
 /** Blacken a YUV image whose bits per pixel is rounded up to 16 */
 void
 Image::yuv_16_black (uint16_t v, bool alpha)
 {
-	memset (data()[0], 0, lines(0) * stride()[0]);
+	memset (data()[0], 0, sample_size(0).height * stride()[0]);
 	for (int i = 1; i < 3; ++i) {
 		int16_t* p = reinterpret_cast (data()[i]);
-		for (int y = 0; y < lines(i); ++y) {
+		int const lines = sample_size(i).height;
+		for (int y = 0; y < lines; ++y) {
 			/* We divide by 2 here because we are writing 2 bytes at a time */
 			for (int x = 0; x < line_size()[i] / 2; ++x) {
 				p[x] = v;
@@ -242,7 +251,7 @@ Image::yuv_16_black (uint16_t v, bool alpha)
 	}
 	if (alpha) {
-		memset (data()[3], 0, lines(3) * stride()[3]);
+		memset (data()[3], 0, sample_size(3).height * stride()[3]);
 	}
 }
@@ -265,45 +274,45 @@ Image::make_black ()
 	static uint16_t const sixteen_bit_uv = (1 << 15) - 1;
 	switch (_pixel_format) {
-	case PIX_FMT_YUV420P:
-	case PIX_FMT_YUV422P:
-	case PIX_FMT_YUV444P:
-	case PIX_FMT_YUV411P:
-		memset (data()[0], 0, lines(0) * stride()[0]);
-		memset (data()[1], eight_bit_uv, lines(1) * stride()[1]);
-		memset (data()[2], eight_bit_uv, lines(2) * stride()[2]);
+	case AV_PIX_FMT_YUV420P:
+	case AV_PIX_FMT_YUV422P:
+	case AV_PIX_FMT_YUV444P:
+	case AV_PIX_FMT_YUV411P:
+		memset (data()[0], 0, sample_size(0).height * stride()[0]);
+		memset (data()[1], eight_bit_uv, sample_size(1).height * stride()[1]);
+		memset (data()[2], eight_bit_uv, sample_size(2).height * stride()[2]);
 		break;
-	case PIX_FMT_YUVJ420P:
-	case PIX_FMT_YUVJ422P:
-	case PIX_FMT_YUVJ444P:
-		memset (data()[0], 0, lines(0) * stride()[0]);
-		memset (data()[1], eight_bit_uv + 1, lines(1) * stride()[1]);
-		memset (data()[2], eight_bit_uv + 1, lines(2) * stride()[2]);
+	case AV_PIX_FMT_YUVJ420P:
+	case AV_PIX_FMT_YUVJ422P:
+	case AV_PIX_FMT_YUVJ444P:
+		memset (data()[0], 0, sample_size(0).height * stride()[0]);
+		memset (data()[1], eight_bit_uv + 1, sample_size(1).height * stride()[1]);
+		memset (data()[2], eight_bit_uv + 1, sample_size(2).height * stride()[2]);
 		break;
-	case PIX_FMT_YUV422P9LE:
-	case PIX_FMT_YUV444P9LE:
+	case AV_PIX_FMT_YUV422P9LE:
+	case AV_PIX_FMT_YUV444P9LE:
 		yuv_16_black (nine_bit_uv, false);
 		break;
-	case PIX_FMT_YUV422P9BE:
-	case PIX_FMT_YUV444P9BE:
+	case AV_PIX_FMT_YUV422P9BE:
+	case AV_PIX_FMT_YUV444P9BE:
 		yuv_16_black (swap_16 (nine_bit_uv), false);
 		break;
-	case PIX_FMT_YUV422P10LE:
-	case PIX_FMT_YUV444P10LE:
+	case AV_PIX_FMT_YUV422P10LE:
+	case AV_PIX_FMT_YUV444P10LE:
 		yuv_16_black (ten_bit_uv, false);
 		break;
-	case PIX_FMT_YUV422P16LE:
-	case PIX_FMT_YUV444P16LE:
+	case AV_PIX_FMT_YUV422P16LE:
+	case AV_PIX_FMT_YUV444P16LE:
 		yuv_16_black (sixteen_bit_uv, false);
 		break;
-	case PIX_FMT_YUV444P10BE:
-	case PIX_FMT_YUV422P10BE:
+	case AV_PIX_FMT_YUV444P10BE:
+	case AV_PIX_FMT_YUV422P10BE:
 		yuv_16_black (swap_16 (ten_bit_uv), false);
 		break;
@@ -343,20 +352,21 @@ Image::make_black ()
 		yuv_16_black (sixteen_bit_uv, true);
 		break;
-	case PIX_FMT_RGB24:
-	case PIX_FMT_ARGB:
-	case PIX_FMT_RGBA:
-	case PIX_FMT_ABGR:
-	case PIX_FMT_BGRA:
-	case PIX_FMT_RGB555LE:
-	case PIX_FMT_RGB48LE:
-	case PIX_FMT_RGB48BE:
-		memset (data()[0], 0, lines(0) * stride()[0]);
+	case AV_PIX_FMT_RGB24:
+	case AV_PIX_FMT_ARGB:
+	case AV_PIX_FMT_RGBA:
+	case AV_PIX_FMT_ABGR:
+	case AV_PIX_FMT_BGRA:
+	case AV_PIX_FMT_RGB555LE:
+	case AV_PIX_FMT_RGB48LE:
+	case AV_PIX_FMT_RGB48BE:
+	case AV_PIX_FMT_XYZ12LE:
+		memset (data()[0], 0, sample_size(0).height * stride()[0]);
 		break;
-	case PIX_FMT_UYVY422:
+	case AV_PIX_FMT_UYVY422:
 	{
-		int const Y = lines(0);
+		int const Y = sample_size(0).height;
 		int const X = line_size()[0];
 		uint8_t* p = data()[0];
 		for (int y = 0; y < Y; ++y) {
@@ -378,17 +388,17 @@ Image::make_black ()
 void
 Image::make_transparent ()
 {
-	if (_pixel_format != PIX_FMT_RGBA) {
+	if (_pixel_format != AV_PIX_FMT_RGBA) {
 		throw PixelFormatError ("make_transparent()", _pixel_format);
 	}
-	memset (data()[0], 0, lines(0) * stride()[0]);
+	memset (data()[0], 0, sample_size(0).height * stride()[0]);
 }
 void
 Image::alpha_blend (shared_ptr other, Position position)
 {
-	DCPOMATIC_ASSERT (other->pixel_format() == PIX_FMT_RGBA);
+	DCPOMATIC_ASSERT (other->pixel_format() == AV_PIX_FMT_RGBA);
 	int const other_bpp = 4;
 	int start_tx = position.x;
@@ -408,7 +418,7 @@ Image::alpha_blend (shared_ptr other, Position position)
 	}
 	switch (_pixel_format) {
-	case PIX_FMT_RGB24:
+	case AV_PIX_FMT_RGB24:
 	{
 		int const this_bpp = 3;
 		for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
@@ -426,8 +436,8 @@ Image::alpha_blend (shared_ptr other, Position position)
 		}
 		break;
 	}
-	case PIX_FMT_BGRA:
-	case PIX_FMT_RGBA:
+	case AV_PIX_FMT_BGRA:
+	case AV_PIX_FMT_RGBA:
 	{
 		int const this_bpp = 4;
 		for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
@@ -446,7 +456,7 @@ Image::alpha_blend (shared_ptr other, Position position)
 		}
 		break;
 	}
-	case PIX_FMT_RGB48LE:
+	case AV_PIX_FMT_RGB48LE:
 	{
 		int const this_bpp = 6;
 		for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
@@ -454,10 +464,35 @@ Image::alpha_blend (shared_ptr other, Position position)
 			uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
 			uint8_t* op = other->data()[0] + oy * other->stride()[0];
 			for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
 				float const alpha = float (op[3]) / 255;
-				/* Blend high bytes */
-				tp[1] = op[0] * alpha + tp[1] * (1 - alpha);
+				/* Blend high bytes; the RGBA in op appears to be BGRA */
+				tp[1] = op[2] * alpha + tp[1] * (1 - alpha);
 				tp[3] = op[1] * alpha + tp[3] * (1 - alpha);
-				tp[5] = op[2] * alpha + tp[5] * (1 - alpha);
+				tp[5] = op[0] * alpha + tp[5] * (1 - alpha);
+
+				tp += this_bpp;
+				op += other_bpp;
+			}
+		}
+		break;
+	}
+	case AV_PIX_FMT_XYZ12LE:
+	{
+		int const this_bpp = 6;
+		for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
+			uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
+			uint8_t* op = other->data()[0] + oy * other->stride()[0];
+			for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
+				float const alpha = float (op[3]) / 255;
+
+				/* Convert sRGB to XYZ; op is BGRA */
+				int const x = 0.4124564 + op[2] + 0.3575761 * op[1] + 0.1804375 * op[0];
+				int const y = 0.2126729 + op[2] + 0.7151522 * op[1] + 0.0721750 * op[0];
+				int const z = 0.0193339 + op[2] + 0.1191920 * op[1] + 0.9503041 * op[0];
+
+				/* Blend high bytes */
+				tp[1] = min (x, 255) * alpha + tp[1] * (1 - alpha);
+				tp[3] = min (y, 255) * alpha + tp[3] * (1 - alpha);
+				tp[5] = min (z, 255) * alpha + tp[5] * (1 - alpha);
 				tp += this_bpp;
 				op += other_bpp;
 			}
@@ -474,7 +509,7 @@ void
 Image::copy (shared_ptr other, Position position)
 {
 	/* Only implemented for RGB24 onto RGB24 so far */
-	DCPOMATIC_ASSERT (_pixel_format == PIX_FMT_RGB24 && other->pixel_format() == PIX_FMT_RGB24);
+	DCPOMATIC_ASSERT (_pixel_format == AV_PIX_FMT_RGB24 && other->pixel_format() == AV_PIX_FMT_RGB24);
 	DCPOMATIC_ASSERT (position.x >= 0 && position.y >= 0);
 	int const N = min (position.x + other->size().width, size().width) - position.x;
@@ -488,9 +523,10 @@ Image::copy (shared_ptr other, Position position)
 void
 Image::read_from_socket (shared_ptr socket)
 {
-	for (int i = 0; i < components(); ++i) {
+	for (int i = 0; i < planes(); ++i) {
 		uint8_t* p = data()[i];
-		for (int y = 0; y < lines(i); ++y) {
+		int const lines = sample_size(i).height;
+		for (int y = 0; y < lines; ++y) {
 			socket->read (p, line_size()[i]);
 			p += stride()[i];
 		}
 	}
@@ -500,25 +536,25 @@ Image::read_from_socket (shared_ptr socket)
 void
 Image::write_to_socket (shared_ptr socket) const
 {
-	for (int i = 0; i < components(); ++i) {
+	for (int i = 0; i < planes(); ++i) {
 		uint8_t* p = data()[i];
-		for (int y = 0; y < lines(i); ++y) {
+		int const lines = sample_size(i).height;
+		for (int y = 0; y < lines; ++y) {
 			socket->write (p, line_size()[i]);
 			p += stride()[i];
 		}
 	}
 }
-
 float
 Image::bytes_per_pixel (int c) const
 {
 	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
 	if (!d) {
-		throw PixelFormatError ("lines()", _pixel_format);
+		throw PixelFormatError ("bytes_per_pixel()", _pixel_format);
 	}
-	if (c >= components()) {
+	if (c >= planes()) {
 		return 0;
 	}
@@ -535,7 +571,7 @@ Image::bytes_per_pixel (int c) const
 		bpp[3] = floor ((d->comp[3].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w);
 	}
-	if ((d->flags & PIX_FMT_PLANAR) == 0) {
+	if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
 		/* Not planar; sum them up */
 		return bpp[0] + bpp[1] + bpp[2] + bpp[3];
 	}
@@ -569,7 +605,7 @@ Image::allocate ()
 	_stride = (int *) wrapped_av_malloc (4 * sizeof (int));
 	_stride[0] = _stride[1] = _stride[2] = _stride[3] = 0;
-	for (int i = 0; i < components(); ++i) {
+	for (int i = 0; i < planes(); ++i) {
 		_line_size[i] = ceil (_size.width * bytes_per_pixel(i));
 		_stride[i] = stride_round_up (i, _line_size, _aligned ? 32 : 1);
@@ -587,7 +623,7 @@ Image::allocate ()
 		   so I'll just over-allocate by 32 bytes and have done with it.  Empirical testing suggests that it works.
 		*/
-		_data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * lines (i) + 32);
+		_data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * sample_size(i).height + 32);
 	}
 }
@@ -598,10 +634,11 @@ Image::Image (Image const & other)
 {
 	allocate ();
-	for (int i = 0; i < components(); ++i) {
+	for (int i = 0; i < planes(); ++i) {
 		uint8_t* p = _data[i];
 		uint8_t* q = other._data[i];
-		for (int j = 0; j < lines(i); ++j) {
+		int const lines = sample_size(i).height;
+		for (int j = 0; j < lines; ++j) {
 			memcpy (p, q, _line_size[i]);
 			p += stride()[i];
 			q += other.stride()[i];
@@ -616,10 +653,11 @@ Image::Image (AVFrame* frame)
 {
 	allocate ();
-	for (int i = 0; i < components(); ++i) {
+	for (int i = 0; i < planes(); ++i) {
 		uint8_t* p = _data[i];
 		uint8_t* q = frame->data[i];
-		for (int j = 0; j < lines(i); ++j) {
+		int const lines = sample_size(i).height;
+		for (int j = 0; j < lines; ++j) {
 			memcpy (p, q, _line_size[i]);
 			p += stride()[i];
 			/* AVFrame's linesize is what we call `stride' */
@@ -635,11 +673,12 @@ Image::Image (shared_ptr other, bool aligned)
 {
 	allocate ();
-	for (int i = 0; i < components(); ++i) {
+	for (int i = 0; i < planes(); ++i) {
 		DCPOMATIC_ASSERT (line_size()[i] == other->line_size()[i]);
 		uint8_t* p = _data[i];
 		uint8_t* q = other->data()[i];
-		for (int j = 0; j < lines(i); ++j) {
+		int const lines = sample_size(i).height;
+		for (int j = 0; j < lines; ++j) {
 			memcpy (p, q, line_size()[i]);
 			p += stride()[i];
 			q += other->stride()[i];
@@ -677,7 +716,7 @@ Image::swap (Image & other)
 /** Destroy a Image */
 Image::~Image ()
 {
-	for (int i = 0; i < components(); ++i) {
+	for (int i = 0; i < planes(); ++i) {
 		av_free (_data[i]);
 	}
@@ -692,7 +731,7 @@ Image::data () const
 	return _data;
 }
-int *
+int const *
 Image::line_size () const
 {
 	return _line_size;
@@ -744,18 +783,19 @@ merge (list images)
 bool
 operator== (Image const & a, Image const & b)
 {
-	if (a.components() != b.components() || a.pixel_format() != b.pixel_format() || a.aligned() != b.aligned()) {
+	if (a.planes() != b.planes() || a.pixel_format() != b.pixel_format() || a.aligned() != b.aligned()) {
 		return false;
 	}
-	for (int c = 0; c < a.components(); ++c) {
-		if (a.lines(c) != b.lines(c) || a.line_size()[c] != b.line_size()[c] || a.stride()[c] != b.stride()[c]) {
+	for (int c = 0; c < a.planes(); ++c) {
+		if (a.sample_size(c).height != b.sample_size(c).height || a.line_size()[c] != b.line_size()[c] || a.stride()[c] != b.stride()[c]) {
 			return false;
 		}
 		uint8_t* p = a.data()[c];
 		uint8_t* q = b.data()[c];
-		for (int y = 0; y < a.lines(c); ++y) {
+		int const lines = a.sample_size(c).height;
+		for (int y = 0; y < lines; ++y) {
 			if (memcmp (p, q, a.line_size()[c]) != 0) {
 				return false;
 			}
@@ -775,23 +815,24 @@ void
 Image::fade (float f)
 {
 	switch (_pixel_format) {
-	case PIX_FMT_YUV420P:
-	case PIX_FMT_YUV422P:
-	case PIX_FMT_YUV444P:
-	case PIX_FMT_YUV411P:
-	case PIX_FMT_YUVJ420P:
-	case PIX_FMT_YUVJ422P:
-	case PIX_FMT_YUVJ444P:
-	case PIX_FMT_RGB24:
-	case PIX_FMT_ARGB:
-	case PIX_FMT_RGBA:
-	case PIX_FMT_ABGR:
-	case PIX_FMT_BGRA:
-	case PIX_FMT_RGB555LE:
+	case AV_PIX_FMT_YUV420P:
+	case AV_PIX_FMT_YUV422P:
+	case AV_PIX_FMT_YUV444P:
+	case AV_PIX_FMT_YUV411P:
+	case AV_PIX_FMT_YUVJ420P:
+	case AV_PIX_FMT_YUVJ422P:
+	case AV_PIX_FMT_YUVJ444P:
+	case AV_PIX_FMT_RGB24:
+	case AV_PIX_FMT_ARGB:
+	case AV_PIX_FMT_RGBA:
+	case AV_PIX_FMT_ABGR:
+	case AV_PIX_FMT_BGRA:
+	case AV_PIX_FMT_RGB555LE:
 		/* 8-bit */
 		for (int c = 0; c < 3; ++c) {
 			uint8_t* p = data()[c];
-			for (int y = 0; y < lines(c); ++y) {
+			int const lines = sample_size(c).height;
+			for (int y = 0; y < lines; ++y) {
 				uint8_t* q = p;
 				for (int x = 0; x < line_size()[c]; ++x) {
 					*q = int (float (*q) * f);
@@ -802,12 +843,12 @@ Image::fade (float f)
 		}
 		break;
-	case PIX_FMT_YUV422P9LE:
-	case PIX_FMT_YUV444P9LE:
-	case PIX_FMT_YUV422P10LE:
-	case PIX_FMT_YUV444P10LE:
-	case PIX_FMT_YUV422P16LE:
-	case PIX_FMT_YUV444P16LE:
+	case AV_PIX_FMT_YUV422P9LE:
+	case AV_PIX_FMT_YUV444P9LE:
+	case AV_PIX_FMT_YUV422P10LE:
+	case AV_PIX_FMT_YUV444P10LE:
+	case AV_PIX_FMT_YUV422P16LE:
+	case AV_PIX_FMT_YUV444P16LE:
 	case AV_PIX_FMT_YUVA420P9LE:
 	case AV_PIX_FMT_YUVA422P9LE:
 	case AV_PIX_FMT_YUVA444P9LE:
@@ -815,12 +856,14 @@ Image::fade (float f)
 	case AV_PIX_FMT_YUVA420P10LE:
 	case AV_PIX_FMT_YUVA422P10LE:
 	case AV_PIX_FMT_YUVA444P10LE:
 	case AV_PIX_FMT_RGB48LE:
+	case AV_PIX_FMT_XYZ12LE:
 		/* 16-bit little-endian */
 		for (int c = 0; c < 3; ++c) {
 			int const stride_pixels = stride()[c] / 2;
 			int const line_size_pixels = line_size()[c] / 2;
 			uint16_t* p = reinterpret_cast (data()[c]);
-			for (int y = 0; y < lines(c); ++y) {
+			int const lines = sample_size(c).height;
+			for (int y = 0; y < lines; ++y) {
 				uint16_t* q = p;
 				for (int x = 0; x < line_size_pixels; ++x) {
 					*q = int (float (*q) * f);
@@ -831,10 +874,10 @@ Image::fade (float f)
 		}
 		break;
-	case PIX_FMT_YUV422P9BE:
-	case PIX_FMT_YUV444P9BE:
-	case PIX_FMT_YUV444P10BE:
-	case PIX_FMT_YUV422P10BE:
+	case AV_PIX_FMT_YUV422P9BE:
+	case AV_PIX_FMT_YUV444P9BE:
+	case AV_PIX_FMT_YUV444P10BE:
+	case AV_PIX_FMT_YUV422P10BE:
 	case AV_PIX_FMT_YUVA420P9BE:
 	case AV_PIX_FMT_YUVA422P9BE:
 	case AV_PIX_FMT_YUVA444P9BE:
@@ -850,7 +893,8 @@ Image::fade (float f)
 			int const stride_pixels = stride()[c] / 2;
 			int const line_size_pixels = line_size()[c] / 2;
 			uint16_t* p = reinterpret_cast (data()[c]);
-			for (int y = 0; y < lines(c); ++y) {
+			int const lines = sample_size(c).height;
+			for (int y = 0; y < lines; ++y) {
 				uint16_t* q = p;
 				for (int x = 0; x < line_size_pixels; ++x) {
 					*q = swap_16 (int (float (swap_16 (*q)) * f));
@@ -861,9 +905,9 @@ Image::fade (float f)
 		}
 		break;
-	case PIX_FMT_UYVY422:
+	case AV_PIX_FMT_UYVY422:
 	{
-		int const Y = lines(0);
+		int const Y = sample_size(0).height;
 		int const X = line_size()[0];
 		uint8_t* p = data()[0];
 		for (int y = 0; y < Y; ++y) {
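
A note for readers unfamiliar with FFmpeg's pixel-format descriptors, which the new sample_size() and planes() use: AVPixFmtDescriptor reports a format's chroma subsampling as log2_chroma_w and log2_chroma_h, and the number of samples in a chroma plane is the image size shifted down by those factors, rounded up. The sketch below is illustrative only and not part of the patch; plane_sample_size() and its parameters are invented names, and it follows FFmpeg's documentation in applying the shift to planes 1 and 2 only, whereas the patch applies the horizontal factor to every plane after the first.

extern "C" {
#include <libavutil/pixdesc.h>
}
#include <cstdio>

/* Illustrative sketch (not DCP-o-matic code): per-plane sample counts for a
   pixel format.  Planes 1 and 2 (chroma) are shrunk by the format's
   log2_chroma_w/h; rounding is upwards so that a partial chroma sample at the
   right/bottom edge is still counted. */
static void plane_sample_size (AVPixelFormat format, int plane, int width, int height, int* w, int* h)
{
	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get (format);
	if (!d) {
		*w = *h = 0;
		return;
	}
	int const sw = (plane == 1 || plane == 2) ? d->log2_chroma_w : 0;
	int const sh = (plane == 1 || plane == 2) ? d->log2_chroma_h : 0;
	*w = (width + (1 << sw) - 1) >> sw;
	*h = (height + (1 << sh) - 1) >> sh;
}

int main ()
{
	int w, h;
	plane_sample_size (AV_PIX_FMT_YUV420P, 1, 1998, 1080, &w, &h);
	printf ("U plane of a 1998x1080 4:2:0 image: %dx%d samples\n", w, h);	/* 999x540 */
	return 0;
}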
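All of the alpha_blend() cases reduce to the same per-channel arithmetic: destination = source * alpha + destination * (1 - alpha), with alpha taken from the overlay's fourth byte. A minimal, self-contained illustration follows; the function name is invented for this note, and the BGRA channel order is taken from the comment in the RGB48LE case rather than from the (not fully shown) RGB24 case.

#include <cstdint>

/* Straight (non-premultiplied) "over" compositing of one 8-bit BGRA source
   pixel onto one 8-bit RGB24 destination pixel; the same per-channel formula
   as Image::alpha_blend() above, shown in isolation. */
static void blend_bgra_over_rgb24 (uint8_t const* src_bgra, uint8_t* dst_rgb)
{
	float const alpha = src_bgra[3] / 255.0f;
	dst_rgb[0] = static_cast<uint8_t> (src_bgra[2] * alpha + dst_rgb[0] * (1 - alpha));	/* R */
	dst_rgb[1] = static_cast<uint8_t> (src_bgra[1] * alpha + dst_rgb[1] * (1 - alpha));	/* G */
	dst_rgb[2] = static_cast<uint8_t> (src_bgra[0] * alpha + dst_rgb[2] * (1 - alpha));	/* B */
}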
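For reference alongside the new AV_PIX_FMT_XYZ12LE branch: the standard linear sRGB to CIE XYZ (D65) conversion is a 3x3 matrix product, so every term is a coefficient multiplied by a channel value; note that the patch's first term on each line uses '+' where the reference matrix multiplies. The sketch below shows only the matrix step, with the same coefficients as the patch; gamma decoding and scaling to 12-bit X'Y'Z' are left out, and the function name is invented for this note.

#include <algorithm>
#include <cstdint>

/* Reference form of the linear sRGB -> CIE XYZ (D65) matrix, clamped to the
   8-bit range used by the blending code above. */
static void srgb_to_xyz_8bit (uint8_t r, uint8_t g, uint8_t b, int& x, int& y, int& z)
{
	x = std::min (static_cast<int> (0.4124564 * r + 0.3575761 * g + 0.1804375 * b), 255);
	y = std::min (static_cast<int> (0.2126729 * r + 0.7151522 * g + 0.0721750 * b), 255);
	z = std::min (static_cast<int> (0.0193339 * r + 0.1191920 * g + 0.9503041 * b), 255);
}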
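Finally, on allocation: Image::allocate() above rounds each plane's stride up to a 32-byte boundary when the image is "aligned", presumably so that downstream SIMD code can read whole aligned blocks (the surrounding comment mentions over-allocating by 32 bytes for the same reason). The usual bit trick for rounding up to a power-of-two boundary is sketched below; stride_round_up() in the real code may differ in detail.

/* Round `value' up to the next multiple of `align', which must be a power of
   two; with align == 32 this gives the 32-byte stride alignment requested by
   Image::allocate() when _aligned is true. */
static inline int round_up_to (int value, int align)
{
	return (value + align - 1) & ~(align - 1);
}

/* e.g. round_up_to (999, 32) == 1024 and round_up_to (1998 * 3, 32) == 6016 */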