2 Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
22 /** @file src/image.cc
23 * @brief A class to describe a video image.
27 #include "compose.hpp"
28 #include "dcpomatic_socket.h"
29 #include "exceptions.h"
35 #include <dcp/rgb_xyz.h>
36 #include <dcp/transfer_function.h>
37 DCPOMATIC_DISABLE_WARNINGS
39 #include <libavutil/frame.h>
40 #include <libavutil/pixdesc.h>
41 #include <libavutil/pixfmt.h>
42 #include <libswscale/swscale.h>
44 DCPOMATIC_ENABLE_WARNINGS
46 #if HAVE_VALGRIND_MEMCHECK_H
47 #include <valgrind/memcheck.h>
58 using std::make_shared;
61 using std::runtime_error;
62 using std::shared_ptr;
67 /** The memory alignment, in bytes, used for each row of an image if Alignment::PADDED is requested */
68 int constexpr ALIGNMENT = 64;
/* The chroma "black" (neutral) values below are all (1 << (bits - 1)) - 1, i.e. one
 * below the exact mid-point of the code range for the given bit depth.
 * NOTE(review): exact mid-point for 8-bit would be 128, not 127 — confirm this
 * off-by-one is intentional (it matches the other depths, so it is consistent).
 */
70 /* U/V black value for 8-bit colour */
71 static uint8_t const eight_bit_uv = (1 << 7) - 1;
72 /* U/V black value for 9-bit colour */
73 static uint16_t const nine_bit_uv = (1 << 8) - 1;
74 /* U/V black value for 10-bit colour */
75 static uint16_t const ten_bit_uv = (1 << 9) - 1;
76 /* U/V black value for 16-bit colour */
77 static uint16_t const sixteen_bit_uv = (1 << 15) - 1;
/** @param n Component index.
 *  @return Vertical subsampling factor for this image's pixel format, i.e.
 *  2^log2_chroma_h from the AVPixFmtDescriptor (e.g. 2 for 4:2:0 chroma planes).
 *  NOTE(review): the error message says "line_factor()", which looks like a stale
 *  function name — consider updating it to "vertical_factor()".
 */
81 Image::vertical_factor (int n) const
87 auto d = av_pix_fmt_desc_get(_pixel_format);
/* A null descriptor means the format is unknown to FFmpeg */
89 throw PixelFormatError ("line_factor()", _pixel_format);
92 return lrintf(powf(2.0f, d->log2_chroma_h));
/** @param n Component index.
 *  @return Horizontal subsampling factor for this image's pixel format, i.e.
 *  2^log2_chroma_w from the AVPixFmtDescriptor.
 *  NOTE(review): the error message says "sample_size()", which looks like a stale
 *  function name — consider updating it to "horizontal_factor()".
 */
96 Image::horizontal_factor (int n) const
102 auto d = av_pix_fmt_desc_get(_pixel_format);
104 throw PixelFormatError ("sample_size()", _pixel_format);
107 return lrintf(powf(2.0f, d->log2_chroma_w));
111 /** @param n Component index.
112 * @return Number of samples (i.e. pixels, unless sub-sampled) in each direction for this component.
/* ceil() so that an image whose dimension is not an exact multiple of the
 * subsampling factor still gets a sample for the partial edge.
 */
115 Image::sample_size (int n) const
118 lrint (ceil(static_cast<double>(size().width) / horizontal_factor(n))),
119 lrint (ceil(static_cast<double>(size().height) / vertical_factor(n)))
124 /** @return Number of planes */
126 Image::planes () const
/* PAL8 is special-cased (its return value is not visible in this listing —
 * presumably 2: the indexed data plane plus the palette).
 */
128 if (_pixel_format == AV_PIX_FMT_PAL8) {
132 auto d = av_pix_fmt_desc_get(_pixel_format);
134 throw PixelFormatError ("planes()", _pixel_format);
/* Packed (non-planar) formats presumably count as a single plane; the return
 * in this branch is not visible here — confirm against the full source.
 */
137 if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
/* Planar: one plane per component */
141 return d->nb_components;
/** Round p down to a multiple of the pixel format's horizontal chroma
 *  subsampling factor by masking off the low log2_chroma_w bits.
 */
147 round_width_for_subsampling (int p, AVPixFmtDescriptor const * desc)
149 return p & ~ ((1 << desc->log2_chroma_w) - 1);
/** Round p down to a multiple of the pixel format's vertical chroma
 *  subsampling factor by masking off the low log2_chroma_h bits.
 */
155 round_height_for_subsampling (int p, AVPixFmtDescriptor const * desc)
157 return p & ~ ((1 << desc->log2_chroma_h) - 1);
161 /** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size'.
162 * @param crop Amount to crop by.
163 * @param inter_size Size to scale the cropped image to.
164 * @param out_size Size of output frame; if this is larger than inter_size there will be black padding.
165 * @param yuv_to_rgb YUV to RGB transformation to use, if required.
166 * @param video_range Video range of the image.
167 * @param out_format Output pixel format.
168 * @param out_aligned true to make the output image aligned.
169 * @param out_video_range Video range to use for the output image.
170 * @param fast Try to be fast at the possible expense of quality; at present this means using
171 * fast bilinear rather than bicubic scaling.
174 Image::crop_scale_window (
176 dcp::Size inter_size,
178 dcp::YUVToRGB yuv_to_rgb,
179 VideoRange video_range,
180 AVPixelFormat out_format,
181 VideoRange out_video_range,
182 Alignment out_alignment,
186 /* Empirical testing suggests that sws_scale() will crash if
187 the input image is not padded.
189 DCPOMATIC_ASSERT (alignment() == Alignment::PADDED);
/* The output must be at least as big as the scaled (inter_size) image */
191 DCPOMATIC_ASSERT (out_size.width >= inter_size.width);
192 DCPOMATIC_ASSERT (out_size.height >= inter_size.height);
/* NOTE(review): the static ImageInformation accumulators and the std::cout
 * calls throughout this function look like temporary debugging
 * instrumentation — consider removing them before release.
 */
194 static boost::optional<ImageInformation> before;
196 std::cout << "csw video_range=" << static_cast<int>(video_range) << " out_video_range=" << static_cast<int>(out_video_range) << "\n";
197 auto this_info = image_information(*this);
199 before->merge(this_info);
204 std::cout << "before Y " << before->minima[0] << " " << before->maxima[0] << "\n";
205 std::cout << "before U " << before->minima[1] << " " << before->maxima[1] << "\n";
206 std::cout << "before V " << before->minima[2] << " " << before->maxima[2] << "\n";
208 auto out = make_shared<Image>(out_format, out_size, out_alignment);
211 auto in_desc = av_pix_fmt_desc_get (_pixel_format);
213 throw PixelFormatError ("crop_scale_window()", _pixel_format);
216 /* Round down so that we crop only the number of pixels that is straightforward
217 * considering any subsampling.
220 round_width_for_subsampling(crop.left, in_desc),
221 round_width_for_subsampling(crop.right, in_desc),
222 round_height_for_subsampling(crop.top, in_desc),
223 round_height_for_subsampling(crop.bottom, in_desc)
226 /* Also check that we aren't cropping more image than there actually is */
/* The "- 4" below presumably keeps a minimum 4-pixel strip of image so that
 * sws_scale always has something to work with — confirm the intent.
 */
227 if ((corrected_crop.left + corrected_crop.right) >= (size().width - 4)) {
228 corrected_crop.left = 0;
229 corrected_crop.right = size().width - 4;
232 if ((corrected_crop.top + corrected_crop.bottom) >= (size().height - 4)) {
233 corrected_crop.top = 0;
234 corrected_crop.bottom = size().height - 4;
237 /* Size of the image after any crop */
238 auto const cropped_size = corrected_crop.apply (size());
240 /* Scale context for a scale from cropped_size to inter_size */
241 auto scale_context = sws_getContext (
242 cropped_size.width, cropped_size.height, pixel_format(),
243 inter_size.width, inter_size.height, out_format,
244 fast ? SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0
247 if (!scale_context) {
248 throw runtime_error (N_("Could not allocate SwsContext"));
/* Map our YUVToRGB enum onto FFmpeg's colourspace constants (table entries not
 * visible in this listing).
 */
251 DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
252 int const lut[static_cast<int>(dcp::YUVToRGB::COUNT)] = {
257 /* The 3rd parameter here is:
258 0 -> source range MPEG (i.e. "video", 16-235)
259 1 -> source range JPEG (i.e. "full", 0-255)
261 0 -> destination range MPEG (i.e. "video", 16-235)
262 1 -> destination range JPEG (i.e. "full", 0-255)
264 But remember: sws_setColorspaceDetails ignores these
265 parameters unless the both source and destination images
266 are isYUV or isGray. (If either is not, it uses video range).
268 std::cout << "passing " << (video_range == VideoRange::VIDEO ? 0 : 1) << " " << (out_video_range == VideoRange::VIDEO ? 0 : 1) << "\n";
269 sws_setColorspaceDetails (
271 sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), video_range == VideoRange::VIDEO ? 0 : 1,
272 sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), out_video_range == VideoRange::VIDEO ? 0 : 1,
276 /* Prepare input data pointers with crop */
277 uint8_t* scale_in_data[planes()];
278 for (int c = 0; c < planes(); ++c) {
/* Offset each plane's pointer by the (subsampling-aware) crop amount */
279 int const x = lrintf(bytes_per_pixel(c) * corrected_crop.left);
280 scale_in_data[c] = data()[c] + x + stride()[c] * (corrected_crop.top / vertical_factor(c));
283 auto out_desc = av_pix_fmt_desc_get (out_format);
285 throw PixelFormatError ("crop_scale_window()", out_format);
288 /* Corner of the image within out_size */
289 Position<int> const corner (
290 round_width_for_subsampling((out_size.width - inter_size.width) / 2, out_desc),
291 round_height_for_subsampling((out_size.height - inter_size.height) / 2, out_desc)
/* Output pointers likewise offset so the scaled image lands at `corner' */
294 uint8_t* scale_out_data[out->planes()];
295 for (int c = 0; c < out->planes(); ++c) {
296 int const x = lrintf(out->bytes_per_pixel(c) * corner.x);
297 scale_out_data[c] = out->data()[c] + x + out->stride()[c] * (corner.y / out->vertical_factor(c));
302 scale_in_data, stride(),
303 0, cropped_size.height,
304 scale_out_data, out->stride()
307 sws_freeContext (scale_context);
309 if (corrected_crop != Crop() && cropped_size == inter_size) {
310 /* We are cropping without any scaling or pixel format conversion, so FFmpeg may have left some
311 data behind in our image. Clear it out. It may get to the point where we should just stop
312 trying to be clever with cropping.
314 out->make_part_black (corner.x + cropped_size.width, out_size.width - cropped_size.width);
317 std::cout << "CHECK IT " << (video_range == VideoRange::VIDEO) << " " << (out_video_range == VideoRange::FULL) << " " << (av_pix_fmt_desc_get(_pixel_format)->flags & AV_PIX_FMT_FLAG_RGB) << "\n";
320 video_range == VideoRange::VIDEO &&
321 out_video_range == VideoRange::FULL &&
322 av_pix_fmt_desc_get(_pixel_format)->flags & AV_PIX_FMT_FLAG_RGB
324 /* libswscale will not convert video range for RGB sources, so we have to do it ourselves */
325 std::cout << "doing the v->f conversion ourselves.\n";
326 out->video_range_to_full_range ();
329 boost::optional<ImageInformation> after;
331 this_info = image_information(*out);
333 after->merge(this_info);
338 std::cout << "after Y " << after->minima[0] << " " << after->maxima[0] << "\n";
339 std::cout << "after U " << after->minima[1] << " " << after->maxima[1] << "\n";
340 std::cout << "after V " << after->minima[2] << " " << after->maxima[2] << "\n";
/** Convert this image to another pixel format without resizing, implemented as
 *  a scale() to the current size.
 */
347 Image::convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, Alignment out_alignment, bool fast) const
349 return scale(size(), yuv_to_rgb, out_format, out_alignment, fast);
353 /** @param out_size Size to scale to.
354 * @param yuv_to_rgb YUVToRGB transform to use, if required.
355 * @param out_format Output pixel format.
356 * @param out_alignment Output alignment.
357 * @param fast Try to be fast at the possible expense of quality; at present this means using
358 * fast bilinear rather than bicubic scaling.
361 Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, Alignment out_alignment, bool fast) const
363 /* Empirical testing suggests that sws_scale() will crash if
364 the input image alignment is not PADDED.
366 DCPOMATIC_ASSERT (alignment() == Alignment::PADDED);
368 auto scaled = make_shared<Image>(out_format, out_size, out_alignment);
/* NOTE(review): SWS_ACCURATE_RND is OR'd in here but not in
 * crop_scale_window()'s sws_getContext — confirm whether that difference
 * is intentional.
 */
369 auto scale_context = sws_getContext (
370 size().width, size().height, pixel_format(),
371 out_size.width, out_size.height, out_format,
372 (fast ? SWS_FAST_BILINEAR : SWS_BICUBIC) | SWS_ACCURATE_RND, 0, 0, 0
375 DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
376 int const lut[static_cast<int>(dcp::YUVToRGB::COUNT)] = {
381 /* The 3rd parameter here is:
382 0 -> source range MPEG (i.e. "video", 16-235)
383 1 -> source range JPEG (i.e. "full", 0-255)
385 0 -> destination range MPEG (i.e. "video", 16-235)
386 1 -> destination range JPEG (i.e. "full", 0-255)
388 But remember: sws_setColorspaceDetails ignores these
389 parameters unless the corresponding image isYUV or isGray.
390 (If it's neither, it uses video range).
/* Both source and destination ranges are hard-coded to 0 ("video" range) here,
 * unlike crop_scale_window() which passes the caller's ranges through.
 */
392 sws_setColorspaceDetails (
394 sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), 0,
395 sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), 0,
403 scaled->data(), scaled->stride()
406 sws_freeContext (scale_context);
412 /** Blacken a YUV image whose bits per pixel is rounded up to 16 */
/* @param v The 16-bit "neutral" chroma value to write to the U and V planes
 *          (already byte-swapped by the caller for big-endian formats).
 * @param alpha true if the format has a fourth (alpha) plane, which is zeroed.
 */
414 Image::yuv_16_black (uint16_t v, bool alpha)
/* Luma plane: black is simply zero */
416 memset (data()[0], 0, sample_size(0).height * stride()[0]);
/* Chroma planes 1 (U) and 2 (V): write v to every 16-bit sample */
417 for (int i = 1; i < 3; ++i) {
418 auto p = reinterpret_cast<int16_t*> (data()[i]);
419 int const lines = sample_size(i).height;
420 for (int y = 0; y < lines; ++y) {
421 /* We divide by 2 here because we are writing 2 bytes at a time */
422 for (int x = 0; x < line_size()[i] / 2; ++x) {
425 p += stride()[i] / 2;
/* Alpha plane, if present, is zeroed (fully transparent) */
430 memset (data()[3], 0, sample_size(3).height * stride()[3]);
/** Swap the two bytes of a 16-bit value, for writing opposite-endian samples */
436 Image::swap_16 (uint16_t v)
438 return ((v >> 8) & 0xff) | ((v & 0xff) << 8);
/** Black out a vertical stripe of the image.
 *  @param start x position (in pixels) of the left edge of the stripe.
 *  @param width width (in pixels) of the stripe.
 */
443 Image::make_part_black (int const start, int const width)
/* Helper to zero the stripe in the luma (or only) plane */
445 auto y_part = [&]() {
446 int const bpp = bytes_per_pixel(0);
447 int const h = sample_size(0).height;
448 int const s = stride()[0];
450 for (int y = 0; y < h; ++y) {
451 memset (p + start * bpp, 0, width * bpp);
456 switch (_pixel_format) {
/* Packed RGB-family formats: black is all-zero, so a single memset per row */
457 case AV_PIX_FMT_RGB24:
458 case AV_PIX_FMT_ARGB:
459 case AV_PIX_FMT_RGBA:
460 case AV_PIX_FMT_ABGR:
461 case AV_PIX_FMT_BGRA:
462 case AV_PIX_FMT_RGB555LE:
463 case AV_PIX_FMT_RGB48LE:
464 case AV_PIX_FMT_RGB48BE:
465 case AV_PIX_FMT_XYZ12LE:
467 int const h = sample_size(0).height;
468 int const bpp = bytes_per_pixel(0);
469 int const s = stride()[0];
470 uint8_t* p = data()[0];
471 for (int y = 0; y < h; y++) {
472 memset (p + start * bpp, 0, width * bpp);
/* 8-bit planar YUV: zero the luma stripe, then write neutral chroma.
 * Chroma x runs over start/2..(start+width)/2 because of 2x horizontal
 * subsampling (values written are not visible in this listing).
 */
477 case AV_PIX_FMT_YUV420P:
480 for (int i = 1; i < 3; ++i) {
482 int const h = sample_size(i).height;
483 for (int y = 0; y < h; ++y) {
484 for (int x = start / 2; x < (start + width) / 2; ++x) {
/* 10-bit 4:2:2: same idea but with 16-bit samples, hence the /2 on stride */
492 case AV_PIX_FMT_YUV422P10LE:
495 for (int i = 1; i < 3; ++i) {
496 auto p = reinterpret_cast<int16_t*>(data()[i]);
497 int const h = sample_size(i).height;
498 for (int y = 0; y < h; ++y) {
499 for (int x = start / 2; x < (start + width) / 2; ++x) {
502 p += stride()[i] / 2;
/* Any other pixel format is unsupported here */
508 throw PixelFormatError ("make_part_black()", _pixel_format);
/* Body of Image::make_black() (the function header is not visible in this
 * listing): fill the whole frame with black, dispatching on pixel format.
 * Luma/RGB data is zeroed; chroma planes get the neutral value for their
 * bit depth (see the *_uv constants at the top of the file).
 */
516 switch (_pixel_format) {
/* 8-bit planar YUV, video ("MPEG") range: neutral chroma is eight_bit_uv */
517 case AV_PIX_FMT_YUV420P:
518 case AV_PIX_FMT_YUV422P:
519 case AV_PIX_FMT_YUV444P:
520 case AV_PIX_FMT_YUV411P:
521 memset (data()[0], 0, sample_size(0).height * stride()[0]);
522 memset (data()[1], eight_bit_uv, sample_size(1).height * stride()[1]);
523 memset (data()[2], eight_bit_uv, sample_size(2).height * stride()[2]);
/* "J" (full-range) variants use eight_bit_uv + 1 (= 128) as neutral chroma */
526 case AV_PIX_FMT_YUVJ420P:
527 case AV_PIX_FMT_YUVJ422P:
528 case AV_PIX_FMT_YUVJ444P:
529 memset (data()[0], 0, sample_size(0).height * stride()[0]);
530 memset (data()[1], eight_bit_uv + 1, sample_size(1).height * stride()[1]);
531 memset (data()[2], eight_bit_uv + 1, sample_size(2).height * stride()[2]);
/* >8-bit planar formats go through yuv_16_black(); BE variants byte-swap the
 * chroma value first, and the bool says whether an alpha plane exists.
 */
534 case AV_PIX_FMT_YUV422P9LE:
535 case AV_PIX_FMT_YUV444P9LE:
536 yuv_16_black (nine_bit_uv, false);
539 case AV_PIX_FMT_YUV422P9BE:
540 case AV_PIX_FMT_YUV444P9BE:
541 yuv_16_black (swap_16 (nine_bit_uv), false);
544 case AV_PIX_FMT_YUV422P10LE:
545 case AV_PIX_FMT_YUV444P10LE:
546 yuv_16_black (ten_bit_uv, false);
549 case AV_PIX_FMT_YUV422P16LE:
550 case AV_PIX_FMT_YUV444P16LE:
551 yuv_16_black (sixteen_bit_uv, false);
554 case AV_PIX_FMT_YUV444P10BE:
555 case AV_PIX_FMT_YUV422P10BE:
556 yuv_16_black (swap_16 (ten_bit_uv), false);
559 case AV_PIX_FMT_YUVA420P9BE:
560 case AV_PIX_FMT_YUVA422P9BE:
561 case AV_PIX_FMT_YUVA444P9BE:
562 yuv_16_black (swap_16 (nine_bit_uv), true);
565 case AV_PIX_FMT_YUVA420P9LE:
566 case AV_PIX_FMT_YUVA422P9LE:
567 case AV_PIX_FMT_YUVA444P9LE:
568 yuv_16_black (nine_bit_uv, true);
571 case AV_PIX_FMT_YUVA420P10BE:
572 case AV_PIX_FMT_YUVA422P10BE:
573 case AV_PIX_FMT_YUVA444P10BE:
574 yuv_16_black (swap_16 (ten_bit_uv), true);
577 case AV_PIX_FMT_YUVA420P10LE:
578 case AV_PIX_FMT_YUVA422P10LE:
579 case AV_PIX_FMT_YUVA444P10LE:
580 yuv_16_black (ten_bit_uv, true);
583 case AV_PIX_FMT_YUVA420P16BE:
584 case AV_PIX_FMT_YUVA422P16BE:
585 case AV_PIX_FMT_YUVA444P16BE:
586 yuv_16_black (swap_16 (sixteen_bit_uv), true);
589 case AV_PIX_FMT_YUVA420P16LE:
590 case AV_PIX_FMT_YUVA422P16LE:
591 case AV_PIX_FMT_YUVA444P16LE:
592 yuv_16_black (sixteen_bit_uv, true);
/* Packed RGB-family formats: black is all-zero bytes */
595 case AV_PIX_FMT_RGB24:
596 case AV_PIX_FMT_ARGB:
597 case AV_PIX_FMT_RGBA:
598 case AV_PIX_FMT_ABGR:
599 case AV_PIX_FMT_BGRA:
600 case AV_PIX_FMT_RGB555LE:
601 case AV_PIX_FMT_RGB48LE:
602 case AV_PIX_FMT_RGB48BE:
603 case AV_PIX_FMT_XYZ12LE:
604 memset (data()[0], 0, sample_size(0).height * stride()[0]);
/* Packed 4:2:2 (U Y V Y): write neutral chroma / zero luma sample by sample */
607 case AV_PIX_FMT_UYVY422:
609 int const Y = sample_size(0).height;
610 int const X = line_size()[0];
611 uint8_t* p = data()[0];
612 for (int y = 0; y < Y; ++y) {
613 for (int x = 0; x < X / 4; ++x) {
614 *p++ = eight_bit_uv; // Cb
616 *p++ = eight_bit_uv; // Cr
/* Any other pixel format is unsupported */
624 throw PixelFormatError ("make_black()", _pixel_format);
/** Make the whole image fully transparent (and black).  Only implemented for
 *  the two supported formats with alpha, BGRA and RGBA, where all-zero bytes
 *  mean transparent black.
 */
630 Image::make_transparent ()
632 if (_pixel_format != AV_PIX_FMT_BGRA && _pixel_format != AV_PIX_FMT_RGBA) {
633 throw PixelFormatError ("make_transparent()", _pixel_format);
636 memset (data()[0], 0, sample_size(0).height * stride()[0]);
/** Alpha-blend `other' (which must be RGBA or BGRA) onto this image.
 *  @param other Image to blend on top of this one.
 *  @param position Top-left corner of `other' within this image; negative
 *  coordinates are handled by clipping (start_ox/start_oy skip the off-screen
 *  part of `other').
 *  Dispatches on this image's pixel format; unsupported formats throw.
 */
641 Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
643 /* We're blending RGBA or BGRA images */
644 DCPOMATIC_ASSERT (other->pixel_format() == AV_PIX_FMT_BGRA || other->pixel_format() == AV_PIX_FMT_RGBA);
/* Byte offsets of the blue and red components within an `other' pixel */
645 int const blue = other->pixel_format() == AV_PIX_FMT_BGRA ? 0 : 2;
646 int const red = other->pixel_format() == AV_PIX_FMT_BGRA ? 2 : 0;
648 int const other_bpp = 4;
/* Clip negative positions: start blending at (start_tx, start_ty) in this
 * image, reading from (start_ox, start_oy) in `other'.
 */
650 int start_tx = position.x;
654 start_ox = -start_tx;
658 int start_ty = position.y;
662 start_oy = -start_ty;
666 switch (_pixel_format) {
667 case AV_PIX_FMT_RGB24:
669 /* Going onto RGB24. First byte is red, second green, third blue */
670 int const this_bpp = 3;
671 for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
672 uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
673 uint8_t* op = other->data()[0] + oy * other->stride()[0];
674 for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
/* Standard "over" blend: out = src * a + dst * (1 - a) */
675 float const alpha = float (op[3]) / 255;
676 tp[0] = op[red] * alpha + tp[0] * (1 - alpha);
677 tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
678 tp[2] = op[blue] * alpha + tp[2] * (1 - alpha);
686 case AV_PIX_FMT_BGRA:
688 int const this_bpp = 4;
689 for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
690 uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
691 uint8_t* op = other->data()[0] + oy * other->stride()[0];
692 for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
693 float const alpha = float (op[3]) / 255;
694 tp[0] = op[blue] * alpha + tp[0] * (1 - alpha);
695 tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
696 tp[2] = op[red] * alpha + tp[2] * (1 - alpha);
697 tp[3] = op[3] * alpha + tp[3] * (1 - alpha);
705 case AV_PIX_FMT_RGBA:
707 int const this_bpp = 4;
708 for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
709 uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
710 uint8_t* op = other->data()[0] + oy * other->stride()[0];
711 for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
712 float const alpha = float (op[3]) / 255;
713 tp[0] = op[red] * alpha + tp[0] * (1 - alpha);
714 tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
715 tp[2] = op[blue] * alpha + tp[2] * (1 - alpha);
716 tp[3] = op[3] * alpha + tp[3] * (1 - alpha);
724 case AV_PIX_FMT_RGB48LE:
726 int const this_bpp = 6;
727 for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
728 uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
729 uint8_t* op = other->data()[0] + oy * other->stride()[0];
730 for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
731 float const alpha = float (op[3]) / 255;
732 /* Blend high bytes */
/* Little-endian 16-bit components: indices 1/3/5 are the high bytes of R/G/B */
733 tp[1] = op[red] * alpha + tp[1] * (1 - alpha);
734 tp[3] = op[1] * alpha + tp[3] * (1 - alpha);
735 tp[5] = op[blue] * alpha + tp[5] * (1 - alpha);
743 case AV_PIX_FMT_XYZ12LE:
/* Overlaying 8-bit sRGB onto 12-in-16-bit XYZ: convert each overlay pixel
 * through gamma LUTs and the combined RGB->XYZ matrix, then blend.
 */
745 auto conv = dcp::ColourConversion::srgb_to_xyz();
746 double fast_matrix[9];
747 dcp::combined_rgb_to_xyz (conv, fast_matrix);
748 double const * lut_in = conv.in()->lut (8, false);
749 double const * lut_out = conv.out()->lut (16, true);
750 int const this_bpp = 6;
751 for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
752 uint16_t* tp = reinterpret_cast<uint16_t*> (data()[0] + ty * stride()[0] + start_tx * this_bpp);
753 uint8_t* op = other->data()[0] + oy * other->stride()[0];
754 for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
755 float const alpha = float (op[3]) / 255;
757 /* Convert sRGB to XYZ; op is BGRA. First, input gamma LUT */
758 double const r = lut_in[op[red]];
759 double const g = lut_in[op[1]];
760 double const b = lut_in[op[blue]];
762 /* RGB to XYZ, including Bradford transform and DCI companding */
763 double const x = max (0.0, min (65535.0, r * fast_matrix[0] + g * fast_matrix[1] + b * fast_matrix[2]));
764 double const y = max (0.0, min (65535.0, r * fast_matrix[3] + g * fast_matrix[4] + b * fast_matrix[5]));
765 double const z = max (0.0, min (65535.0, r * fast_matrix[6] + g * fast_matrix[7] + b * fast_matrix[8]));
767 /* Out gamma LUT and blend */
768 tp[0] = lrint(lut_out[lrint(x)] * 65535) * alpha + tp[0] * (1 - alpha);
769 tp[1] = lrint(lut_out[lrint(y)] * 65535) * alpha + tp[1] * (1 - alpha);
770 tp[2] = lrint(lut_out[lrint(z)] * 65535) * alpha + tp[2] * (1 - alpha);
778 case AV_PIX_FMT_YUV420P:
/* Planar YUV targets: first convert the overlay to the same YUV format,
 * then blend plane by plane.  The /2 offsets reflect 4:2:0 chroma
 * subsampling; alpha is still read from the original RGBA/BGRA data.
 */
780 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
781 dcp::Size const ts = size();
782 dcp::Size const os = yuv->size();
783 for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
784 int const hty = ty / 2;
785 int const hoy = oy / 2;
786 uint8_t* tY = data()[0] + (ty * stride()[0]) + start_tx;
787 uint8_t* tU = data()[1] + (hty * stride()[1]) + start_tx / 2;
788 uint8_t* tV = data()[2] + (hty * stride()[2]) + start_tx / 2;
789 uint8_t* oY = yuv->data()[0] + (oy * yuv->stride()[0]) + start_ox;
790 uint8_t* oU = yuv->data()[1] + (hoy * yuv->stride()[1]) + start_ox / 2;
791 uint8_t* oV = yuv->data()[2] + (hoy * yuv->stride()[2]) + start_ox / 2;
792 uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
793 for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
794 float const a = float(alpha[3]) / 255;
795 *tY = *oY * a + *tY * (1 - a);
796 *tU = *oU * a + *tU * (1 - a);
797 *tV = *oV * a + *tV * (1 - a);
813 case AV_PIX_FMT_YUV420P10:
/* As the YUV420P case but with 16-bit samples */
815 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
816 dcp::Size const ts = size();
817 dcp::Size const os = yuv->size();
818 for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
819 int const hty = ty / 2;
820 int const hoy = oy / 2;
821 uint16_t* tY = ((uint16_t *) (data()[0] + (ty * stride()[0]))) + start_tx;
822 uint16_t* tU = ((uint16_t *) (data()[1] + (hty * stride()[1]))) + start_tx / 2;
823 uint16_t* tV = ((uint16_t *) (data()[2] + (hty * stride()[2]))) + start_tx / 2;
824 uint16_t* oY = ((uint16_t *) (yuv->data()[0] + (oy * yuv->stride()[0]))) + start_ox;
825 uint16_t* oU = ((uint16_t *) (yuv->data()[1] + (hoy * yuv->stride()[1]))) + start_ox / 2;
826 uint16_t* oV = ((uint16_t *) (yuv->data()[2] + (hoy * yuv->stride()[2]))) + start_ox / 2;
827 uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
828 for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
829 float const a = float(alpha[3]) / 255;
830 *tY = *oY * a + *tY * (1 - a);
831 *tU = *oU * a + *tU * (1 - a);
832 *tV = *oV * a + *tV * (1 - a);
848 case AV_PIX_FMT_YUV422P10LE:
/* 4:2:2: chroma is subsampled horizontally only, so no ty/oy halving */
850 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
851 dcp::Size const ts = size();
852 dcp::Size const os = yuv->size();
853 for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
854 uint16_t* tY = ((uint16_t *) (data()[0] + (ty * stride()[0]))) + start_tx;
855 uint16_t* tU = ((uint16_t *) (data()[1] + (ty * stride()[1]))) + start_tx / 2;
856 uint16_t* tV = ((uint16_t *) (data()[2] + (ty * stride()[2]))) + start_tx / 2;
857 uint16_t* oY = ((uint16_t *) (yuv->data()[0] + (oy * yuv->stride()[0]))) + start_ox;
858 uint16_t* oU = ((uint16_t *) (yuv->data()[1] + (oy * yuv->stride()[1]))) + start_ox / 2;
859 uint16_t* oV = ((uint16_t *) (yuv->data()[2] + (oy * yuv->stride()[2]))) + start_ox / 2;
860 uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
861 for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
862 float const a = float(alpha[3]) / 255;
863 *tY = *oY * a + *tY * (1 - a);
864 *tU = *oU * a + *tU * (1 - a);
865 *tV = *oV * a + *tV * (1 - a);
882 throw PixelFormatError ("alpha_blend()", _pixel_format);
/** Copy `other' into this image at `position', without blending.
 *  @param other Image to copy from (must be RGB24, as must this image).
 *  @param position Top-left corner for the copy; must be non-negative.
 */
888 Image::copy (shared_ptr<const Image> other, Position<int> position)
890 /* Only implemented for RGB24 onto RGB24 so far */
891 DCPOMATIC_ASSERT (_pixel_format == AV_PIX_FMT_RGB24 && other->pixel_format() == AV_PIX_FMT_RGB24);
892 DCPOMATIC_ASSERT (position.x >= 0 && position.y >= 0);
/* N = number of pixels per row to copy, clipped to this image's right edge */
894 int const N = min (position.x + other->size().width, size().width) - position.x;
895 for (int ty = position.y, oy = 0; ty < size().height && oy < other->size().height; ++ty, ++oy) {
896 uint8_t * const tp = data()[0] + ty * stride()[0] + position.x * 3;
897 uint8_t * const op = other->data()[0] + oy * other->stride()[0];
898 memcpy (tp, op, N * 3);
/** Fill this image's planes with data read from a socket, line by line
 *  (line_size bytes per row, so stride padding is not transferred).
 */
904 Image::read_from_socket (shared_ptr<Socket> socket)
906 for (int i = 0; i < planes(); ++i) {
907 uint8_t* p = data()[i];
908 int const lines = sample_size(i).height;
909 for (int y = 0; y < lines; ++y) {
910 socket->read (p, line_size()[i]);
/** Write this image's planes to a socket, line by line (line_size bytes per
 *  row, so stride padding is not transferred) — the mirror of
 *  read_from_socket().
 */
918 Image::write_to_socket (shared_ptr<Socket> socket) const
920 for (int i = 0; i < planes(); ++i) {
921 uint8_t* p = data()[i];
922 int const lines = sample_size(i).height;
923 for (int y = 0; y < lines; ++y) {
924 socket->write (p, line_size()[i]);
/** @param c Component (plane) index.
 *  @return Bytes per pixel for that component, as a float because horizontally
 *  sub-sampled chroma components contribute a fraction of a byte per pixel.
 */
932 Image::bytes_per_pixel (int c) const
934 auto d = av_pix_fmt_desc_get(_pixel_format);
936 throw PixelFormatError ("bytes_per_pixel()", _pixel_format);
943 float bpp[4] = { 0, 0, 0, 0 };
/* Older FFmpeg exposes depth_minus1; (depth_minus1 + 8) / 8 is the same
 * round-up-to-bytes as (depth + 7) / 8 in the newer API below.
 * NOTE(review): components 1-3 divide by 2^log2_chroma_w only (horizontal
 * subsampling) — presumably because this value is used for per-row byte
 * offsets; confirm.
 */
945 #ifdef DCPOMATIC_HAVE_AVCOMPONENTDESCRIPTOR_DEPTH_MINUS1
946 bpp[0] = floor ((d->comp[0].depth_minus1 + 8) / 8);
947 if (d->nb_components > 1) {
948 bpp[1] = floor ((d->comp[1].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
950 if (d->nb_components > 2) {
951 bpp[2] = floor ((d->comp[2].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
953 if (d->nb_components > 3) {
954 bpp[3] = floor ((d->comp[3].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
957 bpp[0] = floor ((d->comp[0].depth + 7) / 8);
958 if (d->nb_components > 1) {
959 bpp[1] = floor ((d->comp[1].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
961 if (d->nb_components > 2) {
962 bpp[2] = floor ((d->comp[2].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
964 if (d->nb_components > 3) {
965 bpp[3] = floor ((d->comp[3].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
969 if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
970 /* Not planar; sum them up */
971 return bpp[0] + bpp[1] + bpp[2] + bpp[3];
978 /** Construct a Image of a given size and format, allocating memory
981 * @param p Pixel format.
982 * @param s Size in pixels.
983 * @param alignment PADDED to make each row of this image aligned to a ALIGNMENT-byte boundary, otherwise COMPACT.
985 Image::Image (AVPixelFormat p, dcp::Size s, Alignment alignment)
988 , _alignment (alignment)
/* Allocate the bookkeeping arrays (4 entries, matching FFmpeg's convention of
 * up to 4 planes) and zero them so unused planes are well-defined.
 */
997 _data = (uint8_t **) wrapped_av_malloc (4 * sizeof (uint8_t *));
998 _data[0] = _data[1] = _data[2] = _data[3] = 0;
1000 _line_size = (int *) wrapped_av_malloc (4 * sizeof (int));
1001 _line_size[0] = _line_size[1] = _line_size[2] = _line_size[3] = 0;
1003 _stride = (int *) wrapped_av_malloc (4 * sizeof (int));
1004 _stride[0] = _stride[1] = _stride[2] = _stride[3] = 0;
/* Round `stride' up to the next multiple of t (t = 1 leaves it unchanged) */
1006 auto stride_round_up = [](int stride, int t) {
1007 int const a = stride + (t - 1);
1011 for (int i = 0; i < planes(); ++i) {
/* line_size is the bytes of real pixel data per row; stride adds padding
 * up to ALIGNMENT when PADDED is requested.
 */
1012 _line_size[i] = ceil (_size.width * bytes_per_pixel(i));
1013 _stride[i] = stride_round_up (_line_size[i], _alignment == Alignment::PADDED ? ALIGNMENT : 1);
1015 /* The assembler function ff_rgb24ToY_avx (in libswscale/x86/input.asm)
1016 uses a 16-byte fetch to read three bytes (R/G/B) of image data.
1017 Hence on the last pixel of the last line it reads over the end of
1018 the actual data by 1 byte. If the width of an image is a multiple
1019 of the stride alignment there will be no padding at the end of image lines.
1020 OS X crashes on this illegal read, though other operating systems don't
1021 seem to mind. The nasty + 1 in this malloc makes sure there is always a byte
1022 for that instruction to read safely.
1024 Further to the above, valgrind is now telling me that ff_rgb24ToY_ssse3
1025 over-reads by more then _avx. I can't follow the code to work out how much,
1026 so I'll just over-allocate by ALIGNMENT bytes and have done with it. Empirical
1027 testing suggests that it works.
1029 In addition to these concerns, we may read/write as much as a whole extra line
1030 at the end of each plane in cases where we are messing with offsets in order to
1031 do pad or crop. To solve this we over-allocate by an extra _stride[i] bytes.
1033 As an example: we may write to images starting at an offset so we get some padding.
1034 Hence we want to write in the following pattern:
1036 block start write start line end
1037 |..(padding)..|<------line-size------------->|..(padding)..|
1038 |..(padding)..|<------line-size------------->|..(padding)..|
1039 |..(padding)..|<------line-size------------->|..(padding)..|
1041 where line-size is of the smaller (inter_size) image and the full padded line length is that of
1042 out_size. To get things to work we have to tell FFmpeg that the stride is that of out_size.
1043 However some parts of FFmpeg (notably rgb48Toxyz12 in swscale.c) process data for the full
1044 specified *stride*. This does not matter until we get to the last line:
1046 block start write start line end
1047 |..(padding)..|<------line-size------------->|XXXwrittenXXX|
1048 |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXX|
1049 |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXXXXXwrittenXXX
/* Hence: height + 1 rows, plus ALIGNMENT bytes, per the long comment above */
1052 _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * (sample_size(i).height + 1) + ALIGNMENT);
1053 #if HAVE_VALGRIND_MEMCHECK_H
1054 /* The data between the end of the line size and the stride is undefined but processed by
1055 libswscale, causing lots of valgrind errors. Mark it all defined to quell these errors.
1057 VALGRIND_MAKE_MEM_DEFINED (_data[i], _stride[i] * (sample_size(i).height + 1) + ALIGNMENT);
/** Copy constructor: allocate fresh storage (via the delegated allocation, not
 *  visible here) and deep-copy each plane row by row, so differing stride
 *  padding between the two images is handled correctly.
 */
1063 Image::Image (Image const & other)
1064 : std::enable_shared_from_this<Image>(other)
1065 , _size (other._size)
1066 , _pixel_format (other._pixel_format)
1067 , _alignment (other._alignment)
1071 for (int i = 0; i < planes(); ++i) {
1072 uint8_t* p = _data[i];
1073 uint8_t* q = other._data[i];
1074 int const lines = sample_size(i).height;
1075 for (int j = 0; j < lines; ++j) {
/* Copy only line_size bytes; the strides may differ */
1076 memcpy (p, q, _line_size[i]);
1078 q += other.stride()[i];
/** Construct from an FFmpeg AVFrame, deep-copying its planes.
 *  @param frame Frame to copy from; its format must not be AV_PIX_FMT_NONE.
 *  @param alignment Alignment for this image's own storage.
 */
1084 Image::Image (AVFrame const * frame, Alignment alignment)
1085 : _size (frame->width, frame->height)
1086 , _pixel_format (static_cast<AVPixelFormat>(frame->format))
1087 , _alignment (alignment)
1089 DCPOMATIC_ASSERT (_pixel_format != AV_PIX_FMT_NONE);
1093 for (int i = 0; i < planes(); ++i) {
1094 uint8_t* p = _data[i];
1095 uint8_t* q = frame->data[i];
1096 int const lines = sample_size(i).height;
1097 for (int j = 0; j < lines; ++j) {
1098 memcpy (p, q, _line_size[i]);
1100 /* AVFrame's linesize is what we call `stride' */
1101 q += frame->linesize[i];
/** Construct a copy of another Image, possibly with a different alignment
 *  (used e.g. to re-pad or compact an image's rows).
 */
1107 Image::Image (shared_ptr<const Image> other, Alignment alignment)
1108 : _size (other->_size)
1109 , _pixel_format (other->_pixel_format)
1110 , _alignment (alignment)
1114 for (int i = 0; i < planes(); ++i) {
/* Same format and size, so the payload bytes per row must agree even though
 * the strides may differ.
 */
1115 DCPOMATIC_ASSERT (line_size()[i] == other->line_size()[i]);
1116 uint8_t* p = _data[i];
1117 uint8_t* q = other->data()[i];
1118 int const lines = sample_size(i).height;
1119 for (int j = 0; j < lines; ++j) {
1120 memcpy (p, q, line_size()[i]);
1122 q += other->stride()[i];
/** Copy assignment with self-assignment guard; the rest of the body (not
 *  visible here) presumably uses the copy-and-swap idiom via swap() below —
 *  confirm against the full source.
 */
1129 Image::operator= (Image const & other)
1131 if (this == &other) {
/** Exchange all state with another Image (supports copy-and-swap assignment).
 *  Swaps all 4 plane slots regardless of planes() so unused entries stay
 *  consistent too.
 */
1142 Image::swap (Image & other)
1144 std::swap (_size, other._size);
1145 std::swap (_pixel_format, other._pixel_format);
1147 for (int i = 0; i < 4; ++i) {
1148 std::swap (_data[i], other._data[i]);
1149 std::swap (_line_size[i], other._line_size[i]);
1150 std::swap (_stride[i], other._stride[i]);
1153 std::swap (_alignment, other._alignment);
1159 for (int i = 0; i < planes(); ++i) {
1164 av_free (_line_size);
1170 Image::data () const
1177 Image::line_size () const
1184 Image::stride () const
1191 Image::size () const
1198 Image::alignment () const
1205 merge (list<PositionImage> images, Image::Alignment alignment)
1207 if (images.empty ()) {
1211 if (images.size() == 1) {
1212 images.front().image = Image::ensure_alignment(images.front().image, alignment);
1213 return images.front();
1216 dcpomatic::Rect<int> all (images.front().position, images.front().image->size().width, images.front().image->size().height);
1217 for (auto const& i: images) {
1218 all.extend (dcpomatic::Rect<int>(i.position, i.image->size().width, i.image->size().height));
1221 auto merged = make_shared<Image>(images.front().image->pixel_format(), dcp::Size(all.width, all.height), alignment);
1222 merged->make_transparent ();
1223 for (auto const& i: images) {
1224 merged->alpha_blend (i.image, i.position - all.position());
1227 return PositionImage (merged, all.position ());
1232 operator== (Image const & a, Image const & b)
1234 if (a.planes() != b.planes() || a.pixel_format() != b.pixel_format() || a.alignment() != b.alignment()) {
1238 for (int c = 0; c < a.planes(); ++c) {
1239 if (a.sample_size(c).height != b.sample_size(c).height || a.line_size()[c] != b.line_size()[c] || a.stride()[c] != b.stride()[c]) {
1243 uint8_t* p = a.data()[c];
1244 uint8_t* q = b.data()[c];
1245 int const lines = a.sample_size(c).height;
1246 for (int y = 0; y < lines; ++y) {
1247 if (memcmp (p, q, a.line_size()[c]) != 0) {
1261 * @param f Amount to fade by; 0 is black, 1 is no fade.
1264 Image::fade (float f)
1266 /* U/V black value for 8-bit colour */
1267 static int const eight_bit_uv = (1 << 7) - 1;
1268 /* U/V black value for 10-bit colour */
1269 static uint16_t const ten_bit_uv = (1 << 9) - 1;
1271 switch (_pixel_format) {
1272 case AV_PIX_FMT_YUV420P:
1275 uint8_t* p = data()[0];
1276 int const lines = sample_size(0).height;
1277 for (int y = 0; y < lines; ++y) {
1279 for (int x = 0; x < line_size()[0]; ++x) {
1280 *q = int(float(*q) * f);
1287 for (int c = 1; c < 3; ++c) {
1288 uint8_t* p = data()[c];
1289 int const lines = sample_size(c).height;
1290 for (int y = 0; y < lines; ++y) {
1292 for (int x = 0; x < line_size()[c]; ++x) {
1293 *q = eight_bit_uv + int((int(*q) - eight_bit_uv) * f);
1303 case AV_PIX_FMT_RGB24:
1306 uint8_t* p = data()[0];
1307 int const lines = sample_size(0).height;
1308 for (int y = 0; y < lines; ++y) {
1310 for (int x = 0; x < line_size()[0]; ++x) {
1311 *q = int (float (*q) * f);
1319 case AV_PIX_FMT_XYZ12LE:
1320 case AV_PIX_FMT_RGB48LE:
1321 /* 16-bit little-endian */
1322 for (int c = 0; c < 3; ++c) {
1323 int const stride_pixels = stride()[c] / 2;
1324 int const line_size_pixels = line_size()[c] / 2;
1325 uint16_t* p = reinterpret_cast<uint16_t*> (data()[c]);
1326 int const lines = sample_size(c).height;
1327 for (int y = 0; y < lines; ++y) {
1329 for (int x = 0; x < line_size_pixels; ++x) {
1330 *q = int (float (*q) * f);
1338 case AV_PIX_FMT_YUV422P10LE:
1342 int const stride_pixels = stride()[0] / 2;
1343 int const line_size_pixels = line_size()[0] / 2;
1344 uint16_t* p = reinterpret_cast<uint16_t*> (data()[0]);
1345 int const lines = sample_size(0).height;
1346 for (int y = 0; y < lines; ++y) {
1348 for (int x = 0; x < line_size_pixels; ++x) {
1349 *q = int(float(*q) * f);
1357 for (int c = 1; c < 3; ++c) {
1358 int const stride_pixels = stride()[c] / 2;
1359 int const line_size_pixels = line_size()[c] / 2;
1360 uint16_t* p = reinterpret_cast<uint16_t*> (data()[c]);
1361 int const lines = sample_size(c).height;
1362 for (int y = 0; y < lines; ++y) {
1364 for (int x = 0; x < line_size_pixels; ++x) {
1365 *q = ten_bit_uv + int((int(*q) - ten_bit_uv) * f);
1376 throw PixelFormatError ("fade()", _pixel_format);
1381 shared_ptr<const Image>
1382 Image::ensure_alignment (shared_ptr<const Image> image, Image::Alignment alignment)
1384 if (image->alignment() == alignment) {
1388 return make_shared<Image>(image, alignment);
1393 Image::memory_used () const
1396 for (int i = 0; i < planes(); ++i) {
1397 m += _stride[i] * sample_size(i).height;
1422 png_write_data (png_structp png_ptr, png_bytep data, png_size_t length)
1424 auto mem = reinterpret_cast<Memory*>(png_get_io_ptr(png_ptr));
1425 size_t size = mem->size + length;
1428 mem->data = reinterpret_cast<uint8_t*>(realloc(mem->data, size));
1430 mem->data = reinterpret_cast<uint8_t*>(malloc(size));
1434 throw EncodeError (N_("could not allocate memory for PNG"));
1437 memcpy (mem->data + mem->size, data, length);
1438 mem->size += length;
1443 png_flush (png_structp)
1450 png_error_fn (png_structp png_ptr, char const * message)
1452 reinterpret_cast<Image*>(png_get_error_ptr(png_ptr))->png_error (message);
1457 Image::png_error (char const * message)
1459 throw EncodeError (String::compose ("Error during PNG write: %1", message));
1464 Image::as_png () const
1466 DCPOMATIC_ASSERT (bytes_per_pixel(0) == 4);
1467 DCPOMATIC_ASSERT (planes() == 1);
1468 if (pixel_format() != AV_PIX_FMT_RGBA) {
1469 return convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGBA, Image::Alignment::PADDED, false)->as_png();
1472 /* error handling? */
1473 png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, reinterpret_cast<void*>(const_cast<Image*>(this)), png_error_fn, 0);
1475 throw EncodeError (N_("could not create PNG write struct"));
1480 png_set_write_fn (png_ptr, &state, png_write_data, png_flush);
1482 png_infop info_ptr = png_create_info_struct(png_ptr);
1484 png_destroy_write_struct (&png_ptr, &info_ptr);
1485 throw EncodeError (N_("could not create PNG info struct"));
1488 png_set_IHDR (png_ptr, info_ptr, size().width, size().height, 8, PNG_COLOR_TYPE_RGBA, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
1490 png_byte ** row_pointers = reinterpret_cast<png_byte **>(png_malloc(png_ptr, size().height * sizeof(png_byte *)));
1491 for (int i = 0; i < size().height; ++i) {
1492 row_pointers[i] = (png_byte *) (data()[0] + i * stride()[0]);
1495 png_write_info (png_ptr, info_ptr);
1496 png_write_image (png_ptr, row_pointers);
1497 png_write_end (png_ptr, info_ptr);
1499 png_destroy_write_struct (&png_ptr, &info_ptr);
1500 png_free (png_ptr, row_pointers);
1502 return dcp::ArrayData (state.data, state.size);
1507 Image::video_range_to_full_range ()
1509 switch (_pixel_format) {
1510 case AV_PIX_FMT_RGB24:
1512 float const factor = 256.0 / 219.0;
1513 uint8_t* p = data()[0];
1514 int const lines = sample_size(0).height;
1515 for (int y = 0; y < lines; ++y) {
1517 for (int x = 0; x < line_size()[0]; ++x) {
1518 *q = clamp(lrintf((*q - 16) * factor), 0L, 255L);
1525 case AV_PIX_FMT_RGB48LE:
1527 float const factor = 65536.0 / 56064.0;
1528 uint16_t* p = reinterpret_cast<uint16_t*>(data()[0]);
1529 int const lines = sample_size(0).height;
1530 for (int y = 0; y < lines; ++y) {
1532 int const line_size_pixels = line_size()[0] / 2;
1533 for (int x = 0; x < line_size_pixels; ++x) {
1534 *q = clamp(lrintf((*q - 4096) * factor), 0L, 65535L);
1537 p += stride()[0] / 2;
1541 case AV_PIX_FMT_GBRP12LE:
1543 float const factor = 4096.0 / 3504.0;
1544 for (int c = 0; c < 3; ++c) {
1545 uint16_t* p = reinterpret_cast<uint16_t*>(data()[c]);
1546 int const lines = sample_size(c).height;
1547 for (int y = 0; y < lines; ++y) {
1549 int const line_size_pixels = line_size()[c] / 2;
1550 for (int x = 0; x < line_size_pixels; ++x) {
1551 *q = clamp(lrintf((*q - 256) * factor), 0L, 4095L);
1559 throw PixelFormatError ("video_range_to_full_range()", _pixel_format);
1565 image_information (Image const& image)
1567 ImageInformation info;
1569 switch (image.pixel_format()) {
1570 case AV_PIX_FMT_YUV444P12LE:
1571 DCPOMATIC_ASSERT(image.planes() == 3);
1572 info.minima[0] = info.minima[1] = info.minima[2] = INT32_MAX;
1573 info.maxima[0] = info.maxima[1] = info.maxima[2] = 0;
1574 for (int plane = 0; plane < image.planes(); ++plane) {
1575 for (int y = 0; y < image.size().height; ++y) {
1576 uint16_t* p = reinterpret_cast<uint16_t*>(image.data()[plane] + image.stride()[plane] * y);
1577 for (int x = 0; x < image.size().width; ++x) {
1578 info.minima[plane] = std::min(info.minima[plane], static_cast<int32_t>(*p));
1579 info.maxima[plane] = std::max(info.maxima[plane], static_cast<int32_t>(*p));
1585 case AV_PIX_FMT_RGB48LE:
1586 DCPOMATIC_ASSERT(image.planes() == 1);
1587 info.minima[0] = info.minima[1] = info.minima[2] = INT32_MAX;
1588 info.maxima[0] = info.maxima[1] = info.maxima[2] = 0;
1589 for (int y = 0; y < image.size().height; ++y) {
1590 uint16_t* p = reinterpret_cast<uint16_t*>(image.data()[0] + image.stride()[0] * y);
1591 for (int x = 0; x < image.size().width; ++x) {
1592 info.minima[0] = std::min(info.minima[0], static_cast<int32_t>(*p));
1593 info.maxima[0] = std::max(info.maxima[0], static_cast<int32_t>(*p));
1595 info.minima[1] = std::min(info.minima[1], static_cast<int32_t>(*p));
1596 info.maxima[1] = std::max(info.maxima[1], static_cast<int32_t>(*p));
1598 info.minima[2] = std::min(info.minima[2], static_cast<int32_t>(*p));
1599 info.maxima[2] = std::max(info.maxima[2], static_cast<int32_t>(*p));
1605 DCPOMATIC_ASSERT(false);
1613 ImageInformation::merge (ImageInformation const& other)
1615 for (int i = 0; i < 3; ++i) {
1616 minima[i] = std::min(minima[i], other.minima[i]);
1617 maxima[i] = std::max(maxima[i], other.maxima[i]);