2 Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
22 /** @file src/image.cc
23 * @brief A class to describe a video image.
27 #include "compose.hpp"
28 #include "dcpomatic_assert.h"
29 #include "dcpomatic_socket.h"
30 #include "enum_indexed_vector.h"
31 #include "exceptions.h"
33 #include "maths_util.h"
34 #include "memory_util.h"
37 #include <dcp/rgb_xyz.h>
38 #include <dcp/transfer_function.h>
39 #include <dcp/warnings.h>
40 LIBDCP_DISABLE_WARNINGS
42 #include <libavutil/frame.h>
43 #include <libavutil/pixdesc.h>
44 #include <libavutil/pixfmt.h>
45 #include <libswscale/swscale.h>
47 LIBDCP_ENABLE_WARNINGS
48 #if HAVE_VALGRIND_MEMCHECK_H
49 #include <valgrind/memcheck.h>
60 using std::make_shared;
63 using std::runtime_error;
64 using std::shared_ptr;
69 /** The memory alignment, in bytes, used for each row of an image if Alignment::PADDED is requested */
70 int constexpr ALIGNMENT = 64;
/* NOTE(review): this chunk appears to be a lossy extraction — interior source
 * lines (guards, braces, increments) are elided between the visible ones
 * throughout the file.  Comments describe only the visible code.
 */
/* Each value below is (1 << (bits - 1)) - 1, i.e. one LESS than the nominal
 * mid-point chroma value (e.g. 127 rather than 128 for 8-bit).
 * NOTE(review): presumably deliberate, but confirm against the colour maths
 * elsewhere in the project.
 */
72 /* U/V black value for 8-bit colour */
73 static uint8_t const eight_bit_uv = (1 << 7) - 1;
74 /* U/V black value for 9-bit colour */
75 static uint16_t const nine_bit_uv = (1 << 8) - 1;
76 /* U/V black value for 10-bit colour */
77 static uint16_t const ten_bit_uv = (1 << 9) - 1;
78 /* U/V black value for 16-bit colour */
79 static uint16_t const sixteen_bit_uv = (1 << 15) - 1;
/* Vertical chroma-subsampling divisor for component @n: 2^log2_chroma_h from
 * the FFmpeg pixel-format descriptor (e.g. 2 for 4:2:0, 1 for 4:2:2/4:4:4).
 * NOTE(review): the null-check guarding the throw, and any special case for
 * n == 0, are on elided lines.  The error string says "line_factor()" —
 * presumably a stale function name kept from an earlier rename; confirm
 * before changing, as logs may grep for it.
 */
83 Image::vertical_factor (int n) const
89 auto d = av_pix_fmt_desc_get(_pixel_format);
91 throw PixelFormatError ("line_factor()", _pixel_format);
94 return lrintf(powf(2.0f, d->log2_chroma_h));
/* Horizontal chroma-subsampling divisor for component @n: 2^log2_chroma_w
 * from the FFmpeg pixel-format descriptor.
 * NOTE(review): as with vertical_factor(), the guard around the throw is on
 * an elided line, and the error string "sample_size()" looks like a stale
 * name from a refactor — verify before touching it.
 */
98 Image::horizontal_factor (int n) const
104 auto d = av_pix_fmt_desc_get(_pixel_format);
106 throw PixelFormatError ("sample_size()", _pixel_format);
109 return lrintf(powf(2.0f, d->log2_chroma_w));
113 /** @param n Component index.
114 * @return Number of samples (i.e. pixels, unless sub-sampled) in each direction for this component.
/* Divides the full image size by the per-component subsampling factors,
 * rounding UP so a partially-covered chroma sample still counts as one.
 * (Return-statement / dcp::Size construction wrapper is on elided lines.)
 */
117 Image::sample_size (int n) const
120 lrint (ceil(static_cast<double>(size().width) / horizontal_factor(n))),
121 lrint (ceil(static_cast<double>(size().height) / vertical_factor(n)))
126 /** @return Number of planes */
128 Image::planes () const
/* PAL8 is special-cased (its palette lives in a second plane); the value
 * returned for it is on an elided line — presumably 2, confirm. */
130 if (_pixel_format == AV_PIX_FMT_PAL8) {
134 auto d = av_pix_fmt_desc_get(_pixel_format);
136 throw PixelFormatError ("planes()", _pixel_format);
/* Packed (non-planar) formats keep all components in one plane; the return
 * value in that branch is elided — presumably 1. */
139 if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
143 return d->nb_components;
/* Round @p down to a multiple of the descriptor's horizontal chroma
 * subsampling factor (clears the low log2_chroma_w bits), so crops/offsets
 * never split a chroma sample. */
149 round_width_for_subsampling (int p, AVPixFmtDescriptor const * desc)
151 return p & ~ ((1 << desc->log2_chroma_w) - 1);
/* Round @p down to a multiple of the descriptor's vertical chroma
 * subsampling factor — vertical counterpart of round_width_for_subsampling(). */
157 round_height_for_subsampling (int p, AVPixFmtDescriptor const * desc)
159 return p & ~ ((1 << desc->log2_chroma_h) - 1);
163 /** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size'.
164 * @param crop Amount to crop by.
165 * @param inter_size Size to scale the cropped image to.
166 * @param out_size Size of output frame; if this is larger than inter_size there will be black padding.
167 * @param yuv_to_rgb YUV to RGB transformation to use, if required.
168 * @param video_range Video range of the image.
169 * @param out_format Output pixel format.
170 * @param out_aligned true to make the output image aligned.
171 * @param out_video_range Video range to use for the output image.
172 * @param fast Try to be fast at the possible expense of quality; at present this means using
173 * fast bilinear rather than bicubic scaling.
/* NOTE(review): several parameter lines (crop, out_size, fast) and closing
 * braces are elided in this extraction; comments below describe visible code. */
176 Image::crop_scale_window (
178 dcp::Size inter_size,
180 dcp::YUVToRGB yuv_to_rgb,
181 VideoRange video_range,
182 AVPixelFormat out_format,
183 VideoRange out_video_range,
184 Alignment out_alignment,
188 /* Empirical testing suggests that sws_scale() will crash if
189 the input image is not padded.
191 DCPOMATIC_ASSERT (alignment() == Alignment::PADDED);
193 DCPOMATIC_ASSERT (out_size.width >= inter_size.width);
194 DCPOMATIC_ASSERT (out_size.height >= inter_size.height);
196 auto out = make_shared<Image>(out_format, out_size, out_alignment);
199 auto in_desc = av_pix_fmt_desc_get (_pixel_format);
201 throw PixelFormatError ("crop_scale_window()", _pixel_format);
204 /* Round down so that we crop only the number of pixels that is straightforward
205 * considering any subsampling.
208 round_width_for_subsampling(crop.left, in_desc),
209 round_width_for_subsampling(crop.right, in_desc),
210 round_height_for_subsampling(crop.top, in_desc),
211 round_height_for_subsampling(crop.bottom, in_desc)
214 /* Also check that we aren't cropping more image than there actually is */
/* The "- 4" keeps at least 4 columns/rows of real image after cropping —
 * presumably a minimum sws_scale() will accept; confirm. */
215 if ((corrected_crop.left + corrected_crop.right) >= (size().width - 4)) {
216 corrected_crop.left = 0;
217 corrected_crop.right = size().width - 4;
220 if ((corrected_crop.top + corrected_crop.bottom) >= (size().height - 4)) {
221 corrected_crop.top = 0;
222 corrected_crop.bottom = size().height - 4;
225 /* Size of the image after any crop */
226 auto const cropped_size = corrected_crop.apply (size());
228 /* Scale context for a scale from cropped_size to inter_size */
229 auto scale_context = sws_getContext (
230 cropped_size.width, cropped_size.height, pixel_format(),
231 inter_size.width, inter_size.height, out_format,
232 fast ? SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0
235 if (!scale_context) {
236 throw runtime_error (N_("Could not allocate SwsContext"));
239 DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
240 EnumIndexedVector<int, dcp::YUVToRGB> lut;
241 lut[dcp::YUVToRGB::REC601] = SWS_CS_ITU601;
242 lut[dcp::YUVToRGB::REC709] = SWS_CS_ITU709;
243 lut[dcp::YUVToRGB::REC2020] = SWS_CS_BT2020;
245 /* The 3rd parameter here is:
246 0 -> source range MPEG (i.e. "video", 16-235)
247 1 -> source range JPEG (i.e. "full", 0-255)
249 0 -> destination range MPEG (i.e. "video", 16-235)
250 1 -> destination range JPEG (i.e. "full", 0-255)
252 But remember: sws_setColorspaceDetails ignores these
253 parameters unless the both source and destination images
254 are isYUV or isGray. (If either is not, it uses video range).
256 sws_setColorspaceDetails (
258 sws_getCoefficients(lut[yuv_to_rgb]), video_range == VideoRange::VIDEO ? 0 : 1,
259 sws_getCoefficients(lut[yuv_to_rgb]), out_video_range == VideoRange::VIDEO ? 0 : 1,
263 /* Prepare input data pointers with crop */
/* Offset each input plane pointer past the cropped left edge (x bytes) and
 * cropped top rows (stride * rows, with per-plane vertical subsampling). */
264 uint8_t* scale_in_data[planes()];
265 for (int c = 0; c < planes(); ++c) {
266 int const x = lrintf(bytes_per_pixel(c) * corrected_crop.left);
267 scale_in_data[c] = data()[c] + x + stride()[c] * (corrected_crop.top / vertical_factor(c));
270 auto out_desc = av_pix_fmt_desc_get (out_format);
272 throw PixelFormatError ("crop_scale_window()", out_format);
275 /* Corner of the image within out_size */
276 Position<int> const corner (
277 round_width_for_subsampling((out_size.width - inter_size.width) / 2, out_desc),
278 round_height_for_subsampling((out_size.height - inter_size.height) / 2, out_desc)
281 uint8_t* scale_out_data[out->planes()];
282 for (int c = 0; c < out->planes(); ++c) {
283 int const x = lrintf(out->bytes_per_pixel(c) * corner.x);
284 scale_out_data[c] = out->data()[c] + x + out->stride()[c] * (corner.y / out->vertical_factor(c));
/* The sws_scale() call itself is on an elided line; these are its arguments. */
289 scale_in_data, stride(),
290 0, cropped_size.height,
291 scale_out_data, out->stride()
294 sws_freeContext (scale_context);
296 /* There are some cases where there will be unwanted image data left in the image at this point:
298 * 1. When we are cropping without any scaling or pixel format conversion.
299 * 2. When we are scaling to certain sizes and placing the result into a larger
302 * Clear out the sides of the image to take care of those cases.
304 auto const pad = (out_size.width - inter_size.width) / 2;
305 out->make_part_black(0, pad);
306 out->make_part_black(corner.x + inter_size.width, pad);
/* The if (...) wrapping these conditions is elided: when converting a
 * video-range RGB source to full range, swscale does not expand the range
 * itself, so it is done manually below. */
309 video_range == VideoRange::VIDEO &&
310 out_video_range == VideoRange::FULL &&
311 av_pix_fmt_desc_get(_pixel_format)->flags & AV_PIX_FMT_FLAG_RGB
313 /* libswscale will not convert video range for RGB sources, so we have to do it ourselves */
314 out->video_range_to_full_range ();
/* Convenience wrapper: convert pixel format without resizing, by scaling to
 * our own size() with the requested output format/alignment. */
322 Image::convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, Alignment out_alignment, bool fast) const
324 return scale(size(), yuv_to_rgb, out_format, out_alignment, fast);
328 /** @param out_size Size to scale to.
329 * @param yuv_to_rgb YUVToRGB transform transform to use, if required.
330 * @param out_format Output pixel format.
331 * @param out_alignment Output alignment.
332 * @param fast Try to be fast at the possible expense of quality; at present this means using
333 * fast bilinear rather than bicubic scaling.
336 Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, Alignment out_alignment, bool fast) const
338 /* Empirical testing suggests that sws_scale() will crash if
339 the input image alignment is not PADDED.
341 DCPOMATIC_ASSERT (alignment() == Alignment::PADDED);
342 DCPOMATIC_ASSERT(size().width > 0);
343 DCPOMATIC_ASSERT(size().height > 0);
344 DCPOMATIC_ASSERT(out_size.width > 0);
345 DCPOMATIC_ASSERT(out_size.height > 0);
347 auto scaled = make_shared<Image>(out_format, out_size, out_alignment);
/* Unlike crop_scale_window() this also sets SWS_ACCURATE_RND, and asserts
 * rather than throwing if context allocation fails — NOTE(review): the
 * inconsistency with crop_scale_window()'s throw looks deliberate but is
 * worth confirming. */
348 auto scale_context = sws_getContext (
349 size().width, size().height, pixel_format(),
350 out_size.width, out_size.height, out_format,
351 (fast ? SWS_FAST_BILINEAR : SWS_BICUBIC) | SWS_ACCURATE_RND, 0, 0, 0
354 DCPOMATIC_ASSERT(scale_context);
356 DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
357 EnumIndexedVector<int, dcp::YUVToRGB> lut;
358 lut[dcp::YUVToRGB::REC601] = SWS_CS_ITU601;
359 lut[dcp::YUVToRGB::REC709] = SWS_CS_ITU709;
360 lut[dcp::YUVToRGB::REC2020] = SWS_CS_BT2020;
362 /* The 3rd parameter here is:
363 0 -> source range MPEG (i.e. "video", 16-235)
364 1 -> source range JPEG (i.e. "full", 0-255)
366 0 -> destination range MPEG (i.e. "video", 16-235)
367 1 -> destination range JPEG (i.e. "full", 0-255)
369 But remember: sws_setColorspaceDetails ignores these
370 parameters unless the corresponding image isYUV or isGray.
371 (If it's neither, it uses video range).
/* Both ranges fixed at 0 (video/MPEG) here — no range conversion requested. */
373 sws_setColorspaceDetails (
375 sws_getCoefficients(lut[yuv_to_rgb]), 0,
376 sws_getCoefficients(lut[yuv_to_rgb]), 0,
/* The sws_scale() call and its source arguments are elided; only the
 * destination arguments are visible. */
384 scaled->data(), scaled->stride()
387 sws_freeContext (scale_context);
393 /** Blacken a YUV image whose bits per pixel is rounded up to 16 */
/* Plane 0 (luma) is zeroed; planes 1 and 2 (chroma) are filled with @v
 * (already byte-swapped by the caller for BE formats); if @alpha, plane 3 is
 * zeroed too — NOTE(review): zero alpha means fully transparent in most
 * conventions; confirm that is intended for "black".
 * The store of v through p is on an elided line inside the x loop. */
395 Image::yuv_16_black (uint16_t v, bool alpha)
397 memset (data()[0], 0, sample_size(0).height * stride()[0]);
398 for (int i = 1; i < 3; ++i) {
399 auto p = reinterpret_cast<int16_t*> (data()[i]);
400 int const lines = sample_size(i).height;
401 for (int y = 0; y < lines; ++y) {
402 /* We divide by 2 here because we are writing 2 bytes at a time */
403 for (int x = 0; x < line_size()[i] / 2; ++x) {
406 p += stride()[i] / 2;
411 memset (data()[3], 0, sample_size(3).height * stride()[3]);
/* Swap the two bytes of a 16-bit value (endianness flip), used to write
 * big-endian chroma values via yuv_16_black(). */
417 Image::swap_16 (uint16_t v)
419 return ((v >> 8) & 0xff) | ((v & 0xff) << 8);
/* Blacken a vertical stripe of the image: columns [start, start + width).
 * Used by crop_scale_window() to clear pillarbox areas.  Only the listed
 * pixel formats are supported; others throw.
 * NOTE(review): for the YUV cases, chroma is presumably set to the *_uv
 * neutral values on elided lines; the y_part lambda defined below is also
 * presumably invoked from those cases — confirm. */
424 Image::make_part_black (int const start, int const width)
426 auto y_part = [&]() {
427 int const bpp = bytes_per_pixel(0);
428 int const h = sample_size(0).height;
429 int const s = stride()[0];
431 for (int y = 0; y < h; ++y) {
432 memset (p + start * bpp, 0, width * bpp);
437 switch (_pixel_format) {
438 case AV_PIX_FMT_RGB24:
439 case AV_PIX_FMT_ARGB:
440 case AV_PIX_FMT_RGBA:
441 case AV_PIX_FMT_ABGR:
442 case AV_PIX_FMT_BGRA:
443 case AV_PIX_FMT_RGB555LE:
444 case AV_PIX_FMT_RGB48LE:
445 case AV_PIX_FMT_RGB48BE:
446 case AV_PIX_FMT_XYZ12LE:
/* RGB-family: zero all bytes of the stripe, row by row.
 * NOTE(review): zeroing is not black for XYZ12LE's gamma-encoded space nor
 * for alpha channels — presumably acceptable here; confirm. */
448 int const h = sample_size(0).height;
449 int const bpp = bytes_per_pixel(0);
450 int const s = stride()[0];
451 uint8_t* p = data()[0];
452 for (int y = 0; y < h; y++) {
453 memset (p + start * bpp, 0, width * bpp);
458 case AV_PIX_FMT_YUV420P:
/* 4:2:0 chroma planes: half resolution in both directions, hence the /2 on
 * the x range; the chroma store itself is elided. */
461 for (int i = 1; i < 3; ++i) {
463 int const h = sample_size(i).height;
464 for (int y = 0; y < h; ++y) {
465 for (int x = start / 2; x < (start + width) / 2; ++x) {
473 case AV_PIX_FMT_YUV422P10LE:
/* 4:2:2 10-bit: chroma subsampled horizontally only; 16-bit samples, so
 * pointer arithmetic uses stride/2. */
476 for (int i = 1; i < 3; ++i) {
477 auto p = reinterpret_cast<int16_t*>(data()[i]);
478 int const h = sample_size(i).height;
479 for (int y = 0; y < h; ++y) {
480 for (int x = start / 2; x < (start + width) / 2; ++x) {
483 p += stride()[i] / 2;
488 case AV_PIX_FMT_YUV444P10LE:
/* 4:4:4 10-bit: no chroma subsampling, so full-resolution x range. */
491 for (int i = 1; i < 3; ++i) {
492 auto p = reinterpret_cast<int16_t*>(data()[i]);
493 int const h = sample_size(i).height;
494 for (int y = 0; y < h; ++y) {
495 for (int x = start; x < (start + width); ++x) {
498 p += stride()[i] / 2;
504 throw PixelFormatError ("make_part_black()", _pixel_format);
/* NOTE(review): the enclosing function header is elided in this extraction —
 * from the throw below this is presumably the body of Image::make_black().
 * Fills the whole image with black: luma/RGB planes zeroed, chroma planes
 * set to the appropriate neutral value for the bit depth (byte-swapped for
 * BE layouts via swap_16()). */
512 switch (_pixel_format) {
513 case AV_PIX_FMT_YUV420P:
514 case AV_PIX_FMT_YUV422P:
515 case AV_PIX_FMT_YUV444P:
516 case AV_PIX_FMT_YUV411P:
517 memset (data()[0], 0, sample_size(0).height * stride()[0]);
518 memset (data()[1], eight_bit_uv, sample_size(1).height * stride()[1]);
519 memset (data()[2], eight_bit_uv, sample_size(2).height * stride()[2]);
/* JPEG (full-range) variants use eight_bit_uv + 1 == 128, the exact
 * mid-point of 0-255. */
522 case AV_PIX_FMT_YUVJ420P:
523 case AV_PIX_FMT_YUVJ422P:
524 case AV_PIX_FMT_YUVJ444P:
525 memset (data()[0], 0, sample_size(0).height * stride()[0]);
526 memset (data()[1], eight_bit_uv + 1, sample_size(1).height * stride()[1]);
527 memset (data()[2], eight_bit_uv + 1, sample_size(2).height * stride()[2]);
530 case AV_PIX_FMT_YUV422P9LE:
531 case AV_PIX_FMT_YUV444P9LE:
532 yuv_16_black (nine_bit_uv, false);
535 case AV_PIX_FMT_YUV422P9BE:
536 case AV_PIX_FMT_YUV444P9BE:
537 yuv_16_black (swap_16 (nine_bit_uv), false);
540 case AV_PIX_FMT_YUV422P10LE:
541 case AV_PIX_FMT_YUV444P10LE:
542 yuv_16_black (ten_bit_uv, false);
545 case AV_PIX_FMT_YUV422P16LE:
546 case AV_PIX_FMT_YUV444P16LE:
547 yuv_16_black (sixteen_bit_uv, false);
550 case AV_PIX_FMT_YUV444P10BE:
551 case AV_PIX_FMT_YUV422P10BE:
552 yuv_16_black (swap_16 (ten_bit_uv), false);
/* YUVA variants: same chroma values, but also clear the alpha plane
 * (second argument true). */
555 case AV_PIX_FMT_YUVA420P9BE:
556 case AV_PIX_FMT_YUVA422P9BE:
557 case AV_PIX_FMT_YUVA444P9BE:
558 yuv_16_black (swap_16 (nine_bit_uv), true);
561 case AV_PIX_FMT_YUVA420P9LE:
562 case AV_PIX_FMT_YUVA422P9LE:
563 case AV_PIX_FMT_YUVA444P9LE:
564 yuv_16_black (nine_bit_uv, true);
567 case AV_PIX_FMT_YUVA420P10BE:
568 case AV_PIX_FMT_YUVA422P10BE:
569 case AV_PIX_FMT_YUVA444P10BE:
570 yuv_16_black (swap_16 (ten_bit_uv), true);
573 case AV_PIX_FMT_YUVA420P10LE:
574 case AV_PIX_FMT_YUVA422P10LE:
575 case AV_PIX_FMT_YUVA444P10LE:
576 yuv_16_black (ten_bit_uv, true);
579 case AV_PIX_FMT_YUVA420P16BE:
580 case AV_PIX_FMT_YUVA422P16BE:
581 case AV_PIX_FMT_YUVA444P16BE:
582 yuv_16_black (swap_16 (sixteen_bit_uv), true);
585 case AV_PIX_FMT_YUVA420P16LE:
586 case AV_PIX_FMT_YUVA422P16LE:
587 case AV_PIX_FMT_YUVA444P16LE:
588 yuv_16_black (sixteen_bit_uv, true);
591 case AV_PIX_FMT_RGB24:
592 case AV_PIX_FMT_ARGB:
593 case AV_PIX_FMT_RGBA:
594 case AV_PIX_FMT_ABGR:
595 case AV_PIX_FMT_BGRA:
596 case AV_PIX_FMT_RGB555LE:
597 case AV_PIX_FMT_RGB48LE:
598 case AV_PIX_FMT_RGB48BE:
599 case AV_PIX_FMT_XYZ12LE:
600 memset (data()[0], 0, sample_size(0).height * stride()[0]);
/* Packed UYVY: interleaved Cb Y Cr Y, so write neutral chroma and (on
 * elided lines) zero luma byte-by-byte. */
603 case AV_PIX_FMT_UYVY422:
605 int const Y = sample_size(0).height;
606 int const X = line_size()[0];
607 uint8_t* p = data()[0];
608 for (int y = 0; y < Y; ++y) {
609 for (int x = 0; x < X / 4; ++x) {
610 *p++ = eight_bit_uv; // Cb
612 *p++ = eight_bit_uv; // Cr
620 throw PixelFormatError ("make_black()", _pixel_format);
/* Zero the whole (packed) image, giving alpha == 0 everywhere — i.e. fully
 * transparent black.  Only formats with an alpha channel are supported. */
626 Image::make_transparent ()
628 if (_pixel_format != AV_PIX_FMT_BGRA && _pixel_format != AV_PIX_FMT_RGBA && _pixel_format != AV_PIX_FMT_RGBA64BE) {
629 throw PixelFormatError ("make_transparent()", _pixel_format);
632 memset (data()[0], 0, sample_size(0).height * stride()[0]);
/* NOTE(review): struct header elided — from the alpha_blend() call sites
 * below this is presumably TargetParams: plane pointers/strides plus a
 * start_x/start_y offset and bpp for the image being blended ONTO. */
641 uint8_t* const* data;
/* Pointer to pixel (start_x, y) of plane 0, in bytes. */
645 uint8_t* line_pointer(int y) const {
646 return data[0] + y * stride[0] + start_x * bpp;
651 /** Parameters of the other image (the one being blended onto the target) when target and other are RGB */
652 struct OtherRGBParams
657 uint8_t* const* data;
/* Pointer to the start of row y of plane 0 (no start_x offset here —
 * the per-pixel loop advances through the row itself). */
661 uint8_t* line_pointer(int y) const {
662 return data[0] + y * stride[0];
/* Maximum alpha value for this bpp: 2^(bpp*2) - 1, i.e. 255 for 4-byte
 * RGBA and 65535 for 8-byte RGBA64. */
665 float alpha_divisor() const {
666 return pow(2, bpp * 2) - 1;
671 /** Parameters of the other image (the one being blended onto the target) when target and other are YUV */
672 struct OtherYUVParams
677 uint8_t* const* data;
/* The alpha values come from a separate (RGBA) image, since the converted
 * YUV copy loses its alpha channel. */
680 uint8_t* const* alpha_data;
681 int const* alpha_stride;
/* Alpha-blend @other (RGBA/BGRA/RGBA64, sample type OtherType) onto an
 * RGB24 target.  @red/@blue give the R/B byte offsets within an "other"
 * pixel; @get reads one sample (byte-swapping for BE); @value_divisor scales
 * 16-bit samples down to 8-bit.  The advance of tp between pixels is on an
 * elided line. */
688 alpha_blend_onto_rgb24(TargetParams const& target, OtherRGBParams const& other, int red, int blue, std::function<float (OtherType*)> get, int value_divisor)
690 /* Going onto RGB24. First byte is red, second green, third blue */
691 auto const alpha_divisor = other.alpha_divisor();
692 for (int ty = target.start_y, oy = other.start_y; ty < target.size.height && oy < other.size.height; ++ty, ++oy) {
693 auto tp = target.line_pointer(ty);
694 auto op = reinterpret_cast<OtherType*>(other.line_pointer(oy));
695 for (int tx = target.start_x, ox = other.start_x; tx < target.size.width && ox < other.size.width; ++tx, ++ox) {
696 float const alpha = get(op + 3) / alpha_divisor;
697 tp[0] = (get(op + red) / value_divisor) * alpha + tp[0] * (1 - alpha);
698 tp[1] = (get(op + 1) / value_divisor) * alpha + tp[1] * (1 - alpha);
699 tp[2] = (get(op + blue) / value_divisor) * alpha + tp[2] * (1 - alpha);
702 op += other.bpp / sizeof(OtherType);
/* As alpha_blend_onto_rgb24() but the target is BGRA: channel order is
 * blue, green, red, alpha, and the target's alpha byte is blended too. */
710 alpha_blend_onto_bgra(TargetParams const& target, OtherRGBParams const& other, int red, int blue, std::function<float (OtherType*)> get, int value_divisor)
712 auto const alpha_divisor = other.alpha_divisor();
713 for (int ty = target.start_y, oy = other.start_y; ty < target.size.height && oy < other.size.height; ++ty, ++oy) {
714 auto tp = target.line_pointer(ty);
715 auto op = reinterpret_cast<OtherType*>(other.line_pointer(oy));
716 for (int tx = target.start_x, ox = other.start_x; tx < target.size.width && ox < other.size.width; ++tx, ++ox) {
717 float const alpha = get(op + 3) / alpha_divisor;
718 tp[0] = (get(op + blue) / value_divisor) * alpha + tp[0] * (1 - alpha);
719 tp[1] = (get(op + 1) / value_divisor) * alpha + tp[1] * (1 - alpha);
720 tp[2] = (get(op + red) / value_divisor) * alpha + tp[2] * (1 - alpha);
721 tp[3] = (get(op + 3) / value_divisor) * alpha + tp[3] * (1 - alpha);
724 op += other.bpp / sizeof(OtherType);
/* As alpha_blend_onto_bgra() but the target is RGBA: red, green, blue,
 * alpha order. */
732 alpha_blend_onto_rgba(TargetParams const& target, OtherRGBParams const& other, int red, int blue, std::function<float (OtherType*)> get, int value_divisor)
734 auto const alpha_divisor = other.alpha_divisor();
735 for (int ty = target.start_y, oy = other.start_y; ty < target.size.height && oy < other.size.height; ++ty, ++oy) {
736 auto tp = target.line_pointer(ty);
737 auto op = reinterpret_cast<OtherType*>(other.line_pointer(oy));
738 for (int tx = target.start_x, ox = other.start_x; tx < target.size.width && ox < other.size.width; ++tx, ++ox) {
739 float const alpha = get(op + 3) / alpha_divisor;
740 tp[0] = (get(op + red) / value_divisor) * alpha + tp[0] * (1 - alpha);
741 tp[1] = (get(op + 1) / value_divisor) * alpha + tp[1] * (1 - alpha);
742 tp[2] = (get(op + blue) / value_divisor) * alpha + tp[2] * (1 - alpha);
743 tp[3] = (get(op + 3) / value_divisor) * alpha + tp[3] * (1 - alpha);
746 op += other.bpp / sizeof(OtherType);
/* Blend onto a 16-bit-per-channel RGB48LE target.  Note the last parameter
 * is a multiplier here (value_scale, e.g. 256 to widen 8-bit samples to
 * 16-bit), not a divisor as in the 8-bit variants. */
754 alpha_blend_onto_rgb48le(TargetParams const& target, OtherRGBParams const& other, int red, int blue, std::function<float (OtherType*)> get, int value_scale)
756 auto const alpha_divisor = other.alpha_divisor();
757 for (int ty = target.start_y, oy = other.start_y; ty < target.size.height && oy < other.size.height; ++ty, ++oy) {
758 auto tp = reinterpret_cast<uint16_t*>(target.line_pointer(ty));
759 auto op = reinterpret_cast<OtherType*>(other.line_pointer(oy));
760 for (int tx = target.start_x, ox = other.start_x; tx < target.size.width && ox < other.size.width; ++tx, ++ox) {
761 float const alpha = get(op + 3) / alpha_divisor;
762 tp[0] = get(op + red) * value_scale * alpha + tp[0] * (1 - alpha);
763 tp[1] = get(op + 1) * value_scale * alpha + tp[1] * (1 - alpha);
764 tp[2] = get(op + blue) * value_scale * alpha + tp[2] * (1 - alpha);
/* bpp is in bytes; /2 converts to uint16_t strides. */
766 tp += target.bpp / 2;
767 op += other.bpp / sizeof(OtherType);
/* Blend onto a 12-in-16-bit XYZ target.  Each "other" pixel is first
 * converted from sRGB to XYZ (input gamma LUT, 3x3 matrix with Bradford
 * transform, output gamma LUT) and then alpha-blended — linear blending in
 * gamma-encoded XYZ space, which is approximate but presumably acceptable
 * for subtitle/overlay use. */
775 alpha_blend_onto_xyz12le(TargetParams const& target, OtherRGBParams const& other, int red, int blue, std::function<float (OtherType*)> get, int value_divisor)
777 auto const alpha_divisor = other.alpha_divisor();
778 auto conv = dcp::ColourConversion::srgb_to_xyz();
779 double fast_matrix[9];
780 dcp::combined_rgb_to_xyz(conv, fast_matrix);
781 auto lut_in = conv.in()->double_lut(0, 1, 8, false);
782 auto lut_out = conv.out()->int_lut(0, 1, 16, true, 65535);
783 for (int ty = target.start_y, oy = other.start_y; ty < target.size.height && oy < other.size.height; ++ty, ++oy) {
784 auto tp = reinterpret_cast<uint16_t*>(target.data[0] + ty * target.stride[0] + target.start_x * target.bpp);
785 auto op = reinterpret_cast<OtherType*>(other.data[0] + oy * other.stride[0]);
786 for (int tx = target.start_x, ox = other.start_x; tx < target.size.width && ox < other.size.width; ++tx, ++ox) {
787 float const alpha = get(op + 3) / alpha_divisor;
789 /* Convert sRGB to XYZ; op is BGRA. First, input gamma LUT */
790 double const r = lut_in[get(op + red) / value_divisor];
791 double const g = lut_in[get(op + 1) / value_divisor];
792 double const b = lut_in[get(op + blue) / value_divisor];
794 /* RGB to XYZ, including Bradford transform and DCI companding */
795 double const x = max(0.0, min(1.0, r * fast_matrix[0] + g * fast_matrix[1] + b * fast_matrix[2]));
796 double const y = max(0.0, min(1.0, r * fast_matrix[3] + g * fast_matrix[4] + b * fast_matrix[5]));
797 double const z = max(0.0, min(1.0, r * fast_matrix[6] + g * fast_matrix[7] + b * fast_matrix[8]));
799 /* Out gamma LUT and blend */
800 tp[0] = lut_out[lrint(x * 65535)] * alpha + tp[0] * (1 - alpha);
801 tp[1] = lut_out[lrint(y * 65535)] * alpha + tp[1] * (1 - alpha);
802 tp[2] = lut_out[lrint(z * 65535)] * alpha + tp[2] * (1 - alpha);
804 tp += target.bpp / 2;
805 op += other.bpp / sizeof(OtherType);
/* Blend a YUV-converted "other" image onto an 8-bit 4:2:0 target, taking
 * alpha from the separate RGBA original via @get_alpha.  Chroma pointers use
 * half-resolution rows/columns (hty, hoy, /2).
 * NOTE(review): chroma is blended once per LUMA pixel here, so each chroma
 * sample is written twice horizontally — presumably harmless; confirm.
 * The per-pixel pointer increments for tY/tU/tV/oY/oU/oV are on elided
 * lines. */
815 auto const ts = target.size;
816 auto const os = other.size;
817 for (int ty = target.start_y, oy = other.start_y; ty < ts.height && oy < os.height; ++ty, ++oy) {
818 int const hty = ty / 2;
819 int const hoy = oy / 2;
820 uint8_t* tY = target.data[0] + (ty * target.stride[0]) + target.start_x;
821 uint8_t* tU = target.data[1] + (hty * target.stride[1]) + target.start_x / 2;
822 uint8_t* tV = target.data[2] + (hty * target.stride[2]) + target.start_x / 2;
823 uint8_t* oY = other.data[0] + (oy * other.stride[0]) + other.start_x;
824 uint8_t* oU = other.data[1] + (hoy * other.stride[1]) + other.start_x / 2;
825 uint8_t* oV = other.data[2] + (hoy * other.stride[2]) + other.start_x / 2;
826 uint8_t* alpha = other.alpha_data[0] + (oy * other.alpha_stride[0]) + other.start_x * other.alpha_bpp;
827 for (int tx = target.start_x, ox = other.start_x; tx < ts.width && ox < os.width; ++tx, ++ox) {
828 float const a = get_alpha(alpha);
829 *tY = *oY * a + *tY * (1 - a);
830 *tU = *oU * a + *tU * (1 - a);
831 *tV = *oV * a + *tV * (1 - a);
842 alpha += other.alpha_bpp;
/* 10-bit 4:2:0 variant of alpha_blend_onto_yuv420p(): identical structure,
 * but samples are uint16_t so plane pointers are reinterpret_cast before the
 * element offsets are added.  Per-pixel pointer increments are elided. */
852 auto const ts = target.size;
853 auto const os = other.size;
854 for (int ty = target.start_y, oy = other.start_y; ty < ts.height && oy < os.height; ++ty, ++oy) {
855 int const hty = ty / 2;
856 int const hoy = oy / 2;
857 uint16_t* tY = reinterpret_cast<uint16_t*>(target.data[0] + (ty * target.stride[0])) + target.start_x;
858 uint16_t* tU = reinterpret_cast<uint16_t*>(target.data[1] + (hty * target.stride[1])) + target.start_x / 2;
859 uint16_t* tV = reinterpret_cast<uint16_t*>(target.data[2] + (hty * target.stride[2])) + target.start_x / 2;
860 uint16_t* oY = reinterpret_cast<uint16_t*>(other.data[0] + (oy * other.stride[0])) + other.start_x;
861 uint16_t* oU = reinterpret_cast<uint16_t*>(other.data[1] + (hoy * other.stride[1])) + other.start_x / 2;
862 uint16_t* oV = reinterpret_cast<uint16_t*>(other.data[2] + (hoy * other.stride[2])) + other.start_x / 2;
863 uint8_t* alpha = other.alpha_data[0] + (oy * other.alpha_stride[0]) + other.start_x * other.alpha_bpp;
864 for (int tx = target.start_x, ox = other.start_x; tx < ts.width && ox < os.width; ++tx, ++ox) {
865 float const a = get_alpha(alpha);
866 *tY = *oY * a + *tY * (1 - a);
867 *tU = *oU * a + *tU * (1 - a);
868 *tV = *oV * a + *tV * (1 - a);
879 alpha += other.alpha_bpp;
/* 9/10-bit little-endian 4:2:2 variant: chroma is subsampled horizontally
 * only, so chroma rows use ty/oy directly (no /2 on the row index).
 * Per-pixel pointer increments are elided. */
889 auto const ts = target.size;
890 auto const os = other.size;
891 for (int ty = target.start_y, oy = other.start_y; ty < ts.height && oy < os.height; ++ty, ++oy) {
892 uint16_t* tY = reinterpret_cast<uint16_t*>(target.data[0] + (ty * target.stride[0])) + target.start_x;
893 uint16_t* tU = reinterpret_cast<uint16_t*>(target.data[1] + (ty * target.stride[1])) + target.start_x / 2;
894 uint16_t* tV = reinterpret_cast<uint16_t*>(target.data[2] + (ty * target.stride[2])) + target.start_x / 2;
895 uint16_t* oY = reinterpret_cast<uint16_t*>(other.data[0] + (oy * other.stride[0])) + other.start_x;
896 uint16_t* oU = reinterpret_cast<uint16_t*>(other.data[1] + (oy * other.stride[1])) + other.start_x / 2;
897 uint16_t* oV = reinterpret_cast<uint16_t*>(other.data[2] + (oy * other.stride[2])) + other.start_x / 2;
898 uint8_t* alpha = other.alpha_data[0] + (oy * other.alpha_stride[0]) + other.start_x * other.alpha_bpp;
899 for (int tx = target.start_x, ox = other.start_x; tx < ts.width && ox < os.width; ++tx, ++ox) {
900 float const a = get_alpha(alpha);
901 *tY = *oY * a + *tY * (1 - a);
902 *tU = *oU * a + *tU * (1 - a);
903 *tV = *oV * a + *tV * (1 - a);
914 alpha += other.alpha_bpp;
/* 9/10-bit little-endian 4:4:4 variant: no chroma subsampling at all, so
 * every plane uses full-resolution offsets.  Per-pixel pointer increments
 * are elided. */
924 auto const ts = target.size;
925 auto const os = other.size;
926 for (int ty = target.start_y, oy = other.start_y; ty < ts.height && oy < os.height; ++ty, ++oy) {
927 uint16_t* tY = reinterpret_cast<uint16_t*>(target.data[0] + (ty * target.stride[0])) + target.start_x;
928 uint16_t* tU = reinterpret_cast<uint16_t*>(target.data[1] + (ty * target.stride[1])) + target.start_x;
929 uint16_t* tV = reinterpret_cast<uint16_t*>(target.data[2] + (ty * target.stride[2])) + target.start_x;
930 uint16_t* oY = reinterpret_cast<uint16_t*>(other.data[0] + (oy * other.stride[0])) + other.start_x;
931 uint16_t* oU = reinterpret_cast<uint16_t*>(other.data[1] + (oy * other.stride[1])) + other.start_x;
932 uint16_t* oV = reinterpret_cast<uint16_t*>(other.data[2] + (oy * other.stride[2])) + other.start_x;
933 uint8_t* alpha = other.alpha_data[0] + (oy * other.alpha_stride[0]) + other.start_x * other.alpha_bpp;
934 for (int tx = target.start_x, ox = other.start_x; tx < ts.width && ox < os.width; ++tx, ++ox) {
935 float const a = get_alpha(alpha);
936 *tY = *oY * a + *tY * (1 - a);
937 *tU = *oU * a + *tU * (1 - a);
938 *tV = *oV * a + *tV * (1 - a);
945 alpha += other.alpha_bpp;
/* Alpha-blend @other (which must be BGRA, RGBA or RGBA64BE) onto this image
 * at @position, dispatching to the per-format helpers above.  Negative
 * position components are handled by offsetting the start coordinates
 * (start_ox/start_oy) into the other image.  For YUV targets the other
 * image is first converted to the target's pixel format; alpha is read from
 * the unconverted original.
 * NOTE(review): the enclosing-if bodies, struct-initializer fields and
 * break statements are partly elided in this extraction. */
952 Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
955 other->pixel_format() == AV_PIX_FMT_BGRA ||
956 other->pixel_format() == AV_PIX_FMT_RGBA ||
957 other->pixel_format() == AV_PIX_FMT_RGBA64BE
/* Channel offsets within an "other" pixel: BGRA stores blue first. */
960 int const blue = other->pixel_format() == AV_PIX_FMT_BGRA ? 0 : 2;
961 int const red = other->pixel_format() == AV_PIX_FMT_BGRA ? 2 : 0;
963 int start_tx = position.x;
967 start_ox = -start_tx;
971 int start_ty = position.y;
975 start_oy = -start_ty;
979 TargetParams target_params = {
988 OtherRGBParams other_rgb_params = {
/* bpp: 8 bytes per pixel for RGBA64BE, 4 for the 8-bit formats. */
994 other->pixel_format() == AV_PIX_FMT_RGBA64BE ? 8 : 4
997 OtherYUVParams other_yuv_params = {
1005 other->pixel_format() == AV_PIX_FMT_RGBA64BE ? 8 : 4
/* Sample readers passed to the templated helpers: byteswap for BE 16-bit
 * samples, pass-through for 8-bit, plus alpha extractors for the YUV paths. */
1008 auto byteswap = [](uint16_t* p) {
1009 return (*p >> 8) | ((*p & 0xff) << 8);
1012 auto pass = [](uint8_t* p) {
1016 auto get_alpha_64be = [](uint8_t* p) {
1017 return ((static_cast<int16_t>(p[6]) << 8) | p[7]) / 65535.0f;
1020 auto get_alpha_byte = [](uint8_t* p) {
1021 return p[3] / 255.0f;
1024 switch (_pixel_format) {
1025 case AV_PIX_FMT_RGB24:
1026 target_params.bpp = 3;
1027 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1028 alpha_blend_onto_rgb24<uint16_t>(target_params, other_rgb_params, red, blue, byteswap, 256);
1030 alpha_blend_onto_rgb24<uint8_t>(target_params, other_rgb_params, red, blue, pass, 1);
1033 case AV_PIX_FMT_BGRA:
1034 target_params.bpp = 4;
1035 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1036 alpha_blend_onto_bgra<uint16_t>(target_params, other_rgb_params, red, blue, byteswap, 256);
1038 alpha_blend_onto_bgra<uint8_t>(target_params, other_rgb_params, red, blue, pass, 1);
1041 case AV_PIX_FMT_RGBA:
1042 target_params.bpp = 4;
1043 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1044 alpha_blend_onto_rgba<uint16_t>(target_params, other_rgb_params, red, blue, byteswap, 256);
1046 alpha_blend_onto_rgba<uint8_t>(target_params, other_rgb_params, red, blue, pass, 1);
/* RGB48LE takes a multiplier (widen 8-bit to 16-bit) rather than a divisor,
 * hence 1/256 being swapped relative to the cases above. */
1049 case AV_PIX_FMT_RGB48LE:
1050 target_params.bpp = 6;
1051 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1052 alpha_blend_onto_rgb48le<uint16_t>(target_params, other_rgb_params, red, blue, byteswap, 1);
1054 alpha_blend_onto_rgb48le<uint8_t>(target_params, other_rgb_params, red, blue, pass, 256);
1057 case AV_PIX_FMT_XYZ12LE:
1058 target_params.bpp = 6;
1059 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1060 alpha_blend_onto_xyz12le<uint16_t>(target_params, other_rgb_params, red, blue, byteswap, 256);
1062 alpha_blend_onto_xyz12le<uint8_t>(target_params, other_rgb_params, red, blue, pass, 1);
1065 case AV_PIX_FMT_YUV420P:
1067 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
1068 other_yuv_params.data = yuv->data();
1069 other_yuv_params.stride = yuv->stride();
1070 other_yuv_params.alpha_data = other->data();
1071 other_yuv_params.alpha_stride = other->stride();
1072 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1073 alpha_blend_onto_yuv420p(target_params, other_yuv_params, get_alpha_64be);
1075 alpha_blend_onto_yuv420p(target_params, other_yuv_params, get_alpha_byte);
1079 case AV_PIX_FMT_YUV420P10:
1081 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
1082 other_yuv_params.data = yuv->data();
1083 other_yuv_params.stride = yuv->stride();
1084 other_yuv_params.alpha_data = other->data();
1085 other_yuv_params.alpha_stride = other->stride();
1086 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1087 alpha_blend_onto_yuv420p10(target_params, other_yuv_params, get_alpha_64be);
1089 alpha_blend_onto_yuv420p10(target_params, other_yuv_params, get_alpha_byte);
1093 case AV_PIX_FMT_YUV422P9LE:
1094 case AV_PIX_FMT_YUV422P10LE:
1096 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
1097 other_yuv_params.data = yuv->data();
1098 other_yuv_params.stride = yuv->stride();
1099 other_yuv_params.alpha_data = other->data();
1100 other_yuv_params.alpha_stride = other->stride();
1101 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1102 alpha_blend_onto_yuv422p9or10le(target_params, other_yuv_params, get_alpha_64be);
1104 alpha_blend_onto_yuv422p9or10le(target_params, other_yuv_params, get_alpha_byte);
1108 case AV_PIX_FMT_YUV444P9LE:
1109 case AV_PIX_FMT_YUV444P10LE:
1111 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
1112 other_yuv_params.data = yuv->data();
1113 other_yuv_params.stride = yuv->stride();
1114 other_yuv_params.alpha_data = other->data();
1115 other_yuv_params.alpha_stride = other->stride();
1116 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1117 alpha_blend_onto_yuv444p9or10le(target_params, other_yuv_params, get_alpha_64be);
1119 alpha_blend_onto_yuv444p9or10le(target_params, other_yuv_params, get_alpha_byte);
1124 throw PixelFormatError ("alpha_blend()", _pixel_format);
/** Copy the contents of another image into this one at a given position,
 *  clipping at this image's right and bottom edges.  Only RGB24 onto RGB24
 *  is supported (asserted below).
 *  @param other Image to copy from.
 *  @param position Top-left corner (pixels, must be non-negative) at which to place @other.
 */
1130 Image::copy (shared_ptr<const Image> other, Position<int> position)
1132 /* Only implemented for RGB24 onto RGB24 so far */
1133 DCPOMATIC_ASSERT (_pixel_format == AV_PIX_FMT_RGB24 && other->pixel_format() == AV_PIX_FMT_RGB24);
1134 DCPOMATIC_ASSERT (position.x >= 0 && position.y >= 0);
/* N = pixels per row to copy, clipped so we never write past our own width */
1136 int const N = min (position.x + other->size().width, size().width) - position.x;
/* ty walks rows of this (target) image, oy the matching rows of the source; stop at whichever runs out first */
1137 for (int ty = position.y, oy = 0; ty < size().height && oy < other->size().height; ++ty, ++oy) {
/* RGB24 is 3 bytes per pixel, hence the `* 3` in the offset and length */
1138 uint8_t * const tp = data()[0] + ty * stride()[0] + position.x * 3;
1139 uint8_t * const op = other->data()[0] + oy * other->stride()[0];
1140 memcpy (tp, op, N * 3);
/** Fill this image's pixel data by reading it, plane by plane and row by row,
 *  from a socket.  Only line_size() bytes per row are read (not the padded stride).
 *  @param socket Socket to read from.
 */
1146 Image::read_from_socket (shared_ptr<Socket> socket)
1148 for (int i = 0; i < planes(); ++i) {
1149 uint8_t* p = data()[i];
1150 int const lines = sample_size(i).height;
1151 for (int y = 0; y < lines; ++y) {
/* NOTE(review): the advance of p by stride()[i] after each row is elided from this view — confirm against upstream */
1152 socket->read (p, line_size()[i]);
/** Write this image's pixel data, plane by plane and row by row, to a socket.
 *  Mirrors read_from_socket(): only line_size() bytes per row are sent, so
 *  stride padding never goes over the wire.
 *  @param socket Socket to write to.
 */
1160 Image::write_to_socket (shared_ptr<Socket> socket) const
1162 for (int i = 0; i < planes(); ++i) {
1163 uint8_t* p = data()[i];
1164 int const lines = sample_size(i).height;
1165 for (int y = 0; y < lines; ++y) {
/* NOTE(review): the advance of p by stride()[i] after each row is elided from this view — confirm against upstream */
1166 socket->write (p, line_size()[i]);
/** Work out the number of bytes per pixel for one plane of this image's pixel format.
 *  For chroma planes the value is scaled down by the horizontal subsampling factor
 *  (2^log2_chroma_w), so it can be fractional — hence the float arithmetic.
 *  @param c Plane index.
 *  @throws PixelFormatError if the pixel format is unknown to FFmpeg.
 */
1174 Image::bytes_per_pixel (int c) const
1176 auto d = av_pix_fmt_desc_get(_pixel_format);
1178 throw PixelFormatError ("bytes_per_pixel()", _pixel_format);
/* NOTE(review): the early-return value for an out-of-range plane is elided from this view */
1181 if (c >= planes()) {
1185 float bpp[4] = { 0, 0, 0, 0 };
/* Older FFmpeg exposed component depth as depth_minus1; newer as depth.
   Both branches compute ceil(depth / 8) bytes per component. */
1187 #ifdef DCPOMATIC_HAVE_AVCOMPONENTDESCRIPTOR_DEPTH_MINUS1
1188 bpp[0] = floor ((d->comp[0].depth_minus1 + 8) / 8);
1189 if (d->nb_components > 1) {
1190 bpp[1] = floor ((d->comp[1].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
1192 if (d->nb_components > 2) {
1193 bpp[2] = floor ((d->comp[2].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
1195 if (d->nb_components > 3) {
1196 bpp[3] = floor ((d->comp[3].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
1199 bpp[0] = floor ((d->comp[0].depth + 7) / 8);
1200 if (d->nb_components > 1) {
1201 bpp[1] = floor ((d->comp[1].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
1203 if (d->nb_components > 2) {
1204 bpp[2] = floor ((d->comp[2].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
1206 if (d->nb_components > 3) {
1207 bpp[3] = floor ((d->comp[3].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
1211 if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
1212 /* Not planar; sum them up */
1213 return bpp[0] + bpp[1] + bpp[2] + bpp[3];
1220 /** Construct a Image of a given size and format, allocating memory
1223 * @param p Pixel format.
1224 * @param s Size in pixels.
1225 * @param alignment PADDED to make each row of this image aligned to a ALIGNMENT-byte boundary, otherwise COMPACT.
1227 Image::Image (AVPixelFormat p, dcp::Size s, Alignment alignment)
1230 , _alignment (alignment)
/* FFmpeg uses fixed 4-entry data/linesize arrays, so allocate 4 slots regardless of plane count */
1239 _data = (uint8_t **) wrapped_av_malloc (4 * sizeof (uint8_t *));
1240 _data[0] = _data[1] = _data[2] = _data[3] = 0;
1242 _line_size = (int *) wrapped_av_malloc (4 * sizeof (int));
1243 _line_size[0] = _line_size[1] = _line_size[2] = _line_size[3] = 0;
1245 _stride = (int *) wrapped_av_malloc (4 * sizeof (int));
1246 _stride[0] = _stride[1] = _stride[2] = _stride[3] = 0;
/* Round `stride' up to the next multiple of `t' (t == 1 leaves it unchanged) */
1248 auto stride_round_up = [](int stride, int t) {
1249 int const a = stride + (t - 1);
1253 for (int i = 0; i < planes(); ++i) {
/* bytes_per_pixel() can be fractional for subsampled chroma planes, so round up */
1254 _line_size[i] = ceil (_size.width * bytes_per_pixel(i));
1255 _stride[i] = stride_round_up (_line_size[i], _alignment == Alignment::PADDED ? ALIGNMENT : 1);
1257 /* The assembler function ff_rgb24ToY_avx (in libswscale/x86/input.asm)
1258 uses a 16-byte fetch to read three bytes (R/G/B) of image data.
1259 Hence on the last pixel of the last line it reads over the end of
1260 the actual data by 1 byte. If the width of an image is a multiple
1261 of the stride alignment there will be no padding at the end of image lines.
1262 OS X crashes on this illegal read, though other operating systems don't
1263 seem to mind. The nasty + 1 in this malloc makes sure there is always a byte
1264 for that instruction to read safely.
1266 Further to the above, valgrind is now telling me that ff_rgb24ToY_ssse3
1267 over-reads by more then _avx. I can't follow the code to work out how much,
1268 so I'll just over-allocate by ALIGNMENT bytes and have done with it. Empirical
1269 testing suggests that it works.
1271 In addition to these concerns, we may read/write as much as a whole extra line
1272 at the end of each plane in cases where we are messing with offsets in order to
1273 do pad or crop. To solve this we over-allocate by an extra _stride[i] bytes.
1275 As an example: we may write to images starting at an offset so we get some padding.
1276 Hence we want to write in the following pattern:
1278 block start write start line end
1279 |..(padding)..|<------line-size------------->|..(padding)..|
1280 |..(padding)..|<------line-size------------->|..(padding)..|
1281 |..(padding)..|<------line-size------------->|..(padding)..|
1283 where line-size is of the smaller (inter_size) image and the full padded line length is that of
1284 out_size. To get things to work we have to tell FFmpeg that the stride is that of out_size.
1285 However some parts of FFmpeg (notably rgb48Toxyz12 in swscale.c) process data for the full
1286 specified *stride*. This does not matter until we get to the last line:
1288 block start write start line end
1289 |..(padding)..|<------line-size------------->|XXXwrittenXXX|
1290 |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXX|
1291 |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXXXXXwrittenXXX
/* "+ 1" extra line and "+ ALIGNMENT" slop, for the reasons explained at length above */
1294 _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * (sample_size(i).height + 1) + ALIGNMENT);
1295 #if HAVE_VALGRIND_MEMCHECK_H
1296 /* The data between the end of the line size and the stride is undefined but processed by
1297 libswscale, causing lots of valgrind errors. Mark it all defined to quell these errors.
1299 VALGRIND_MAKE_MEM_DEFINED (_data[i], _stride[i] * (sample_size(i).height + 1) + ALIGNMENT);
/** Copy constructor: deep-copy another Image's pixel data, row by row,
 *  copying only the meaningful line_size bytes of each row (not stride padding).
 */
1305 Image::Image (Image const & other)
1306 : std::enable_shared_from_this<Image>(other)
1307 , _size (other._size)
1308 , _pixel_format (other._pixel_format)
1309 , _alignment (other._alignment)
1313 for (int i = 0; i < planes(); ++i) {
1314 uint8_t* p = _data[i];
1315 uint8_t* q = other._data[i];
1316 int const lines = sample_size(i).height;
1317 for (int j = 0; j < lines; ++j) {
/* Strides may differ between the two images, so p and q are advanced independently.
   NOTE(review): the advance of p by stride()[i] is elided from this view — confirm against upstream. */
1318 memcpy (p, q, _line_size[i]);
1320 q += other.stride()[i];
/** Construct an Image by deep-copying the pixel data out of an FFmpeg AVFrame.
 *  @param frame Frame to copy from; its format must not be AV_PIX_FMT_NONE.
 *  @param alignment Row alignment for the new image (may differ from the frame's).
 */
1326 Image::Image (AVFrame const * frame, Alignment alignment)
1327 : _size (frame->width, frame->height)
1328 , _pixel_format (static_cast<AVPixelFormat>(frame->format))
1329 , _alignment (alignment)
1331 DCPOMATIC_ASSERT (_pixel_format != AV_PIX_FMT_NONE);
1335 for (int i = 0; i < planes(); ++i) {
1336 uint8_t* p = _data[i];
1337 uint8_t* q = frame->data[i];
1338 int const lines = sample_size(i).height;
1339 for (int j = 0; j < lines; ++j) {
/* NOTE(review): the advance of p by stride()[i] is elided from this view — confirm against upstream */
1340 memcpy (p, q, _line_size[i]);
1342 /* AVFrame's linesize is what we call `stride' */
1343 q += frame->linesize[i];
/** Construct an Image as a copy of another, possibly with a different alignment.
 *  The two images have the same size and format, so their line sizes must match
 *  (asserted below), though their strides may differ if the alignment differs.
 */
1349 Image::Image (shared_ptr<const Image> other, Alignment alignment)
1350 : _size (other->_size)
1351 , _pixel_format (other->_pixel_format)
1352 , _alignment (alignment)
1356 for (int i = 0; i < planes(); ++i) {
1357 DCPOMATIC_ASSERT (line_size()[i] == other->line_size()[i]);
1358 uint8_t* p = _data[i];
1359 uint8_t* q = other->data()[i];
1360 int const lines = sample_size(i).height;
1361 for (int j = 0; j < lines; ++j) {
/* NOTE(review): the advance of p by stride()[i] is elided from this view — confirm against upstream */
1362 memcpy (p, q, line_size()[i]);
1364 q += other->stride()[i];
/** Copy assignment, with a self-assignment guard.
 *  NOTE(review): the body after the guard is elided from this view — presumably
 *  copy-and-swap (construct a temporary copy, swap() with it, return *this); confirm
 *  against upstream before relying on this.
 */
1371 Image::operator= (Image const & other)
1373 if (this == &other) {
/** Exchange all state with another Image: size, pixel format, the four
 *  per-plane data/line-size/stride entries, and the alignment.  Used to
 *  implement assignment cheaply without reallocating.
 */
1384 Image::swap (Image & other)
1386 std::swap (_size, other._size);
1387 std::swap (_pixel_format, other._pixel_format);
/* Always swap all 4 slots (FFmpeg's fixed array size), not just planes() of them */
1389 for (int i = 0; i < 4; ++i) {
1390 std::swap (_data[i], other._data[i]);
1391 std::swap (_line_size[i], other._line_size[i]);
1392 std::swap (_stride[i], other._stride[i]);
1395 std::swap (_alignment, other._alignment);
/* Destructor body (the Image::~Image signature is elided from this view):
   frees each plane's pixel buffer, then the metadata arrays allocated in the
   constructor.  NOTE(review): the av_free of _data[i], _data and _stride are
   elided here — confirm against upstream that all three are released. */
1401 for (int i = 0; i < planes(); ++i) {
1406 av_free (_line_size);
/** @return Array of pointers to the planes' pixel data (bodies elided from this view). */
1412 Image::data () const
/** @return Array of per-plane line sizes (meaningful bytes per row). */
1419 Image::line_size () const
/** @return Array of per-plane strides (allocated bytes per row, >= line size). */
1426 Image::stride () const
/** @return Image size in pixels. */
1433 Image::size () const
/** @return Row alignment (PADDED or COMPACT) chosen at construction. */
1440 Image::alignment () const
/** Merge a list of positioned images into a single image covering their
 *  bounding rectangle, alpha-blending each onto a transparent canvas.
 *  @param images Images with their positions; the first one's pixel format is used for the result.
 *  @param alignment Alignment for the merged image.
 *  @return The merged image and the bounding rectangle's position.
 *  NOTE(review): the empty-list return value is elided from this view.
 */
1447 merge (list<PositionImage> images, Image::Alignment alignment)
1449 if (images.empty ()) {
/* Single image: no blending needed, just make sure the alignment is as requested */
1453 if (images.size() == 1) {
1454 images.front().image = Image::ensure_alignment(images.front().image, alignment);
1455 return images.front();
/* Compute the bounding rectangle of all the images */
1458 dcpomatic::Rect<int> all (images.front().position, images.front().image->size().width, images.front().image->size().height);
1459 for (auto const& i: images) {
1460 all.extend (dcpomatic::Rect<int>(i.position, i.image->size().width, i.image->size().height));
1463 auto merged = make_shared<Image>(images.front().image->pixel_format(), dcp::Size(all.width, all.height), alignment);
1464 merged->make_transparent ();
/* Positions are re-expressed relative to the bounding rectangle's origin */
1465 for (auto const& i: images) {
1466 merged->alpha_blend (i.image, i.position - all.position());
1469 return PositionImage (merged, all.position ());
/** Deep equality for two Images: metadata (plane count, pixel format, alignment,
 *  per-plane heights/line sizes/strides) must match, then each plane is compared
 *  row by row over line_size bytes only, so stride padding is ignored.
 *  NOTE(review): the return statements are elided from this view.
 */
1474 operator== (Image const & a, Image const & b)
1476 if (a.planes() != b.planes() || a.pixel_format() != b.pixel_format() || a.alignment() != b.alignment()) {
1480 for (int c = 0; c < a.planes(); ++c) {
1481 if (a.sample_size(c).height != b.sample_size(c).height || a.line_size()[c] != b.line_size()[c] || a.stride()[c] != b.stride()[c]) {
1485 uint8_t* p = a.data()[c];
1486 uint8_t* q = b.data()[c];
1487 int const lines = a.sample_size(c).height;
1488 for (int y = 0; y < lines; ++y) {
/* NOTE(review): the advance of p/q by the stride after each row is elided from this view */
1489 if (memcmp (p, q, a.line_size()[c]) != 0) {
1503 * @param f Amount to fade by; 0 is black, 1 is no fade.
1506 Image::fade (float f)
/* NOTE(review): these locals shadow the identically-named file-scope constants
   declared near the top of this file — candidates for removal */
1508 /* U/V black value for 8-bit colour */
1509 static int const eight_bit_uv = (1 << 7) - 1;
1510 /* U/V black value for 10-bit colour */
1511 static uint16_t const ten_bit_uv = (1 << 9) - 1;
1513 switch (_pixel_format) {
1514 case AV_PIX_FMT_YUV420P:
/* Luma (Y) plane: scale each sample towards 0 */
1517 uint8_t* p = data()[0];
1518 int const lines = sample_size(0).height;
1519 for (int y = 0; y < lines; ++y) {
1521 for (int x = 0; x < line_size()[0]; ++x) {
1522 *q = int(float(*q) * f);
/* Chroma (U/V) planes: scale towards the mid-point chroma value, which is
   "black" for U/V, rather than towards 0 (which would be a colour shift) */
1529 for (int c = 1; c < 3; ++c) {
1530 uint8_t* p = data()[c];
1531 int const lines = sample_size(c).height;
1532 for (int y = 0; y < lines; ++y) {
1534 for (int x = 0; x < line_size()[c]; ++x) {
1535 *q = eight_bit_uv + int((int(*q) - eight_bit_uv) * f);
1545 case AV_PIX_FMT_RGB24:
/* Packed 8-bit RGB: every byte is a colour sample, so just scale them all */
1548 uint8_t* p = data()[0];
1549 int const lines = sample_size(0).height;
1550 for (int y = 0; y < lines; ++y) {
1552 for (int x = 0; x < line_size()[0]; ++x) {
1553 *q = int (float (*q) * f);
1561 case AV_PIX_FMT_XYZ12LE:
1562 case AV_PIX_FMT_RGB48LE:
1563 /* 16-bit little-endian */
1564 for (int c = 0; c < 3; ++c) {
/* Sizes are in bytes; divide by 2 to get counts of 16-bit samples */
1565 int const stride_pixels = stride()[c] / 2;
1566 int const line_size_pixels = line_size()[c] / 2;
1567 uint16_t* p = reinterpret_cast<uint16_t*> (data()[c]);
1568 int const lines = sample_size(c).height;
1569 for (int y = 0; y < lines; ++y) {
1571 for (int x = 0; x < line_size_pixels; ++x) {
1572 *q = int (float (*q) * f);
1580 case AV_PIX_FMT_YUV422P10LE:
/* 10-bit planar YUV: luma scales towards 0... */
1584 int const stride_pixels = stride()[0] / 2;
1585 int const line_size_pixels = line_size()[0] / 2;
1586 uint16_t* p = reinterpret_cast<uint16_t*> (data()[0]);
1587 int const lines = sample_size(0).height;
1588 for (int y = 0; y < lines; ++y) {
1590 for (int x = 0; x < line_size_pixels; ++x) {
1591 *q = int(float(*q) * f);
/* ...and chroma scales towards the 10-bit mid-point, as in the 8-bit case */
1599 for (int c = 1; c < 3; ++c) {
1600 int const stride_pixels = stride()[c] / 2;
1601 int const line_size_pixels = line_size()[c] / 2;
1602 uint16_t* p = reinterpret_cast<uint16_t*> (data()[c]);
1603 int const lines = sample_size(c).height;
1604 for (int y = 0; y < lines; ++y) {
1606 for (int x = 0; x < line_size_pixels; ++x) {
1607 *q = ten_bit_uv + int((int(*q) - ten_bit_uv) * f);
1618 throw PixelFormatError ("fade()", _pixel_format);
/** Return an image with the requested alignment: the input itself if it already
 *  matches (the elided branch body presumably returns @image unchanged — confirm
 *  against upstream), otherwise a re-aligned copy.
 *  @param image Image to check.
 *  @param alignment Required alignment.
 */
1623 shared_ptr<const Image>
1624 Image::ensure_alignment (shared_ptr<const Image> image, Image::Alignment alignment)
1626 if (image->alignment() == alignment) {
1630 return make_shared<Image>(image, alignment);
/** @return Approximate memory used by this image's pixel buffers, in bytes:
 *  the sum over planes of stride * plane height.  (Excludes the extra line and
 *  ALIGNMENT slop over-allocated in the constructor, and the metadata arrays.)
 *  NOTE(review): the accumulator declaration and return are elided from this view.
 */
1635 Image::memory_used () const
1638 for (int i = 0; i < planes(); ++i) {
1639 m += _stride[i] * sample_size(i).height;
1646 Image::video_range_to_full_range ()
1648 switch (_pixel_format) {
1649 case AV_PIX_FMT_RGB24:
1651 float const factor = 256.0 / 219.0;
1652 uint8_t* p = data()[0];
1653 int const lines = sample_size(0).height;
1654 for (int y = 0; y < lines; ++y) {
1656 for (int x = 0; x < line_size()[0]; ++x) {
1657 *q = clamp(lrintf((*q - 16) * factor), 0L, 255L);
1664 case AV_PIX_FMT_RGB48LE:
1666 float const factor = 65536.0 / 56064.0;
1667 uint16_t* p = reinterpret_cast<uint16_t*>(data()[0]);
1668 int const lines = sample_size(0).height;
1669 for (int y = 0; y < lines; ++y) {
1671 int const line_size_pixels = line_size()[0] / 2;
1672 for (int x = 0; x < line_size_pixels; ++x) {
1673 *q = clamp(lrintf((*q - 4096) * factor), 0L, 65535L);
1676 p += stride()[0] / 2;
1680 case AV_PIX_FMT_GBRP12LE:
1682 float const factor = 4096.0 / 3504.0;
1683 for (int c = 0; c < 3; ++c) {
1684 uint16_t* p = reinterpret_cast<uint16_t*>(data()[c]);
1685 int const lines = sample_size(c).height;
1686 for (int y = 0; y < lines; ++y) {
1688 int const line_size_pixels = line_size()[c] / 2;
1689 for (int x = 0; x < line_size_pixels; ++x) {
1690 *q = clamp(lrintf((*q - 256) * factor), 0L, 4095L);
1698 throw PixelFormatError ("video_range_to_full_range()", _pixel_format);