2 Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
/** @file  src/lib/image.cc
 *  @brief A class to describe a video image.
 */
27 #include "compose.hpp"
28 #include "dcpomatic_assert.h"
29 #include "dcpomatic_socket.h"
30 #include "enum_indexed_vector.h"
31 #include "exceptions.h"
33 #include "maths_util.h"
34 #include "memory_util.h"
37 #include <dcp/rgb_xyz.h>
38 #include <dcp/transfer_function.h>
39 #include <dcp/warnings.h>
40 LIBDCP_DISABLE_WARNINGS
42 #include <libavutil/frame.h>
43 #include <libavutil/pixdesc.h>
44 #include <libavutil/pixfmt.h>
45 #include <libswscale/swscale.h>
47 LIBDCP_ENABLE_WARNINGS
48 #if HAVE_VALGRIND_MEMCHECK_H
49 #include <valgrind/memcheck.h>
60 using std::make_shared;
63 using std::runtime_error;
64 using std::shared_ptr;
/** The memory alignment, in bytes, used for each row of an image if Alignment::PADDED is requested */
int constexpr ALIGNMENT = 64;

/* U/V black value for 8-bit colour */
static uint8_t const eight_bit_uv = (1 << 7) - 1;
/* U/V black value for 9-bit colour */
static uint16_t const nine_bit_uv = (1 << 8) - 1;
/* U/V black value for 10-bit colour */
static uint16_t const ten_bit_uv = (1 << 9) - 1;
/* U/V black value for 16-bit colour */
static uint16_t const sixteen_bit_uv = (1 << 15) - 1;
83 Image::vertical_factor (int n) const
89 auto d = av_pix_fmt_desc_get(_pixel_format);
91 throw PixelFormatError ("line_factor()", _pixel_format);
94 return lrintf(powf(2.0f, d->log2_chroma_h));
98 Image::horizontal_factor (int n) const
104 auto d = av_pix_fmt_desc_get(_pixel_format);
106 throw PixelFormatError ("sample_size()", _pixel_format);
109 return lrintf(powf(2.0f, d->log2_chroma_w));
113 /** @param n Component index.
114 * @return Number of samples (i.e. pixels, unless sub-sampled) in each direction for this component.
117 Image::sample_size (int n) const
120 lrint (ceil(static_cast<double>(size().width) / horizontal_factor(n))),
121 lrint (ceil(static_cast<double>(size().height) / vertical_factor(n)))
126 /** @return Number of planes */
128 Image::planes () const
130 if (_pixel_format == AV_PIX_FMT_PAL8) {
134 auto d = av_pix_fmt_desc_get(_pixel_format);
136 throw PixelFormatError ("planes()", _pixel_format);
139 if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
143 return d->nb_components;
149 round_width_for_subsampling (int p, AVPixFmtDescriptor const * desc)
151 return p & ~ ((1 << desc->log2_chroma_w) - 1);
157 round_height_for_subsampling (int p, AVPixFmtDescriptor const * desc)
159 return p & ~ ((1 << desc->log2_chroma_h) - 1);
163 /** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size'.
164 * @param crop Amount to crop by.
165 * @param inter_size Size to scale the cropped image to.
166 * @param out_size Size of output frame; if this is larger than inter_size there will be black padding.
167 * @param yuv_to_rgb YUV to RGB transformation to use, if required.
168 * @param video_range Video range of the image.
169 * @param out_format Output pixel format.
170 * @param out_aligned true to make the output image aligned.
171 * @param out_video_range Video range to use for the output image.
172 * @param fast Try to be fast at the possible expense of quality; at present this means using
173 * fast bilinear rather than bicubic scaling.
176 Image::crop_scale_window (
178 dcp::Size inter_size,
180 dcp::YUVToRGB yuv_to_rgb,
181 VideoRange video_range,
182 AVPixelFormat out_format,
183 VideoRange out_video_range,
184 Alignment out_alignment,
188 /* Empirical testing suggests that sws_scale() will crash if
189 the input image is not padded.
191 DCPOMATIC_ASSERT (alignment() == Alignment::PADDED);
193 DCPOMATIC_ASSERT (out_size.width >= inter_size.width);
194 DCPOMATIC_ASSERT (out_size.height >= inter_size.height);
196 auto out = make_shared<Image>(out_format, out_size, out_alignment);
199 auto in_desc = av_pix_fmt_desc_get (_pixel_format);
201 throw PixelFormatError ("crop_scale_window()", _pixel_format);
204 /* Round down so that we crop only the number of pixels that is straightforward
205 * considering any subsampling.
208 round_width_for_subsampling(crop.left, in_desc),
209 round_width_for_subsampling(crop.right, in_desc),
210 round_height_for_subsampling(crop.top, in_desc),
211 round_height_for_subsampling(crop.bottom, in_desc)
214 /* Also check that we aren't cropping more image than there actually is */
215 if ((corrected_crop.left + corrected_crop.right) >= (size().width - 4)) {
216 corrected_crop.left = 0;
217 corrected_crop.right = size().width - 4;
220 if ((corrected_crop.top + corrected_crop.bottom) >= (size().height - 4)) {
221 corrected_crop.top = 0;
222 corrected_crop.bottom = size().height - 4;
225 /* Size of the image after any crop */
226 auto const cropped_size = corrected_crop.apply (size());
228 /* Scale context for a scale from cropped_size to inter_size */
229 auto scale_context = sws_getContext (
230 cropped_size.width, cropped_size.height, pixel_format(),
231 inter_size.width, inter_size.height, out_format,
232 fast ? SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0
235 if (!scale_context) {
236 throw runtime_error (N_("Could not allocate SwsContext"));
239 DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
240 EnumIndexedVector<int, dcp::YUVToRGB> lut;
241 lut[dcp::YUVToRGB::REC601] = SWS_CS_ITU601;
242 lut[dcp::YUVToRGB::REC709] = SWS_CS_ITU709;
243 lut[dcp::YUVToRGB::REC2020] = SWS_CS_BT2020;
245 /* The 3rd parameter here is:
246 0 -> source range MPEG (i.e. "video", 16-235)
247 1 -> source range JPEG (i.e. "full", 0-255)
249 0 -> destination range MPEG (i.e. "video", 16-235)
250 1 -> destination range JPEG (i.e. "full", 0-255)
252 But remember: sws_setColorspaceDetails ignores these
253 parameters unless the both source and destination images
254 are isYUV or isGray. (If either is not, it uses video range).
256 sws_setColorspaceDetails (
258 sws_getCoefficients(lut[yuv_to_rgb]), video_range == VideoRange::VIDEO ? 0 : 1,
259 sws_getCoefficients(lut[yuv_to_rgb]), out_video_range == VideoRange::VIDEO ? 0 : 1,
263 /* Prepare input data pointers with crop */
264 uint8_t* scale_in_data[planes()];
265 for (int c = 0; c < planes(); ++c) {
266 int const x = lrintf(bytes_per_pixel(c) * corrected_crop.left);
267 scale_in_data[c] = data()[c] + x + stride()[c] * (corrected_crop.top / vertical_factor(c));
270 auto out_desc = av_pix_fmt_desc_get (out_format);
272 throw PixelFormatError ("crop_scale_window()", out_format);
275 /* Corner of the image within out_size */
276 Position<int> const corner (
277 round_width_for_subsampling((out_size.width - inter_size.width) / 2, out_desc),
278 round_height_for_subsampling((out_size.height - inter_size.height) / 2, out_desc)
281 uint8_t* scale_out_data[out->planes()];
282 for (int c = 0; c < out->planes(); ++c) {
283 int const x = lrintf(out->bytes_per_pixel(c) * corner.x);
284 scale_out_data[c] = out->data()[c] + x + out->stride()[c] * (corner.y / out->vertical_factor(c));
289 scale_in_data, stride(),
290 0, cropped_size.height,
291 scale_out_data, out->stride()
294 sws_freeContext (scale_context);
296 /* There are some cases where there will be unwanted image data left in the image at this point:
298 * 1. When we are cropping without any scaling or pixel format conversion.
299 * 2. When we are scaling to certain sizes and placing the result into a larger
302 * Clear out the sides of the image to take care of those cases.
304 auto const pad = (out_size.width - inter_size.width) / 2;
305 out->make_part_black(0, pad);
306 out->make_part_black(corner.x + inter_size.width, pad);
309 video_range == VideoRange::VIDEO &&
310 out_video_range == VideoRange::FULL &&
311 av_pix_fmt_desc_get(_pixel_format)->flags & AV_PIX_FMT_FLAG_RGB
313 /* libswscale will not convert video range for RGB sources, so we have to do it ourselves */
314 out->video_range_to_full_range ();
322 Image::convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, Alignment out_alignment, bool fast) const
324 return scale(size(), yuv_to_rgb, out_format, out_alignment, fast);
328 /** @param out_size Size to scale to.
329 * @param yuv_to_rgb YUVToRGB transform transform to use, if required.
330 * @param out_format Output pixel format.
331 * @param out_alignment Output alignment.
332 * @param fast Try to be fast at the possible expense of quality; at present this means using
333 * fast bilinear rather than bicubic scaling.
336 Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, Alignment out_alignment, bool fast) const
338 /* Empirical testing suggests that sws_scale() will crash if
339 the input image alignment is not PADDED.
341 DCPOMATIC_ASSERT (alignment() == Alignment::PADDED);
342 DCPOMATIC_ASSERT(size().width > 0);
343 DCPOMATIC_ASSERT(size().height > 0);
344 DCPOMATIC_ASSERT(out_size.width > 0);
345 DCPOMATIC_ASSERT(out_size.height > 0);
347 auto scaled = make_shared<Image>(out_format, out_size, out_alignment);
348 auto scale_context = sws_getContext (
349 size().width, size().height, pixel_format(),
350 out_size.width, out_size.height, out_format,
351 (fast ? SWS_FAST_BILINEAR : SWS_BICUBIC) | SWS_ACCURATE_RND, 0, 0, 0
354 DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
355 EnumIndexedVector<int, dcp::YUVToRGB> lut;
356 lut[dcp::YUVToRGB::REC601] = SWS_CS_ITU601;
357 lut[dcp::YUVToRGB::REC709] = SWS_CS_ITU709;
358 lut[dcp::YUVToRGB::REC2020] = SWS_CS_BT2020;
360 /* The 3rd parameter here is:
361 0 -> source range MPEG (i.e. "video", 16-235)
362 1 -> source range JPEG (i.e. "full", 0-255)
364 0 -> destination range MPEG (i.e. "video", 16-235)
365 1 -> destination range JPEG (i.e. "full", 0-255)
367 But remember: sws_setColorspaceDetails ignores these
368 parameters unless the corresponding image isYUV or isGray.
369 (If it's neither, it uses video range).
371 sws_setColorspaceDetails (
373 sws_getCoefficients(lut[yuv_to_rgb]), 0,
374 sws_getCoefficients(lut[yuv_to_rgb]), 0,
382 scaled->data(), scaled->stride()
385 sws_freeContext (scale_context);
391 /** Blacken a YUV image whose bits per pixel is rounded up to 16 */
393 Image::yuv_16_black (uint16_t v, bool alpha)
395 memset (data()[0], 0, sample_size(0).height * stride()[0]);
396 for (int i = 1; i < 3; ++i) {
397 auto p = reinterpret_cast<int16_t*> (data()[i]);
398 int const lines = sample_size(i).height;
399 for (int y = 0; y < lines; ++y) {
400 /* We divide by 2 here because we are writing 2 bytes at a time */
401 for (int x = 0; x < line_size()[i] / 2; ++x) {
404 p += stride()[i] / 2;
409 memset (data()[3], 0, sample_size(3).height * stride()[3]);
415 Image::swap_16 (uint16_t v)
417 return ((v >> 8) & 0xff) | ((v & 0xff) << 8);
422 Image::make_part_black (int const start, int const width)
424 auto y_part = [&]() {
425 int const bpp = bytes_per_pixel(0);
426 int const h = sample_size(0).height;
427 int const s = stride()[0];
429 for (int y = 0; y < h; ++y) {
430 memset (p + start * bpp, 0, width * bpp);
435 switch (_pixel_format) {
436 case AV_PIX_FMT_RGB24:
437 case AV_PIX_FMT_ARGB:
438 case AV_PIX_FMT_RGBA:
439 case AV_PIX_FMT_ABGR:
440 case AV_PIX_FMT_BGRA:
441 case AV_PIX_FMT_RGB555LE:
442 case AV_PIX_FMT_RGB48LE:
443 case AV_PIX_FMT_RGB48BE:
444 case AV_PIX_FMT_XYZ12LE:
446 int const h = sample_size(0).height;
447 int const bpp = bytes_per_pixel(0);
448 int const s = stride()[0];
449 uint8_t* p = data()[0];
450 for (int y = 0; y < h; y++) {
451 memset (p + start * bpp, 0, width * bpp);
456 case AV_PIX_FMT_YUV420P:
459 for (int i = 1; i < 3; ++i) {
461 int const h = sample_size(i).height;
462 for (int y = 0; y < h; ++y) {
463 for (int x = start / 2; x < (start + width) / 2; ++x) {
471 case AV_PIX_FMT_YUV422P10LE:
474 for (int i = 1; i < 3; ++i) {
475 auto p = reinterpret_cast<int16_t*>(data()[i]);
476 int const h = sample_size(i).height;
477 for (int y = 0; y < h; ++y) {
478 for (int x = start / 2; x < (start + width) / 2; ++x) {
481 p += stride()[i] / 2;
486 case AV_PIX_FMT_YUV444P10LE:
489 for (int i = 1; i < 3; ++i) {
490 auto p = reinterpret_cast<int16_t*>(data()[i]);
491 int const h = sample_size(i).height;
492 for (int y = 0; y < h; ++y) {
493 for (int x = start; x < (start + width); ++x) {
496 p += stride()[i] / 2;
502 throw PixelFormatError ("make_part_black()", _pixel_format);
510 switch (_pixel_format) {
511 case AV_PIX_FMT_YUV420P:
512 case AV_PIX_FMT_YUV422P:
513 case AV_PIX_FMT_YUV444P:
514 case AV_PIX_FMT_YUV411P:
515 memset (data()[0], 0, sample_size(0).height * stride()[0]);
516 memset (data()[1], eight_bit_uv, sample_size(1).height * stride()[1]);
517 memset (data()[2], eight_bit_uv, sample_size(2).height * stride()[2]);
520 case AV_PIX_FMT_YUVJ420P:
521 case AV_PIX_FMT_YUVJ422P:
522 case AV_PIX_FMT_YUVJ444P:
523 memset (data()[0], 0, sample_size(0).height * stride()[0]);
524 memset (data()[1], eight_bit_uv + 1, sample_size(1).height * stride()[1]);
525 memset (data()[2], eight_bit_uv + 1, sample_size(2).height * stride()[2]);
528 case AV_PIX_FMT_YUV422P9LE:
529 case AV_PIX_FMT_YUV444P9LE:
530 yuv_16_black (nine_bit_uv, false);
533 case AV_PIX_FMT_YUV422P9BE:
534 case AV_PIX_FMT_YUV444P9BE:
535 yuv_16_black (swap_16 (nine_bit_uv), false);
538 case AV_PIX_FMT_YUV422P10LE:
539 case AV_PIX_FMT_YUV444P10LE:
540 yuv_16_black (ten_bit_uv, false);
543 case AV_PIX_FMT_YUV422P16LE:
544 case AV_PIX_FMT_YUV444P16LE:
545 yuv_16_black (sixteen_bit_uv, false);
548 case AV_PIX_FMT_YUV444P10BE:
549 case AV_PIX_FMT_YUV422P10BE:
550 yuv_16_black (swap_16 (ten_bit_uv), false);
553 case AV_PIX_FMT_YUVA420P9BE:
554 case AV_PIX_FMT_YUVA422P9BE:
555 case AV_PIX_FMT_YUVA444P9BE:
556 yuv_16_black (swap_16 (nine_bit_uv), true);
559 case AV_PIX_FMT_YUVA420P9LE:
560 case AV_PIX_FMT_YUVA422P9LE:
561 case AV_PIX_FMT_YUVA444P9LE:
562 yuv_16_black (nine_bit_uv, true);
565 case AV_PIX_FMT_YUVA420P10BE:
566 case AV_PIX_FMT_YUVA422P10BE:
567 case AV_PIX_FMT_YUVA444P10BE:
568 yuv_16_black (swap_16 (ten_bit_uv), true);
571 case AV_PIX_FMT_YUVA420P10LE:
572 case AV_PIX_FMT_YUVA422P10LE:
573 case AV_PIX_FMT_YUVA444P10LE:
574 yuv_16_black (ten_bit_uv, true);
577 case AV_PIX_FMT_YUVA420P16BE:
578 case AV_PIX_FMT_YUVA422P16BE:
579 case AV_PIX_FMT_YUVA444P16BE:
580 yuv_16_black (swap_16 (sixteen_bit_uv), true);
583 case AV_PIX_FMT_YUVA420P16LE:
584 case AV_PIX_FMT_YUVA422P16LE:
585 case AV_PIX_FMT_YUVA444P16LE:
586 yuv_16_black (sixteen_bit_uv, true);
589 case AV_PIX_FMT_RGB24:
590 case AV_PIX_FMT_ARGB:
591 case AV_PIX_FMT_RGBA:
592 case AV_PIX_FMT_ABGR:
593 case AV_PIX_FMT_BGRA:
594 case AV_PIX_FMT_RGB555LE:
595 case AV_PIX_FMT_RGB48LE:
596 case AV_PIX_FMT_RGB48BE:
597 case AV_PIX_FMT_XYZ12LE:
598 memset (data()[0], 0, sample_size(0).height * stride()[0]);
601 case AV_PIX_FMT_UYVY422:
603 int const Y = sample_size(0).height;
604 int const X = line_size()[0];
605 uint8_t* p = data()[0];
606 for (int y = 0; y < Y; ++y) {
607 for (int x = 0; x < X / 4; ++x) {
608 *p++ = eight_bit_uv; // Cb
610 *p++ = eight_bit_uv; // Cr
618 throw PixelFormatError ("make_black()", _pixel_format);
624 Image::make_transparent ()
626 if (_pixel_format != AV_PIX_FMT_BGRA && _pixel_format != AV_PIX_FMT_RGBA && _pixel_format != AV_PIX_FMT_RGBA64BE) {
627 throw PixelFormatError ("make_transparent()", _pixel_format);
630 memset (data()[0], 0, sample_size(0).height * stride()[0]);
639 uint8_t* const* data;
643 uint8_t* line_pointer(int y) const {
644 return data[0] + y * stride[0] + start_x * bpp;
649 /** Parameters of the other image (the one being blended onto the target) when target and other are RGB */
650 struct OtherRGBParams
655 uint8_t* const* data;
659 uint8_t* line_pointer(int y) const {
660 return data[0] + y * stride[0];
663 float alpha_divisor() const {
664 return pow(2, bpp * 2) - 1;
669 /** Parameters of the other image (the one being blended onto the target) when target and other are YUV */
670 struct OtherYUVParams
675 uint8_t* const* data;
678 uint8_t* const* alpha_data;
679 int const* alpha_stride;
684 template <class OtherType>
686 alpha_blend_onto_rgb24(TargetParams const& target, OtherRGBParams const& other, int red, int blue, std::function<float (OtherType*)> get, int value_divisor)
688 /* Going onto RGB24. First byte is red, second green, third blue */
689 auto const alpha_divisor = other.alpha_divisor();
690 for (int ty = target.start_y, oy = other.start_y; ty < target.size.height && oy < other.size.height; ++ty, ++oy) {
691 auto tp = target.line_pointer(ty);
692 auto op = reinterpret_cast<OtherType*>(other.line_pointer(oy));
693 for (int tx = target.start_x, ox = other.start_x; tx < target.size.width && ox < other.size.width; ++tx, ++ox) {
694 float const alpha = get(op + 3) / alpha_divisor;
695 tp[0] = (get(op + red) / value_divisor) * alpha + tp[0] * (1 - alpha);
696 tp[1] = (get(op + 1) / value_divisor) * alpha + tp[1] * (1 - alpha);
697 tp[2] = (get(op + blue) / value_divisor) * alpha + tp[2] * (1 - alpha);
700 op += other.bpp / sizeof(OtherType);
706 template <class OtherType>
708 alpha_blend_onto_bgra(TargetParams const& target, OtherRGBParams const& other, int red, int blue, std::function<float (OtherType*)> get, int value_divisor)
710 auto const alpha_divisor = other.alpha_divisor();
711 for (int ty = target.start_y, oy = other.start_y; ty < target.size.height && oy < other.size.height; ++ty, ++oy) {
712 auto tp = target.line_pointer(ty);
713 auto op = reinterpret_cast<OtherType*>(other.line_pointer(oy));
714 for (int tx = target.start_x, ox = other.start_x; tx < target.size.width && ox < other.size.width; ++tx, ++ox) {
715 float const alpha = get(op + 3) / alpha_divisor;
716 tp[0] = (get(op + blue) / value_divisor) * alpha + tp[0] * (1 - alpha);
717 tp[1] = (get(op + 1) / value_divisor) * alpha + tp[1] * (1 - alpha);
718 tp[2] = (get(op + red) / value_divisor) * alpha + tp[2] * (1 - alpha);
719 tp[3] = (get(op + 3) / value_divisor) * alpha + tp[3] * (1 - alpha);
722 op += other.bpp / sizeof(OtherType);
728 template <class OtherType>
730 alpha_blend_onto_rgba(TargetParams const& target, OtherRGBParams const& other, int red, int blue, std::function<float (OtherType*)> get, int value_divisor)
732 auto const alpha_divisor = other.alpha_divisor();
733 for (int ty = target.start_y, oy = other.start_y; ty < target.size.height && oy < other.size.height; ++ty, ++oy) {
734 auto tp = target.line_pointer(ty);
735 auto op = reinterpret_cast<OtherType*>(other.line_pointer(oy));
736 for (int tx = target.start_x, ox = other.start_x; tx < target.size.width && ox < other.size.width; ++tx, ++ox) {
737 float const alpha = get(op + 3) / alpha_divisor;
738 tp[0] = (get(op + red) / value_divisor) * alpha + tp[0] * (1 - alpha);
739 tp[1] = (get(op + 1) / value_divisor) * alpha + tp[1] * (1 - alpha);
740 tp[2] = (get(op + blue) / value_divisor) * alpha + tp[2] * (1 - alpha);
741 tp[3] = (get(op + 3) / value_divisor) * alpha + tp[3] * (1 - alpha);
744 op += other.bpp / sizeof(OtherType);
750 template <class OtherType>
752 alpha_blend_onto_rgb48le(TargetParams const& target, OtherRGBParams const& other, int red, int blue, std::function<float (OtherType*)> get, int value_scale)
754 auto const alpha_divisor = other.alpha_divisor();
755 for (int ty = target.start_y, oy = other.start_y; ty < target.size.height && oy < other.size.height; ++ty, ++oy) {
756 auto tp = reinterpret_cast<uint16_t*>(target.line_pointer(ty));
757 auto op = reinterpret_cast<OtherType*>(other.line_pointer(oy));
758 for (int tx = target.start_x, ox = other.start_x; tx < target.size.width && ox < other.size.width; ++tx, ++ox) {
759 float const alpha = get(op + 3) / alpha_divisor;
760 tp[0] = get(op + red) * value_scale * alpha + tp[0] * (1 - alpha);
761 tp[1] = get(op + 1) * value_scale * alpha + tp[1] * (1 - alpha);
762 tp[2] = get(op + blue) * value_scale * alpha + tp[2] * (1 - alpha);
764 tp += target.bpp / 2;
765 op += other.bpp / sizeof(OtherType);
771 template <class OtherType>
773 alpha_blend_onto_xyz12le(TargetParams const& target, OtherRGBParams const& other, int red, int blue, std::function<float (OtherType*)> get, int value_divisor)
775 auto const alpha_divisor = other.alpha_divisor();
776 auto conv = dcp::ColourConversion::srgb_to_xyz();
777 double fast_matrix[9];
778 dcp::combined_rgb_to_xyz(conv, fast_matrix);
779 auto lut_in = conv.in()->double_lut(0, 1, 8, false);
780 auto lut_out = conv.out()->int_lut(0, 1, 16, true, 65535);
781 for (int ty = target.start_y, oy = other.start_y; ty < target.size.height && oy < other.size.height; ++ty, ++oy) {
782 auto tp = reinterpret_cast<uint16_t*>(target.data[0] + ty * target.stride[0] + target.start_x * target.bpp);
783 auto op = reinterpret_cast<OtherType*>(other.data[0] + oy * other.stride[0]);
784 for (int tx = target.start_x, ox = other.start_x; tx < target.size.width && ox < other.size.width; ++tx, ++ox) {
785 float const alpha = get(op + 3) / alpha_divisor;
787 /* Convert sRGB to XYZ; op is BGRA. First, input gamma LUT */
788 double const r = lut_in[get(op + red) / value_divisor];
789 double const g = lut_in[get(op + 1) / value_divisor];
790 double const b = lut_in[get(op + blue) / value_divisor];
792 /* RGB to XYZ, including Bradford transform and DCI companding */
793 double const x = max(0.0, min(1.0, r * fast_matrix[0] + g * fast_matrix[1] + b * fast_matrix[2]));
794 double const y = max(0.0, min(1.0, r * fast_matrix[3] + g * fast_matrix[4] + b * fast_matrix[5]));
795 double const z = max(0.0, min(1.0, r * fast_matrix[6] + g * fast_matrix[7] + b * fast_matrix[8]));
797 /* Out gamma LUT and blend */
798 tp[0] = lut_out[lrint(x * 65535)] * alpha + tp[0] * (1 - alpha);
799 tp[1] = lut_out[lrint(y * 65535)] * alpha + tp[1] * (1 - alpha);
800 tp[2] = lut_out[lrint(z * 65535)] * alpha + tp[2] * (1 - alpha);
802 tp += target.bpp / 2;
803 op += other.bpp / sizeof(OtherType);
811 alpha_blend_onto_yuv420p(TargetParams const& target, OtherYUVParams const& other, std::function<float (uint8_t* data)> get_alpha)
813 auto const ts = target.size;
814 auto const os = other.size;
815 for (int ty = target.start_y, oy = other.start_y; ty < ts.height && oy < os.height; ++ty, ++oy) {
816 int const hty = ty / 2;
817 int const hoy = oy / 2;
818 uint8_t* tY = target.data[0] + (ty * target.stride[0]) + target.start_x;
819 uint8_t* tU = target.data[1] + (hty * target.stride[1]) + target.start_x / 2;
820 uint8_t* tV = target.data[2] + (hty * target.stride[2]) + target.start_x / 2;
821 uint8_t* oY = other.data[0] + (oy * other.stride[0]) + other.start_x;
822 uint8_t* oU = other.data[1] + (hoy * other.stride[1]) + other.start_x / 2;
823 uint8_t* oV = other.data[2] + (hoy * other.stride[2]) + other.start_x / 2;
824 uint8_t* alpha = other.alpha_data[0] + (oy * other.alpha_stride[0]) + other.start_x * other.alpha_bpp;
825 for (int tx = target.start_x, ox = other.start_x; tx < ts.width && ox < os.width; ++tx, ++ox) {
826 float const a = get_alpha(alpha);
827 *tY = *oY * a + *tY * (1 - a);
828 *tU = *oU * a + *tU * (1 - a);
829 *tV = *oV * a + *tV * (1 - a);
840 alpha += other.alpha_bpp;
848 alpha_blend_onto_yuv420p10(TargetParams const& target, OtherYUVParams const& other, std::function<float (uint8_t* data)> get_alpha)
850 auto const ts = target.size;
851 auto const os = other.size;
852 for (int ty = target.start_y, oy = other.start_y; ty < ts.height && oy < os.height; ++ty, ++oy) {
853 int const hty = ty / 2;
854 int const hoy = oy / 2;
855 uint16_t* tY = reinterpret_cast<uint16_t*>(target.data[0] + (ty * target.stride[0])) + target.start_x;
856 uint16_t* tU = reinterpret_cast<uint16_t*>(target.data[1] + (hty * target.stride[1])) + target.start_x / 2;
857 uint16_t* tV = reinterpret_cast<uint16_t*>(target.data[2] + (hty * target.stride[2])) + target.start_x / 2;
858 uint16_t* oY = reinterpret_cast<uint16_t*>(other.data[0] + (oy * other.stride[0])) + other.start_x;
859 uint16_t* oU = reinterpret_cast<uint16_t*>(other.data[1] + (hoy * other.stride[1])) + other.start_x / 2;
860 uint16_t* oV = reinterpret_cast<uint16_t*>(other.data[2] + (hoy * other.stride[2])) + other.start_x / 2;
861 uint8_t* alpha = other.alpha_data[0] + (oy * other.alpha_stride[0]) + other.start_x * other.alpha_bpp;
862 for (int tx = target.start_x, ox = other.start_x; tx < ts.width && ox < os.width; ++tx, ++ox) {
863 float const a = get_alpha(alpha);
864 *tY = *oY * a + *tY * (1 - a);
865 *tU = *oU * a + *tU * (1 - a);
866 *tV = *oV * a + *tV * (1 - a);
877 alpha += other.alpha_bpp;
885 alpha_blend_onto_yuv422p9or10le(TargetParams const& target, OtherYUVParams const& other, std::function<float (uint8_t* data)> get_alpha)
887 auto const ts = target.size;
888 auto const os = other.size;
889 for (int ty = target.start_y, oy = other.start_y; ty < ts.height && oy < os.height; ++ty, ++oy) {
890 uint16_t* tY = reinterpret_cast<uint16_t*>(target.data[0] + (ty * target.stride[0])) + target.start_x;
891 uint16_t* tU = reinterpret_cast<uint16_t*>(target.data[1] + (ty * target.stride[1])) + target.start_x / 2;
892 uint16_t* tV = reinterpret_cast<uint16_t*>(target.data[2] + (ty * target.stride[2])) + target.start_x / 2;
893 uint16_t* oY = reinterpret_cast<uint16_t*>(other.data[0] + (oy * other.stride[0])) + other.start_x;
894 uint16_t* oU = reinterpret_cast<uint16_t*>(other.data[1] + (oy * other.stride[1])) + other.start_x / 2;
895 uint16_t* oV = reinterpret_cast<uint16_t*>(other.data[2] + (oy * other.stride[2])) + other.start_x / 2;
896 uint8_t* alpha = other.alpha_data[0] + (oy * other.alpha_stride[0]) + other.start_x * other.alpha_bpp;
897 for (int tx = target.start_x, ox = other.start_x; tx < ts.width && ox < os.width; ++tx, ++ox) {
898 float const a = get_alpha(alpha);
899 *tY = *oY * a + *tY * (1 - a);
900 *tU = *oU * a + *tU * (1 - a);
901 *tV = *oV * a + *tV * (1 - a);
912 alpha += other.alpha_bpp;
920 alpha_blend_onto_yuv444p9or10le(TargetParams const& target, OtherYUVParams const& other, std::function<float (uint8_t* data)> get_alpha)
922 auto const ts = target.size;
923 auto const os = other.size;
924 for (int ty = target.start_y, oy = other.start_y; ty < ts.height && oy < os.height; ++ty, ++oy) {
925 uint16_t* tY = reinterpret_cast<uint16_t*>(target.data[0] + (ty * target.stride[0])) + target.start_x;
926 uint16_t* tU = reinterpret_cast<uint16_t*>(target.data[1] + (ty * target.stride[1])) + target.start_x;
927 uint16_t* tV = reinterpret_cast<uint16_t*>(target.data[2] + (ty * target.stride[2])) + target.start_x;
928 uint16_t* oY = reinterpret_cast<uint16_t*>(other.data[0] + (oy * other.stride[0])) + other.start_x;
929 uint16_t* oU = reinterpret_cast<uint16_t*>(other.data[1] + (oy * other.stride[1])) + other.start_x;
930 uint16_t* oV = reinterpret_cast<uint16_t*>(other.data[2] + (oy * other.stride[2])) + other.start_x;
931 uint8_t* alpha = other.alpha_data[0] + (oy * other.alpha_stride[0]) + other.start_x * other.alpha_bpp;
932 for (int tx = target.start_x, ox = other.start_x; tx < ts.width && ox < os.width; ++tx, ++ox) {
933 float const a = get_alpha(alpha);
934 *tY = *oY * a + *tY * (1 - a);
935 *tU = *oU * a + *tU * (1 - a);
936 *tV = *oV * a + *tV * (1 - a);
943 alpha += other.alpha_bpp;
950 Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
953 other->pixel_format() == AV_PIX_FMT_BGRA ||
954 other->pixel_format() == AV_PIX_FMT_RGBA ||
955 other->pixel_format() == AV_PIX_FMT_RGBA64BE
958 int const blue = other->pixel_format() == AV_PIX_FMT_BGRA ? 0 : 2;
959 int const red = other->pixel_format() == AV_PIX_FMT_BGRA ? 2 : 0;
961 int start_tx = position.x;
965 start_ox = -start_tx;
969 int start_ty = position.y;
973 start_oy = -start_ty;
977 TargetParams target_params = {
986 OtherRGBParams other_rgb_params = {
992 other->pixel_format() == AV_PIX_FMT_RGBA64BE ? 8 : 4
995 OtherYUVParams other_yuv_params = {
1003 other->pixel_format() == AV_PIX_FMT_RGBA64BE ? 8 : 4
1006 auto byteswap = [](uint16_t* p) {
1007 return (*p >> 8) | ((*p & 0xff) << 8);
1010 auto pass = [](uint8_t* p) {
1014 auto get_alpha_64be = [](uint8_t* p) {
1015 return ((static_cast<int16_t>(p[6]) << 8) | p[7]) / 65535.0f;
1018 auto get_alpha_byte = [](uint8_t* p) {
1019 return p[3] / 255.0f;
1022 switch (_pixel_format) {
1023 case AV_PIX_FMT_RGB24:
1024 target_params.bpp = 3;
1025 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1026 alpha_blend_onto_rgb24<uint16_t>(target_params, other_rgb_params, red, blue, byteswap, 256);
1028 alpha_blend_onto_rgb24<uint8_t>(target_params, other_rgb_params, red, blue, pass, 1);
1031 case AV_PIX_FMT_BGRA:
1032 target_params.bpp = 4;
1033 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1034 alpha_blend_onto_bgra<uint16_t>(target_params, other_rgb_params, red, blue, byteswap, 256);
1036 alpha_blend_onto_bgra<uint8_t>(target_params, other_rgb_params, red, blue, pass, 1);
1039 case AV_PIX_FMT_RGBA:
1040 target_params.bpp = 4;
1041 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1042 alpha_blend_onto_rgba<uint16_t>(target_params, other_rgb_params, red, blue, byteswap, 256);
1044 alpha_blend_onto_rgba<uint8_t>(target_params, other_rgb_params, red, blue, pass, 1);
1047 case AV_PIX_FMT_RGB48LE:
1048 target_params.bpp = 6;
1049 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1050 alpha_blend_onto_rgb48le<uint16_t>(target_params, other_rgb_params, red, blue, byteswap, 1);
1052 alpha_blend_onto_rgb48le<uint8_t>(target_params, other_rgb_params, red, blue, pass, 256);
1055 case AV_PIX_FMT_XYZ12LE:
1056 target_params.bpp = 6;
1057 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
/* NOTE(review): continuation of Image::alpha_blend() — the remaining cases of
   its switch on _pixel_format; the function's head and earlier cases are above
   this chunk. */
/* XYZ 12-bit LE target: blend via the templated helper.  With a 16-bit overlay
   each sample is byteswapped and scaled by 256; with an 8-bit overlay the
   samples pass through unchanged. */
1058 alpha_blend_onto_xyz12le<uint16_t>(target_params, other_rgb_params, red, blue, byteswap, 256);
1060 alpha_blend_onto_xyz12le<uint8_t>(target_params, other_rgb_params, red, blue, pass, 1);
1063 case AV_PIX_FMT_YUV420P:
/* YUV targets: convert the overlay to this image's own YUV pixel format first,
   then blend; alpha is read from the *unconverted* RGBA overlay —
   get_alpha_64be for 16-bit big-endian RGBA, get_alpha_byte otherwise. */
1065 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
1066 other_yuv_params.data = yuv->data();
1067 other_yuv_params.stride = yuv->stride();
/* Alpha still comes from the original overlay, not from the YUV conversion */
1068 other_yuv_params.alpha_data = other->data();
1069 other_yuv_params.alpha_stride = other->stride();
1070 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1071 alpha_blend_onto_yuv420p(target_params, other_yuv_params, get_alpha_64be);
1073 alpha_blend_onto_yuv420p(target_params, other_yuv_params, get_alpha_byte);
1077 case AV_PIX_FMT_YUV420P10:
/* As above, but blending into 10-bit 4:2:0 */
1079 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
1080 other_yuv_params.data = yuv->data();
1081 other_yuv_params.stride = yuv->stride();
1082 other_yuv_params.alpha_data = other->data();
1083 other_yuv_params.alpha_stride = other->stride();
1084 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1085 alpha_blend_onto_yuv420p10(target_params, other_yuv_params, get_alpha_64be);
1087 alpha_blend_onto_yuv420p10(target_params, other_yuv_params, get_alpha_byte);
1091 case AV_PIX_FMT_YUV422P9LE:
1092 case AV_PIX_FMT_YUV422P10LE:
/* As above, for 9/10-bit little-endian 4:2:2 */
1094 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
1095 other_yuv_params.data = yuv->data();
1096 other_yuv_params.stride = yuv->stride();
1097 other_yuv_params.alpha_data = other->data();
1098 other_yuv_params.alpha_stride = other->stride();
1099 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1100 alpha_blend_onto_yuv422p9or10le(target_params, other_yuv_params, get_alpha_64be);
1102 alpha_blend_onto_yuv422p9or10le(target_params, other_yuv_params, get_alpha_byte);
1106 case AV_PIX_FMT_YUV444P9LE:
1107 case AV_PIX_FMT_YUV444P10LE:
/* As above, for 9/10-bit little-endian 4:4:4 */
1109 auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
1110 other_yuv_params.data = yuv->data();
1111 other_yuv_params.stride = yuv->stride();
1112 other_yuv_params.alpha_data = other->data();
1113 other_yuv_params.alpha_stride = other->stride();
1114 if (other->pixel_format() == AV_PIX_FMT_RGBA64BE) {
1115 alpha_blend_onto_yuv444p9or10le(target_params, other_yuv_params, get_alpha_64be);
1117 alpha_blend_onto_yuv444p9or10le(target_params, other_yuv_params, get_alpha_byte);
/* Any other target pixel format is unsupported */
1122 throw PixelFormatError ("alpha_blend()", _pixel_format);
/** Copy @p other into this image at @p position, clipping anything that does
 *  not fit on the right or bottom.  Only RGB24 onto RGB24 is implemented.
 *  @param other Image to copy from.
 *  @param position Top-left corner, in pixels, at which to place it; must be
 *  non-negative in both axes.
 */
1128 Image::copy (shared_ptr<const Image> other, Position<int> position)
1130 /* Only implemented for RGB24 onto RGB24 so far */
1131 DCPOMATIC_ASSERT (_pixel_format == AV_PIX_FMT_RGB24 && other->pixel_format() == AV_PIX_FMT_RGB24);
1132 DCPOMATIC_ASSERT (position.x >= 0 && position.y >= 0);
/* Number of pixels per row that fit without running off our right edge */
1134 int const N = min (position.x + other->size().width, size().width) - position.x;
/* ty walks this image's rows, oy walks other's; stop at whichever runs out */
1135 for (int ty = position.y, oy = 0; ty < size().height && oy < other->size().height; ++ty, ++oy) {
/* 3 bytes per RGB24 pixel */
1136 uint8_t * const tp = data()[0] + ty * stride()[0] + position.x * 3;
1137 uint8_t * const op = other->data()[0] + oy * other->stride()[0];
1138 memcpy (tp, op, N * 3);
/** Fill this image's pixel data by reading it, plane by plane and line by
 *  line, from @p socket.  Only line_size() bytes are read per line, so stride
 *  padding is not transferred (matching write_to_socket() below).
 */
1144 Image::read_from_socket (shared_ptr<Socket> socket)
1146 for (int i = 0; i < planes(); ++i) {
1147 uint8_t* p = data()[i];
1148 int const lines = sample_size(i).height;
1149 for (int y = 0; y < lines; ++y) {
1150 socket->read (p, line_size()[i]);
/** Write this image's pixel data, plane by plane and line by line, to
 *  @p socket.  Only line_size() bytes are sent per line, so stride padding is
 *  not transferred (matching read_from_socket() above).
 */
1158 Image::write_to_socket (shared_ptr<Socket> socket) const
1160 for (int i = 0; i < planes(); ++i) {
1161 uint8_t* p = data()[i];
1162 int const lines = sample_size(i).height;
1163 for (int y = 0; y < lines; ++y) {
1164 socket->write (p, line_size()[i]);
/** @param c Plane index.
 *  @return Average number of bytes of image data per pixel for plane @p c of
 *  this image's pixel format; for packed (non-planar) formats the
 *  per-component figures are summed instead.
 */
1172 Image::bytes_per_pixel (int c) const
1174 auto d = av_pix_fmt_desc_get(_pixel_format);
1176 throw PixelFormatError ("bytes_per_pixel()", _pixel_format);
1179 if (c >= planes()) {
1183 float bpp[4] = { 0, 0, 0, 0 };
/* Older FFmpeg exposes each component's bit depth as depth_minus1, newer
   FFmpeg as depth; both branches compute ceil(depth / 8) bytes per sample.
   Components 1..3 are divided by 2^log2_chroma_w to account for horizontal
   chroma subsampling.
   NOTE(review): component 3 (alpha, where present) is also divided by the
   chroma factor, but alpha planes are normally full resolution — verify
   against the formats this is actually used with. */
1185 #ifdef DCPOMATIC_HAVE_AVCOMPONENTDESCRIPTOR_DEPTH_MINUS1
1186 bpp[0] = floor ((d->comp[0].depth_minus1 + 8) / 8);
1187 if (d->nb_components > 1) {
1188 bpp[1] = floor ((d->comp[1].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
1190 if (d->nb_components > 2) {
1191 bpp[2] = floor ((d->comp[2].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
1193 if (d->nb_components > 3) {
1194 bpp[3] = floor ((d->comp[3].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
1197 bpp[0] = floor ((d->comp[0].depth + 7) / 8);
1198 if (d->nb_components > 1) {
1199 bpp[1] = floor ((d->comp[1].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
1201 if (d->nb_components > 2) {
1202 bpp[2] = floor ((d->comp[2].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
1204 if (d->nb_components > 3) {
1205 bpp[3] = floor ((d->comp[3].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
1209 if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
1210 /* Not planar; sum them up */
1211 return bpp[0] + bpp[1] + bpp[2] + bpp[3];
1218 /** Construct a Image of a given size and format, allocating memory
1221  * @param p Pixel format.
1222  * @param s Size in pixels.
1223  * @param alignment PADDED to make each row of this image aligned to a ALIGNMENT-byte boundary, otherwise COMPACT.
1225 Image::Image (AVPixelFormat p, dcp::Size s, Alignment alignment)
1228 , _alignment (alignment)
/* Allocate the (always 4-entry, as FFmpeg expects) per-plane arrays of data
   pointers, line sizes and strides, zero-initialised so that unused planes
   are harmless. */
1237 _data = (uint8_t **) wrapped_av_malloc (4 * sizeof (uint8_t *));
1238 _data[0] = _data[1] = _data[2] = _data[3] = 0;
1240 _line_size = (int *) wrapped_av_malloc (4 * sizeof (int));
1241 _line_size[0] = _line_size[1] = _line_size[2] = _line_size[3] = 0;
1243 _stride = (int *) wrapped_av_malloc (4 * sizeof (int));
1244 _stride[0] = _stride[1] = _stride[2] = _stride[3] = 0;
/* Round `stride' up to the next multiple of `t' (here either ALIGNMENT or 1) */
1246 auto stride_round_up = [](int stride, int t) {
1247 int const a = stride + (t - 1);
1251 for (int i = 0; i < planes(); ++i) {
/* line_size is the bytes of real image data per row; stride is that rounded
   up to the requested alignment. */
1252 _line_size[i] = ceil (_size.width * bytes_per_pixel(i));
1253 _stride[i] = stride_round_up (_line_size[i], _alignment == Alignment::PADDED ? ALIGNMENT : 1);
1255 /* The assembler function ff_rgb24ToY_avx (in libswscale/x86/input.asm)
1256 uses a 16-byte fetch to read three bytes (R/G/B) of image data.
1257 Hence on the last pixel of the last line it reads over the end of
1258 the actual data by 1 byte. If the width of an image is a multiple
1259 of the stride alignment there will be no padding at the end of image lines.
1260 OS X crashes on this illegal read, though other operating systems don't
1261 seem to mind. The nasty + 1 in this malloc makes sure there is always a byte
1262 for that instruction to read safely.
1264 Further to the above, valgrind is now telling me that ff_rgb24ToY_ssse3
1265 over-reads by more then _avx. I can't follow the code to work out how much,
1266 so I'll just over-allocate by ALIGNMENT bytes and have done with it. Empirical
1267 testing suggests that it works.
1269 In addition to these concerns, we may read/write as much as a whole extra line
1270 at the end of each plane in cases where we are messing with offsets in order to
1271 do pad or crop. To solve this we over-allocate by an extra _stride[i] bytes.
1273 As an example: we may write to images starting at an offset so we get some padding.
1274 Hence we want to write in the following pattern:
1276 block start write start line end
1277 |..(padding)..|<------line-size------------->|..(padding)..|
1278 |..(padding)..|<------line-size------------->|..(padding)..|
1279 |..(padding)..|<------line-size------------->|..(padding)..|
1281 where line-size is of the smaller (inter_size) image and the full padded line length is that of
1282 out_size. To get things to work we have to tell FFmpeg that the stride is that of out_size.
1283 However some parts of FFmpeg (notably rgb48Toxyz12 in swscale.c) process data for the full
1284 specified *stride*. This does not matter until we get to the last line:
1286 block start write start line end
1287 |..(padding)..|<------line-size------------->|XXXwrittenXXX|
1288 |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXX|
1289 |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXXXXXwrittenXXX
1292 _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * (sample_size(i).height + 1) + ALIGNMENT);
1293 #if HAVE_VALGRIND_MEMCHECK_H
1294 /* The data between the end of the line size and the stride is undefined but processed by
1295 libswscale, causing lots of valgrind errors. Mark it all defined to quell these errors.
1297 VALGRIND_MAKE_MEM_DEFINED (_data[i], _stride[i] * (sample_size(i).height + 1) + ALIGNMENT);
/* Copy constructor: deep-copies other's pixel data, line by line, into this
   image's freshly-allocated planes. */
1303 Image::Image (Image const & other)
1304 : std::enable_shared_from_this<Image>(other)
1305 , _size (other._size)
1306 , _pixel_format (other._pixel_format)
1307 , _alignment (other._alignment)
1311 for (int i = 0; i < planes(); ++i) {
1312 uint8_t* p = _data[i];
1313 uint8_t* q = other._data[i];
1314 int const lines = sample_size(i).height;
1315 for (int j = 0; j < lines; ++j) {
/* Copy only the real image data; stride padding is left as allocated */
1316 memcpy (p, q, _line_size[i]);
1318 q += other.stride()[i];
/* Construct an Image which is a deep copy of an FFmpeg AVFrame, using the
   requested alignment (which need not match the frame's own). */
1324 Image::Image (AVFrame const * frame, Alignment alignment)
1325 : _size (frame->width, frame->height)
1326 , _pixel_format (static_cast<AVPixelFormat>(frame->format))
1327 , _alignment (alignment)
1329 DCPOMATIC_ASSERT (_pixel_format != AV_PIX_FMT_NONE);
1333 for (int i = 0; i < planes(); ++i) {
1334 uint8_t* p = _data[i];
1335 uint8_t* q = frame->data[i];
1336 int const lines = sample_size(i).height;
1337 for (int j = 0; j < lines; ++j) {
1338 memcpy (p, q, _line_size[i]);
1340 /* AVFrame's linesize is what we call `stride' */
1341 q += frame->linesize[i];
/* Construct a deep copy of another Image, possibly with a different
   alignment.  The two images must have identical per-plane line sizes. */
1347 Image::Image (shared_ptr<const Image> other, Alignment alignment)
1348 : _size (other->_size)
1349 , _pixel_format (other->_pixel_format)
1350 , _alignment (alignment)
1354 for (int i = 0; i < planes(); ++i) {
1355 DCPOMATIC_ASSERT (line_size()[i] == other->line_size()[i]);
1356 uint8_t* p = _data[i];
1357 uint8_t* q = other->data()[i];
1358 int const lines = sample_size(i).height;
1359 for (int j = 0; j < lines; ++j) {
1360 memcpy (p, q, line_size()[i]);
1362 q += other->stride()[i];
/* Copy assignment.  Guards against self-assignment; the remainder of the
   implementation is not visible here — presumably it copies `other' and
   swap()s with the copy (TODO confirm against the full source). */
1369 Image::operator= (Image const & other)
1371 if (this == &other) {
/* Exchange this image's entire contents — size, pixel format, all four
   planes' data pointers, line sizes and strides, and alignment — with
   other's.  Used by the assignment operator above. */
1382 Image::swap (Image & other)
1384 std::swap (_size, other._size);
1385 std::swap (_pixel_format, other._pixel_format);
1387 for (int i = 0; i < 4; ++i) {
1388 std::swap (_data[i], other._data[i]);
1389 std::swap (_line_size[i], other._line_size[i]);
1390 std::swap (_stride[i], other._stride[i]);
1393 std::swap (_alignment, other._alignment);
/* Destructor body: free each plane's pixel buffer, then the per-plane
   bookkeeping arrays (only the _line_size free is visible at this point;
   _data and _stride are presumably freed on the adjacent, hidden lines —
   TODO confirm against the full source). */
1399 for (int i = 0; i < planes(); ++i) {
1404 av_free (_line_size);
/* Trivial accessors: the per-plane data pointers; the per-plane line sizes
   (bytes of real image data per row); the per-plane strides (allocated bytes
   per row, >= line size when padding was requested); the image size in
   pixels; and the alignment chosen at construction. */
1410 Image::data () const
1417 Image::line_size () const
1424 Image::stride () const
1431 Image::size () const
1438 Image::alignment () const
/** Composite a list of positioned images into a single image with the given
 *  alignment.  A single input image is returned as-is (re-aligned if needed);
 *  otherwise a transparent canvas covering the bounding rectangle of all the
 *  inputs is created and each image is alpha-blended onto it in list order.
 */
1445 merge (list<PositionImage> images, Image::Alignment alignment)
1447 if (images.empty ()) {
1451 if (images.size() == 1) {
/* Only one image: no compositing needed, just ensure the requested alignment */
1452 images.front().image = Image::ensure_alignment(images.front().image, alignment);
1453 return images.front();
/* Find the bounding rectangle of all the images */
1456 dcpomatic::Rect<int> all (images.front().position, images.front().image->size().width, images.front().image->size().height);
1457 for (auto const& i: images) {
1458 all.extend (dcpomatic::Rect<int>(i.position, i.image->size().width, i.image->size().height));
/* Blend everything onto a transparent canvas, with positions made relative to
   the bounding box origin */
1461 auto merged = make_shared<Image>(images.front().image->pixel_format(), dcp::Size(all.width, all.height), alignment);
1462 merged->make_transparent ();
1463 for (auto const& i: images) {
1464 merged->alpha_blend (i.image, i.position - all.position());
1467 return PositionImage (merged, all.position ());
/* Exact equality for two images: same plane count, pixel format and
   alignment; same per-plane geometry; and identical image bytes per line
   (stride padding beyond the line size is not compared). */
1472 operator== (Image const & a, Image const & b)
1474 if (a.planes() != b.planes() || a.pixel_format() != b.pixel_format() || a.alignment() != b.alignment()) {
1478 for (int c = 0; c < a.planes(); ++c) {
1479 if (a.sample_size(c).height != b.sample_size(c).height || a.line_size()[c] != b.line_size()[c] || a.stride()[c] != b.stride()[c]) {
1483 uint8_t* p = a.data()[c];
1484 uint8_t* q = b.data()[c];
1485 int const lines = a.sample_size(c).height;
1486 for (int y = 0; y < lines; ++y) {
1487 if (memcmp (p, q, a.line_size()[c]) != 0) {
1501 * @param f Amount to fade by; 0 is black, 1 is no fade.
1504 Image::fade (float f)
/* These shadow the file-scope constants of the same names; kept local so the
   values in use are obvious at the point of use. */
1506 /* U/V black value for 8-bit colour */
1507 static int const eight_bit_uv = (1 << 7) - 1;
1508 /* U/V black value for 10-bit colour */
1509 static uint16_t const ten_bit_uv = (1 << 9) - 1;
1511 switch (_pixel_format) {
1512 case AV_PIX_FMT_YUV420P:
/* 8-bit planar YUV: scale the Y (luma) plane towards zero */
1515 uint8_t* p = data()[0];
1516 int const lines = sample_size(0).height;
1517 for (int y = 0; y < lines; ++y) {
1519 for (int x = 0; x < line_size()[0]; ++x) {
1520 *q = int(float(*q) * f);
/* U/V (chroma) planes: scale the distance from the neutral value, so a full
   fade ends at neutral grey rather than at zero */
1527 for (int c = 1; c < 3; ++c) {
1528 uint8_t* p = data()[c];
1529 int const lines = sample_size(c).height;
1530 for (int y = 0; y < lines; ++y) {
1532 for (int x = 0; x < line_size()[c]; ++x) {
1533 *q = eight_bit_uv + int((int(*q) - eight_bit_uv) * f);
1543 case AV_PIX_FMT_RGB24:
/* Packed 8-bit RGB: every byte is a colour sample, so just scale them all */
1546 uint8_t* p = data()[0];
1547 int const lines = sample_size(0).height;
1548 for (int y = 0; y < lines; ++y) {
1550 for (int x = 0; x < line_size()[0]; ++x) {
1551 *q = int (float (*q) * f);
1559 case AV_PIX_FMT_XYZ12LE:
1560 case AV_PIX_FMT_RGB48LE:
1561 /* 16-bit little-endian */
1562 for (int c = 0; c < 3; ++c) {
/* Work in 16-bit samples, hence the divisions by 2 */
1563 int const stride_pixels = stride()[c] / 2;
1564 int const line_size_pixels = line_size()[c] / 2;
1565 uint16_t* p = reinterpret_cast<uint16_t*> (data()[c]);
1566 int const lines = sample_size(c).height;
1567 for (int y = 0; y < lines; ++y) {
1569 for (int x = 0; x < line_size_pixels; ++x) {
1570 *q = int (float (*q) * f);
1578 case AV_PIX_FMT_YUV422P10LE:
/* 10-bit-in-16 planar YUV: luma scales towards zero... */
1582 int const stride_pixels = stride()[0] / 2;
1583 int const line_size_pixels = line_size()[0] / 2;
1584 uint16_t* p = reinterpret_cast<uint16_t*> (data()[0]);
1585 int const lines = sample_size(0).height;
1586 for (int y = 0; y < lines; ++y) {
1588 for (int x = 0; x < line_size_pixels; ++x) {
1589 *q = int(float(*q) * f);
/* ...and chroma scales towards its 10-bit neutral value */
1597 for (int c = 1; c < 3; ++c) {
1598 int const stride_pixels = stride()[c] / 2;
1599 int const line_size_pixels = line_size()[c] / 2;
1600 uint16_t* p = reinterpret_cast<uint16_t*> (data()[c]);
1601 int const lines = sample_size(c).height;
1602 for (int y = 0; y < lines; ++y) {
1604 for (int x = 0; x < line_size_pixels; ++x) {
1605 *q = ten_bit_uv + int((int(*q) - ten_bit_uv) * f);
/* Fading other pixel formats is not supported */
1616 throw PixelFormatError ("fade()", _pixel_format);
/** @return @p image itself if it already has the requested alignment,
 *  otherwise a deep copy made with that alignment.
 */
1621 shared_ptr<const Image>
1622 Image::ensure_alignment (shared_ptr<const Image> image, Image::Alignment alignment)
1624 if (image->alignment() == alignment) {
1628 return make_shared<Image>(image, alignment);
/** @return Approximate number of bytes of pixel data held by this image: the
 *  sum over planes of stride times plane height.  The bookkeeping arrays and
 *  the constructor's over-allocation padding are not counted.
 */
1633 Image::memory_used () const
1636 for (int i = 0; i < planes(); ++i) {
1637 m += _stride[i] * sample_size(i).height;
/** Convert this image, in place, from video (legal/limited) range to full
 *  range, by offsetting and stretching each sample's legal-range span onto
 *  the format's full span and clamping the result.
 */
1644 Image::video_range_to_full_range ()
1646 switch (_pixel_format) {
1647 case AV_PIX_FMT_RGB24:
/* 8-bit packed RGB: map [16, 235] onto [0, 255] */
1649 float const factor = 256.0 / 219.0;
1650 uint8_t* p = data()[0];
1651 int const lines = sample_size(0).height;
1652 for (int y = 0; y < lines; ++y) {
1654 for (int x = 0; x < line_size()[0]; ++x) {
1655 *q = clamp(lrintf((*q - 16) * factor), 0L, 255L);
1662 case AV_PIX_FMT_RGB48LE:
/* 16-bit: the same mapping shifted up 8 bits (offset 16 << 8 = 4096, span
   219 << 8 = 56064) */
1664 float const factor = 65536.0 / 56064.0;
1665 uint16_t* p = reinterpret_cast<uint16_t*>(data()[0]);
1666 int const lines = sample_size(0).height;
1667 for (int y = 0; y < lines; ++y) {
/* 16-bit samples, hence the divisions by 2 on line size and stride */
1669 int const line_size_pixels = line_size()[0] / 2;
1670 for (int x = 0; x < line_size_pixels; ++x) {
1671 *q = clamp(lrintf((*q - 4096) * factor), 0L, 65535L);
1674 p += stride()[0] / 2;
1678 case AV_PIX_FMT_GBRP12LE:
/* 12-bit planar: offset 16 << 4 = 256, span 219 << 4 = 3504, applied to each
   of the three planes */
1680 float const factor = 4096.0 / 3504.0;
1681 for (int c = 0; c < 3; ++c) {
1682 uint16_t* p = reinterpret_cast<uint16_t*>(data()[c]);
1683 int const lines = sample_size(c).height;
1684 for (int y = 0; y < lines; ++y) {
1686 int const line_size_pixels = line_size()[c] / 2;
1687 for (int x = 0; x < line_size_pixels; ++x) {
1688 *q = clamp(lrintf((*q - 256) * factor), 0L, 4095L);
/* Other pixel formats are not supported */
1696 throw PixelFormatError ("video_range_to_full_range()", _pixel_format);