Fix some typos in comments.
[dcpomatic.git] / src / lib / image.cc
index 799d7af49da172921006e664ef2044bd36ed179c..3aba3ebd535715a820d437cf0859f4b67a162cb9 100644 (file)
@@ -27,6 +27,7 @@
 #include "compose.hpp"
 #include "dcpomatic_assert.h"
 #include "dcpomatic_socket.h"
+#include "enum_indexed_vector.h"
 #include "exceptions.h"
 #include "image.h"
 #include "maths_util.h"
@@ -236,10 +237,10 @@ Image::crop_scale_window (
        }
 
        DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
-       int const lut[static_cast<int>(dcp::YUVToRGB::COUNT)] = {
-               SWS_CS_ITU601,
-               SWS_CS_ITU709
-       };
+       EnumIndexedVector<int, dcp::YUVToRGB> lut;
+       lut[dcp::YUVToRGB::REC601] = SWS_CS_ITU601;
+       lut[dcp::YUVToRGB::REC709] = SWS_CS_ITU709;
+       lut[dcp::YUVToRGB::REC2020] = SWS_CS_BT2020;
 
        /* The 3rd parameter here is:
           0 -> source range MPEG (i.e. "video", 16-235)
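
Both `lut` tables in this change are now indexed directly by `dcp::YUVToRGB` instead of by a `static_cast<int>` of it, and pick up a Rec. 2020 entry mapping to `SWS_CS_BT2020`. A minimal sketch of the idea behind such a container follows; it is an assumption about the shape of `enum_indexed_vector.h`, not the repository's actual definition.

```cpp
#include <array>
#include <cstddef>

/* Minimal sketch of an enum-indexed container: a std::array sized by
 * Enum::COUNT whose operator[] takes the enum value directly, so call sites
 * need no static_cast.  The real enum_indexed_vector.h may differ in detail.
 */
template <class T, class Enum>
class EnumIndexedVectorSketch
{
public:
	T& operator[](Enum e) {
		return _data[static_cast<std::size_t>(e)];
	}

	T const& operator[](Enum e) const {
		return _data[static_cast<std::size_t>(e)];
	}

private:
	std::array<T, static_cast<std::size_t>(Enum::COUNT)> _data {};
};

/* Stand-in for dcp::YUVToRGB, just for the example. */
enum class YUVToRGBSketch { REC601, REC709, REC2020, COUNT };

int main()
{
	EnumIndexedVectorSketch<int, YUVToRGBSketch> lut;
	lut[YUVToRGBSketch::REC709] = 1;
	return lut[YUVToRGBSketch::REC709];
}
```
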
@@ -254,8 +255,8 @@ Image::crop_scale_window (
        */
        sws_setColorspaceDetails (
                scale_context,
-               sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), video_range == VideoRange::VIDEO ? 0 : 1,
-               sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), out_video_range == VideoRange::VIDEO ? 0 : 1,
+               sws_getCoefficients(lut[yuv_to_rgb]), video_range == VideoRange::VIDEO ? 0 : 1,
+               sws_getCoefficients(lut[yuv_to_rgb]), out_video_range == VideoRange::VIDEO ? 0 : 1,
                0, 1 << 16, 1 << 16
                );
 
@@ -292,13 +293,17 @@ Image::crop_scale_window (
 
        sws_freeContext (scale_context);
 
-       if (corrected_crop != Crop() && cropped_size == inter_size) {
-               /* We are cropping without any scaling or pixel format conversion, so FFmpeg may have left some
-                  data behind in our image.  Clear it out.  It may get to the point where we should just stop
-                  trying to be clever with cropping.
-               */
-               out->make_part_black (corner.x + cropped_size.width, out_size.width - cropped_size.width);
-       }
+       /* There are some cases where there will be unwanted image data left in the image at this point:
+        *
+        * 1. When we are cropping without any scaling or pixel format conversion.
+        * 2. When we are scaling to certain sizes and placing the result into a larger
+        *    black frame.
+        *
+        * Clear out the sides of the image to take care of those cases.
+        */
+       auto const pad = (out_size.width - inter_size.width) / 2;
+       out->make_part_black(0, pad);
+       out->make_part_black(corner.x + inter_size.width, pad);
 
        if (
                video_range == VideoRange::VIDEO &&
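
The replacement clears a strip of `pad` columns on each side of the centred `inter_size` image rather than only the right-hand remainder of a crop. With illustrative numbers (not taken from the source), the arithmetic works out as follows:

```cpp
#include <cstdio>

/* Illustrative numbers only: a 1998-pixel-wide scaled image centred in a
 * 2048-pixel-wide output frame, mirroring the padding arithmetic above.
 */
int main()
{
	int const out_width = 2048;
	int const inter_width = 1998;
	int const corner_x = (out_width - inter_width) / 2;   // 25: image is centred

	int const pad = (out_width - inter_width) / 2;         // 25 columns each side

	std::printf("left strip:  columns %d to %d\n", 0, pad - 1);             // 0 to 24
	std::printf("right strip: columns %d to %d\n",
	            corner_x + inter_width, corner_x + inter_width + pad - 1);  // 2023 to 2047
	return 0;
}
```
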
@@ -323,7 +328,7 @@ Image::convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format,
 /** @param out_size Size to scale to.
 *  @param yuv_to_rgb YUVToRGB transform to use, if required.
  *  @param out_format Output pixel format.
- *  @param out_aligment Output alignment.
+ *  @param out_alignment Output alignment.
  *  @param fast Try to be fast at the possible expense of quality; at present this means using
  *  fast bilinear rather than bicubic scaling.
  */
@@ -343,10 +348,10 @@ Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_fo
                );
 
        DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
-       int const lut[static_cast<int>(dcp::YUVToRGB::COUNT)] = {
-               SWS_CS_ITU601,
-               SWS_CS_ITU709
-       };
+       EnumIndexedVector<int, dcp::YUVToRGB> lut;
+       lut[dcp::YUVToRGB::REC601] = SWS_CS_ITU601;
+       lut[dcp::YUVToRGB::REC709] = SWS_CS_ITU709;
+       lut[dcp::YUVToRGB::REC2020] = SWS_CS_BT2020;
 
        /* The 3rd parameter here is:
           0 -> source range MPEG (i.e. "video", 16-235)
@@ -361,8 +366,8 @@ Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_fo
        */
        sws_setColorspaceDetails (
                scale_context,
-               sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), 0,
-               sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), 0,
+               sws_getCoefficients(lut[yuv_to_rgb]), 0,
+               sws_getCoefficients(lut[yuv_to_rgb]), 0,
                0, 1 << 16, 1 << 16
                );
 
@@ -474,6 +479,21 @@ Image::make_part_black (int const start, int const width)
                }
                break;
        }
+       case AV_PIX_FMT_YUV444P10LE:
+       {
+               y_part();
+               for (int i = 1; i < 3; ++i) {
+                       auto p = reinterpret_cast<int16_t*>(data()[i]);
+                       int const h = sample_size(i).height;
+                       for (int y = 0; y < h; ++y) {
+                               for (int x = start; x < (start + width); ++x) {
+                                       p[x] = ten_bit_uv;
+                               }
+                               p += stride()[i] / 2;
+                       }
+               }
+               break;
+       }
        default:
                throw PixelFormatError ("make_part_black()", _pixel_format);
        }
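
The new `AV_PIX_FMT_YUV444P10LE` case walks the two chroma planes through `int16_t` pointers, dividing the byte stride by two because each 10-bit sample occupies a 16-bit word; `ten_bit_uv` is presumably the neutral mid-scale chroma value (512), since black in Y'CbCr is minimum luma with neutral chroma. A standalone sketch of the same fill, with made-up plane dimensions and an assumed value for the constant:

```cpp
#include <cstdint>
#include <vector>

/* Standalone sketch of filling columns [start, start + width) of a 10-bit
 * chroma plane with neutral chroma.  Dimensions, stride and the value 512
 * are assumptions for illustration only.
 */
int main()
{
	int const plane_width = 1920;
	int const plane_height = 1080;
	int const stride_bytes = plane_width * 2;   // one 16-bit word per sample

	std::vector<uint8_t> plane(stride_bytes * plane_height, 0);

	int const start = 0;
	int const width = 16;
	int16_t const neutral_uv = 1 << 9;          // 512: mid-scale chroma for 10-bit

	auto p = reinterpret_cast<int16_t*>(plane.data());
	for (int y = 0; y < plane_height; ++y) {
		for (int x = start; x < start + width; ++x) {
			p[x] = neutral_uv;
		}
		p += stride_bytes / 2;                  // byte stride -> int16_t stride
	}
	return 0;
}
```
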
@@ -715,8 +735,8 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
                auto conv = dcp::ColourConversion::srgb_to_xyz();
                double fast_matrix[9];
                dcp::combined_rgb_to_xyz (conv, fast_matrix);
-               double const * lut_in = conv.in()->lut (8, false);
-               double const * lut_out = conv.out()->lut (16, true);
+               auto lut_in = conv.in()->lut(0, 1, 8, false);
+               auto lut_out = conv.out()->lut(0, 1, 16, true);
                int const this_bpp = 6;
                for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
                        uint16_t* tp = reinterpret_cast<uint16_t*> (data()[0] + ty * stride()[0] + start_tx * this_bpp);
@@ -730,14 +750,14 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
                                double const b = lut_in[op[blue]];
 
                                /* RGB to XYZ, including Bradford transform and DCI companding */
-                               double const x = max (0.0, min (65535.0, r * fast_matrix[0] + g * fast_matrix[1] + b * fast_matrix[2]));
-                               double const y = max (0.0, min (65535.0, r * fast_matrix[3] + g * fast_matrix[4] + b * fast_matrix[5]));
-                               double const z = max (0.0, min (65535.0, r * fast_matrix[6] + g * fast_matrix[7] + b * fast_matrix[8]));
+                               double const x = max(0.0, min(1.0, r * fast_matrix[0] + g * fast_matrix[1] + b * fast_matrix[2]));
+                               double const y = max(0.0, min(1.0, r * fast_matrix[3] + g * fast_matrix[4] + b * fast_matrix[5]));
+                               double const z = max(0.0, min(1.0, r * fast_matrix[6] + g * fast_matrix[7] + b * fast_matrix[8]));
 
                                /* Out gamma LUT and blend */
-                               tp[0] = lrint(lut_out[lrint(x)] * 65535) * alpha + tp[0] * (1 - alpha);
-                               tp[1] = lrint(lut_out[lrint(y)] * 65535) * alpha + tp[1] * (1 - alpha);
-                               tp[2] = lrint(lut_out[lrint(z)] * 65535) * alpha + tp[2] * (1 - alpha);
+                               tp[0] = lrint(lut_out[lrint(x * 65535)] * 65535) * alpha + tp[0] * (1 - alpha);
+                               tp[1] = lrint(lut_out[lrint(y * 65535)] * 65535) * alpha + tp[1] * (1 - alpha);
+                               tp[2] = lrint(lut_out[lrint(z * 65535)] * 65535) * alpha + tp[2] * (1 - alpha);
 
                                tp += this_bpp / 2;
                                op += other_bpp;
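
The in/out LUTs are now requested with explicit range arguments, the XYZ values are clamped to [0, 1] rather than [0, 65535], and the clamped value is scaled by 65535 when indexing the output table. A sketch of that indexing convention in isolation, under the assumption that `lut(0, 1, 16, true)` returns a 2^16-entry table covering inputs in [0, 1]:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

/* Sketch of the indexing convention used above, assuming the output LUT has
 * 1 << 16 entries spanning linear values in [0, 1].  Table contents and the
 * blend inputs are dummies.
 */
int main()
{
	std::vector<double> lut_out(1 << 16, 0.5);   // stand-in for conv.out()->lut(0, 1, 16, true)

	double const x = std::max(0.0, std::min(1.0, 0.25));   // XYZ value clamped to [0, 1]
	float const alpha = 0.5;
	uint16_t tp = 10000;                                    // existing 16-bit pixel value

	/* Map [0, 1] onto the table, apply the out gamma LUT, then blend. */
	tp = std::lrint(lut_out[std::lrint(x * 65535)] * 65535) * alpha + tp * (1 - alpha);

	(void)tp;
	return 0;
}
```
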