/*
- Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
- This program is free software; you can redistribute it and/or modify
+ This file is part of DCP-o-matic.
+
+ DCP-o-matic is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
- This program is distributed in the hope that it will be useful,
+ DCP-o-matic is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
*/
#include "timer.h"
#include "rect.h"
#include "util.h"
-#include "md5_digester.h"
#include "dcpomatic_socket.h"
+#include <dcp/rgb_xyz.h>
+#include <dcp/transfer_function.h>
extern "C" {
#include <libswscale/swscale.h>
#include <libavutil/pixfmt.h>
using std::string;
using std::min;
+using std::max;
using std::cout;
using std::cerr;
using std::list;
+using std::runtime_error;
using boost::shared_ptr;
using dcp::Size;
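+/** @param n Component index.
+ *  @return Vertical sub-sampling factor for this component (1 for the first component, 2^log2_chroma_h for the others).
+ */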
int
-Image::line_factor (int n) const
+Image::vertical_factor (int n) const
{
if (n == 0) {
return 1;
return pow (2.0f, d->log2_chroma_h);
}
-/** @param n Component index.
- * @return Number of samples (i.e. pixels, unless sub-sampled) in each direction for this component.
- */
-dcp::Size
-Image::sample_size (int n) const
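+/** @param n Component index.
+ *  @return Horizontal sub-sampling factor for this component (1 for the first component, 2^log2_chroma_w for the others).
+ */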
+int
+Image::horizontal_factor (int n) const
{
int horizontal_factor = 1;
if (n > 0) {
}
horizontal_factor = pow (2.0f, d->log2_chroma_w);
}
-
- return dcp::Size (
- lrint (ceil (static_cast<double>(size().width) / horizontal_factor)),
- lrint (ceil (static_cast<double>(size().height) / line_factor (n)))
- );
+ return horizontal_factor;
}
-int
-Image::components () const
+/** @param n Component index.
+ * @return Number of samples (i.e. pixels, unless sub-sampled) in each direction for this component.
+ */
+dcp::Size
+Image::sample_size (int n) const
{
- AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
- if (!d) {
- throw PixelFormatError ("components()", _pixel_format);
- }
-
- return d->nb_components;
+ return dcp::Size (
+ lrint (ceil (static_cast<double>(size().width) / horizontal_factor (n))),
+ lrint (ceil (static_cast<double>(size().height) / vertical_factor (n)))
+ );
}
/** @return Number of planes */
return d->nb_components;
}
-/** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size' */
+/** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size'.
+ * @param crop Amount to crop by.
+ * @param inter_size Size to scale the cropped image to.
+ * @param out_size Size of output frame; if this is larger than inter_size there will be black padding.
+ * @param yuv_to_rgb YUV to RGB transformation to use, if required.
+ * @param out_format Output pixel format.
+ * @param out_aligned true to make the output image aligned.
+ * @param fast Try to be fast at the possible expense of quality; at present this means using
+ * fast bilinear rather than bicubic scaling.
+ */
shared_ptr<Image>
Image::crop_scale_window (
- Crop crop, dcp::Size inter_size, dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned
+ Crop crop, dcp::Size inter_size, dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast
) const
{
/* Empirical testing suggests that sws_scale() will crash if
DCPOMATIC_ASSERT (out_size.width >= inter_size.width);
DCPOMATIC_ASSERT (out_size.height >= inter_size.height);
- /* Here's an image of out_size */
- shared_ptr<Image> out (new Image (out_format, out_size, out_aligned));
+ /* Here's an image of out_size. Below we may write to it starting at an offset so we get some padding.
+ Hence we want to write in the following pattern:
+
+ block start    write start                           line end
+ |..(padding)..|<------line-size------------->|..(padding)..|
+ |..(padding)..|<------line-size------------->|..(padding)..|
+ |..(padding)..|<------line-size------------->|..(padding)..|
+
+ where line-size is of the smaller (inter_size) image and the full padded line length is that of
+ out_size. To get things to work we have to tell FFmpeg that the stride is that of out_size.
+ However some parts of FFmpeg (notably rgb48Toxyz12 in swscale.c) process data for the full
+ specified *stride*. This does not matter until we get to the last line:
+
+ block start    write start                           line end
+ |..(padding)..|<------line-size------------->|XXXwrittenXXX|
+ |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXX|
+ |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXXXXXwrittenXXX
+                                                              ^^^^ out of bounds
+
+ To get around this, we ask Image to overallocate its buffers by the overrun.
+ */
+
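+ /* The overrun is at most the width of one side's padding, i.e. (out_size.width - inter_size.width) / 2 pixels */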
+ shared_ptr<Image> out (new Image (out_format, out_size, out_aligned, (out_size.width - inter_size.width) / 2));
out->make_black ();
/* Size of the image after any crop */
struct SwsContext* scale_context = sws_getContext (
cropped_size.width, cropped_size.height, pixel_format(),
inter_size.width, inter_size.height, out_format,
- SWS_BICUBIC, 0, 0, 0
+ fast ? SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0
);
if (!scale_context) {
- throw StringError (N_("Could not allocate SwsContext"));
+ throw runtime_error (N_("Could not allocate SwsContext"));
}
DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
we've cropped all of its Y-channel pixels.
*/
int const x = lrintf (bytes_per_pixel(c) * crop.left) & ~ ((int) desc->log2_chroma_w);
- scale_in_data[c] = data()[c] + x + stride()[c] * (crop.top / line_factor(c));
+ scale_in_data[c] = data()[c] + x + stride()[c] * (crop.top / vertical_factor(c));
}
/* Corner of the image within out_size */
return out;
}
+/** @param out_size Size to scale to.
+ * @param yuv_to_rgb YUV to RGB transformation to use, if required.
+ * @param out_format Output pixel format.
+ * @param out_aligned true to make an aligned output image.
+ * @param fast Try to be fast at the possible expense of quality; at present this means using
+ * fast bilinear rather than bicubic scaling.
+ */
shared_ptr<Image>
-Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned) const
+Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast) const
{
/* Empirical testing suggests that sws_scale() will crash if
the input image is not aligned.
struct SwsContext* scale_context = sws_getContext (
size().width, size().height, pixel_format(),
out_size.width, out_size.height, out_format,
- SWS_BICUBIC, 0, 0, 0
+ fast ? SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0
);
DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
case AV_PIX_FMT_RGB555LE:
case AV_PIX_FMT_RGB48LE:
case AV_PIX_FMT_RGB48BE:
+ case AV_PIX_FMT_XYZ12LE:
memset (data()[0], 0, sample_size(0).height * stride()[0]);
break;
memset (data()[0], 0, sample_size(0).height * stride()[0]);
}
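+/** Blend one component of `other' onto `base' at a given position, using the
+ *  alpha channel of the original RGBA overlay.
+ *  @param n Component index.
+ *  @param base Image to blend onto.
+ *  @param other Overlay image, already converted to base's pixel format.
+ *  @param rgba Original RGBA overlay; only its alpha channel is used.
+ */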
+template <class T>
+void
+component (
+ int n,
+ Image* base,
+ shared_ptr<const Image> other,
+ shared_ptr<const Image> rgba,
+ int start_base_x, int start_base_y,
+ int start_other_x, int start_other_y
+ )
+{
+ dcp::Size const base_size = base->sample_size(n);
+ dcp::Size const other_size = other->sample_size(n);
+ for (int by = start_base_y, oy = start_other_y; by < base_size.height && oy < other_size.height; ++by, ++oy) {
+ /* base image */
+ T* bp = ((T*) (base->data()[n] + by * base->stride()[n])) + start_base_x;
+ /* overlay image */
+ T* op = ((T*) (other->data()[n] + oy * other->stride()[n]));
+ /* original RGBA for alpha channel */
+ uint8_t* rp = rgba->data()[0] + oy * rgba->stride()[0];
+ for (int bx = start_base_x, ox = start_other_x; bx < base_size.width && ox < other_size.width; ++bx, ++ox) {
+ float const alpha = float (rp[3]) / 255;
+ *bp = *op * alpha + *bp * (1 - alpha);
+ ++bp;
+ ++op;
+ rp += 4;
+ }
+ }
+}
+
void
Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
{
+ /* We're blending RGBA images; first byte is blue, second byte is green, third byte red, fourth byte alpha */
DCPOMATIC_ASSERT (other->pixel_format() == AV_PIX_FMT_RGBA);
int const other_bpp = 4;
switch (_pixel_format) {
case AV_PIX_FMT_RGB24:
{
+ /* Going onto RGB24. First byte is red, second green, third blue */
int const this_bpp = 3;
for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
uint8_t* op = other->data()[0] + oy * other->stride()[0];
for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
float const alpha = float (op[3]) / 255;
- tp[0] = op[0] * alpha + tp[0] * (1 - alpha);
+ tp[0] = op[2] * alpha + tp[0] * (1 - alpha);
tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
- tp[2] = op[2] * alpha + tp[2] * (1 - alpha);
+ tp[2] = op[0] * alpha + tp[2] * (1 - alpha);
tp += this_bpp;
op += other_bpp;
uint8_t* op = other->data()[0] + oy * other->stride()[0];
for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
float const alpha = float (op[3]) / 255;
- /* Blend high bytes */
- tp[1] = op[0] * alpha + tp[1] * (1 - alpha);
+ /* Blend high bytes; the RGBA in op appears to be BGRA */
+ tp[1] = op[2] * alpha + tp[1] * (1 - alpha);
tp[3] = op[1] * alpha + tp[3] * (1 - alpha);
- tp[5] = op[2] * alpha + tp[5] * (1 - alpha);
+ tp[5] = op[0] * alpha + tp[5] * (1 - alpha);
tp += this_bpp;
op += other_bpp;
}
break;
}
+ case AV_PIX_FMT_XYZ12LE:
+ {
+ dcp::ColourConversion conv = dcp::ColourConversion::srgb_to_xyz();
+ double fast_matrix[9];
+ dcp::combined_rgb_to_xyz (conv, fast_matrix);
+ double const * lut_in = conv.in()->lut (8, false);
+ double const * lut_out = conv.out()->lut (16, true);
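+ /* 6 bytes per pixel: three 16-bit little-endian components */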
+ int const this_bpp = 6;
+ for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
+ uint16_t* tp = reinterpret_cast<uint16_t*> (data()[0] + ty * stride()[0] + start_tx * this_bpp);
+ uint8_t* op = other->data()[0] + oy * other->stride()[0];
+ for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
+ float const alpha = float (op[3]) / 255;
+
+ /* Convert sRGB to XYZ; op is BGRA. First, input gamma LUT */
+ double const r = lut_in[op[2]];
+ double const g = lut_in[op[1]];
+ double const b = lut_in[op[0]];
+
+ /* RGB to XYZ, including Bradford transform and DCI companding */
+ double const x = max (0.0, min (65535.0, r * fast_matrix[0] + g * fast_matrix[1] + b * fast_matrix[2]));
+ double const y = max (0.0, min (65535.0, r * fast_matrix[3] + g * fast_matrix[4] + b * fast_matrix[5]));
+ double const z = max (0.0, min (65535.0, r * fast_matrix[6] + g * fast_matrix[7] + b * fast_matrix[8]));
+
+ /* Out gamma LUT and blend */
+ tp[0] = lrint(lut_out[lrint(x)] * 65535) * alpha + tp[0] * (1 - alpha);
+ tp[1] = lrint(lut_out[lrint(y)] * 65535) * alpha + tp[1] * (1 - alpha);
+ tp[2] = lrint(lut_out[lrint(z)] * 65535) * alpha + tp[2] * (1 - alpha);
+
+ tp += this_bpp / 2;
+ op += other_bpp;
+ }
+ }
+ break;
+ }
+ case AV_PIX_FMT_YUV420P:
+ {
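+ /* Convert the overlay to our own pixel format with scale() (at the same size), then blend each plane using the overlay's original alpha */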
+ shared_ptr<Image> yuv = other->scale (other->size(), dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
+ component<uint8_t> (0, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+ component<uint8_t> (1, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+ component<uint8_t> (2, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+ break;
+ }
+ case AV_PIX_FMT_YUV420P10:
+ case AV_PIX_FMT_YUV422P10LE:
+ {
+ shared_ptr<Image> yuv = other->scale (other->size(), dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
+ component<uint16_t> (0, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+ component<uint16_t> (1, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+ component<uint16_t> (2, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+ break;
+ }
default:
- DCPOMATIC_ASSERT (false);
+ throw PixelFormatError ("alpha_blend()", _pixel_format);
}
}
float bpp[4] = { 0, 0, 0, 0 };
- bpp[0] = floor ((d->comp[0].depth_minus1 + 1 + 7) / 8);
+#ifdef DCPOMATIC_HAVE_AVCOMPONENTDESCRIPTOR_DEPTH_MINUS1
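+ /* Older FFmpeg: per-component bit depth is given as depth_minus1 */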
+ bpp[0] = floor ((d->comp[0].depth_minus1 + 8) / 8);
+ if (d->nb_components > 1) {
+ bpp[1] = floor ((d->comp[1].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
+ }
+ if (d->nb_components > 2) {
+ bpp[2] = floor ((d->comp[2].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
+ }
+ if (d->nb_components > 3) {
+ bpp[3] = floor ((d->comp[3].depth_minus1 + 8) / 8) / pow (2.0f, d->log2_chroma_w);
+ }
+#else
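+ /* Newer FFmpeg: AVComponentDescriptor::depth_minus1 was replaced by depth */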
+ bpp[0] = floor ((d->comp[0].depth + 7) / 8);
if (d->nb_components > 1) {
- bpp[1] = floor ((d->comp[1].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w);
+ bpp[1] = floor ((d->comp[1].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
}
if (d->nb_components > 2) {
- bpp[2] = floor ((d->comp[2].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w);
+ bpp[2] = floor ((d->comp[2].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
}
if (d->nb_components > 3) {
- bpp[3] = floor ((d->comp[3].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w);
+ bpp[3] = floor ((d->comp[3].depth + 7) / 8) / pow (2.0f, d->log2_chroma_w);
}
+#endif
if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
/* Not planar; sum them up */
*
* @param p Pixel format.
* @param s Size in pixels.
+ * @param aligned true to make each row of this image aligned to a 32-byte boundary.
+ * @param extra_pixels Amount of extra "run-off" memory to allocate at the end of each plane in pixels.
*/
-Image::Image (AVPixelFormat p, dcp::Size s, bool aligned)
+Image::Image (AVPixelFormat p, dcp::Size s, bool aligned, int extra_pixels)
: _size (s)
, _pixel_format (p)
, _aligned (aligned)
+ , _extra_pixels (extra_pixels)
{
allocate ();
}
so I'll just over-allocate by 32 bytes and have done with it. Empirical
testing suggests that it works.
*/
- _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * sample_size(i).height + 32);
+ _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * sample_size(i).height + _extra_pixels * bytes_per_pixel(i) + 32);
}
}
: _size (other._size)
, _pixel_format (other._pixel_format)
, _aligned (other._aligned)
+ , _extra_pixels (other._extra_pixels)
{
allocate ();
: _size (frame->width, frame->height)
, _pixel_format (static_cast<AVPixelFormat> (frame->format))
, _aligned (true)
+ , _extra_pixels (0)
{
allocate ();
: _size (other->_size)
, _pixel_format (other->_pixel_format)
, _aligned (aligned)
+ , _extra_pixels (other->_extra_pixels)
{
allocate ();
}
std::swap (_aligned, other._aligned);
+ std::swap (_extra_pixels, other._extra_pixels);
}
/** Destroy a Image */
case AV_PIX_FMT_YUVA422P10LE:
case AV_PIX_FMT_YUVA444P10LE:
case AV_PIX_FMT_RGB48LE:
+ case AV_PIX_FMT_XYZ12LE:
/* 16-bit little-endian */
for (int c = 0; c < 3; ++c) {
int const stride_pixels = stride()[c] / 2;
throw PixelFormatError ("fade()", _pixel_format);
}
}
+
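+/** @param image Image to check.
+ *  @return image itself if it is already aligned, otherwise an aligned copy of it.
+ */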
+shared_ptr<Image>
+Image::ensure_aligned (shared_ptr<Image> image)
+{
+ if (image->aligned()) {
+ return image;
+ }
+
+ return shared_ptr<Image> (new Image (image, true));
+}