X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fimage.cc;h=0b887ea62c6979fcd1ba8b100435a28267b3bf5b;hb=c45fd99b0b4a193edcebccc927793d48431a5a13;hp=f38d441850fcdc211defea1d58db9f5132e764b0;hpb=f7a38b76ea50c62624972994f6a7ea9feaaf4f6b;p=dcpomatic.git

diff --git a/src/lib/image.cc b/src/lib/image.cc
index f38d44185..0b887ea62 100644
--- a/src/lib/image.cc
+++ b/src/lib/image.cc
@@ -35,11 +35,14 @@ extern "C" {
 #include 
 #include 
 #include 
+#include <libavutil/pixdesc.h>
 }
 #include "image.h"
 #include "exceptions.h"
 #include "scaler.h"
+#include "i18n.h"
+
 using namespace std;
 using namespace boost;
 using libdcp::Size;
@@ -56,45 +59,32 @@ Image::swap (Image& other)
 int
 Image::lines (int n) const
 {
-	switch (_pixel_format) {
-	case PIX_FMT_YUV420P:
-		if (n == 0) {
-			return size().height;
-		} else {
-			return size().height / 2;
-		}
-		break;
-	case PIX_FMT_RGB24:
-	case PIX_FMT_RGBA:
-	case PIX_FMT_YUV422P10LE:
-	case PIX_FMT_YUV422P:
-	case PIX_FMT_YUV444P:
+	if (n == 0) {
 		return size().height;
-	default:
-		throw PixelFormatError ("lines()", _pixel_format);
 	}
-
-	return 0;
+
+	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+	if (!d) {
+		throw PixelFormatError (N_("lines()"), _pixel_format);
+	}
+
+	return size().height / pow(2, d->log2_chroma_h);
 }
 
 /** @return Number of components */
 int
 Image::components () const
 {
-	switch (_pixel_format) {
-	case PIX_FMT_YUV420P:
-	case PIX_FMT_YUV422P10LE:
-	case PIX_FMT_YUV422P:
-	case PIX_FMT_YUV444P:
-		return 3;
-	case PIX_FMT_RGB24:
-	case PIX_FMT_RGBA:
-		return 1;
-	default:
-		throw PixelFormatError ("components()", _pixel_format);
+	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+	if (!d) {
+		throw PixelFormatError (N_("components()"), _pixel_format);
 	}
 
-	return 0;
+	if ((d->flags & PIX_FMT_PLANAR) == 0) {
+		return 1;
+	}
+
+	return d->nb_components;
 }
 
 shared_ptr<Image>
@@ -201,12 +191,17 @@ Image::post_process (string pp, bool aligned) const
 		break;
 	case PIX_FMT_YUV422P10LE:
 	case PIX_FMT_YUV422P:
+	case PIX_FMT_UYVY422:
 		pp_format = PP_FORMAT_422;
 		break;
 	case PIX_FMT_YUV444P:
+	case PIX_FMT_YUV444P9BE:
+	case PIX_FMT_YUV444P9LE:
+	case PIX_FMT_YUV444P10BE:
+	case PIX_FMT_YUV444P10LE:
 		pp_format = PP_FORMAT_444;
+		break;
 	default:
-		throw PixelFormatError ("post_process", pixel_format());
+		throw PixelFormatError (N_("post_process"), pixel_format());
 	}
 
 	pp_mode* mode = pp_get_mode_by_name_and_quality (pp.c_str (), PP_QUALITY_MAX);
@@ -252,37 +247,90 @@ Image::crop (Crop crop, bool aligned) const
 	return out;
 }
 
+/** Blacken a YUV image whose bits per pixel is rounded up to 16 */
+void
+Image::yuv_16_black (uint16_t v)
+{
+	memset (data()[0], 0, lines(0) * stride()[0]);
+	for (int i = 1; i < 3; ++i) {
+		int16_t* p = reinterpret_cast<int16_t*> (data()[i]);
+		for (int y = 0; y < size().height; ++y) {
+			for (int x = 0; x < line_size()[i] / 2; ++x) {
+				p[x] = v;
+			}
+			p += stride()[i] / 2;
+		}
+	}
+}
+
+uint16_t
+Image::swap_16 (uint16_t v)
+{
+	return ((v >> 8) & 0xff) | ((v & 0xff) << 8);
+}
+
 void
 Image::make_black ()
 {
+	/* U/V black value for 8-bit colour */
+	static uint8_t const eight_bit_uv = (1 << 7) - 1;
+
+	/* U/V black value for 9-bit colour */
+	static uint16_t const nine_bit_uv = (1 << 8) - 1;
+
+	/* U/V black value for 10-bit colour */
+	static uint16_t const ten_bit_uv = (1 << 9) - 1;
+
 	switch (_pixel_format) {
 	case PIX_FMT_YUV420P:
 	case PIX_FMT_YUV422P:
 	case PIX_FMT_YUV444P:
 		memset (data()[0], 0, lines(0) * stride()[0]);
-		memset (data()[1], 0x7f, lines(1) * stride()[1]);
-		memset (data()[2], 0x7f, lines(2) * stride()[2]);
+		memset (data()[1], eight_bit_uv, lines(1) * stride()[1]);
+		memset (data()[2], eight_bit_uv, lines(2) * stride()[2]);
 		break;
 
+	case PIX_FMT_YUV422P9LE:
+	case PIX_FMT_YUV444P9LE:
+		yuv_16_black (nine_bit_uv);
+		break;
+
+	case PIX_FMT_YUV422P9BE:
+	case PIX_FMT_YUV444P9BE:
+		yuv_16_black (swap_16 (nine_bit_uv));
+		break;
+
 	case PIX_FMT_YUV422P10LE:
-		memset (data()[0], 0, lines(0) * stride()[0]);
-		for (int i = 1; i < 3; ++i) {
-			int16_t* p = reinterpret_cast<int16_t*> (data()[i]);
-			for (int y = 0; y < size().height; ++y) {
-				for (int x = 0; x < line_size()[i] / 2; ++x) {
-					p[x] = (1 << 9) - 1;
-				}
-				p += stride()[i] / 2;
-			}
-		}
+	case PIX_FMT_YUV444P10LE:
+		yuv_16_black (ten_bit_uv);
 		break;
 
+	case PIX_FMT_YUV444P10BE:
+	case PIX_FMT_YUV422P10BE:
+		yuv_16_black (swap_16 (ten_bit_uv));
+		break;
+
 	case PIX_FMT_RGB24:
 		memset (data()[0], 0, lines(0) * stride()[0]);
 		break;
 
+	case PIX_FMT_UYVY422:
+	{
+		int const Y = lines(0);
+		int const X = line_size()[0];
+		uint8_t* p = data()[0];
+		for (int y = 0; y < Y; ++y) {
+			for (int x = 0; x < X / 4; ++x) {
+				*p++ = eight_bit_uv; // Cb
+				*p++ = 0;            // Y0
+				*p++ = eight_bit_uv; // Cr
+				*p++ = 0;            // Y1
+			}
+		}
+		break;
+	}
+
 	default:
-		assert (false);
+		throw PixelFormatError (N_("make_black()"), _pixel_format);
 	}
 }
 
@@ -350,46 +398,36 @@ Image::write_to_socket (shared_ptr<Socket> socket) const
 float
 Image::bytes_per_pixel (int c) const
 {
-	if (c == 3) {
+	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+	if (!d) {
+		throw PixelFormatError (N_("bytes_per_pixel()"), _pixel_format);
+	}
+
+	if (c >= components()) {
 		return 0;
 	}
+
+	float bpp[4] = { 0, 0, 0, 0 };
+
+	bpp[0] = floor ((d->comp[0].depth_minus1 + 1 + 7) / 8);
+	if (d->nb_components > 1) {
+		bpp[1] = floor ((d->comp[1].depth_minus1 + 1 + 7) / 8) / pow (2, d->log2_chroma_w);
+	}
+	if (d->nb_components > 2) {
+		bpp[2] = floor ((d->comp[2].depth_minus1 + 1 + 7) / 8) / pow (2, d->log2_chroma_w);
+	}
+	if (d->nb_components > 3) {
+		bpp[3] = floor ((d->comp[3].depth_minus1 + 1 + 7) / 8) / pow (2, d->log2_chroma_w);
+	}
 
-	switch (_pixel_format) {
-	case PIX_FMT_RGB24:
-		if (c == 0) {
-			return 3;
-		} else {
-			return 0;
-		}
-	case PIX_FMT_RGBA:
-		if (c == 0) {
-			return 4;
-		} else {
-			return 0;
-		}
-	case PIX_FMT_YUV420P:
-	case PIX_FMT_YUV422P:
-		if (c == 0) {
-			return 1;
-		} else {
-			return 0.5;
-		}
-	case PIX_FMT_YUV422P10LE:
-		if (c == 0) {
-			return 2;
-		} else {
-			return 1;
-		}
-	case PIX_FMT_YUV444P:
-		return 3;
-	default:
-		assert (false);
+	if ((d->flags & PIX_FMT_PLANAR) == 0) {
+		/* Not planar; sum them up */
+		return bpp[0] + bpp[1] + bpp[2] + bpp[3];
 	}
 
-	return 0;
+	return bpp[c];
 }
 
-
 /** Construct a SimpleImage of a given size and format, allocating memory
  *  as required.
  *
@@ -432,7 +470,33 @@ SimpleImage::SimpleImage (SimpleImage const & other)
 	allocate ();
 
 	for (int i = 0; i < components(); ++i) {
-		memcpy (_data[i], other._data[i], _line_size[i] * lines(i));
+		uint8_t* p = _data[i];
+		uint8_t* q = other._data[i];
+		for (int j = 0; j < lines(i); ++j) {
+			memcpy (p, q, _line_size[i]);
+			p += stride()[i];
+			q += other.stride()[i];
+		}
+	}
+}
+
+SimpleImage::SimpleImage (shared_ptr<const Image> other)
+	: Image (*other.get())
+{
+	_size = other->size ();
+	_aligned = true;
+
+	allocate ();
+
+	for (int i = 0; i < components(); ++i) {
+		assert (line_size()[i] == other->line_size()[i]);
+		uint8_t* p = _data[i];
+		uint8_t* q = other->data()[i];
+		for (int j = 0; j < lines(i); ++j) {
+			memcpy (p, q, line_size()[i]);
+			p += stride()[i];
+			q += other->stride()[i];
+		}
 	}
 }
 
@@ -506,9 +570,9 @@ SimpleImage::aligned () const
 	return _aligned;
 }
 
-FilterBufferImage::FilterBufferImage (AVPixelFormat p, AVFilterBufferRef* b)
-	: Image (p)
-	, _buffer (b)
+FrameImage::FrameImage (AVFrame* frame)
+	: Image (static_cast<AVPixelFormat> (frame->format))
+	, _frame (frame)
 {
 	_line_size = (int *) av_malloc (4 * sizeof (int));
 	_line_size[0] = _line_size[1] = _line_size[2] = _line_size[3] = 0;
@@ -518,44 +582,40 @@ FilterBufferImage::FilterBufferImage (AVPixelFormat p, AVFilterBufferRef* b)
 	}
 }
 
-FilterBufferImage::~FilterBufferImage ()
+FrameImage::~FrameImage ()
 {
-	avfilter_unref_buffer (_buffer);
+	av_frame_free (&_frame);
 	av_free (_line_size);
 }
 
 uint8_t **
-FilterBufferImage::data () const
+FrameImage::data () const
 {
-	return _buffer->data;
+	return _frame->data;
 }
 
 int *
-FilterBufferImage::line_size () const
+FrameImage::line_size () const
 {
 	return _line_size;
 }
 
 int *
-FilterBufferImage::stride () const
+FrameImage::stride () const
 {
-	/* I've seen images where the _buffer->linesize is larger than the width
-	   (by a small amount), suggesting that _buffer->linesize is what we call
-	   stride.  But I'm not sure.
-	*/
-	return _buffer->linesize;
+	/* AVFrame's `linesize' is what we call `stride' */
+	return _frame->linesize;
 }
 
 libdcp::Size
-FilterBufferImage::size () const
+FrameImage::size () const
 {
-	return libdcp::Size (_buffer->video->w, _buffer->video->h);
+	return libdcp::Size (_frame->width, _frame->height);
 }
 
 bool
-FilterBufferImage::aligned () const
+FrameImage::aligned () const
 {
-	/* XXX? */
 	return true;
 }
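
For reference only (not part of the patch above): the rewritten lines(), components() and bytes_per_pixel() all replace per-format switch statements with the AVPixFmtDescriptor that libavutil already keeps for every pixel format. The standalone sketch below shows the same arithmetic in isolation, assuming an FFmpeg of the same era as the patch (PIX_FMT_* names and comp[].depth_minus1; later FFmpeg renames these to AV_PIX_FMT_* and comp[].depth, and PIX_FMT_PLANAR to AV_PIX_FMT_FLAG_PLANAR). The describe() helper and the formats chosen in main() are purely illustrative.

/* pix_fmt_geometry.cc -- illustrative sketch, not part of dcpomatic */
extern "C" {
#include <libavutil/pixdesc.h>
}
#include <cstdio>

/* Print, for each component of `format', how many lines a frame of height
   `height' has in that component and how many bytes each of its pixels uses,
   mirroring the descriptor-based logic of Image::lines() and
   Image::bytes_per_pixel() in the patch.
*/
static void
describe (AVPixelFormat format, int height)
{
	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get (format);
	if (!d) {
		return;
	}

	for (int c = 0; c < d->nb_components; ++c) {
		/* chroma components are subsampled vertically by 2^log2_chroma_h */
		int const comp_lines = (c == 0) ? height : (height >> d->log2_chroma_h);
		/* each sample occupies a whole number of bytes, rounded up from its
		   bit depth; chroma is also subsampled horizontally by 2^log2_chroma_w
		*/
		int const bytes = (d->comp[c].depth_minus1 + 1 + 7) / 8;
		float const bpp = (c == 0) ? bytes : float (bytes) / (1 << d->log2_chroma_w);
		printf ("%s component %d: %d lines, %.2f bytes per pixel\n", d->name, c, comp_lines, bpp);
	}
}

int
main ()
{
	describe (PIX_FMT_YUV420P, 1080);
	describe (PIX_FMT_YUV422P10LE, 1080);
	return 0;
}

For PIX_FMT_YUV422P10LE this reports 2 bytes per pixel for the luma and 1 byte for each chroma component, and half-height chroma planes for PIX_FMT_YUV420P -- the same values the old hand-written switch statements returned, but now derived for every format FFmpeg can describe.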