diff options
| author | Carl Hetherington <cth@carlh.net> | 2016-06-29 16:01:14 +0100 |
|---|---|---|
| committer | Carl Hetherington <cth@carlh.net> | 2016-06-29 16:01:14 +0100 |
| commit | 92c691f29c5da9abca6a06605998e09f9b8103bb (patch) | |
| tree | 1ecd83269fcce0f7bf83279ea4920cdeb5b1a939 /src/lib | |
| parent | 420b50e7e5130194d8e8f4a51514c005e2df3dd0 (diff) | |
Fix handling of incorrectly-recognised JPEG2000 files.
Previously we asked libdcp whether an imported J2K file was
RGB or XYZ. The answer it gives is sometimes wrong, for reasons
that are not clear (either the files are not marked correctly,
or openjpeg is not parsing the relevant metadata correctly).
However it seems that, in general, we use the user's specified
colour conversion to decide what to do with an image, rather than
asking the image what should be done to it.
Hence it makes more sense to assume that if a user specifies no
colour conversion for a J2K file then the file is XYZ.
With preview, the colour conversion from XYZ back to RGB is done
by FFmpeg, so we have to set the pixel format correctly on the
Image that comes back from J2KImageProxy. Now we get that pixel
format from the configured colourspace conversion rather than
from openjpeg's guess as to the file's colourspace.
It's a bit ugly that the only thing we ask the file about is whether
or not it is in YUV (which governs whether or not FFmpeg applies
the user's configured YUV-to-RGB conversion). Everything else is
decided by the configured conversion.
I think there's still some ugliness in here that I can't put my
finger on.
Diffstat (limited to 'src/lib')
| -rw-r--r-- | src/lib/dcp_decoder.cc | 13 | ||||
| -rw-r--r-- | src/lib/image_decoder.cc | 10 | ||||
| -rw-r--r-- | src/lib/j2k_image_proxy.cc | 31 | ||||
| -rw-r--r-- | src/lib/j2k_image_proxy.h | 13 | ||||
| -rw-r--r-- | src/lib/player.cc | 6 | ||||
| -rw-r--r-- | src/lib/video_mxf_decoder.cc | 12 |
6 files changed, 53 insertions, 32 deletions
diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc index 35fe375e9..283cb2905 100644 --- a/src/lib/dcp_decoder.cc +++ b/src/lib/dcp_decoder.cc @@ -95,15 +95,22 @@ DCPDecoder::pass (PassReason reason, bool) shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset (); int64_t const entry_point = (*_reel)->main_picture()->entry_point (); if (_mono_reader) { - video->give (shared_ptr<ImageProxy> (new J2KImageProxy (_mono_reader->get_frame (entry_point + frame), asset->size())), _offset + frame); + video->give ( + shared_ptr<ImageProxy> ( + new J2KImageProxy (_mono_reader->get_frame (entry_point + frame), asset->size(), AV_PIX_FMT_XYZ12LE) + ), + _offset + frame + ); } else { video->give ( - shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT)), + shared_ptr<ImageProxy> ( + new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT, AV_PIX_FMT_XYZ12LE)), _offset + frame ); video->give ( - shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT)), + shared_ptr<ImageProxy> ( + new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT, AV_PIX_FMT_XYZ12LE)), _offset + frame ); } diff --git a/src/lib/image_decoder.cc b/src/lib/image_decoder.cc index d0973f122..f7afbc0a1 100644 --- a/src/lib/image_decoder.cc +++ b/src/lib/image_decoder.cc @@ -55,10 +55,18 @@ ImageDecoder::pass (PassReason, bool) /* Either we need an image or we are using moving images, so load one */ boost::filesystem::path path = _image_content->path (_image_content->still() ? 
0 : _video_position); if (valid_j2k_file (path)) { + AVPixelFormat pf; + if (_image_content->video->colour_conversion()) { + /* We have a specified colour conversion: assume the image is RGB */ + pf = AV_PIX_FMT_RGB48LE; + } else { + /* No specified colour conversion: assume the image is XYZ */ + pf = AV_PIX_FMT_XYZ12LE; + } /* We can't extract image size from a JPEG2000 codestream without decoding it, so pass in the image content's size here. */ - _image.reset (new J2KImageProxy (path, _image_content->video->size ())); + _image.reset (new J2KImageProxy (path, _image_content->video->size(), pf)); } else { _image.reset (new MagickImageProxy (path)); } diff --git a/src/lib/j2k_image_proxy.cc b/src/lib/j2k_image_proxy.cc index 44b5ebea7..a2685bb49 100644 --- a/src/lib/j2k_image_proxy.cc +++ b/src/lib/j2k_image_proxy.cc @@ -42,23 +42,26 @@ using boost::dynamic_pointer_cast; using dcp::Data; /** Construct a J2KImageProxy from a JPEG2000 file */ -J2KImageProxy::J2KImageProxy (boost::filesystem::path path, dcp::Size size) +J2KImageProxy::J2KImageProxy (boost::filesystem::path path, dcp::Size size, AVPixelFormat pixel_format) : _data (path) , _size (size) + , _pixel_format (pixel_format) { } -J2KImageProxy::J2KImageProxy (shared_ptr<const dcp::MonoPictureFrame> frame, dcp::Size size) +J2KImageProxy::J2KImageProxy (shared_ptr<const dcp::MonoPictureFrame> frame, dcp::Size size, AVPixelFormat pixel_format) : _data (frame->j2k_size ()) , _size (size) + , _pixel_format (pixel_format) { memcpy (_data.data().get(), frame->j2k_data(), _data.size ()); } -J2KImageProxy::J2KImageProxy (shared_ptr<const dcp::StereoPictureFrame> frame, dcp::Size size, dcp::Eye eye) +J2KImageProxy::J2KImageProxy (shared_ptr<const dcp::StereoPictureFrame> frame, dcp::Size size, dcp::Eye eye, AVPixelFormat pixel_format) : _size (size) , _eye (eye) + , _pixel_format (pixel_format) { switch (eye) { case dcp::EYE_LEFT: @@ -79,6 +82,11 @@ J2KImageProxy::J2KImageProxy (shared_ptr<cxml::Node> xml, 
shared_ptr<Socket> soc _eye = static_cast<dcp::Eye> (xml->number_child<int> ("Eye")); } _data = Data (xml->number_child<int> ("Size")); + /* This only matters when we are using J2KImageProxy for the preview, which + will never use this constructor (which is only used for passing data to + encode servers). So we can put anything in here. It's a bit of a hack. + */ + _pixel_format = AV_PIX_FMT_XYZ12LE; socket->read (_data.data().get (), _data.size ()); } @@ -107,7 +115,7 @@ J2KImageProxy::image (optional<dcp::NoteHandler>) const } } - shared_ptr<Image> image (new Image (pixel_format(), _size, true)); + shared_ptr<Image> image (new Image (_pixel_format, _size, true)); /* Copy data in whatever format (sRGB or XYZ) into our Image; I'm assuming the data is 12-bit either way. @@ -160,21 +168,10 @@ J2KImageProxy::same (shared_ptr<const ImageProxy> other) const return memcmp (_data.data().get(), jp->_data.data().get(), _data.size()) == 0; } -AVPixelFormat -J2KImageProxy::pixel_format () const -{ - ensure_j2k (); - - if (_j2k->srgb ()) { - return AV_PIX_FMT_RGB48LE; - } - - return AV_PIX_FMT_XYZ12LE; -} - -J2KImageProxy::J2KImageProxy (Data data, dcp::Size size) +J2KImageProxy::J2KImageProxy (Data data, dcp::Size size, AVPixelFormat pixel_format) : _data (data) , _size (size) + , _pixel_format (pixel_format) { } diff --git a/src/lib/j2k_image_proxy.h b/src/lib/j2k_image_proxy.h index 72815a0f6..96a776f2a 100644 --- a/src/lib/j2k_image_proxy.h +++ b/src/lib/j2k_image_proxy.h @@ -30,9 +30,9 @@ namespace dcp { class J2KImageProxy : public ImageProxy { public: - J2KImageProxy (boost::filesystem::path path, dcp::Size); - J2KImageProxy (boost::shared_ptr<const dcp::MonoPictureFrame> frame, dcp::Size); - J2KImageProxy (boost::shared_ptr<const dcp::StereoPictureFrame> frame, dcp::Size, dcp::Eye); + J2KImageProxy (boost::filesystem::path path, dcp::Size, AVPixelFormat pixel_format); + J2KImageProxy (boost::shared_ptr<const dcp::MonoPictureFrame> frame, dcp::Size, AVPixelFormat 
pixel_format); + J2KImageProxy (boost::shared_ptr<const dcp::StereoPictureFrame> frame, dcp::Size, dcp::Eye, AVPixelFormat pixel_format); J2KImageProxy (boost::shared_ptr<cxml::Node> xml, boost::shared_ptr<Socket> socket); boost::shared_ptr<Image> image (boost::optional<dcp::NoteHandler> note = boost::optional<dcp::NoteHandler> ()) const; @@ -40,7 +40,9 @@ public: void send_binary (boost::shared_ptr<Socket>) const; /** @return true if our image is definitely the same as another, false if it is probably not */ bool same (boost::shared_ptr<const ImageProxy>) const; - AVPixelFormat pixel_format () const; + AVPixelFormat pixel_format () const { + return _pixel_format; + } dcp::Data j2k () const { return _data; @@ -54,11 +56,12 @@ private: friend struct client_server_test_j2k; /* For tests */ - J2KImageProxy (dcp::Data data, dcp::Size size); + J2KImageProxy (dcp::Data data, dcp::Size size, AVPixelFormat pixel_format); void ensure_j2k () const; dcp::Data _data; dcp::Size _size; boost::optional<dcp::Eye> _eye; mutable boost::shared_ptr<dcp::OpenJPEGImage> _j2k; + AVPixelFormat _pixel_format; }; diff --git a/src/lib/player.cc b/src/lib/player.cc index 20a3e1453..0360858cb 100644 --- a/src/lib/player.cc +++ b/src/lib/player.cc @@ -173,7 +173,8 @@ Player::playlist_content_changed (weak_ptr<Content> w, int property, bool freque property == SubtitleContentProperty::COLOUR || property == SubtitleContentProperty::OUTLINE || property == SubtitleContentProperty::OUTLINE_COLOUR || - property == FFmpegContentProperty::SUBTITLE_STREAM + property == FFmpegContentProperty::SUBTITLE_STREAM || + property == VideoContentProperty::COLOUR_CONVERSION ) { _have_valid_pieces = false; @@ -190,8 +191,7 @@ Player::playlist_content_changed (weak_ptr<Content> w, int property, bool freque property == VideoContentProperty::CROP || property == VideoContentProperty::SCALE || property == VideoContentProperty::FADE_IN || - property == VideoContentProperty::FADE_OUT || - property == 
VideoContentProperty::COLOUR_CONVERSION + property == VideoContentProperty::FADE_OUT ) { Changed (frequent); diff --git a/src/lib/video_mxf_decoder.cc b/src/lib/video_mxf_decoder.cc index 938d7deaf..dc4f8d60b 100644 --- a/src/lib/video_mxf_decoder.cc +++ b/src/lib/video_mxf_decoder.cc @@ -77,10 +77,16 @@ VideoMXFDecoder::pass (PassReason, bool) } if (_mono_reader) { - video->give (shared_ptr<ImageProxy> (new J2KImageProxy (_mono_reader->get_frame(frame), _size)), frame); + video->give ( + shared_ptr<ImageProxy> (new J2KImageProxy (_mono_reader->get_frame(frame), _size, AV_PIX_FMT_XYZ12LE)), frame + ); } else { - video->give (shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::EYE_LEFT)), frame); - video->give (shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::EYE_RIGHT)), frame); + video->give ( + shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::EYE_LEFT, AV_PIX_FMT_XYZ12LE)), frame + ); + video->give ( + shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::EYE_RIGHT, AV_PIX_FMT_XYZ12LE)), frame + ); } _next += ContentTime::from_frames (1, vfr); |
