#include "image.h"
#include "compose.hpp"
#include "util.h"
+#include "warnings.h"
#include <dcp/raw_convert.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
+#include <libavutil/pixdesc.h>
}
+DCPOMATIC_DISABLE_WARNINGS
#include <libxml++/libxml++.h>
+DCPOMATIC_ENABLE_WARNINGS
#include <iostream>
#include "i18n.h"
using std::pair;
using std::min;
using std::make_pair;
-using boost::shared_ptr;
+using std::shared_ptr;
using boost::optional;
-using boost::dynamic_pointer_cast;
+using std::dynamic_pointer_cast;
using dcp::raw_convert;
// Construct a proxy that will decode the image file at `path` using FFmpeg.
// NOTE(review): this hunk adds a VideoRange parameter, stored in _video_range;
// presumably image() uses it later to decide whether a range conversion is
// needed (see the RGB special case in image() below) — confirm against callers.
-FFmpegImageProxy::FFmpegImageProxy (boost::filesystem::path path)
+FFmpegImageProxy::FFmpegImageProxy (boost::filesystem::path path, VideoRange video_range)
: _data (path)
+ , _video_range (video_range)
, _pos (0)
// _path is kept as well as _data so that error messages can name the file
// (see the OpenFileError branch in image()).
, _path (path)
{
}
// Construct a proxy from an in-memory blob of encoded image data.
// NOTE(review): the hunk migrates the parameter from dcp::Data to
// dcp::ArrayData and adds the VideoRange parameter, matching the path-based
// constructor above.  No _path is set here, so failures in image() fall
// through to the DecodeError branch rather than OpenFileError.
-FFmpegImageProxy::FFmpegImageProxy (dcp::Data data)
+FFmpegImageProxy::FFmpegImageProxy (dcp::ArrayData data, VideoRange video_range)
: _data (data)
+ , _video_range (video_range)
, _pos (0)
{
}
// Deserialising constructor: metadata arrives as XML (`node`) and the encoded
// image bytes arrive over `socket` as a 32-bit length prefix followed by the
// payload.  Counterpart of add_metadata() / write_to_socket() below.
// NOTE(review): the node parameter gains a name because the new code reads
// the <VideoRange> child that add_metadata() now writes.
-FFmpegImageProxy::FFmpegImageProxy (shared_ptr<cxml::Node>, shared_ptr<Socket> socket)
- : _pos (0)
+FFmpegImageProxy::FFmpegImageProxy (shared_ptr<cxml::Node> node, shared_ptr<Socket> socket)
+ : _video_range (string_to_video_range(node->string_child("VideoRange")))
+ , _pos (0)
{
uint32_t const size = socket->read_uint32 ();
// dcp::ArrayData::data() returns a raw pointer directly, unlike the old
// dcp::Data whose data() returned a shared_array needing .get().
- _data = dcp::Data (size);
- socket->read (_data.data().get(), size);
+ _data = dcp::ArrayData (size);
+ socket->read (_data.data(), size);
}
// NOTE(review): the `static int` line below is an orphaned fragment from an
// elided free function (likely the C-style avio trampoline); its body is not
// visible in this hunk.
static int
// AVIO read callback body: copies up to `amount` bytes from the in-memory
// _data buffer into FFmpeg's `buffer`, advancing _pos.
int
FFmpegImageProxy::avio_read (uint8_t* buffer, int const amount)
{
// Clamp the request to the bytes remaining; the cast-heavy form avoids a
// signed/unsigned mismatch now that ArrayData::size() and _pos may differ
// in type (was a plain min(int64_t, ...) before).
- int const to_do = min(int64_t(amount), _data.size() - _pos);
+ int const to_do = min(static_cast<int64_t>(amount), static_cast<int64_t>(_data.size()) - _pos);
if (to_do == 0) {
// FFmpeg expects AVERROR_EOF (not 0) at end of stream.
return AVERROR_EOF;
}
- memcpy (buffer, _data.data().get() + _pos, to_do);
+ memcpy (buffer, _data.data() + _pos, to_do);
_pos += to_do;
return to_do;
}
// NOTE(review): the two lines below are the tail of another member function
// (presumably the avio_seek callback returning the new position); its header
// and body are elided from this hunk.
return _pos;
}
// Decode the stored image data into an Image, caching the result in _image.
// NOTE(review): large parts of this function (AVIOContext construction, the
// demux/decode loop, the scaled-size handling) are elided from this hunk —
// do not assume their behaviour from what is visible here.
// The return type changes from pair<shared_ptr<Image>, int> to the new
// ImageProxy::Result wrapper; the second value (0) looks like a log2-scaling
// or error slot — confirm against ImageProxy::Result's declaration.
-pair<shared_ptr<Image>, int>
// Warnings are disabled around this function, presumably because of
// deprecated FFmpeg APIs used in the elided body — TODO confirm.
+DCPOMATIC_DISABLE_WARNINGS
+
+ImageProxy::Result
FFmpegImageProxy::image (optional<dcp::Size>) const
{
// _image is shared mutable state; guard it for concurrent image() callers.
boost::mutex::scoped_lock lm (_mutex);
if (_image) {
// Already decoded: return the cached frame.
- return make_pair (_image, 0);
+ return Result (_image, 0);
}
uint8_t* avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc(4096));
// (elided: AVIOContext / AVFormatContext setup)
// "foo.tga" is a fake filename hint so FFmpeg's probing treats the buffer
// as an image file; the real bytes come via the avio callbacks above.
e = avformat_open_input (&format_context, "foo.tga", f, &options);
}
if (e < 0) {
// A file-backed proxy can name the path in the error; a memory-backed
// one (no _path) can only report a generic decode failure.  The old
// code dereferenced _path unconditionally.
- throw OpenFileError (_path->string(), e, true);
+ if (_path) {
+ throw OpenFileError (_path->string(), e, OpenFileError::READ);
+ } else {
+ boost::throw_exception(DecodeError(String::compose(_("Could not decode image (%1)"), e)));
+ }
}
if (avformat_find_stream_info(format_context, 0) < 0) {
throw DecodeError (N_("could not decode video"));
}
// (elided: stream selection and frame decoding into `frame`)
- _image.reset (new Image (frame));
+ AVPixelFormat const pix_fmt = static_cast<AVPixelFormat>(frame->format);
+
+ _image.reset (new Image(frame));
+ if (_video_range == VideoRange::VIDEO && av_pix_fmt_desc_get(pix_fmt)->flags & AV_PIX_FMT_FLAG_RGB) {
+ /* Asking for the video range to be converted by libswscale (in Image) will not work for
+ * RGB sources since that method only processes video range in YUV and greyscale. So we have
+ * to do it ourselves here.
+ */
+ _image->video_range_to_full_range();
+ }
// Release FFmpeg resources; avio_context->buffer may differ from
// avio_buffer if FFmpeg reallocated it, hence freeing via the context.
av_packet_unref (&packet);
av_frame_free (&frame);
av_free (avio_context->buffer);
av_free (avio_context);
- return make_pair (_image, 0);
+ return Result (_image, 0);
}
+DCPOMATIC_ENABLE_WARNINGS
+
// Serialise this proxy's metadata as XML children of `node`; the binary
// payload goes separately via write_to_socket().  Mirrors the
// (node, socket) constructor above, which now reads <VideoRange> back.
void
FFmpegImageProxy::add_metadata (xmlpp::Node* node) const
{
node->add_child("Type")->add_child_text (N_("FFmpeg"));
+ node->add_child("VideoRange")->add_child_text(video_range_to_string(_video_range));
}
// Send the encoded image bytes over `socket` as a length prefix followed by
// the payload — the format the (node, socket) constructor expects.
// NOTE(review): renamed from send_binary to write_to_socket; presumably the
// ImageProxy base class virtual was renamed too — confirm in image_proxy.h.
void
-FFmpegImageProxy::send_binary (shared_ptr<Socket> socket) const
+FFmpegImageProxy::write_to_socket (shared_ptr<Socket> socket) const
{
socket->write (_data.size());
// ArrayData::data() returns a raw pointer; no .get() needed any more.
- socket->write (_data.data().get(), _data.size());
+ socket->write (_data.data(), _data.size());
}
// NOTE(review): the header and opening of this equality-comparison member
// (presumably FFmpegImageProxy::same) are elided from this hunk; only the
// tail is visible.
bool
return false;
}
// dcp::ArrayData provides operator==, so the manual size check + memcmp of
// the old dcp::Data buffers collapses to a single comparison.
- if (_data.size() != mp->_data.size()) {
- return false;
- }
-
- return memcmp (_data.data().get(), mp->_data.data().get(), _data.size()) == 0;
+ return _data == mp->_data;
}
size_t