#include "compose.hpp"
#include "cross.h"
+#include "dcpomatic_assert.h"
#include "dcpomatic_socket.h"
#include "exceptions.h"
#include "ffmpeg_image_proxy.h"
#include "image.h"
-#include "util.h"
-#include "warnings.h"
+#include "memory_util.h"
#include <dcp/raw_convert.h>
-DCPOMATIC_DISABLE_WARNINGS
+#include <dcp/warnings.h>
+LIBDCP_DISABLE_WARNINGS
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/pixdesc.h>
}
#include <libxml++/libxml++.h>
-DCPOMATIC_ENABLE_WARNINGS
+LIBDCP_ENABLE_WARNINGS
#include <iostream>
#include "i18n.h"
using dcp::raw_convert;
-FFmpegImageProxy::FFmpegImageProxy (boost::filesystem::path path, VideoRange video_range)
+FFmpegImageProxy::FFmpegImageProxy (boost::filesystem::path path)
: _data (path)
- , _video_range (video_range)
, _pos (0)
, _path (path)
{
}
-FFmpegImageProxy::FFmpegImageProxy (dcp::ArrayData data, VideoRange video_range)
+FFmpegImageProxy::FFmpegImageProxy (dcp::ArrayData data)
: _data (data)
- , _video_range (video_range)
, _pos (0)
{
}
// Construct a proxy whose image data arrives over a network socket.
// NOTE(review): this span is unified-diff residue — the '-' lines are the old
// constructor (which also took a cxml::Node carrying a VideoRange) and the
// '+' lines are its replacement.  Resolve to the '+' side when repairing.
-FFmpegImageProxy::FFmpegImageProxy (shared_ptr<cxml::Node> node, shared_ptr<Socket> socket)
- : _video_range (string_to_video_range(node->string_child("VideoRange")))
- , _pos (0)
+FFmpegImageProxy::FFmpegImageProxy (shared_ptr<Socket> socket)
+ : _pos (0)
{
// Read the payload size, then (presumably, in lines elided from this chunk)
// the payload itself into _data.  The body is truncated here: the read of the
// actual data and the closing brace are missing from this view.
uint32_t const size = socket->read_uint32 ();
_data = dcp::ArrayData (size);
// Decode the stored image data with FFmpeg and return the result.
// NOTE(review): this whole span is unified-diff residue ('-' lines are the old
// code, '+' lines the replacement) and several hunks have been elided, so the
// statements below are NOT contiguous.  Resolve to the '+' side when repairing.
ImageProxy::Result
-FFmpegImageProxy::image (optional<dcp::Size>) const
+FFmpegImageProxy::image (Image::Alignment alignment, optional<dcp::Size>) const
{
auto constexpr name_for_errors = "FFmpegImageProxy::image";
// NOTE(review): the stray '}' below and the jump in logic indicate an elided
// hunk between the line above and the buffer allocation that follows.
}
// Set up a custom AVIO context so FFmpeg reads from our in-memory _data via
// avio_read_wrapper / avio_seek_wrapper rather than from a file.
uint8_t* avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc(4096));
- AVIOContext* avio_context = avio_alloc_context (avio_buffer, 4096, 0, const_cast<FFmpegImageProxy*>(this), avio_read_wrapper, 0, avio_seek_wrapper);
+ auto avio_context = avio_alloc_context (avio_buffer, 4096, 0, const_cast<FFmpegImageProxy*>(this), avio_read_wrapper, 0, avio_seek_wrapper);
AVFormatContext* format_context = avformat_alloc_context ();
format_context->pb = avio_context;
- AVDictionary* options = 0;
+ AVDictionary* options = nullptr;
/* These durations are in microseconds, and represent how far into the content file
we will look for streams.
*/
// NOTE(review): the comment above and the fragment below are two different
// comments fused together by an elided hunk.
directly from the file). This code just does enough to allow the
probe code to take a hint from "foo.tga" and so try targa format.
*/
- AVInputFormat* f = av_find_input_format ("image2");
+ auto f = av_find_input_format ("image2");
// A second AVFormatContext is allocated here; presumably the first open
// attempt failed and was cleaned up in an elided hunk — confirm, otherwise
// the allocation near the top of the function leaks.
format_context = avformat_alloc_context ();
format_context->pb = avio_context;
format_context->iformat = f;
int r = avformat_find_stream_info(format_context, 0);
if (r < 0) {
- throw DecodeError (N_("avcodec_find_stream_info"), name_for_errors, r);
+ throw DecodeError (N_("avcodec_find_stream_info"), name_for_errors, r, *_path);
}
// A still image is expected to contain exactly one stream.
DCPOMATIC_ASSERT (format_context->nb_streams == 1);
- AVFrame* frame = av_frame_alloc ();
+ auto frame = av_frame_alloc ();
if (!frame) {
// BUG(review): missing 'throw' — this constructs a temporary std::bad_alloc
// and immediately discards it, so an allocation failure is silently ignored
// and the null frame is used below.  Should read: throw std::bad_alloc ();
std::bad_alloc ();
}
auto context = avcodec_alloc_context3 (codec);
if (!context) {
- throw DecodeError (N_("avcodec_alloc_context3"), name_for_errors);
+ throw DecodeError (N_("avcodec_alloc_context3"), name_for_errors, *_path);
}
r = avcodec_open2 (context, codec, 0);
if (r < 0) {
- throw DecodeError (N_("avcodec_open2"), name_for_errors, r);
+ throw DecodeError (N_("avcodec_open2"), name_for_errors, r, *_path);
}
// NOTE(review): each throwing path above and below appears to leak
// avio_buffer, avio_context, format_context and frame unless cleanup lives
// in an elided hunk or in a caller — confirm when repairing.
AVPacket packet;
r = av_read_frame (format_context, &packet);
if (r < 0) {
- throw DecodeError (N_("av_read_frame"), name_for_errors, r);
+ throw DecodeError (N_("av_read_frame"), name_for_errors, r, *_path);
}
// Send the single packet to the decoder and receive the decoded frame.
r = avcodec_send_packet (context, &packet);
if (r < 0) {
- throw DecodeError (N_("avcodec_send_packet"), name_for_errors, r);
+ throw DecodeError (N_("avcodec_send_packet"), name_for_errors, r, *_path);
}
r = avcodec_receive_frame (context, frame);
if (r < 0) {
- throw DecodeError (N_("avcodec_receive_frame"), name_for_errors, r);
+ throw DecodeError (N_("avcodec_receive_frame"), name_for_errors, r, *_path);
}
// The '-' block below (manual video-range expansion for RGB sources, tied to
// the removed _video_range member) was deleted; its replacement simply builds
// the Image with the caller-requested alignment.
- auto const pix_fmt = static_cast<AVPixelFormat>(frame->format);
-
- _image = make_shared<Image>(frame);
- if (_video_range == VideoRange::VIDEO && av_pix_fmt_desc_get(pix_fmt)->flags & AV_PIX_FMT_FLAG_RGB) {
- /* Asking for the video range to be converted by libswscale (in Image) will not work for
- * RGB sources since that method only processes video range in YUV and greyscale. So we have
- * to do it ourselves here.
- */
- _image->video_range_to_full_range();
- }
+ _image = make_shared<Image>(frame, alignment);
// Release FFmpeg resources for the decoded packet/frame (further teardown,
// the return statement and the closing brace are elided from this view).
av_packet_unref (&packet);
av_frame_free (&frame);
/** Record metadata describing this proxy under an XML node (used when the
 *  proxy is serialised, e.g. to send over the network).  The VideoRange
 *  child was removed along with the _video_range member, so only the type
 *  tag is written now.
 *  @param node Node to add our metadata children to.
 */
FFmpegImageProxy::add_metadata (xmlpp::Node* node) const
{
	node->add_child("Type")->add_child_text (N_("FFmpeg"));
}
void
// NOTE(review): the function body belonging to the 'void' return type above
// has been elided from this chunk; 'bool' below is the return type of same().
bool
// Compare with another proxy: they can only be the same if the other is also
// an FFmpegImageProxy.  ('-'/'+' lines below are unified-diff residue;
// resolve to the '+' side when repairing.)
FFmpegImageProxy::same (shared_ptr<const ImageProxy> other) const
{
- shared_ptr<const FFmpegImageProxy> mp = dynamic_pointer_cast<const FFmpegImageProxy> (other);
+ auto mp = dynamic_pointer_cast<const FFmpegImageProxy>(other);
if (!mp) {
return false;
}
// NOTE(review): the actual data comparison and closing brace are elided
// from this view.