/** @param pixel_format Pixel format functor that will be used when calling ::image on PlayerVideos coming out of this
* butler. This will be used (where possible) to prepare the PlayerVideos so that calling image() on them is quick.
- * @param aligned Same as above for the `aligned' flag.
+ * @param alignment Same as above for the `alignment' value.
* @param fast Same as above for the `fast' flag.
*/
Butler::Butler (
int audio_channels,
function<AVPixelFormat (AVPixelFormat)> pixel_format,
VideoRange video_range,
- bool aligned,
+ Image::Alignment alignment,
bool fast,
bool prepare_only_proxy
)
, _disable_audio (false)
, _pixel_format (pixel_format)
, _video_range (video_range)
- , _aligned (aligned)
+ , _alignment (alignment)
, _fast (fast)
, _prepare_only_proxy (prepare_only_proxy)
{
/* If the weak_ptr cannot be locked the video obviously no longer requires any work */
if (video) {
LOG_TIMING("start-prepare in %1", thread_id());
- video->prepare (_pixel_format, _video_range, _aligned, _fast, _prepare_only_proxy);
+ video->prepare (_pixel_format, _video_range, _alignment, _fast, _prepare_only_proxy);
LOG_TIMING("finish-prepare in %1", thread_id());
}
}
int audio_channels,
std::function<AVPixelFormat (AVPixelFormat)> pixel_format,
VideoRange video_range,
- bool aligned,
+ Image::Alignment alignment,
bool fast,
bool prepare_only_proxy
);
std::function<AVPixelFormat (AVPixelFormat)> _pixel_format;
VideoRange _video_range;
- bool _aligned;
+ Image::Alignment _alignment;
bool _fast;
/** true to ask PlayerVideo::prepare to only prepare the ImageProxy and not also
#include "i18n.h"
+
using std::cout;
using std::make_shared;
using std::shared_ptr;
using std::string;
-using dcp::Size;
using dcp::ArrayData;
using dcp::raw_convert;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
#endif
+
#define DCI_COEFFICENT (48.0 / 52.37)
+
/** Construct a DCP video frame.
* @param frame Input frame.
* @param index Index of the frame within the DCP.
{
shared_ptr<dcp::OpenJPEGImage> xyz;
- auto image = frame->image (bind(&PlayerVideo::keep_xyz_or_rgb, _1), VideoRange::FULL, true, false);
+ auto image = frame->image (bind(&PlayerVideo::keep_xyz_or_rgb, _1), VideoRange::FULL, Image::Alignment::PADDED, false);
if (frame->colour_conversion()) {
xyz = dcp::rgb_to_xyz (
image->data()[0],
Encoder::Encoder (std::shared_ptr<const Film> film, std::weak_ptr<Job> job)
: _film (film)
, _job (job)
- , _player (new Player(film, true))
+ , _player (new Player(film, Image::Alignment::PADDED))
{
}
*/
+
/** @file src/ffmpeg_decoder.cc
* @brief A decoder using FFmpeg to decode content.
*/
-#include "filter.h"
-#include "exceptions.h"
-#include "image.h"
-#include "util.h"
-#include "log.h"
+
+#include "audio_buffers.h"
+#include "audio_content.h"
+#include "audio_decoder.h"
+#include "compose.hpp"
#include "dcpomatic_log.h"
-#include "ffmpeg_decoder.h"
-#include "text_decoder.h"
+#include "exceptions.h"
#include "ffmpeg_audio_stream.h"
-#include "ffmpeg_subtitle_stream.h"
-#include "video_filter_graph.h"
-#include "audio_buffers.h"
#include "ffmpeg_content.h"
-#include "raw_image_proxy.h"
-#include "video_decoder.h"
+#include "ffmpeg_decoder.h"
+#include "ffmpeg_subtitle_stream.h"
#include "film.h"
-#include "audio_decoder.h"
-#include "compose.hpp"
-#include "text_content.h"
-#include "audio_content.h"
+#include "filter.h"
#include "frame_interval_checker.h"
+#include "image.h"
+#include "log.h"
+#include "raw_image_proxy.h"
+#include "text_content.h"
+#include "text_decoder.h"
+#include "util.h"
+#include "video_decoder.h"
+#include "video_filter_graph.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <libavformat/avformat.h>
}
#include <boost/algorithm/string.hpp>
-#include <vector>
#include <iomanip>
#include <iostream>
+#include <vector>
#include <stdint.h>
#include "i18n.h"
+
using std::cout;
-using std::string;
-using std::vector;
-using std::list;
+using std::dynamic_pointer_cast;
+using std::make_shared;
using std::min;
-using std::pair;
-using std::max;
-using std::map;
using std::shared_ptr;
-using std::make_shared;
-using std::make_pair;
-using boost::is_any_of;
-using boost::split;
+using std::string;
+using std::vector;
using boost::optional;
-using std::dynamic_pointer_cast;
using dcp::Size;
using namespace dcpomatic;
video = make_shared<VideoDecoder>(this, c);
_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
/* It doesn't matter what size or pixel format this is, it just needs to be black */
- _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (128, 128), true);
+ _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (128, 128), Image::Alignment::PADDED);
_black_image->make_black ();
} else {
_pts_offset = {};
/* Note BGRA is expressed little-endian, so the first byte in the word is B, second
G, third R, fourth A.
*/
- auto image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true);
+ auto image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), Image::Alignment::PADDED);
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
/* Start of the first line in the subtitle */
*/
+
+#include "butler.h"
+#include "cross.h"
#include "ffmpeg_encoder.h"
#include "film.h"
+#include "image.h"
#include "job.h"
+#include "log.h"
#include "player.h"
#include "player_video.h"
-#include "log.h"
-#include "image.h"
-#include "cross.h"
-#include "butler.h"
#include "compose.hpp"
#include <iostream>
#include "i18n.h"
+
using std::cout;
using std::list;
using std::make_shared;
-using std::map;
-using std::pair;
-using std::runtime_error;
using std::shared_ptr;
using std::string;
using std::weak_ptr;
using namespace boost::placeholders;
#endif
+
/** @param key Key to use to encrypt MP4 outputs */
FFmpegEncoder::FFmpegEncoder (
shared_ptr<const Film> film,
}
_butler = std::make_shared<Butler>(
- _film, _player, map, _output_audio_channels, bind(&PlayerVideo::force, _1, FFmpegFileEncoder::pixel_format(format)), VideoRange::VIDEO, true, false, false
+ _film, _player, map, _output_audio_channels, bind(&PlayerVideo::force, _1, FFmpegFileEncoder::pixel_format(format)), VideoRange::VIDEO, Image::Alignment::PADDED, false, false
);
}
*/
+#include "compose.hpp"
+#include "cross.h"
#include "ffmpeg_encoder.h"
#include "ffmpeg_wrapper.h"
#include "film.h"
+#include "image.h"
#include "job.h"
+#include "log.h"
#include "player.h"
#include "player_video.h"
-#include "log.h"
-#include "image.h"
-#include "cross.h"
-#include "compose.hpp"
extern "C" {
#include <libavutil/channel_layout.h>
}
using std::cout;
using std::make_shared;
-using std::pair;
-using std::runtime_error;
using std::shared_ptr;
using std::string;
-using std::weak_ptr;
using boost::bind;
-using boost::optional;
using namespace dcpomatic;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
auto image = video->image (
bind (&PlayerVideo::force, _1, _pixel_format),
VideoRange::VIDEO,
- true,
+ Image::Alignment::PADDED,
false
);
ImageProxy::Result
-FFmpegImageProxy::image (bool aligned, optional<dcp::Size>) const
+FFmpegImageProxy::image (Image::Alignment alignment, optional<dcp::Size>) const
{
auto constexpr name_for_errors = "FFmpegImageProxy::image";
throw DecodeError (N_("avcodec_receive_frame"), name_for_errors, r, *_path);
}
- _image = make_shared<Image>(frame, aligned);
+ _image = make_shared<Image>(frame, alignment);
av_packet_unref (&packet);
av_frame_free (&frame);
FFmpegImageProxy (std::shared_ptr<Socket> socket);
Result image (
- bool aligned,
+ Image::Alignment alignment,
boost::optional<dcp::Size> size = boost::optional<dcp::Size> ()
) const;
using std::cout;
using std::make_shared;
using std::max;
-using std::min;
-using std::pair;
using std::shared_ptr;
using std::string;
-using std::vector;
using std::weak_ptr;
using boost::optional;
using boost::bind;
emit (bind(boost::ref(Progress), _("Examining audio, subtitles and closed captions")));
}
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
player->set_ignore_video ();
if (check_loudness_done) {
/* We don't need to analyse audio because we already loaded a suitable analysis */
using dcp::Size;
-/** The memory alignment, in bytes, used for each row of an image if aligment is requested */
-#define ALIGNMENT 64
+/** The memory alignment, in bytes, used for each row of an image if Alignment::PADDED is requested */
+int constexpr ALIGNMENT = 64;
/* U/V black value for 8-bit colour */
static uint8_t const eight_bit_uv = (1 << 7) - 1;
VideoRange video_range,
AVPixelFormat out_format,
VideoRange out_video_range,
- bool out_aligned,
+ Alignment out_alignment,
bool fast
) const
{
/* Empirical testing suggests that sws_scale() will crash if
- the input image is not aligned.
+ the input image is not padded.
*/
- DCPOMATIC_ASSERT (aligned ());
+ DCPOMATIC_ASSERT (alignment() == Alignment::PADDED);
DCPOMATIC_ASSERT (out_size.width >= inter_size.width);
DCPOMATIC_ASSERT (out_size.height >= inter_size.height);
- auto out = make_shared<Image>(out_format, out_size, out_aligned);
+ auto out = make_shared<Image>(out_format, out_size, out_alignment);
out->make_black ();
auto in_desc = av_pix_fmt_desc_get (_pixel_format);
}
shared_ptr<Image>
-Image::convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast) const
+Image::convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, Alignment out_alignment, bool fast) const
{
- return scale(size(), yuv_to_rgb, out_format, out_aligned, fast);
+ return scale(size(), yuv_to_rgb, out_format, out_alignment, fast);
}
/** @param out_size Size to scale to.
* @param yuv_to_rgb YUVToRGB transform transform to use, if required.
* @param out_format Output pixel format.
- * @param out_aligned true to make an aligned output image.
+ * @param out_alignment Output alignment.
* @param fast Try to be fast at the possible expense of quality; at present this means using
* fast bilinear rather than bicubic scaling.
*/
shared_ptr<Image>
-Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast) const
+Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, Alignment out_alignment, bool fast) const
{
/* Empirical testing suggests that sws_scale() will crash if
- the input image is not aligned.
+ the input image alignment is not PADDED.
*/
- DCPOMATIC_ASSERT (aligned ());
+ DCPOMATIC_ASSERT (alignment() == Alignment::PADDED);
- auto scaled = make_shared<Image>(out_format, out_size, out_aligned);
+ auto scaled = make_shared<Image>(out_format, out_size, out_alignment);
auto scale_context = sws_getContext (
size().width, size().height, pixel_format(),
out_size.width, out_size.height, out_format,
}
case AV_PIX_FMT_YUV420P:
{
- auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, false, false);
+ auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
dcp::Size const ts = size();
dcp::Size const os = yuv->size();
for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
}
case AV_PIX_FMT_YUV420P10:
{
- auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, false, false);
+ auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
dcp::Size const ts = size();
dcp::Size const os = yuv->size();
for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
}
case AV_PIX_FMT_YUV422P10LE:
{
- auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, false, false);
+ auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, Alignment::COMPACT, false);
dcp::Size const ts = size();
dcp::Size const os = yuv->size();
for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
*
* @param p Pixel format.
* @param s Size in pixels.
- * @param aligned true to make each row of this image aligned to a ALIGNMENT-byte boundary.
+ * @param alignment PADDED to make each row of this image aligned to an ALIGNMENT-byte boundary, otherwise COMPACT.
*/
-Image::Image (AVPixelFormat p, dcp::Size s, bool aligned)
+Image::Image (AVPixelFormat p, dcp::Size s, Alignment alignment)
: _size (s)
, _pixel_format (p)
- , _aligned (aligned)
+ , _alignment (alignment)
{
allocate ();
}
+
void
Image::allocate ()
{
for (int i = 0; i < planes(); ++i) {
_line_size[i] = ceil (_size.width * bytes_per_pixel(i));
- _stride[i] = stride_round_up (i, _line_size, _aligned ? ALIGNMENT : 1);
+ _stride[i] = stride_round_up (i, _line_size, _alignment == Alignment::PADDED ? ALIGNMENT : 1);
/* The assembler function ff_rgb24ToY_avx (in libswscale/x86/input.asm)
uses a 16-byte fetch to read three bytes (R/G/B) of image data.
: std::enable_shared_from_this<Image>(other)
, _size (other._size)
, _pixel_format (other._pixel_format)
- , _aligned (other._aligned)
+ , _alignment (other._alignment)
{
allocate ();
}
}
-Image::Image (AVFrame const * frame, bool aligned)
+Image::Image (AVFrame const * frame, Alignment alignment)
: _size (frame->width, frame->height)
, _pixel_format (static_cast<AVPixelFormat>(frame->format))
- , _aligned (aligned)
+ , _alignment (alignment)
{
DCPOMATIC_ASSERT (_pixel_format != AV_PIX_FMT_NONE);
}
}
-Image::Image (shared_ptr<const Image> other, bool aligned)
+Image::Image (shared_ptr<const Image> other, Alignment alignment)
: _size (other->_size)
, _pixel_format (other->_pixel_format)
- , _aligned (aligned)
+ , _alignment (alignment)
{
allocate ();
std::swap (_stride[i], other._stride[i]);
}
- std::swap (_aligned, other._aligned);
+ std::swap (_alignment, other._alignment);
}
Image::~Image ()
return _size;
}
-bool
-Image::aligned () const
+Image::Alignment
+Image::alignment () const
{
- return _aligned;
+ return _alignment;
}
PositionImage
-merge (list<PositionImage> images, bool aligned)
+merge (list<PositionImage> images, Image::Alignment alignment)
{
if (images.empty ()) {
return {};
all.extend (dcpomatic::Rect<int>(i.position, i.image->size().width, i.image->size().height));
}
- auto merged = make_shared<Image>(images.front().image->pixel_format(), dcp::Size(all.width, all.height), aligned);
+ auto merged = make_shared<Image>(images.front().image->pixel_format(), dcp::Size(all.width, all.height), alignment);
merged->make_transparent ();
for (auto const& i: images) {
merged->alpha_blend (i.image, i.position - all.position());
bool
operator== (Image const & a, Image const & b)
{
- if (a.planes() != b.planes() || a.pixel_format() != b.pixel_format() || a.aligned() != b.aligned()) {
+ if (a.planes() != b.planes() || a.pixel_format() != b.pixel_format() || a.alignment() != b.alignment()) {
return false;
}
shared_ptr<const Image>
-Image::ensure_aligned (shared_ptr<const Image> image, bool aligned)
+Image::ensure_alignment (shared_ptr<const Image> image, Image::Alignment alignment)
{
- if (image->aligned() == aligned) {
+ if (image->alignment() == alignment) {
return image;
}
- return make_shared<Image>(image, aligned);
+ return make_shared<Image>(image, alignment);
}
DCPOMATIC_ASSERT (bytes_per_pixel(0) == 4);
DCPOMATIC_ASSERT (planes() == 1);
if (pixel_format() != AV_PIX_FMT_RGBA) {
- return convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGBA, true, false)->as_png();
+ return convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGBA, Image::Alignment::PADDED, false)->as_png();
}
/* error handling? */
class Image : public std::enable_shared_from_this<Image>
{
public:
- Image (AVPixelFormat p, dcp::Size s, bool aligned);
- explicit Image (AVFrame const *, bool aligned);
+ enum class Alignment {
+ COMPACT,
+ PADDED
+ };
+
+ Image (AVPixelFormat p, dcp::Size s, Alignment alignment);
+ explicit Image (AVFrame const *, Alignment alignment);
explicit Image (Image const &);
- Image (std::shared_ptr<const Image>, bool);
+ Image (std::shared_ptr<const Image>, Alignment alignment);
Image& operator= (Image const &);
~Image ();
/** @return array of sizes of the data in each line, in bytes (including any alignment padding) */
int const * stride () const;
dcp::Size size () const;
- bool aligned () const;
+ Alignment alignment () const;
int planes () const;
int vertical_factor (int) const;
dcp::Size sample_size (int) const;
float bytes_per_pixel (int) const;
- std::shared_ptr<Image> convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool aligned, bool fast) const;
- std::shared_ptr<Image> scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool aligned, bool fast) const;
+ std::shared_ptr<Image> convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, Alignment alignment, bool fast) const;
+ std::shared_ptr<Image> scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, Alignment alignment, bool fast) const;
std::shared_ptr<Image> crop_scale_window (
Crop crop,
dcp::Size inter_size,
VideoRange video_range,
AVPixelFormat out_format,
VideoRange out_video_range,
- bool aligned,
+ Alignment alignment,
bool fast
) const;
void png_error (char const * message);
- static std::shared_ptr<const Image> ensure_aligned (std::shared_ptr<const Image> image, bool aligned);
+ static std::shared_ptr<const Image> ensure_alignment (std::shared_ptr<const Image> image, Alignment alignment);
private:
friend struct pixel_formats_test;
uint8_t** _data; ///< array of pointers to components
int* _line_size; ///< array of sizes of the data in each line, in bytes (without any alignment padding bytes)
int* _stride; ///< array of strides for each line, in bytes (including any alignment padding bytes)
- bool _aligned;
+ Alignment _alignment;
};
-extern PositionImage merge (std::list<PositionImage> images, bool aligned);
+extern PositionImage merge (std::list<PositionImage> images, Image::Alignment alignment);
extern bool operator== (Image const & a, Image const & b);
#endif
delete[] buffer;
} else {
FFmpegImageProxy proxy(content->path(0));
- _video_size = proxy.image(false).image->size();
+ _video_size = proxy.image(Image::Alignment::COMPACT).image->size();
}
if (content->still ()) {
/*
- Copyright (C) 2014 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2014-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
*/
+
#ifndef DCPOMATIC_IMAGE_PROXY_H
#define DCPOMATIC_IMAGE_PROXY_H
+
/** @file src/lib/image_proxy.h
* @brief ImageProxy and subclasses.
*/
+
+#include "image.h"
extern "C" {
#include <libavutil/pixfmt.h>
}
#include <boost/optional.hpp>
#include <boost/utility.hpp>
+
class Image;
class Socket;
class Node;
}
+
/** @class ImageProxy
* @brief A class which holds an Image, and can produce it on request.
*
* can be used as an optimisation.
*/
virtual Result image (
- bool aligned,
+ Image::Alignment alignment,
boost::optional<dcp::Size> size = boost::optional<dcp::Size> ()
) const = 0;
* This method may be called in a different thread to image().
* @return log2 of any scaling down that will be applied to the image.
*/
- virtual int prepare (bool, boost::optional<dcp::Size> = boost::optional<dcp::Size>()) const { return 0; }
+ virtual int prepare (Image::Alignment, boost::optional<dcp::Size> = boost::optional<dcp::Size>()) const { return 0; }
virtual size_t memory_used () const = 0;
};
+
std::shared_ptr<ImageProxy> image_proxy_factory (std::shared_ptr<cxml::Node> xml, std::shared_ptr<Socket> socket);
+
#endif
using std::cout;
using std::dynamic_pointer_cast;
-using std::make_pair;
using std::make_shared;
using std::max;
-using std::pair;
using std::shared_ptr;
using std::string;
using boost::optional;
int
-J2KImageProxy::prepare (bool aligned, optional<dcp::Size> target_size) const
+J2KImageProxy::prepare (Image::Alignment alignment, optional<dcp::Size> target_size) const
{
boost::mutex::scoped_lock lm (_mutex);
try {
/* XXX: should check that potentially trashing _data here doesn't matter */
auto decompressed = dcp::decompress_j2k (const_cast<uint8_t*>(_data->data()), _data->size(), reduce);
- _image = make_shared<Image>(_pixel_format, decompressed->size(), aligned);
+ _image = make_shared<Image>(_pixel_format, decompressed->size(), alignment);
int const shift = 16 - decompressed->precision (0);
}
}
} catch (dcp::J2KDecompressionError& e) {
- _image = make_shared<Image>(_pixel_format, _size, aligned);
+ _image = make_shared<Image>(_pixel_format, _size, alignment);
_image->make_black ();
_error = true;
}
ImageProxy::Result
-J2KImageProxy::image (bool aligned, optional<dcp::Size> target_size) const
+J2KImageProxy::image (Image::Alignment alignment, optional<dcp::Size> target_size) const
{
- int const r = prepare (aligned, target_size);
+ int const r = prepare (alignment, target_size);
/* I think this is safe without a lock on mutex. _image is guaranteed to be
set up when prepare() has happened.
J2KImageProxy (dcp::ArrayData data, dcp::Size size, AVPixelFormat pixel_format);
Result image (
- bool aligned,
+ Image::Alignment alignment,
boost::optional<dcp::Size> size = boost::optional<dcp::Size> ()
) const;
void write_to_socket (std::shared_ptr<Socket>) const;
/** @return true if our image is definitely the same as another, false if it is probably not */
bool same (std::shared_ptr<const ImageProxy>) const;
- int prepare (bool aligned, boost::optional<dcp::Size> = boost::optional<dcp::Size>()) const;
+ int prepare (Image::Alignment alignment, boost::optional<dcp::Size> = boost::optional<dcp::Size>()) const;
std::shared_ptr<const dcp::Data> j2k () const {
return _data;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
-Player::Player (shared_ptr<const Film> film, bool aligned)
+Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
: _film (film)
, _suspended (0)
, _tolerant (film->tolerant())
, _audio_merger (_film->audio_frame_rate())
- , _aligned_subtitles (aligned)
+ , _subtitle_alignment (subtitle_alignment)
{
construct ();
}
_video_container_size = s;
- _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
+ _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
_black_image->make_black ();
}
return {};
}
- return merge (captions, _aligned_subtitles);
+ return merge (captions, _subtitle_alignment);
}
}
dcp::Size scaled_size (width, height);
- ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
+ ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), subtitle.sub.rectangle));
DCPTime from (content_time_to_dcp (piece, subtitle.from()));
_active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
#include "content_video.h"
#include "empty.h"
#include "film.h"
+#include "image.h"
#include "piece.h"
#include "player_text.h"
#include "position_image.h"
class Player : public std::enable_shared_from_this<Player>
{
public:
- Player (std::shared_ptr<const Film>, bool aligned_subtitles);
+ Player (std::shared_ptr<const Film>, Image::Alignment subtitle_alignment);
Player (std::shared_ptr<const Film>, std::shared_ptr<const Playlist> playlist);
Player (Player const& Player) = delete;
dcpomatic::DCPTime _playback_length;
- /** aligned flag for subtitle images that we create */
- bool _aligned_subtitles = true;
+ /** Alignment for subtitle images that we create */
+ Image::Alignment _subtitle_alignment = Image::Alignment::PADDED;
boost::signals2::scoped_connection _film_changed_connection;
boost::signals2::scoped_connection _playlist_change_connection;
if (node->optional_number_child<int>("SubtitleX")) {
auto image = make_shared<Image> (
- AV_PIX_FMT_BGRA, dcp::Size(node->number_child<int>("SubtitleWidth"), node->number_child<int>("SubtitleHeight")), true
+ AV_PIX_FMT_BGRA, dcp::Size(node->number_child<int>("SubtitleWidth"), node->number_child<int>("SubtitleHeight")), Image::Alignment::PADDED
);
image->read_from_socket (socket);
}
shared_ptr<Image>
-PlayerVideo::image (function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, bool aligned, bool fast) const
+PlayerVideo::image (function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, Image::Alignment alignment, bool fast) const
{
/* XXX: this assumes that image() and prepare() are only ever called with the same parameters (except crop, inter size, out size, fade) */
boost::mutex::scoped_lock lm (_mutex);
if (!_image || _crop != _image_crop || _inter_size != _image_inter_size || _out_size != _image_out_size || _fade != _image_fade) {
- make_image (pixel_format, video_range, aligned, fast);
+ make_image (pixel_format, video_range, alignment, fast);
}
return _image;
}
shared_ptr<const Image>
PlayerVideo::raw_image () const
{
- return _in->image(false, _inter_size).image;
+ return _in->image(Image::Alignment::COMPACT, _inter_size).image;
}
* @param pixel_format Function which is called to decide what pixel format the output image should be;
* it is passed the pixel format of the input image from the ImageProxy, and should return the desired
* output pixel format. Two functions force and keep_xyz_or_rgb are provided for use here.
- * @param aligned true if the output image should be aligned to 32-byte boundaries.
+ * @param alignment PADDED if the output image should have each row aligned to an ALIGNMENT-byte (64-byte) boundary, otherwise COMPACT.
* @param fast true to be fast at the expense of quality.
*/
void
-PlayerVideo::make_image (function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, bool aligned, bool fast) const
+PlayerVideo::make_image (function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, Image::Alignment alignment, bool fast) const
{
_image_crop = _crop;
_image_inter_size = _inter_size;
_image_out_size = _out_size;
_image_fade = _fade;
- auto prox = _in->image (true, _inter_size);
+ auto prox = _in->image (Image::Alignment::PADDED, _inter_size);
_error = prox.error;
auto total_crop = _crop;
}
_image = prox.image->crop_scale_window (
- total_crop, _inter_size, _out_size, yuv_to_rgb, _video_range, pixel_format (prox.image->pixel_format()), video_range, aligned, fast
+ total_crop, _inter_size, _out_size, yuv_to_rgb, _video_range, pixel_format (prox.image->pixel_format()), video_range, alignment, fast
);
if (_text) {
- _image->alpha_blend (Image::ensure_aligned(_text->image, true), _text->position);
+ _image->alpha_blend (Image::ensure_alignment(_text->image, Image::Alignment::PADDED), _text->position);
}
if (_fade) {
}
void
-PlayerVideo::prepare (function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, bool aligned, bool fast, bool proxy_only)
+PlayerVideo::prepare (function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, Image::Alignment alignment, bool fast, bool proxy_only)
{
- _in->prepare (aligned, _inter_size);
+ _in->prepare (alignment, _inter_size);
boost::mutex::scoped_lock lm (_mutex);
if (!_image && !proxy_only) {
- make_image (pixel_format, video_range, aligned, fast);
+ make_image (pixel_format, video_range, alignment, fast);
}
}
#define DCPOMATIC_PLAYER_VIDEO_H
-#include "types.h"
-#include "position.h"
-#include "dcpomatic_time.h"
#include "colour_conversion.h"
+#include "dcpomatic_time.h"
+#include "image.h"
+#include "position.h"
#include "position_image.h"
+#include "types.h"
extern "C" {
#include <libavutil/pixfmt.h>
}
return _text;
}
- void prepare (std::function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, bool aligned, bool fast, bool proxy_only);
- std::shared_ptr<Image> image (std::function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, bool aligned, bool fast) const;
+ void prepare (std::function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, Image::Alignment alignment, bool fast, bool proxy_only);
+ std::shared_ptr<Image> image (std::function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, Image::Alignment alignment, bool fast) const;
std::shared_ptr<const Image> raw_image () const;
static AVPixelFormat force (AVPixelFormat, AVPixelFormat);
}
private:
- void make_image (std::function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, bool aligned, bool fast) const;
+ void make_image (std::function<AVPixelFormat (AVPixelFormat)> pixel_format, VideoRange video_range, Image::Alignment alignment, bool fast) const;
std::shared_ptr<const ImageProxy> _in;
Crop _crop;
xml->number_child<int>("Width"), xml->number_child<int>("Height")
);
- _image = make_shared<Image>(static_cast<AVPixelFormat>(xml->number_child<int>("PixelFormat")), size, true);
+ _image = make_shared<Image>(static_cast<AVPixelFormat>(xml->number_child<int>("PixelFormat")), size, Image::Alignment::PADDED);
_image->read_from_socket (socket);
}
ImageProxy::Result
-RawImageProxy::image (bool aligned, optional<dcp::Size>) const
+RawImageProxy::image (Image::Alignment alignment, optional<dcp::Size>) const
{
- /* This ensure_aligned could be wasteful */
- return Result (Image::ensure_aligned(_image, aligned), 0);
+ /* This ensure_alignment could be wasteful */
+ return Result (Image::ensure_alignment(_image, alignment), 0);
}
return false;
}
- return (*_image.get()) == (*rp->image(_image->aligned()).image.get());
+ return (*_image.get()) == (*rp->image(_image->alignment()).image.get());
}
RawImageProxy (std::shared_ptr<cxml::Node> xml, std::shared_ptr<Socket> socket);
Result image (
- bool aligned,
+ Image::Alignment alignment,
boost::optional<dcp::Size> size = boost::optional<dcp::Size> ()
) const;
create_image (dcp::Size size)
{
/* FFmpeg BGRA means first byte blue, second byte green, third byte red, fourth byte alpha */
- auto image = make_shared<Image>(AV_PIX_FMT_BGRA, size, false);
+ auto image = make_shared<Image>(AV_PIX_FMT_BGRA, size, Image::Alignment::COMPACT);
image->make_black ();
return image;
}
* @brief Some utility functions and classes.
*/
+
#define UNICODE 1
-#include "util.h"
-#include "exceptions.h"
-#include "dcp_content_type.h"
-#include "filter.h"
+
+#include "audio_buffers.h"
+#include "audio_processor.h"
#include "cinema_sound_processor.h"
+#include "compose.hpp"
#include "config.h"
-#include "ratio.h"
-#include "job.h"
#include "cross.h"
-#include "video_content.h"
-#include "rect.h"
-#include "digester.h"
-#include "audio_processor.h"
#include "crypto.h"
-#include "compose.hpp"
-#include "audio_buffers.h"
-#include "string_text.h"
-#include "font.h"
-#include "render_text.h"
+#include "dcp_content_type.h"
+#include "digester.h"
+#include "exceptions.h"
#include "ffmpeg_image_proxy.h"
+#include "filter.h"
+#include "font.h"
#include "image.h"
-#include "text_decoder.h"
+#include "job.h"
#include "job_manager.h"
+#include "ratio.h"
+#include "rect.h"
+#include "render_text.h"
+#include "string_text.h"
+#include "text_decoder.h"
+#include "util.h"
+#include "video_content.h"
#include "warnings.h"
#include <dcp/decrypted_kdm.h>
#include <dcp/locale_convert.h>
#include "i18n.h"
-using std::string;
-using std::wstring;
-using std::setfill;
-using std::ostream;
+
+using std::bad_alloc;
+using std::cout;
using std::endl;
-using std::vector;
-using std::min;
-using std::max;
-using std::map;
-using std::list;
-using std::multimap;
using std::istream;
+using std::list;
+using std::make_pair;
+using std::make_shared;
+using std::map;
+using std::min;
+using std::ostream;
using std::pair;
-using std::cout;
-using std::bad_alloc;
using std::set_terminate;
-using std::make_pair;
using std::shared_ptr;
-using std::make_shared;
+using std::string;
+using std::vector;
+using std::wstring;
using boost::thread;
using boost::optional;
using boost::lexical_cast;
using dcp::locale_convert;
using namespace dcpomatic;
+
/** Path to our executable, required by the stacktrace stuff and filled
* in during App::onInit().
*/
{
/* XXX: this is rather inefficient; decoding the image just to get its size */
FFmpegImageProxy proxy (sub.png_image());
- auto image = proxy.image(false).image;
+ auto image = proxy.image(Image::Alignment::COMPACT).image;
/* set up rect with height and width */
dcpomatic::Rect<double> rect(0, 0, image->size().width / double(size.width), image->size().height / double(size.height));
using std::pair;
using std::shared_ptr;
using std::string;
-using std::vector;
VideoFilterGraph::VideoFilterGraph (dcp::Size s, AVPixelFormat p, dcp::Fraction r)
list<pair<shared_ptr<Image>, int64_t>> images;
if (_copy) {
- images.push_back (make_pair(make_shared<Image>(frame, true), frame->best_effort_timestamp));
+ images.push_back (make_pair(make_shared<Image>(frame, Image::Alignment::PADDED), frame->best_effort_timestamp));
} else {
int r = av_buffersrc_write_frame (_buffer_src_context, frame);
if (r < 0) {
break;
}
- images.push_back (make_pair(make_shared<Image>(_frame, true), frame->best_effort_timestamp));
+ images.push_back (make_pair(make_shared<Image>(_frame, Image::Alignment::PADDED), frame->best_effort_timestamp));
av_frame_unref (_frame);
}
}
film = make_shared<Film>(film_dir);
film->read_metadata ();
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
player->Video.connect (bind(&process_video, _1));
while (!player->pass ()) {}
} catch (std::exception& e) {
using std::bad_alloc;
using std::cout;
using std::dynamic_pointer_cast;
-using std::exception;
-using std::list;
-using std::make_pair;
using std::make_shared;
using std::max;
-using std::min;
-using std::pair;
using std::shared_ptr;
using std::string;
using std::vector;
-using std::weak_ptr;
using boost::optional;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
}
try {
- _player = make_shared<Player>(_film, !_optimise_for_j2k);
+ _player = make_shared<Player>(_film, _optimise_for_j2k ? Image::Alignment::COMPACT : Image::Alignment::PADDED);
_player->set_fast ();
if (_dcp_decode_reduction) {
_player->set_dcp_decode_reduction (_dcp_decode_reduction);
_audio_channels,
bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24),
VideoRange::FULL,
- !_optimise_for_j2k,
+ _optimise_for_j2k ? Image::Alignment::COMPACT : Image::Alignment::PADDED,
true,
dynamic_pointer_cast<GLVideoView>(_video_view) && _optimise_for_j2k
);
void
GLVideoView::set_image (shared_ptr<const PlayerVideo> pv)
{
- shared_ptr<const Image> video = _optimise_for_j2k ? pv->raw_image() : pv->image(bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, false, true);
+ shared_ptr<const Image> video = _optimise_for_j2k ? pv->raw_image() : pv->image(bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, Image::Alignment::COMPACT, true);
/* Only the player's black frames should be aligned at this stage, so this should
* almost always have no work to do.
*/
- video = Image::ensure_aligned (video, false);
+ video = Image::ensure_alignment (video, Image::Alignment::COMPACT);
/** If _optimise_for_j2k is true we render a XYZ image, doing the colourspace
* conversion, scaling and video range conversion in the GL shader.
_have_subtitle_to_render = static_cast<bool>(text) && _optimise_for_j2k;
if (_have_subtitle_to_render) {
/* opt: only do this if it's a new subtitle? */
- DCPOMATIC_ASSERT (!text->image->aligned());
+ DCPOMATIC_ASSERT (text->image->alignment() == Image::Alignment::COMPACT);
_subtitle_texture->set (text->image);
}
glPixelStorei (GL_UNPACK_ALIGNMENT, _unpack_alignment);
check_gl_error ("glPixelStorei");
- DCPOMATIC_ASSERT (!image->aligned());
+ DCPOMATIC_ASSERT (image->alignment() == Image::Alignment::COMPACT);
GLint internal_format;
GLenum format;
/*
- Copyright (C) 2019 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2019-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
*/
-#include "simple_video_view.h"
+
+#include "closed_captions_dialog.h"
#include "film_viewer.h"
+#include "simple_video_view.h"
#include "wx_util.h"
-#include "closed_captions_dialog.h"
-#include "lib/image.h"
-#include "lib/dcpomatic_log.h"
#include "lib/butler.h"
+#include "lib/dcpomatic_log.h"
+#include "lib/image.h"
#include <dcp/util.h>
#include <wx/wx.h>
#include <boost/bind/bind.hpp>
+
using std::max;
+using std::shared_ptr;
using std::string;
using boost::optional;
-using std::shared_ptr;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
#endif
_timer.Bind (wxEVT_TIMER, boost::bind(&SimpleVideoView::timer, this));
}
+
void
SimpleVideoView::paint ()
{
_state_timer.unset();
}
+
void
SimpleVideoView::refresh_panel ()
{
_state_timer.unset ();
}
+
void
SimpleVideoView::timer ()
{
}
}
+
void
SimpleVideoView::start ()
{
timer ();
}
+
/** Try to get a frame from the butler and display it.
* @param non_blocking true to return false quickly if no video is available quickly (i.e. we are waiting for the butler).
* false to ask the butler to block until it has video (unless it is suspended).
return SUCCESS;
}
+
void
SimpleVideoView::update ()
{
_state_timer.set ("get image");
set_image (
- player_video().first->image(bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, false, true)
+ player_video().first->image(bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, Image::Alignment::COMPACT, true)
);
_state_timer.set ("ImageChanged");
auto const image_size = _image->size();
int const waveform_height = GetSize().GetHeight() - _vertical_margin * 2;
- _waveform = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (image_size.width, waveform_height), true);
+ _waveform = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (image_size.width, waveform_height), Image::Alignment::PADDED);
for (int x = 0; x < image_size.width; ++x) {
_waveform = _waveform->scale (
dcp::Size (GetSize().GetWidth() - _x_axis_width, waveform_height),
- dcp::YUVToRGB::REC709, AV_PIX_FMT_RGB24, false, false
+ dcp::YUVToRGB::REC709, AV_PIX_FMT_RGB24, Image::Alignment::COMPACT, false
);
}
map.set (i, i, 1);
}
- Butler butler (film, make_shared<Player>(film, false), map, 6, bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, false, false, false);
+ Butler butler (film, make_shared<Player>(film, Image::Alignment::COMPACT), map, 6, bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, Image::Alignment::COMPACT, false, false);
BOOST_CHECK (butler.get_video(true, 0).second == DCPTime());
BOOST_CHECK (butler.get_video(true, 0).second == DCPTime::from_frames(1, 24));
BOOST_AUTO_TEST_CASE (client_server_test_rgb)
{
- auto image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (1998, 1080), true);
+ auto image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (1998, 1080), Image::Alignment::PADDED);
uint8_t* p = image->data()[0];
for (int y = 0; y < 1080; ++y) {
p += image->stride()[0];
}
- auto sub_image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (100, 200), true);
+ auto sub_image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (100, 200), Image::Alignment::PADDED);
p = sub_image->data()[0];
for (int y = 0; y < 200; ++y) {
uint8_t* q = p;
BOOST_AUTO_TEST_CASE (client_server_test_yuv)
{
- auto image = make_shared<Image>(AV_PIX_FMT_YUV420P, dcp::Size (1998, 1080), true);
+ auto image = make_shared<Image>(AV_PIX_FMT_YUV420P, dcp::Size (1998, 1080), Image::Alignment::PADDED);
for (int i = 0; i < image->planes(); ++i) {
uint8_t* p = image->data()[i];
}
}
- auto sub_image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (100, 200), true);
+ auto sub_image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (100, 200), Image::Alignment::PADDED);
uint8_t* p = sub_image->data()[0];
for (int y = 0; y < 200; ++y) {
uint8_t* q = p;
BOOST_AUTO_TEST_CASE (client_server_test_j2k)
{
- auto image = make_shared<Image>(AV_PIX_FMT_YUV420P, dcp::Size (1998, 1080), true);
+ auto image = make_shared<Image>(AV_PIX_FMT_YUV420P, dcp::Size (1998, 1080), Image::Alignment::PADDED);
for (int i = 0; i < image->planes(); ++i) {
uint8_t* p = image->data()[i];
ov_content = make_shared<DCPContent>(ov->dir(ov->dcp_name(false)));
test->examine_and_add_content (ov_content);
BOOST_REQUIRE (!wait_for_jobs());
- auto player = make_shared<Player>(test, false);
+ auto player = make_shared<Player>(test, Image::Alignment::COMPACT);
auto decoder = std::dynamic_pointer_cast<DCPDecoder>(player->_pieces.front()->decoder);
BOOST_REQUIRE (decoder);
auto vf_content = make_shared<DCPContent>(vf->dir(vf->dcp_name(false)));
test->examine_and_add_content (vf_content);
BOOST_REQUIRE (!wait_for_jobs());
- player = make_shared<Player>(test, false);
+ player = make_shared<Player>(test, Image::Alignment::COMPACT);
decoder = std::dynamic_pointer_cast<DCPDecoder>(player->_pieces.front()->decoder);
BOOST_REQUIRE (decoder);
auto encrypted_content = make_shared<DCPContent>(encrypted->dir(encrypted->dcp_name(false)));
test->examine_and_add_content (encrypted_content);
BOOST_REQUIRE (!wait_for_jobs());
- player = make_shared<Player>(test, false);
+ player = make_shared<Player>(test, Image::Alignment::COMPACT);
decoder = std::dynamic_pointer_cast<DCPDecoder>(player->_pieces.front()->decoder);
BOOST_REQUIRE (decoder);
auto butler = std::make_shared<Butler>(
film,
- make_shared<Player>(film, false),
+ make_shared<Player>(film, Image::Alignment::COMPACT),
AudioMapping(6, 6),
6,
bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24),
VideoRange::FULL,
- false,
+ Image::Alignment::COMPACT,
true,
false
);
}
/* assuming DCP is 24fps/48kHz */
butler->get_audio (audio_buffer, 2000);
- p.first->image(bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, false, true);
+ p.first->image(bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, Image::Alignment::COMPACT, true);
}
delete[] audio_buffer;
}
ref_buffer_size = info.samplerate * info.channels;
ref_buffer = new float[ref_buffer_size];
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
player->Audio.connect (bind (&audio, _1, info.channels));
while (!player->pass ()) {}
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
while (!player->pass ()) {}
}
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
player->set_fast ();
while (!player->pass ()) {}
}
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
player->set_fast ();
BOOST_CHECK_NO_THROW (while (!player->pass()) {});
}
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs());
film->write_metadata ();
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
BOOST_REQUIRE (content->video_frame_rate());
BOOST_CHECK_CLOSE (content->video_frame_rate().get(), fps, 0.01);
using std::cout;
using std::list;
using std::make_shared;
-using std::shared_ptr;
using std::string;
BOOST_AUTO_TEST_CASE (aligned_image_test)
{
- auto s = new Image (AV_PIX_FMT_RGB24, dcp::Size (50, 50), true);
+ auto s = new Image (AV_PIX_FMT_RGB24, dcp::Size (50, 50), Image::Alignment::PADDED);
BOOST_CHECK_EQUAL (s->planes(), 1);
/* 192 is 150 aligned to the nearest 64 bytes */
BOOST_CHECK_EQUAL (s->stride()[0], 192);
BOOST_CHECK_EQUAL (t->stride()[0], s->stride()[0]);
/* assignment operator */
- auto u = new Image (AV_PIX_FMT_YUV422P, dcp::Size (150, 150), false);
+ auto u = new Image (AV_PIX_FMT_YUV422P, dcp::Size (150, 150), Image::Alignment::COMPACT);
*u = *s;
BOOST_CHECK_EQUAL (u->planes(), 1);
BOOST_CHECK_EQUAL (u->stride()[0], 192);
BOOST_AUTO_TEST_CASE (compact_image_test)
{
- auto s = new Image (AV_PIX_FMT_RGB24, dcp::Size (50, 50), false);
+ auto s = new Image (AV_PIX_FMT_RGB24, dcp::Size (50, 50), Image::Alignment::COMPACT);
BOOST_CHECK_EQUAL (s->planes(), 1);
BOOST_CHECK_EQUAL (s->stride()[0], 50 * 3);
BOOST_CHECK_EQUAL (s->line_size()[0], 50 * 3);
BOOST_CHECK_EQUAL (t->stride()[0], s->stride()[0]);
/* assignment operator */
- auto u = new Image (AV_PIX_FMT_YUV422P, dcp::Size (150, 150), true);
+ auto u = new Image (AV_PIX_FMT_YUV422P, dcp::Size (150, 150), Image::Alignment::PADDED);
*u = *s;
BOOST_CHECK_EQUAL (u->planes(), 1);
BOOST_CHECK_EQUAL (u->stride()[0], 50 * 3);
alpha_blend_test_one (AVPixelFormat format, string suffix)
{
auto proxy = make_shared<FFmpegImageProxy>(TestPaths::private_data() / "prophet_frame.tiff");
- auto raw = proxy->image(false).image;
- auto background = raw->convert_pixel_format (dcp::YUVToRGB::REC709, format, true, false);
+ auto raw = proxy->image(Image::Alignment::COMPACT).image;
+ auto background = raw->convert_pixel_format (dcp::YUVToRGB::REC709, format, Image::Alignment::PADDED, false);
- auto overlay = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size(431, 891), true);
+ auto overlay = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size(431, 891), Image::Alignment::PADDED);
overlay->make_transparent ();
for (int y = 0; y < 128; ++y) {
background->alpha_blend (overlay, Position<int> (13, 17));
- auto save = background->convert_pixel_format (dcp::YUVToRGB::REC709, AV_PIX_FMT_RGB24, false, false);
+ auto save = background->convert_pixel_format (dcp::YUVToRGB::REC709, AV_PIX_FMT_RGB24, Image::Alignment::COMPACT, false);
write_image (save, "build/test/image_test_" + suffix + ".png");
check_image ("build/test/image_test_" + suffix + ".png", TestPaths::private_data() / ("image_test_" + suffix + ".png"));
{
int const stride = 48 * 4;
- auto A = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (48, 48), false);
+ auto A = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (48, 48), Image::Alignment::COMPACT);
A->make_transparent ();
auto a = A->data()[0];
list<PositionImage> all;
all.push_back (PositionImage (A, Position<int>(0, 0)));
- auto merged = merge (all, false);
+ auto merged = merge (all, Image::Alignment::COMPACT);
BOOST_CHECK (merged.position == Position<int>(0, 0));
BOOST_CHECK_EQUAL (memcmp (merged.image->data()[0], A->data()[0], stride * 48), 0);
/** Test merge (list<PositionImage>) with two images */
BOOST_AUTO_TEST_CASE (merge_test2)
{
- auto A = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (48, 1), false);
+ auto A = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (48, 1), Image::Alignment::COMPACT);
A->make_transparent ();
auto a = A->data()[0];
for (int x = 0; x < 16; ++x) {
a[x * 4 + 3] = 255;
}
- auto B = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (48, 1), false);
+ auto B = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (48, 1), Image::Alignment::COMPACT);
B->make_transparent ();
auto b = B->data()[0];
for (int x = 0; x < 16; ++x) {
list<PositionImage> all;
all.push_back (PositionImage(A, Position<int>(0, 0)));
all.push_back (PositionImage(B, Position<int>(0, 0)));
- auto merged = merge (all, false);
+ auto merged = merge (all, Image::Alignment::COMPACT);
BOOST_CHECK (merged.position == Position<int>(0, 0));
BOOST_AUTO_TEST_CASE (crop_scale_window_test)
{
auto proxy = make_shared<FFmpegImageProxy>("test/data/flat_red.png");
- auto raw = proxy->image(false).image;
+ auto raw = proxy->image(Image::Alignment::COMPACT).image;
auto out = raw->crop_scale_window(
- Crop(), dcp::Size(1998, 836), dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_YUV420P, VideoRange::FULL, true, false
+ Crop(), dcp::Size(1998, 836), dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_YUV420P, VideoRange::FULL, Image::Alignment::PADDED, false
);
- auto save = out->scale(dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, AV_PIX_FMT_RGB24, false, false);
+ auto save = out->scale(dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, AV_PIX_FMT_RGB24, Image::Alignment::COMPACT, false);
write_image(save, "build/test/crop_scale_window_test.png");
check_image("test/data/crop_scale_window_test.png", "build/test/crop_scale_window_test.png");
}
/** Special cases of Image::crop_scale_window which triggered some valgrind warnings */
BOOST_AUTO_TEST_CASE (crop_scale_window_test2)
{
- auto image = make_shared<Image>(AV_PIX_FMT_XYZ12LE, dcp::Size(2048, 858), true);
+ auto image = make_shared<Image>(AV_PIX_FMT_XYZ12LE, dcp::Size(2048, 858), Image::Alignment::PADDED);
image->crop_scale_window (
- Crop(279, 0, 0, 0), dcp::Size(1069, 448), dcp::Size(1069, 578), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_RGB24, VideoRange::FULL, false, false
+ Crop(279, 0, 0, 0), dcp::Size(1069, 448), dcp::Size(1069, 578), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_RGB24, VideoRange::FULL, Image::Alignment::COMPACT, false
);
image->crop_scale_window (
- Crop(2048, 0, 0, 0), dcp::Size(1069, 448), dcp::Size(1069, 578), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_RGB24, VideoRange::FULL, false, false
+ Crop(2048, 0, 0, 0), dcp::Size(1069, 448), dcp::Size(1069, 578), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_RGB24, VideoRange::FULL, Image::Alignment::COMPACT, false
);
}
BOOST_AUTO_TEST_CASE (crop_scale_window_test3)
{
auto proxy = make_shared<FFmpegImageProxy>(TestPaths::private_data() / "player_seek_test_0.png");
- auto xyz = proxy->image(false).image->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGB24, true, false);
+ auto xyz = proxy->image(Image::Alignment::COMPACT).image->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGB24, Image::Alignment::PADDED, false);
auto cropped = xyz->crop_scale_window(
- Crop(512, 0, 0, 0), dcp::Size(1486, 1080), dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_RGB24, VideoRange::FULL, false, false
+ Crop(512, 0, 0, 0), dcp::Size(1486, 1080), dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_RGB24, VideoRange::FULL, Image::Alignment::COMPACT, false
);
write_image(cropped, "build/test/crop_scale_window_test3.png");
check_image("test/data/crop_scale_window_test3.png", "build/test/crop_scale_window_test3.png");
BOOST_AUTO_TEST_CASE (crop_scale_window_test4)
{
auto proxy = make_shared<FFmpegImageProxy>(TestPaths::private_data() / "player_seek_test_0.png");
- auto xyz = proxy->image(false).image->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGB24, true, false);
+ auto xyz = proxy->image(Image::Alignment::COMPACT).image->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGB24, Image::Alignment::PADDED, false);
auto cropped = xyz->crop_scale_window(
- Crop(512, 0, 0, 0), dcp::Size(1486, 1080), dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_XYZ12LE, VideoRange::FULL, false, false
+ Crop(512, 0, 0, 0), dcp::Size(1486, 1080), dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_XYZ12LE, VideoRange::FULL, Image::Alignment::COMPACT, false
);
write_image(cropped, "build/test/crop_scale_window_test4.png");
check_image("test/data/crop_scale_window_test4.png", "build/test/crop_scale_window_test4.png", 35000);
BOOST_AUTO_TEST_CASE (crop_scale_window_test5)
{
auto proxy = make_shared<FFmpegImageProxy>(TestPaths::private_data() / "player_seek_test_0.png");
- auto xyz = proxy->image(false).image->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_XYZ12LE, true, false);
+ auto xyz = proxy->image(Image::Alignment::COMPACT).image->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_XYZ12LE, Image::Alignment::PADDED, false);
auto cropped = xyz->crop_scale_window(
- Crop(512, 0, 0, 0), dcp::Size(1486, 1080), dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_RGB24, VideoRange::FULL, false, false
+ Crop(512, 0, 0, 0), dcp::Size(1486, 1080), dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_RGB24, VideoRange::FULL, Image::Alignment::COMPACT, false
);
write_image(cropped, "build/test/crop_scale_window_test5.png");
check_image("test/data/crop_scale_window_test5.png", "build/test/crop_scale_window_test5.png");
BOOST_AUTO_TEST_CASE (crop_scale_window_test6)
{
auto proxy = make_shared<FFmpegImageProxy>(TestPaths::private_data() / "player_seek_test_0.png");
- auto xyz = proxy->image(false).image->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_XYZ12LE, true, false);
+ auto xyz = proxy->image(Image::Alignment::COMPACT).image->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_XYZ12LE, Image::Alignment::PADDED, false);
auto cropped = xyz->crop_scale_window(
- Crop(512, 0, 0, 0), dcp::Size(1486, 1080), dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_XYZ12LE, VideoRange::FULL, false, false
+ Crop(512, 0, 0, 0), dcp::Size(1486, 1080), dcp::Size(1998, 1080), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_XYZ12LE, VideoRange::FULL, Image::Alignment::COMPACT, false
);
write_image(cropped, "build/test/crop_scale_window_test6.png");
check_image("test/data/crop_scale_window_test6.png", "build/test/crop_scale_window_test6.png", 35000);
using namespace boost::filesystem;
for (int left_crop = 0; left_crop < 8; ++left_crop) {
auto proxy = make_shared<FFmpegImageProxy>("test/data/rgb_grey_testcard.png");
- auto yuv = proxy->image(false).image->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_YUV420P, true, false);
+ auto yuv = proxy->image(Image::Alignment::COMPACT).image->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_YUV420P, Image::Alignment::PADDED, false);
int rounded = left_crop - (left_crop % 2);
auto cropped = yuv->crop_scale_window(
Crop(left_crop, 0, 0, 0),
VideoRange::VIDEO,
AV_PIX_FMT_RGB24,
VideoRange::VIDEO,
- true,
+ Image::Alignment::PADDED,
false
);
path file = String::compose("crop_scale_window_test7-%1.png", left_crop);
BOOST_AUTO_TEST_CASE (as_png_test)
{
auto proxy = make_shared<FFmpegImageProxy>("test/data/3d_test/000001.png");
- auto image_rgb = proxy->image(false).image;
- auto image_bgr = image_rgb->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_BGRA, true, false);
+ auto image_rgb = proxy->image(Image::Alignment::COMPACT).image;
+ auto image_bgr = image_rgb->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_BGRA, Image::Alignment::PADDED, false);
image_rgb->as_png().write ("build/test/as_png_rgb.png");
image_bgr->as_png().write ("build/test/as_png_bgr.png");
static void
fade_test_format_black (AVPixelFormat f, string name)
{
- Image yuv (f, dcp::Size(640, 480), true);
+ Image yuv (f, dcp::Size(640, 480), Image::Alignment::PADDED);
yuv.make_black ();
yuv.fade (0);
string const filename = "fade_test_black_" + name + ".png";
- yuv.convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGBA, true, false)->as_png().write("build/test/" + filename);
+ yuv.convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGBA, Image::Alignment::PADDED, false)->as_png().write("build/test/" + filename);
check_image ("test/data/" + filename, "build/test/" + filename);
}
fade_test_format_red (AVPixelFormat f, float amount, string name)
{
auto proxy = make_shared<FFmpegImageProxy>("test/data/flat_red.png");
- auto red = proxy->image(false).image->convert_pixel_format(dcp::YUVToRGB::REC709, f, true, false);
+ auto red = proxy->image(Image::Alignment::COMPACT).image->convert_pixel_format(dcp::YUVToRGB::REC709, f, Image::Alignment::PADDED, false);
red->fade (amount);
string const filename = "fade_test_red_" + name + ".png";
- red->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGBA, true, false)->as_png().write("build/test/" + filename);
+ red->convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGBA, Image::Alignment::PADDED, false)->as_png().write("build/test/" + filename);
check_image ("test/data/" + filename, "build/test/" + filename);
}
int N = 0;
for (auto i: pix_fmts) {
- auto foo = make_shared<Image>(i, in_size, true);
+ auto foo = make_shared<Image>(i, in_size, Image::Alignment::PADDED);
foo->make_black ();
- auto bar = foo->scale (out_size, dcp::YUVToRGB::REC601, AV_PIX_FMT_RGB24, true, false);
+ auto bar = foo->scale (out_size, dcp::YUVToRGB::REC601, AV_PIX_FMT_RGB24, Image::Alignment::PADDED, false);
uint8_t* p = bar->data()[0];
for (int y = 0; y < bar->size().height; ++y) {
BOOST_AUTO_TEST_CASE (make_part_black_test)
{
auto proxy = make_shared<FFmpegImageProxy>("test/data/flat_red.png");
- auto original = proxy->image(false).image;
+ auto original = proxy->image(Image::Alignment::COMPACT).image;
list<AVPixelFormat> pix_fmts = {
AV_PIX_FMT_RGB24,
int N = 0;
for (auto i: pix_fmts) {
for (auto j: positions) {
- auto foo = original->convert_pixel_format(dcp::YUVToRGB::REC601, i, true, false);
+ auto foo = original->convert_pixel_format(dcp::YUVToRGB::REC601, i, Image::Alignment::PADDED, false);
foo->make_part_black (j.first, j.second);
- auto bar = foo->convert_pixel_format (dcp::YUVToRGB::REC601, AV_PIX_FMT_RGB24, true, false);
+ auto bar = foo->convert_pixel_format (dcp::YUVToRGB::REC601, AV_PIX_FMT_RGB24, Image::Alignment::PADDED, false);
auto p = bar->data()[0];
for (int y = 0; y < bar->size().height; ++y) {
*/
BOOST_AUTO_TEST_CASE (over_crop_test)
{
- auto image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size(128, 128), true);
+ auto image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size(128, 128), Image::Alignment::PADDED);
image->make_black ();
auto scaled = image->crop_scale_window (
- Crop(0, 0, 128, 128), dcp::Size(1323, 565), dcp::Size(1349, 565), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_RGB24, VideoRange::FULL, true, true
+ Crop(0, 0, 128, 128), dcp::Size(1323, 565), dcp::Size(1349, 565), dcp::YUVToRGB::REC709, VideoRange::FULL, AV_PIX_FMT_RGB24, VideoRange::FULL, Image::Alignment::PADDED, true
);
string const filename = "over_crop_test.png";
write_image (scaled, "build/test/" + filename);
BOOST_AUTO_TEST_CASE (low_bitrate_test)
{
- auto image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size(1998, 1080), true);
+ auto image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size(1998, 1080), Image::Alignment::PADDED);
image->make_black ();
+
auto proxy = make_shared<RawImageProxy>(image);
- auto frame = make_shared<PlayerVideo>(proxy, Crop(), boost::optional<double>(), dcp::Size(1998, 1080), dcp::Size(1998, 1080), Eyes::BOTH, Part::WHOLE, boost::optional<ColourConversion>(), VideoRange::FULL, std::weak_ptr<Content>(), boost::optional<Frame>(), false);
+
+ auto frame = make_shared<PlayerVideo>(
+ proxy,
+ Crop(),
+ boost::optional<double>(),
+ dcp::Size(1998, 1080),
+ dcp::Size(1998, 1080),
+ Eyes::BOTH,
+ Part::WHOLE,
+ boost::optional<ColourConversion>(),
+ VideoRange::FULL,
+ std::weak_ptr<Content>(),
+ boost::optional<Frame>(),
+ false
+ );
+
auto dcp_video = make_shared<DCPVideo>(frame, 0, 24, 100000000, Resolution::TWO_K);
auto j2k = dcp_video->encode_locally();
BOOST_REQUIRE (j2k.size() >= 16536);
using std::dynamic_pointer_cast;
using std::make_shared;
-using std::shared_ptr;
BOOST_AUTO_TEST_CASE (overlap_video_test1)
B->video->set_length (24);
B->set_position (film, dcpomatic::DCPTime::from_seconds(1));
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
auto pieces = player->_pieces;
BOOST_REQUIRE_EQUAL (pieces.size(), 2U);
BOOST_CHECK_EQUAL (pieces.front()->content, A);
f->height = 480;
f->format = static_cast<int> (i.format);
av_frame_get_buffer (f, true);
- Image t (f, false);
+ Image t (f, Image::Alignment::COMPACT);
BOOST_CHECK_EQUAL(t.planes(), i.planes);
BOOST_CHECK_EQUAL(t.sample_size(0).height, i.lines[0]);
BOOST_CHECK_EQUAL(t.sample_size(1).height, i.lines[1]);
*/
-#include "lib/film.h"
-#include "lib/ffmpeg_content.h"
-#include "lib/dcp_content_type.h"
-#include "lib/ratio.h"
#include "lib/audio_buffers.h"
-#include "lib/player.h"
-#include "lib/video_content.h"
-#include "lib/image_content.h"
-#include "lib/string_text_file_content.h"
-#include "lib/content_factory.h"
-#include "lib/dcp_content.h"
-#include "lib/text_content.h"
#include "lib/butler.h"
#include "lib/compose.hpp"
+#include "lib/content_factory.h"
#include "lib/cross.h"
+#include "lib/dcp_content.h"
+#include "lib/dcp_content_type.h"
+#include "lib/ffmpeg_content.h"
+#include "lib/film.h"
+#include "lib/image_content.h"
+#include "lib/player.h"
+#include "lib/ratio.h"
+#include "lib/string_text_file_content.h"
+#include "lib/text_content.h"
+#include "lib/video_content.h"
#include "test.h"
#include <boost/test/unit_test.hpp>
#include <boost/algorithm/string.hpp>
using std::cout;
using std::list;
-using std::pair;
using std::shared_ptr;
using std::make_shared;
using boost::bind;
accumulated = std::make_shared<AudioBuffers>(film->audio_channels(), 0);
- auto player = std::make_shared<Player>(film, false);
+ auto player = std::make_shared<Player>(film, Image::Alignment::COMPACT);
player->Audio.connect (bind (&accumulate, _1, _2));
while (!player->pass ()) {}
BOOST_REQUIRE (accumulated->frames() >= 48000);
/* Length should be rounded up from B's length to the next video frame */
BOOST_CHECK (film->length() == DCPTime::from_frames(3 * 24 + 1, 24));
- auto player = std::make_shared<Player>(film, false);
+ auto player = std::make_shared<Player>(film, Image::Alignment::COMPACT);
player->setup_pieces ();
BOOST_REQUIRE_EQUAL (player->_black._periods.size(), 1U);
BOOST_CHECK (player->_black._periods.front() == DCPTimePeriod(DCPTime::from_frames(3 * 24, 24), DCPTime::from_frames(3 * 24 + 1, 24)));
film->examine_and_add_content (s);
BOOST_REQUIRE (!wait_for_jobs ());
- auto player = std::make_shared<Player>(film, false);
+ auto player = std::make_shared<Player>(film, Image::Alignment::COMPACT);
player->Video.connect (bind (&video, _1, _2));
player->Audio.connect (bind (&audio, _1, _2));
video_frames = audio_frames = 0;
BOOST_REQUIRE (!wait_for_jobs ());
dcp->only_text()->set_use (true);
- auto player = std::make_shared<Player>(film, false);
+ auto player = std::make_shared<Player>(film, Image::Alignment::COMPACT);
player->set_fast ();
player->set_always_burn_open_subtitles ();
player->set_play_referenced ();
- auto butler = std::make_shared<Butler>(film, player, AudioMapping(), 2, bind(PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, false, true, false);
+ auto butler = std::make_shared<Butler>(film, player, AudioMapping(), 2, bind(PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, Image::Alignment::COMPACT, true, false);
butler->disable_audio();
for (int i = 0; i < 10; ++i) {
butler->seek (t, true);
auto video = butler->get_video(true, 0);
BOOST_CHECK_EQUAL(video.second.get(), t.get());
- write_image(video.first->image(bind(PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, false, true), String::compose("build/test/player_seek_test_%1.png", i));
+ write_image(video.first->image(bind(PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, Image::Alignment::COMPACT, true), String::compose("build/test/player_seek_test_%1.png", i));
/* This 14.08 is empirically chosen (hopefully) to accept changes in rendering between the reference and a test machine
(17.10 and 16.04 seem to anti-alias a little differently) but to reject gross errors e.g. missing fonts or missing
text altogether.
BOOST_REQUIRE (!wait_for_jobs ());
dcp->only_text()->set_use (true);
- auto player = std::make_shared<Player>(film, false);
+ auto player = std::make_shared<Player>(film, Image::Alignment::COMPACT);
player->set_fast ();
player->set_always_burn_open_subtitles ();
player->set_play_referenced ();
- auto butler = std::make_shared<Butler>(film, player, AudioMapping(), 2, bind(PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, false, true, false);
+ auto butler = std::make_shared<Butler>(film, player, AudioMapping(), 2, bind(PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, Image::Alignment::COMPACT, true, false);
butler->disable_audio();
butler->seek(DCPTime::from_seconds(5), true);
auto video = butler->get_video(true, 0);
BOOST_CHECK_EQUAL(video.second.get(), t.get());
write_image(
- video.first->image(bind(PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, false, true), String::compose("build/test/player_seek_test2_%1.png", i)
+ video.first->image(bind(PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, Image::Alignment::COMPACT, true), String::compose("build/test/player_seek_test2_%1.png", i)
);
check_image(TestPaths::private_data() / String::compose("player_seek_test2_%1.png", i), String::compose("build/test/player_seek_test2_%1.png", i), 14.08);
}
text->only_text()->set_type (TextType::CLOSED_CAPTION);
text->only_text()->set_use (true);
- auto player = std::make_shared<Player>(film, false);
+ auto player = std::make_shared<Player>(film, Image::Alignment::COMPACT);
player->set_ignore_video ();
player->set_ignore_audio ();
film->examine_and_add_content (boon);
BOOST_REQUIRE (!wait_for_jobs());
- auto player = std::make_shared<Player>(film, false);
+ auto player = std::make_shared<Player>(film, Image::Alignment::COMPACT);
player->set_fast ();
- auto butler = std::make_shared<Butler>(film, player, AudioMapping(), 6, bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, false, true, false);
+ auto butler = std::make_shared<Butler>(film, player, AudioMapping(), 6, bind(&PlayerVideo::force, _1, AV_PIX_FMT_RGB24), VideoRange::FULL, Image::Alignment::COMPACT, true, false);
/* Wait for the butler to fill */
dcpomatic_sleep_seconds (5);
rms_error (boost::filesystem::path ref, boost::filesystem::path check)
{
FFmpegImageProxy ref_proxy (ref);
- auto ref_image = ref_proxy.image(false).image;
+ auto ref_image = ref_proxy.image(Image::Alignment::COMPACT).image;
FFmpegImageProxy check_proxy (check);
- auto check_image = check_proxy.image(false).image;
+ auto check_image = check_proxy.image(Image::Alignment::COMPACT).image;
BOOST_REQUIRE_EQUAL (ref_image->pixel_format(), check_image->pixel_format());
AVPixelFormat const format = ref_image->pixel_format();
using std::list;
using std::make_shared;
-using std::shared_ptr;
using std::string;
using namespace dcpomatic;
film->set_sequence (false);
film->add_content (content);
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
/* Position 0, no trim, content rate = DCP rate */
content->set_position (film, DCPTime());
film->set_sequence (false);
film->add_content (content);
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
/* Position 0, no trim, content rate = DCP rate */
content->set_position (film, DCPTime());
film->set_sequence (false);
film->add_content (content);
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
/* Position 0, no trim, video/audio content rate = video/audio DCP rate */
content->set_position (film, DCPTime());
Ls = sf_open ("build/test/upmixer_a_test/Ls.wav", SFM_WRITE, &info);
Rs = sf_open ("build/test/upmixer_a_test/Rs.wav", SFM_WRITE, &info);
- auto player = make_shared<Player>(film, false);
+ auto player = make_shared<Player>(film, Image::Alignment::COMPACT);
player->Audio.connect (bind (&write, _1, _2));
while (!player->pass()) {}
make_and_verify_dcp (vf, {dcp::VerificationNote::Code::EXTERNAL_ASSET});
/* Check that the selected reel assets are right */
- auto player = make_shared<Player>(vf, false);
+ auto player = make_shared<Player>(vf, Image::Alignment::COMPACT);
auto a = player->get_reel_assets();
BOOST_REQUIRE_EQUAL (a.size(), 4U);
auto i = a.begin();
using std::min;
-using std::make_pair;
using std::max;
using std::pair;
using std::string;
shared_ptr<Image>
grey_image (dcp::Size size, uint8_t pixel)
{
- auto grey = make_shared<Image>(AV_PIX_FMT_RGB24, size, true);
+ auto grey = make_shared<Image>(AV_PIX_FMT_RGB24, size, Image::Alignment::PADDED);
for (int y = 0; y < size.height; ++y) {
uint8_t* p = grey->data()[0] + y * grey->stride()[0];
for (int x = 0; x < size.width; ++x) {
write_image (grey_image(size, grey_pixel), file);
FFmpegImageProxy proxy (file);
- ImageProxy::Result result = proxy.image (false);
+ ImageProxy::Result result = proxy.image (Image::Alignment::COMPACT);
BOOST_REQUIRE (!result.error);
for (int y = 0; y < size.height; ++y) {
BOOST_REQUIRE (!player->pass());
}
- auto image = player_video->image ([](AVPixelFormat f) { return f; }, VideoRange::FULL, true, false);
+ auto image = player_video->image ([](AVPixelFormat f) { return f; }, VideoRange::FULL, Image::Alignment::PADDED, false);
for (int y = 0; y < size.height; ++y) {
uint8_t* p = image->data()[0] + y * image->stride()[0];
BOOST_REQUIRE (!decoder->pass());
}
- return pixel_range (content_video->image->image(false).image);
+ return pixel_range (content_video->image->image(Image::Alignment::COMPACT).image);
}