#include "i18n.h"
-using std::string;
+using std::make_shared;
using std::shared_ptr;
+using std::string;
using std::weak_ptr;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
void
AnalyseSubtitlesJob::run ()
{
- shared_ptr<Playlist> playlist (new Playlist());
- shared_ptr<Content> content = _content.lock ();
+ auto playlist = make_shared<Playlist>();
+ auto content = _content.lock ();
DCPOMATIC_ASSERT (content);
playlist->add (_film, content);
- shared_ptr<Player> player (new Player(_film, playlist));
+ auto player = make_shared<Player>(_film, playlist);
player->set_ignore_audio ();
player->set_fast ();
player->set_play_referenced ();
void
AnalyseSubtitlesJob::analyse (PlayerText text, TextType type)
{
- if (type != TEXT_OPEN_SUBTITLE) {
+ if (type != TextType::OPEN_SUBTITLE) {
return;
}
Butler::player_change (ChangeType type, int property)
{
if (property == VideoContentProperty::CROP) {
- if (type == CHANGE_TYPE_DONE) {
- shared_ptr<const Film> film = _film.lock();
+ if (type == ChangeType::DONE) {
+ auto film = _film.lock();
if (film) {
_video.reset_metadata (film, _player->video_container_size());
}
boost::mutex::scoped_lock lm (_mutex);
- if (type == CHANGE_TYPE_PENDING) {
+ if (type == ChangeType::PENDING) {
++_suspended;
- } else if (type == CHANGE_TYPE_DONE) {
+ } else if (type == ChangeType::DONE) {
--_suspended;
if (_died || _pending_seek_position) {
lm.unlock ();
}
DCPTime seek_to;
- DCPTime next = _video.get().second;
+ auto next = _video.get().second;
if (_awaiting && _awaiting > next) {
/* We have recently done a player_changed seek and our buffers haven't been refilled yet,
so assume that we're seeking to the same place as last time.
seek_unlocked (seek_to, true);
_awaiting = seek_to;
- } else if (type == CHANGE_TYPE_CANCELLED) {
+ } else if (type == ChangeType::CANCELLED) {
--_suspended;
}
void
Butler::text (PlayerText pt, TextType type, optional<DCPTextTrack> track, DCPTimePeriod period)
{
- if (type != TEXT_CLOSED_CAPTION) {
+ if (type != TextType::CLOSED_CAPTION) {
return;
}
, _property (p)
, _done (true)
{
- _thing->signal_change (CHANGE_TYPE_PENDING, _property);
+ _thing->signal_change (ChangeType::PENDING, _property);
}
~ChangeSignaller ()
{
if (_done) {
- _thing->signal_change (CHANGE_TYPE_DONE, _property);
+ _thing->signal_change (ChangeType::DONE, _property);
} else {
- _thing->signal_change (CHANGE_TYPE_CANCELLED, _property);
+ _thing->signal_change (ChangeType::CANCELLED, _property);
}
}
_use_any_servers = true;
_servers.clear ();
_only_servers_encode = false;
- _tms_protocol = FILE_TRANSFER_PROTOCOL_SCP;
+ _tms_protocol = FileTransferProtocol::SCP;
_tms_ip = "";
_tms_path = ".";
_tms_user = "";
_upload_after_make_dcp = false;
_mail_server = "";
_mail_port = 25;
- _mail_protocol = EMAIL_PROTOCOL_AUTO;
+ _mail_protocol = EmailProtocol::AUTO;
_mail_user = "";
_mail_password = "";
_kdm_from = "";
}
_only_servers_encode = f.optional_bool_child ("OnlyServersEncode").get_value_or (false);
- _tms_protocol = static_cast<FileTransferProtocol>(f.optional_number_child<int>("TMSProtocol").get_value_or(static_cast<int>(FILE_TRANSFER_PROTOCOL_SCP)));
+ _tms_protocol = static_cast<FileTransferProtocol>(f.optional_number_child<int>("TMSProtocol").get_value_or(static_cast<int>(FileTransferProtocol::SCP)));
_tms_ip = f.string_child ("TMSIP");
_tms_path = f.string_child ("TMSPath");
_tms_user = f.string_child ("TMSUser");
/* Make sure this matches the code in write_config */
string const protocol = f.optional_string_child("MailProtocol").get_value_or("Auto");
if (protocol == "Auto") {
- _mail_protocol = EMAIL_PROTOCOL_AUTO;
+ _mail_protocol = EmailProtocol::AUTO;
} else if (protocol == "Plain") {
- _mail_protocol = EMAIL_PROTOCOL_PLAIN;
+ _mail_protocol = EmailProtocol::PLAIN;
} else if (protocol == "STARTTLS") {
- _mail_protocol = EMAIL_PROTOCOL_STARTTLS;
+ _mail_protocol = EmailProtocol::STARTTLS;
} else if (protocol == "SSL") {
- _mail_protocol = EMAIL_PROTOCOL_SSL;
+ _mail_protocol = EmailProtocol::SSL;
}
}
root->add_child("MailPort")->add_child_text (raw_convert<string> (_mail_port));
/* [XML] MailProtocol Protocol to use on SMTP server (Auto, Plain, STARTTLS or SSL) */
switch (_mail_protocol) {
- case EMAIL_PROTOCOL_AUTO:
+ case EmailProtocol::AUTO:
root->add_child("MailProtocol")->add_child_text("Auto");
break;
- case EMAIL_PROTOCOL_PLAIN:
+ case EmailProtocol::PLAIN:
root->add_child("MailProtocol")->add_child_text("Plain");
break;
- case EMAIL_PROTOCOL_STARTTLS:
+ case EmailProtocol::STARTTLS:
root->add_child("MailProtocol")->add_child_text("STARTTLS");
break;
- case EMAIL_PROTOCOL_SSL:
+ case EmailProtocol::SSL:
root->add_child("MailProtocol")->add_child_text("SSL");
break;
}
Content::signal_change (ChangeType c, int p)
{
try {
- if (c == CHANGE_TYPE_PENDING || c == CHANGE_TYPE_CANCELLED) {
+ if (c == ChangeType::PENDING || c == ChangeType::CANCELLED) {
Change (c, shared_from_this(), p, _change_signals_frequent);
} else {
emit (boost::bind (boost::ref(Change), c, shared_from_this(), p, _change_signals_frequent));
public:
ContentVideo ()
: frame (0)
- , eyes (EYES_LEFT)
- , part (PART_WHOLE)
+ , eyes (Eyes::LEFT)
+ , part (Part::WHOLE)
{}
ContentVideo (std::shared_ptr<const ImageProxy> i, Frame f, Eyes e, Part p)
void
copy_dcp_details_to_film (shared_ptr<const DCPContent> dcp, shared_ptr<Film> film)
{
- string name = dcp->name ();
+ auto name = dcp->name ();
name = name.substr (0, name.find("_"));
film->set_name (name);
film->set_use_isdcf_name (true);
film->set_dcp_content_type (DCPContentType::from_libdcp_kind(dcp->content_kind().get()));
}
film->set_encrypted (dcp->encrypted());
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
film->set_interop (dcp->standard() == dcp::Standard::INTEROP);
film->set_three_d (dcp->three_d());
film->set_audio_channels (dcp->audio->stream()->channels());
}
- map<dcp::Marker, dcpomatic::ContentTime> dcp_markers;
- map<dcp::Marker, dcpomatic::DCPTime> film_markers;
film->clear_markers ();
- for (map<dcp::Marker, dcpomatic::ContentTime>::const_iterator i = dcp_markers.begin(); i != dcp_markers.end(); ++i) {
- film->set_marker (i->first, dcpomatic::DCPTime(i->second.get()));
+ for (auto const& i: dcp->markers()) {
+ film->set_marker (i.first, dcpomatic::DCPTime(i.second.get()));
}
film->set_ratings (dcp->ratings());
string config_dir_string;
string output_dir_string;
int j2k_bandwidth_int = 0;
- VideoFrameType next_frame_type = VIDEO_FRAME_TYPE_2D;
+ auto next_frame_type = VideoFrameType::TWO_D;
int i = 1;
while (i < argc) {
} else if (a == "--threed") {
threed = claimed = true;
} else if (a == "--left-eye") {
- next_frame_type = VIDEO_FRAME_TYPE_3D_LEFT;
+ next_frame_type = VideoFrameType::THREE_D_LEFT;
claimed = true;
} else if (a == "--right-eye") {
- next_frame_type = VIDEO_FRAME_TYPE_3D_RIGHT;
+ next_frame_type = VideoFrameType::THREE_D_RIGHT;
claimed = true;
} else if (a == "--fourk") {
fourk = true;
c.path = a;
c.frame_type = next_frame_type;
content.push_back (c);
- next_frame_type = VIDEO_FRAME_TYPE_2D;
+ next_frame_type = VideoFrameType::TWO_D;
}
}
#include "i18n.h"
-using std::string;
using std::cout;
using std::distance;
-using std::pair;
-using std::vector;
using std::list;
+using std::make_shared;
using std::map;
+using std::pair;
using std::shared_ptr;
+using std::string;
+using std::vector;
using boost::scoped_ptr;
using boost::optional;
using boost::function;
read_directory (p);
set_default_colour_conversion ();
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
_reference_text[i] = false;
}
}
text = TextContent::from_xml (this, node, version);
atmos = AtmosContent::from_xml (this, node);
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
_reference_text[i] = false;
}
if (video && audio) {
audio->set_stream (
- AudioStreamPtr (
- new AudioStream (
- node->number_child<int> ("AudioFrameRate"),
- /* AudioLength was not present in some old metadata versions */
- node->optional_number_child<Frame>("AudioLength").get_value_or (
- video->length() * node->number_child<int>("AudioFrameRate") / video_frame_rate().get()
- ),
- AudioMapping (node->node_child ("AudioMapping"), version)
- )
+ make_shared<AudioStream> (
+ node->number_child<int> ("AudioFrameRate"),
+ /* AudioLength was not present in some old metadata versions */
+ node->optional_number_child<Frame>("AudioLength").get_value_or (
+ video->length() * node->number_child<int>("AudioFrameRate") / video_frame_rate().get()
+ ),
+ AudioMapping (node->node_child ("AudioMapping"), version)
)
);
}
_reference_video = node->optional_bool_child ("ReferenceVideo").get_value_or (false);
_reference_audio = node->optional_bool_child ("ReferenceAudio").get_value_or (false);
if (version >= 37) {
- _reference_text[TEXT_OPEN_SUBTITLE] = node->optional_bool_child("ReferenceOpenSubtitle").get_value_or(false);
- _reference_text[TEXT_CLOSED_CAPTION] = node->optional_bool_child("ReferenceClosedCaption").get_value_or(false);
+ _reference_text[static_cast<int>(TextType::OPEN_SUBTITLE)] = node->optional_bool_child("ReferenceOpenSubtitle").get_value_or(false);
+ _reference_text[static_cast<int>(TextType::CLOSED_CAPTION)] = node->optional_bool_child("ReferenceClosedCaption").get_value_or(false);
} else {
- _reference_text[TEXT_OPEN_SUBTITLE] = node->optional_bool_child("ReferenceSubtitle").get_value_or(false);
- _reference_text[TEXT_CLOSED_CAPTION] = false;
+ _reference_text[static_cast<int>(TextType::OPEN_SUBTITLE)] = node->optional_bool_child("ReferenceSubtitle").get_value_or(false);
+ _reference_text[static_cast<int>(TextType::CLOSED_CAPTION)] = false;
}
if (node->optional_string_child("Standard")) {
- string const s = node->optional_string_child("Standard").get();
+ auto const s = node->optional_string_child("Standard").get();
if (s == "Interop") {
_standard = dcp::Standard::INTEROP;
} else if (s == "SMPTE") {
}
_three_d = node->optional_bool_child("ThreeD").get_value_or (false);
- optional<string> ck = node->optional_string_child("ContentKind");
+ auto ck = node->optional_string_child("ContentKind");
if (ck) {
_content_kind = dcp::content_kind_from_string (*ck);
}
DCPContent::read_sub_directory (boost::filesystem::path p)
{
LOG_GENERAL ("DCPContent::read_sub_directory reads %1", p.string());
- for (boost::filesystem::directory_iterator i(p); i != boost::filesystem::directory_iterator(); ++i) {
- if (boost::filesystem::is_regular_file (i->path())) {
- LOG_GENERAL ("Inside there's regular file %1", i->path().string());
- add_path (i->path());
- } else if (boost::filesystem::is_directory (i->path ())) {
- LOG_GENERAL ("Inside there's directory %1", i->path().string());
- read_sub_directory (i->path());
+ for (auto i: boost::filesystem::directory_iterator(p)) {
+ if (boost::filesystem::is_regular_file(i.path())) {
+ LOG_GENERAL ("Inside there's regular file %1", i.path().string());
+ add_path (i.path());
+ } else if (boost::filesystem::is_directory (i.path())) {
+ LOG_GENERAL ("Inside there's directory %1", i.path().string());
+ read_sub_directory (i.path());
}
}
}
}
Content::examine (film, job);
- shared_ptr<DCPExaminer> examiner (new DCPExaminer(shared_from_this(), film ? film->tolerant() : true));
+ auto examiner = make_shared<DCPExaminer>(shared_from_this(), film ? film->tolerant() : true);
if (examiner->has_video()) {
{
boost::mutex::scoped_lock lm (_mutex);
- video.reset (new VideoContent (this));
+ video = make_shared<VideoContent>(this);
}
video->take_from_examiner (examiner);
set_default_colour_conversion ();
if (examiner->has_audio()) {
{
boost::mutex::scoped_lock lm (_mutex);
- audio.reset (new AudioContent (this));
+ audio = make_shared<AudioContent>(this);
}
- AudioStreamPtr as (new AudioStream (examiner->audio_frame_rate(), examiner->audio_length(), examiner->audio_channels()));
+ auto as = make_shared<AudioStream>(examiner->audio_frame_rate(), examiner->audio_length(), examiner->audio_channels());
audio->set_stream (as);
- AudioMapping m = as->mapping ();
+ auto m = as->mapping ();
m.make_default (film ? film->audio_processor() : 0);
as->set_mapping (m);
}
if (examiner->has_atmos()) {
{
boost::mutex::scoped_lock lm (_mutex);
- atmos.reset (new AtmosContent(this));
+ atmos = make_shared<AtmosContent>(this);
}
/* Setting length will cause calculations to be made based on edit rate, so that must
* be set up first otherwise hard-to-spot exceptions will be thrown.
}
list<shared_ptr<TextContent> > new_text;
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
for (int j = 0; j < examiner->text_count(static_cast<TextType>(i)); ++j) {
- shared_ptr<TextContent> c(new TextContent(this, static_cast<TextType>(i), static_cast<TextType>(i)));
- if (i == TEXT_CLOSED_CAPTION) {
+ auto c = make_shared<TextContent>(this, static_cast<TextType>(i), static_cast<TextType>(i));
+ if (i == static_cast<int>(TextType::CLOSED_CAPTION)) {
c->set_dcp_track (examiner->dcp_text_track(j));
}
new_text.push_back (c);
_content_kind = examiner->content_kind ();
_cpl = examiner->cpl ();
_reel_lengths = examiner->reel_lengths ();
- map<dcp::Marker, dcp::Time> markers = examiner->markers();
- for (map<dcp::Marker, dcp::Time>::const_iterator i = markers.begin(); i != markers.end(); ++i) {
- _markers[i->first] = ContentTime(i->second.as_editable_units(DCPTime::HZ));
+ for (auto const& i: examiner->markers()) {
+ _markers[i.first] = ContentTime(i.second.as_editable_units(DCPTime::HZ));
}
_ratings = examiner->ratings ();
_content_versions = examiner->content_versions ();
}
if (video) {
- video->set_frame_type (_three_d ? VIDEO_FRAME_TYPE_3D : VIDEO_FRAME_TYPE_2D);
+ video->set_frame_type (_three_d ? VideoFrameType::THREE_D : VideoFrameType::TWO_D);
}
}
node->add_child("KDMValid")->add_child_text (_kdm_valid ? "1" : "0");
node->add_child("ReferenceVideo")->add_child_text (_reference_video ? "1" : "0");
node->add_child("ReferenceAudio")->add_child_text (_reference_audio ? "1" : "0");
- node->add_child("ReferenceOpenSubtitle")->add_child_text(_reference_text[TEXT_OPEN_SUBTITLE] ? "1" : "0");
- node->add_child("ReferenceClosedCaption")->add_child_text(_reference_text[TEXT_CLOSED_CAPTION] ? "1" : "0");
+ node->add_child("ReferenceOpenSubtitle")->add_child_text(_reference_text[static_cast<int>(TextType::OPEN_SUBTITLE)] ? "1" : "0");
+ node->add_child("ReferenceClosedCaption")->add_child_text(_reference_text[static_cast<int>(TextType::CLOSED_CAPTION)] ? "1" : "0");
if (_standard) {
switch (_standard.get ()) {
case dcp::Standard::INTEROP:
node->add_child("ReelLength")->add_child_text (raw_convert<string> (i));
}
- for (map<dcp::Marker, ContentTime>::const_iterator i = _markers.begin(); i != _markers.end(); ++i) {
- xmlpp::Element* marker = node->add_child("Marker");
- marker->set_attribute("type", dcp::marker_to_string(i->first));
- marker->add_child_text(raw_convert<string>(i->second.get()));
+ for (auto const& i: _markers) {
+ auto marker = node->add_child("Marker");
+ marker->set_attribute("type", dcp::marker_to_string(i.first));
+ marker->add_child_text(raw_convert<string>(i.second.get()));
}
for (auto i: _ratings) {
- xmlpp::Element* rating = node->add_child("Rating");
+ auto rating = node->add_child("Rating");
i.as_xml (rating);
}
DCPContent::full_length (shared_ptr<const Film> film) const
{
if (!video) {
- return DCPTime();
+ return {};
}
FrameRateChange const frc (film, shared_from_this());
return DCPTime::from_frames (llrint(video->length() * frc.factor()), film->video_frame_rate());
DCPContent::approximate_length () const
{
if (!video) {
- return DCPTime();
+ return {};
}
return DCPTime::from_frames (video->length(), 24);
}
}
s += string (_reference_video ? "1" : "0");
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
s += string (_reference_text[i] ? "1" : "0");
}
return s;
{
boost::mutex::scoped_lock lm (_mutex);
- _reference_text[type] = r;
+ _reference_text[static_cast<int>(type)] = r;
}
}
list<DCPTimePeriod>
DCPContent::reels (shared_ptr<const Film> film) const
{
- list<int64_t> reel_lengths = _reel_lengths;
- if (reel_lengths.empty ()) {
+ auto reel_lengths = _reel_lengths;
+ if (reel_lengths.empty()) {
/* Old metadata with no reel lengths; get them here instead */
try {
scoped_ptr<DCPExaminer> examiner (new DCPExaminer(shared_from_this(), film->tolerant()));
*/
/* The starting point of this content on the timeline */
- DCPTime pos = position() - DCPTime (trim_start().get());
+ auto pos = position() - DCPTime (trim_start().get());
for (auto i: reel_lengths) {
/* This reel runs from `pos' to `to' */
DCPTime const to = pos + DCPTime::from_frames (i, film->video_frame_rate());
if (to > position()) {
- p.push_back (DCPTimePeriod (max(position(), pos), min(end(film), to)));
+ p.push_back (DCPTimePeriod(max(position(), pos), min(end(film), to)));
if (to > end(film)) {
break;
}
return false;
}
- list<DCPTimePeriod> const fr = film->reels ();
+ auto const fr = film->reels ();
list<DCPTimePeriod> reel_list;
try {
}
}
- ContentList a = overlaps (film, film->content(), part, position(), end(film));
+ auto a = overlaps (film, film->content(), part, position(), end(film));
if (a.size() != 1 || a.front().get() != this) {
why_not = overlapping;
return false;
}
if (film->resolution() != resolution()) {
- if (resolution() == RESOLUTION_4K) {
+ if (resolution() == Resolution::FOUR_K) {
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
why_not = _("it is 4K and the film is 2K.");
} else {
}
for (auto i: decoder->reels()) {
- if (type == TEXT_OPEN_SUBTITLE) {
+ if (type == TextType::OPEN_SUBTITLE) {
if (!i->main_subtitle()) {
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
why_not = _("it does not have open subtitles in all its reels.");
return false;
}
}
- if (type == TEXT_CLOSED_CAPTION) {
+ if (type == TextType::CLOSED_CAPTION) {
if (i->closed_captions().empty()) {
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
why_not = _("it does not have closed captions in all its reels.");
void
DCPContent::take_settings_from (shared_ptr<const Content> c)
{
- shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (c);
+ auto dc = dynamic_pointer_cast<const DCPContent>(c);
if (!dc) {
return;
}
_reference_video = dc->_reference_video;
_reference_audio = dc->_reference_audio;
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
_reference_text[i] = dc->_reference_text[i];
}
}
DCPContent::resolution () const
{
if (video->size().width > 2048 || video->size().height > 1080) {
- return RESOLUTION_4K;
+ return Resolution::FOUR_K;
}
- return RESOLUTION_2K;
+ return Resolution::TWO_K;
}
*/
bool reference_text (TextType type) const {
boost::mutex::scoped_lock lm (_mutex);
- return _reference_text[type];
+ return _reference_text[static_cast<int>(type)];
}
bool can_reference_text (std::shared_ptr<const Film> film, TextType type, std::string &) const;
* rather than by rewrapping. The types here are the original text types,
* not what they are being used for.
*/
- bool _reference_text[TEXT_COUNT];
+ bool _reference_text[static_cast<int>(TextType::COUNT)];
boost::optional<dcp::Standard> _standard;
boost::optional<dcp::ContentKind> _content_kind;
pass_texts (
next,
(*_reel)->main_subtitle()->asset(),
- _dcp_content->reference_text(TEXT_OPEN_SUBTITLE),
+ _dcp_content->reference_text(TextType::OPEN_SUBTITLE),
(*_reel)->main_subtitle()->entry_point().get_value_or(0),
*decoder,
size
for (auto i: (*_reel)->closed_captions()) {
pass_texts (
- next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point().get_value_or(0), *decoder, size
+ next, i->asset(), _dcp_content->reference_text(TextType::CLOSED_CAPTION), i->entry_point().get_value_or(0), *decoder, size
);
++decoder;
}
DCPEncoder::video (shared_ptr<PlayerVideo> data, DCPTime time)
{
if (!_film->three_d()) {
- if (data->eyes() == EYES_LEFT) {
+ if (data->eyes() == Eyes::LEFT) {
/* Use left-eye images for both eyes... */
- data->set_eyes (EYES_BOTH);
- } else if (data->eyes() == EYES_RIGHT) {
+ data->set_eyes (Eyes::BOTH);
+ } else if (data->eyes() == Eyes::RIGHT) {
/* ...and discard the right */
return;
}
void
DCPEncoder::text (PlayerText data, TextType type, optional<DCPTextTrack> track, DCPTimePeriod period)
{
- if (type == TEXT_CLOSED_CAPTION || _non_burnt_subtitles) {
+ if (type == TextType::CLOSED_CAPTION || _non_burnt_subtitles) {
_writer->write (data, type, track, period);
}
}
DCPExaminer::DCPExaminer (shared_ptr<const DCPContent> content, bool tolerant)
: DCP (content, tolerant)
- , _video_length (0)
- , _audio_length (0)
- , _has_video (false)
- , _has_audio (false)
- , _encrypted (false)
- , _needs_assets (false)
- , _kdm_valid (false)
- , _three_d (false)
- , _has_atmos (false)
- , _atmos_length (0)
{
shared_ptr<dcp::CPL> cpl;
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
_text_count[i] = 0;
}
return;
}
- dcp::Fraction const frac = i->main_picture()->edit_rate ();
+ auto const frac = i->main_picture()->edit_rate ();
float const fr = float(frac.numerator) / frac.denominator;
if (!_video_frame_rate) {
_video_frame_rate = fr;
}
_has_video = true;
- shared_ptr<dcp::PictureAsset> asset = i->main_picture()->asset ();
+ auto asset = i->main_picture()->asset();
if (!_video_size) {
_video_size = asset->size ();
} else if (_video_size.get() != asset->size ()) {
}
_has_audio = true;
- shared_ptr<dcp::SoundAsset> asset = i->main_sound()->asset ();
+ auto asset = i->main_sound()->asset();
if (!_audio_channels) {
_audio_channels = asset->channels ();
return;
}
- _text_count[TEXT_OPEN_SUBTITLE] = 1;
+ _text_count[static_cast<int>(TextType::OPEN_SUBTITLE)] = 1;
}
for (auto j: i->closed_captions()) {
return;
}
- _text_count[TEXT_CLOSED_CAPTION]++;
+ _text_count[static_cast<int>(TextType::CLOSED_CAPTION)]++;
_dcp_text_tracks.push_back (DCPTextTrack(j->annotation_text(), j->language().get_value_or(_("Unknown"))));
}
* @return Number of assets of this type in this DCP.
*/
int text_count (TextType type) const {
- return _text_count[type];
+ return _text_count[static_cast<int>(type)];
}
DCPTextTrack dcp_text_track (int i) const {
private:
boost::optional<double> _video_frame_rate;
boost::optional<dcp::Size> _video_size;
- Frame _video_length;
+ Frame _video_length = 0;
boost::optional<int> _audio_channels;
boost::optional<int> _audio_frame_rate;
- Frame _audio_length;
+ Frame _audio_length = 0;
std::string _name;
/** true if this DCP has video content (but false if it has unresolved references to video content) */
- bool _has_video;
+ bool _has_video = false;
/** true if this DCP has audio content (but false if it has unresolved references to audio content) */
- bool _has_audio;
+ bool _has_audio = false;
/** number of different assets of each type (OCAP/CCAP) */
- int _text_count[TEXT_COUNT];
+ int _text_count[static_cast<int>(TextType::COUNT)];
/** the DCPTextTracks for each of our CCAPs */
std::vector<DCPTextTrack> _dcp_text_tracks;
- bool _encrypted;
- bool _needs_assets;
- bool _kdm_valid;
+ bool _encrypted = false;
+ bool _needs_assets = false;
+ bool _kdm_valid = false;
boost::optional<dcp::Standard> _standard;
- bool _three_d;
+ bool _three_d = false;
dcp::ContentKind _content_kind;
std::string _cpl;
std::list<int64_t> _reel_lengths;
std::map<dcp::Marker, dcp::Time> _markers;
std::vector<dcp::Rating> _ratings;
std::vector<std::string> _content_versions;
- bool _has_atmos;
- Frame _atmos_length;
+ bool _has_atmos = false;
+ Frame _atmos_length = 0;
dcp::Fraction _atmos_edit_rate;
};
/*
- Copyright (C) 2014-2018 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2014-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
#include "i18n.h"
-using std::string;
+using std::dynamic_pointer_cast;
using std::list;
+using std::make_shared;
using std::shared_ptr;
-using std::dynamic_pointer_cast;
+using std::string;
using dcp::raw_convert;
using namespace dcpomatic;
DCPSubtitleContent::DCPSubtitleContent (boost::filesystem::path path)
: Content (path)
{
- text.push_back (shared_ptr<TextContent> (new TextContent (this, TEXT_OPEN_SUBTITLE, TEXT_OPEN_SUBTITLE)));
+ text.push_back (make_shared<TextContent>(this, TextType::OPEN_SUBTITLE, TextType::OPEN_SUBTITLE));
}
DCPSubtitleContent::DCPSubtitleContent (cxml::ConstNodePtr node, int version)
{
Content::examine (film, job);
- shared_ptr<dcp::SubtitleAsset> sc = load (path (0));
+ auto sc = load (path(0));
- shared_ptr<dcp::InteropSubtitleAsset> iop = dynamic_pointer_cast<dcp::InteropSubtitleAsset> (sc);
- shared_ptr<dcp::SMPTESubtitleAsset> smpte = dynamic_pointer_cast<dcp::SMPTESubtitleAsset> (sc);
+ auto iop = dynamic_pointer_cast<dcp::InteropSubtitleAsset>(sc);
+ auto smpte = dynamic_pointer_cast<dcp::SMPTESubtitleAsset>(sc);
if (smpte) {
set_video_frame_rate (smpte->edit_rate().numerator);
}
/* Default to turning these subtitles on */
only_text()->set_use (true);
- _length = ContentTime::from_seconds (sc->latest_subtitle_out().as_seconds ());
+ _length = ContentTime::from_seconds (sc->latest_subtitle_out().as_seconds());
sc->fix_empty_font_ids ();
for (auto i: sc->load_font_nodes()) {
- only_text()->add_font (shared_ptr<Font> (new Font (i->id)));
+ only_text()->add_font(make_shared<Font>(i->id));
}
}
#include "i18n.h"
-using std::string;
using std::cout;
+using std::make_shared;
using std::shared_ptr;
+using std::string;
using dcp::Size;
using dcp::ArrayData;
using dcp::raw_convert;
_index = node->number_child<int> ("Index");
_frames_per_second = node->number_child<int> ("FramesPerSecond");
_j2k_bandwidth = node->number_child<int> ("J2KBandwidth");
- _resolution = Resolution (node->optional_number_child<int>("Resolution").get_value_or (RESOLUTION_2K));
+ _resolution = Resolution (node->optional_number_child<int>("Resolution").get_value_or(static_cast<int>(Resolution::TWO_K)));
}
shared_ptr<dcp::OpenJPEGImage>
{
shared_ptr<dcp::OpenJPEGImage> xyz;
- shared_ptr<Image> image = frame->image (bind (&PlayerVideo::keep_xyz_or_rgb, _1), VideoRange::FULL, true, false);
+ auto image = frame->image (bind(&PlayerVideo::keep_xyz_or_rgb, _1), VideoRange::FULL, true, false);
if (frame->colour_conversion()) {
xyz = dcp::rgb_to_xyz (
image->data()[0],
note
);
} else {
- xyz.reset (new dcp::OpenJPEGImage (image->data()[0], image->size(), image->stride()[0]));
+ xyz = make_shared<dcp::OpenJPEGImage>(image->data()[0], image->size(), image->stride()[0]);
}
return xyz;
ArrayData
DCPVideo::encode_locally ()
{
- string const comment = Config::instance()->dcp_j2k_comment();
+ auto const comment = Config::instance()->dcp_j2k_comment();
- ArrayData enc = dcp::compress_j2k (
+ auto enc = dcp::compress_j2k (
convert_to_xyz (_frame, boost::bind(&Log::dcp_log, dcpomatic_log.get(), _1, _2)),
_j2k_bandwidth,
_frames_per_second,
- _frame->eyes() == EYES_LEFT || _frame->eyes() == EYES_RIGHT,
- _resolution == RESOLUTION_4K,
+ _frame->eyes() == Eyes::LEFT || _frame->eyes() == Eyes::RIGHT,
+ _resolution == Resolution::FOUR_K,
comment.empty() ? "libdcp" : comment
);
switch (_frame->eyes()) {
- case EYES_BOTH:
+ case Eyes::BOTH:
LOG_DEBUG_ENCODE (N_("Finished locally-encoded frame %1 for mono"), _index);
break;
- case EYES_LEFT:
+ case Eyes::LEFT:
LOG_DEBUG_ENCODE (N_("Finished locally-encoded frame %1 for L"), _index);
break;
- case EYES_RIGHT:
+ case Eyes::RIGHT:
LOG_DEBUG_ENCODE (N_("Finished locally-encoded frame %1 for R"), _index);
break;
default:
boost::asio::ip::tcp::resolver::query query (serv.host_name(), raw_convert<string> (ENCODE_FRAME_PORT));
boost::asio::ip::tcp::resolver::iterator endpoint_iterator = resolver.resolve (query);
- shared_ptr<Socket> socket (new Socket (timeout));
+ auto socket = make_shared<Socket>(timeout);
socket->connect (*endpoint_iterator);
/* Collect all XML metadata */
xmlpp::Document doc;
- xmlpp::Element* root = doc.create_root_node ("EncodingRequest");
+ auto root = doc.create_root_node ("EncodingRequest");
root->add_child("Version")->add_child_text (raw_convert<string> (SERVER_LINK_VERSION));
add_metadata (root);
Socket::WriteDigestScope ds (socket);
/* Send XML metadata */
- string xml = doc.write_to_string ("UTF-8");
+ auto xml = doc.write_to_string ("UTF-8");
socket->write (xml.length() + 1);
socket->write ((uint8_t *) xml.c_str(), xml.length() + 1);
throw NetworkError ("Could not initialise libcurl");
}
- if ((protocol == EMAIL_PROTOCOL_AUTO && port == 465) || protocol == EMAIL_PROTOCOL_SSL) {
+ if ((protocol == EmailProtocol::AUTO && port == 465) || protocol == EmailProtocol::SSL) {
/* "SSL" or "Implicit TLS"; I think curl wants us to use smtps here */
curl_easy_setopt (curl, CURLOPT_URL, String::compose("smtps://%1:%2", server, port).c_str());
} else {
curl_easy_setopt (curl, CURLOPT_READDATA, this);
curl_easy_setopt (curl, CURLOPT_UPLOAD, 1L);
- if (protocol == EMAIL_PROTOCOL_AUTO || protocol == EMAIL_PROTOCOL_STARTTLS) {
+ if (protocol == EmailProtocol::AUTO || protocol == EmailProtocol::STARTTLS) {
curl_easy_setopt (curl, CURLOPT_USE_SSL, (long) CURLUSESSL_TRY);
}
curl_easy_setopt (curl, CURLOPT_SSL_VERIFYPEER, 0L);
_subtitle_streams = examiner->subtitle_streams ();
if (!_subtitle_streams.empty ()) {
text.clear ();
- text.push_back (make_shared<TextContent>(this, TEXT_OPEN_SUBTITLE, TEXT_UNKNOWN));
+ text.push_back (make_shared<TextContent>(this, TextType::OPEN_SUBTITLE, TextType::UNKNOWN));
_subtitle_stream = _subtitle_streams.front ();
}
}
#include "i18n.h"
-using std::string;
-using std::runtime_error;
using std::cout;
-using std::pair;
using std::list;
+using std::make_shared;
using std::map;
+using std::pair;
+using std::runtime_error;
using std::shared_ptr;
-using boost::bind;
+using std::string;
using std::weak_ptr;
+using boost::bind;
using boost::optional;
using namespace dcpomatic;
#if BOOST_VERSION >= 106100
}
}
- _butler.reset (
- new Butler(_film, _player, map, _output_audio_channels, bind(&PlayerVideo::force, _1, FFmpegFileEncoder::pixel_format(format)), VideoRange::VIDEO, true, false)
+ _butler = std::make_shared<Butler>(
+ _film, _player, map, _output_audio_channels, bind(&PlayerVideo::force, _1, FFmpegFileEncoder::pixel_format(format)), VideoRange::VIDEO, true, false
);
}
FFmpegEncoder::go ()
{
{
- shared_ptr<Job> job = _job.lock ();
+ auto job = _job.lock ();
DCPOMATIC_ASSERT (job);
job->sub (_("Encoding"));
}
);
}
- list<DCPTimePeriod> reel_periods = _film->reels ();
- list<DCPTimePeriod>::const_iterator reel = reel_periods.begin ();
- list<FileEncoderSet>::iterator encoder = file_encoders.begin ();
+ auto reel_periods = _film->reels ();
+ auto reel = reel_periods.begin ();
+ auto encoder = file_encoders.begin ();
- DCPTime const video_frame = DCPTime::from_frames (1, _film->video_frame_rate ());
+ auto const video_frame = DCPTime::from_frames (1, _film->video_frame_rate ());
int const audio_frames = video_frame.frames_round(_film->audio_frame_rate());
float* interleaved = new float[_output_audio_channels * audio_frames];
- shared_ptr<AudioBuffers> deinterleaved (new AudioBuffers (_output_audio_channels, audio_frames));
+ auto deinterleaved = make_shared<AudioBuffers>(_output_audio_channels, audio_frames);
int const gets_per_frame = _film->three_d() ? 2 : 1;
for (DCPTime i; i < _film->length(); i += video_frame) {
for (int j = 0; j < gets_per_frame; ++j) {
Butler::Error e;
- pair<shared_ptr<PlayerVideo>, DCPTime> v = _butler->get_video (true, &e);
+ auto v = _butler->get_video (true, &e);
_butler->rethrow ();
if (!v.first) {
throw DecodeError(String::compose("Error during decoding: %1", e.summary()));
{
if (three_d) {
/// TRANSLATORS: L here is an abbreviation for "left", to indicate the left-eye part of a 3D export
- _encoders[EYES_LEFT] = shared_ptr<FFmpegFileEncoder>(
- new FFmpegFileEncoder(
- video_frame_size, video_frame_rate, audio_frame_rate, channels, format,
- audio_stream_per_channel, x264_crf, String::compose("%1_%2%3", output.string(), _("L"), extension))
+ _encoders[Eyes::LEFT] = make_shared<FFmpegFileEncoder>(
+ video_frame_size, video_frame_rate, audio_frame_rate, channels, format,
+ audio_stream_per_channel, x264_crf, String::compose("%1_%2%3", output.string(), _("L"), extension)
);
/// TRANSLATORS: R here is an abbreviation for "right", to indicate the right-eye part of a 3D export
- _encoders[EYES_RIGHT] = shared_ptr<FFmpegFileEncoder>(
- new FFmpegFileEncoder(
- video_frame_size, video_frame_rate, audio_frame_rate, channels, format,
- audio_stream_per_channel, x264_crf, String::compose("%1_%2%3", output.string(), _("R"), extension))
+ _encoders[Eyes::RIGHT] = make_shared<FFmpegFileEncoder>(
+ video_frame_size, video_frame_rate, audio_frame_rate, channels, format,
+ audio_stream_per_channel, x264_crf, String::compose("%1_%2%3", output.string(), _("R"), extension)
);
} else {
- _encoders[EYES_BOTH] = shared_ptr<FFmpegFileEncoder>(
- new FFmpegFileEncoder(
- video_frame_size, video_frame_rate, audio_frame_rate, channels, format,
- audio_stream_per_channel, x264_crf, String::compose("%1%2", output.string(), extension))
+ _encoders[Eyes::BOTH] = make_shared<FFmpegFileEncoder>(
+ video_frame_size, video_frame_rate, audio_frame_rate, channels, format,
+ audio_stream_per_channel, x264_crf, String::compose("%1%2", output.string(), extension)
);
}
}
{
if (_encoders.size() == 1) {
/* We are doing a 2D export... */
- if (eyes == EYES_LEFT) {
+ if (eyes == Eyes::LEFT) {
/* ...but we got some 3D data; put the left eye into the output... */
- eyes = EYES_BOTH;
- } else if (eyes == EYES_RIGHT) {
+ eyes = Eyes::BOTH;
+ } else if (eyes == Eyes::RIGHT) {
/* ...and ignore the right eye.*/
return shared_ptr<FFmpegFileEncoder>();
}
void
FFmpegEncoder::FileEncoderSet::flush ()
{
- for (map<Eyes, std::shared_ptr<FFmpegFileEncoder> >::iterator i = _encoders.begin(); i != _encoders.end(); ++i) {
- i->second->flush ();
+ for (auto& i: _encoders) {
+ i.second->flush ();
}
}
void
FFmpegEncoder::FileEncoderSet::audio (shared_ptr<AudioBuffers> a)
{
- for (map<Eyes, std::shared_ptr<FFmpegFileEncoder> >::iterator i = _encoders.begin(); i != _encoders.end(); ++i) {
- i->second->audio (a);
+ for (auto& i: _encoders) {
+ i.second->audio (a);
}
}
_pixel_format = pixel_format (format);
switch (format) {
- case EXPORT_FORMAT_PRORES:
+ case ExportFormat::PRORES:
_sample_format = AV_SAMPLE_FMT_S16;
_video_codec_name = "prores_ks";
_audio_codec_name = "pcm_s16le";
av_dict_set (&_video_options, "profile", "3", 0);
av_dict_set (&_video_options, "threads", "auto", 0);
break;
- case EXPORT_FORMAT_H264_AAC:
+ case ExportFormat::H264_AAC:
_sample_format = AV_SAMPLE_FMT_FLTP;
_video_codec_name = "libx264";
_audio_codec_name = "aac";
av_dict_set_int (&_video_options, "crf", x264_crf, 0);
break;
- case EXPORT_FORMAT_H264_PCM:
+ case ExportFormat::H264_PCM:
_sample_format = AV_SAMPLE_FMT_S32;
_video_codec_name = "libx264";
_audio_codec_name = "pcm_s24le";
FFmpegFileEncoder::pixel_format (ExportFormat format)
{
switch (format) {
- case EXPORT_FORMAT_PRORES:
+ case ExportFormat::PRORES:
return AV_PIX_FMT_YUV422P10;
- case EXPORT_FORMAT_H264_AAC:
- case EXPORT_FORMAT_H264_PCM:
+ case ExportFormat::H264_AAC:
+ case ExportFormat::H264_PCM:
return AV_PIX_FMT_YUV420P;
default:
DCPOMATIC_ASSERT (false);
FFmpegFileEncoder::video (shared_ptr<PlayerVideo> video, DCPTime time)
{
/* All our output formats are video range at the moment */
- shared_ptr<Image> image = video->image (
+ auto image = video->image (
bind (&PlayerVideo::force, _1, _pixel_format),
VideoRange::VIDEO,
true,
false
);
- AVFrame* frame = av_frame_alloc ();
+ auto frame = av_frame_alloc ();
DCPOMATIC_ASSERT (frame);
{
, _use_isdcf_name (true)
, _dcp_content_type (Config::instance()->default_dcp_content_type ())
, _container (Config::instance()->default_container ())
- , _resolution (RESOLUTION_2K)
+ , _resolution (Resolution::TWO_K)
, _encrypted (false)
, _context_id (dcp::make_uuid ())
, _j2k_bandwidth (Config::instance()->default_j2k_bandwidth ())
, _sequence (true)
, _interop (Config::instance()->default_interop ())
, _audio_processor (0)
- , _reel_type (REELTYPE_SINGLE)
+ , _reel_type (ReelType::SINGLE)
, _reel_length (2000000000)
, _reencode_j2k (false)
, _user_explicit_video_frame_rate (false)
}
}
- _reel_type = static_cast<ReelType> (f.optional_number_child<int>("ReelType").get_value_or (static_cast<int>(REELTYPE_SINGLE)));
+ _reel_type = static_cast<ReelType> (f.optional_number_child<int>("ReelType").get_value_or (static_cast<int>(ReelType::SINGLE)));
_reel_length = f.optional_number_child<int64_t>("ReelLength").get_value_or (2000000000);
_reencode_j2k = f.optional_bool_child("ReencodeJ2K").get_value_or(false);
_user_explicit_video_frame_rate = f.optional_bool_child("UserExplicitVideoFrameRate").get_value_or(false);
auto ccap = false;
for (auto i: content()) {
for (auto j: i->text) {
- if (j->type() == TEXT_OPEN_SUBTITLE && j->use() && !j->burn()) {
+ if (j->type() == TextType::OPEN_SUBTITLE && j->use() && !j->burn()) {
burnt_in = false;
- } else if (j->type() == TEXT_CLOSED_CAPTION && j->use()) {
+ } else if (j->type() == TextType::CLOSED_CAPTION && j->use()) {
ccap = true;
}
}
}
bool any_text = false;
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
if (dc->reference_text(static_cast<TextType>(i))) {
any_text = true;
}
void
Film::signal_change (ChangeType type, Property p)
{
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
_dirty = true;
if (p == Film::CONTENT) {
snprintf(buffer, sizeof(buffer), "%08d_%08" PRId64, reel, frame);
string s (buffer);
- if (eyes == EYES_LEFT) {
+ if (eyes == Eyes::LEFT) {
s += ".L";
- } else if (eyes == EYES_RIGHT) {
+ } else if (eyes == Eyes::RIGHT) {
s += ".R";
}
if (!_user_explicit_resolution) {
if (video->size_after_crop().width > 2048 || video->size_after_crop().height > 1080) {
- set_resolution (RESOLUTION_4K, false);
+ set_resolution (Resolution::FOUR_K, false);
} else {
- set_resolution (RESOLUTION_2K, false);
+ set_resolution (Resolution::TWO_K, false);
}
}
}
signal_change (type, Film::NAME);
}
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
emit (boost::bind (boost::ref (ContentChange), type, c, p, frequent));
if (!frequent) {
check_settings_consistency ();
signal_change (type, CONTENT);
signal_change (type, NAME);
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
check_settings_consistency ();
}
d->set_reference_audio(false);
change_made = true;
}
- if (d->reference_text(TEXT_OPEN_SUBTITLE) && !d->can_reference_text(shared_from_this(), TEXT_OPEN_SUBTITLE, why_not)) {
- d->set_reference_text(TEXT_OPEN_SUBTITLE, false);
+ if (d->reference_text(TextType::OPEN_SUBTITLE) && !d->can_reference_text(shared_from_this(), TextType::OPEN_SUBTITLE, why_not)) {
+ d->set_reference_text(TextType::OPEN_SUBTITLE, false);
change_made = true;
}
- if (d->reference_text(TEXT_CLOSED_CAPTION) && !d->can_reference_text(shared_from_this(), TEXT_CLOSED_CAPTION, why_not)) {
- d->set_reference_text(TEXT_CLOSED_CAPTION, false);
+ if (d->reference_text(TextType::CLOSED_CAPTION) && !d->can_reference_text(shared_from_this(), TextType::CLOSED_CAPTION, why_not)) {
+ d->set_reference_text(TextType::CLOSED_CAPTION, false);
change_made = true;
}
}
Film::playlist_order_changed ()
{
/* XXX: missing PENDING */
- signal_change (CHANGE_TYPE_DONE, CONTENT_ORDER);
+ signal_change (ChangeType::DONE, CONTENT_ORDER);
}
int
Film::full_frame () const
{
switch (_resolution) {
- case RESOLUTION_2K:
+ case Resolution::TWO_K:
return dcp::Size (2048, 1080);
- case RESOLUTION_4K:
+ case Resolution::FOUR_K:
return dcp::Size (4096, 2160);
}
auto const len = length();
switch (reel_type ()) {
- case REELTYPE_SINGLE:
+ case ReelType::SINGLE:
p.push_back (DCPTimePeriod (DCPTime (), len));
break;
- case REELTYPE_BY_VIDEO_CONTENT:
+ case ReelType::BY_VIDEO_CONTENT:
{
/* Collect all reel boundaries */
list<DCPTime> split_points;
}
break;
}
- case REELTYPE_BY_LENGTH:
+ case ReelType::BY_LENGTH:
{
DCPTime current;
/* Integer-divide reel length by the size of one frame to give the number of frames per reel,
for (auto j: i->text) {
/* XXX: Empty DCPTextTrack ends up being a magic value here - the "unknown" or "not specified" track */
auto dtt = j->dcp_track().get_value_or(DCPTextTrack());
- if (j->type() == TEXT_CLOSED_CAPTION && find(tt.begin(), tt.end(), dtt) == tt.end()) {
+ if (j->type() == TextType::CLOSED_CAPTION && find(tt.begin(), tt.end(), dtt) == tt.end()) {
tt.push_back (dtt);
}
}
/*
- Copyright (C) 2016-2020 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2016-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
#include "i18n.h"
-using std::vector;
-using std::string;
-using std::pair;
-using std::min;
-using std::max;
using std::cout;
+using std::make_shared;
+using std::max;
+using std::min;
+using std::pair;
using std::shared_ptr;
+using std::string;
+using std::vector;
using std::weak_ptr;
using boost::optional;
using boost::bind;
void
Hints::check_upmixers ()
{
- AudioProcessor const * ap = film()->audio_processor();
+ auto ap = film()->audio_processor();
if (ap && (ap->id() == "stereo-5.1-upmix-a" || ap->id() == "stereo-5.1-upmix-b")) {
hint (_("You are using DCP-o-matic's stereo-to-5.1 upmixer. This is experimental and may result in poor-quality audio. If you continue, you should listen to the resulting DCP in a cinema to make sure that it sounds good."));
}
void
Hints::check_unusual_container ()
{
- string const film_container = film()->container()->id();
+ auto const film_container = film()->container()->id();
if (film_container != "185" && film_container != "239" && film_container != "190") {
hint (_("Your DCP uses an unusual container ratio. This may cause problems on some projectors. If possible, use Flat or Scope for the DCP container ratio"));
}
void
Hints::check_frame_rate ()
{
- shared_ptr<const Film> f = film ();
+ auto f = film ();
switch (f->video_frame_rate()) {
case 24:
/* Fine */
{
int three_d = 0;
for (auto i: film()->content()) {
- if (i->video && i->video->frame_type() != VIDEO_FRAME_TYPE_2D) {
+ if (i->video && i->video->frame_type() != VideoFrameType::TWO_D) {
++three_d;
}
}
void
Hints::check_loudness ()
{
- boost::filesystem::path path = film()->audio_analysis_path(film()->playlist());
+ auto path = film()->audio_analysis_path(film()->playlist());
if (boost::filesystem::exists (path)) {
try {
- shared_ptr<AudioAnalysis> an (new AudioAnalysis (path));
+ auto an = make_shared<AudioAnalysis>(path);
string ch;
- vector<AudioAnalysis::PeakTime> sample_peak = an->sample_peak ();
- vector<float> true_peak = an->true_peak ();
+ auto sample_peak = an->sample_peak ();
+ auto true_peak = an->true_peak ();
for (size_t i = 0; i < sample_peak.size(); ++i) {
float const peak = max (sample_peak[i].peak, true_peak.empty() ? 0 : true_peak[i]);
void
Hints::thread ()
{
- shared_ptr<const Film> film = _film.lock ();
+ auto film = _film.lock ();
if (!film) {
return;
}
- ContentList content = film->content ();
+ auto content = film->content ();
check_big_font_files ();
check_few_audio_channels ();
emit (bind(boost::ref(Progress), _("Examining closed captions")));
- shared_ptr<Player> player (new Player(film));
+ auto player = make_shared<Player>(film);
player->set_ignore_video ();
player->set_ignore_audio ();
player->Text.connect (bind(&Hints::text, this, _1, _2, _3, _4));
dcp::DCP dcp (dcp_dir);
dcp.read ();
DCPOMATIC_ASSERT (dcp.cpls().size() == 1);
- for (auto reel: dcp.cpls().front()->reels()) {
+ for (auto reel: dcp.cpls()[0]->reels()) {
for (auto ccap: reel->closed_captions()) {
if (ccap->asset() && ccap->asset()->xml_as_string().length() > static_cast<size_t>(MAX_CLOSED_CAPTION_XML_SIZE - SIZE_SLACK) && !ccap_xml_too_big) {
hint (_(
_writer->write (text, type, track, period);
switch (type) {
- case TEXT_CLOSED_CAPTION:
+ case TextType::CLOSED_CAPTION:
closed_caption (text, period);
break;
- case TEXT_OPEN_SUBTITLE:
+ case TextType::OPEN_SUBTITLE:
open_subtitle (text, period);
break;
default:
void
Hints::check_ffec_and_ffmc_in_smpte_feature ()
{
- shared_ptr<const Film> f = film();
+ auto f = film();
if (!f->interop() && f->dcp_content_type()->libdcp_kind() == dcp::ContentKind::FEATURE && (!f->marker(dcp::Marker::FFEC) || !f->marker(dcp::Marker::FFMC))) {
hint (_("SMPTE DCPs with the type FTR (feature) should have markers for the first frame of end credits (FFEC) and the first frame of moving credits (FFMC). You should add these markers using the 'Markers' button in the DCP tab."));
}
/* This frame already has J2K data, so just write it */
_writer->write (pv->j2k(), position, pv->eyes ());
frame_done ();
- } else if (_last_player_video[pv->eyes()] && _writer->can_repeat(position) && pv->same (_last_player_video[pv->eyes()])) {
+ } else if (_last_player_video[static_cast<int>(pv->eyes())] && _writer->can_repeat(position) && pv->same (_last_player_video[static_cast<int>(pv->eyes())])) {
LOG_DEBUG_ENCODE("Frame @ %1 REPEAT", to_string(time));
_writer->repeat (position, pv->eyes ());
} else {
_empty_condition.notify_all ();
}
- _last_player_video[pv->eyes()] = pv;
+ _last_player_video[static_cast<int>(pv->eyes())] = pv;
_last_player_video_time = time;
}
std::shared_ptr<Writer> _writer;
Waker _waker;
- std::shared_ptr<PlayerVideo> _last_player_video[EYES_COUNT];
+ std::shared_ptr<PlayerVideo> _last_player_video[static_cast<int>(Eyes::COUNT)];
boost::optional<dcpomatic::DCPTime> _last_player_video_time;
boost::signals2::scoped_connection _server_found_connection;
J2KImageProxy (std::shared_ptr<cxml::Node> xml, std::shared_ptr<Socket> socket);
+ /* For tests */
+ J2KImageProxy (dcp::ArrayData data, dcp::Size size, AVPixelFormat pixel_format);
+
Result image (
boost::optional<dcp::Size> size = boost::optional<dcp::Size> ()
) const;
size_t memory_used () const;
private:
- friend struct client_server_test_j2k;
-
- /* For tests */
- J2KImageProxy (dcp::ArrayData data, dcp::Size size, AVPixelFormat pixel_format);
-
std::shared_ptr<const dcp::Data> _data;
dcp::Size _size;
boost::optional<dcp::Eye> _eye;
#include "i18n.h"
-using std::list;
+using std::copy;
using std::cout;
-using std::min;
+using std::dynamic_pointer_cast;
+using std::list;
+using std::make_pair;
+using std::make_shared;
+using std::map;
using std::max;
using std::min;
-using std::vector;
using std::pair;
-using std::map;
-using std::make_pair;
-using std::copy;
using std::shared_ptr;
+using std::vector;
using std::weak_ptr;
-using std::dynamic_pointer_cast;
using boost::optional;
using boost::scoped_ptr;
#if BOOST_VERSION >= 106100
Player::Player (shared_ptr<const Film> film)
: _film (film)
, _suspended (0)
- , _ignore_video (false)
- , _ignore_audio (false)
- , _ignore_text (false)
- , _always_burn_open_subtitles (false)
- , _fast (false)
, _tolerant (film->tolerant())
- , _play_referenced (false)
, _audio_merger (_film->audio_frame_rate())
- , _shuffler (0)
{
construct ();
}
: _film (film)
, _playlist (playlist_)
, _suspended (0)
- , _ignore_video (false)
- , _ignore_audio (false)
- , _ignore_text (false)
- , _always_burn_open_subtitles (false)
- , _fast (false)
, _tolerant (film->tolerant())
- , _play_referenced (false)
, _audio_merger (_film->audio_frame_rate())
- , _shuffler (0)
{
construct ();
}
_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
set_video_container_size (_film->frame_size ());
- film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
+ film_change (ChangeType::DONE, Film::AUDIO_PROCESSOR);
setup_pieces ();
seek (DCPTime (), true);
{
_playback_length = _playlist ? _playlist->length(_film) : _film->length();
- list<shared_ptr<Piece> > old_pieces = _pieces;
+ auto old_pieces = _pieces;
_pieces.clear ();
delete _shuffler;
}
}
- shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
+ auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
DCPOMATIC_ASSERT (decoder);
FrameRateChange frc (_film, i);
}
}
- shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
+ auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
if (dcp) {
dcp->set_decode_referenced (_play_referenced);
if (_play_referenced) {
}
}
- shared_ptr<Piece> piece (new Piece (i, decoder, frc));
+ auto piece = make_shared<Piece>(i, decoder, frc);
_pieces.push_back (piece);
if (decoder->video) {
- if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
+ if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
- decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
+ decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
} else {
- decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
+ decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
}
}
decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
}
- list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
+ auto j = decoder->text.begin();
while (j != decoder->text.end()) {
(*j)->BitmapStart.connect (
_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
_last_video_time = DCPTime ();
- _last_video_eyes = EYES_BOTH;
+ _last_video_eyes = Eyes::BOTH;
_last_audio_time = DCPTime ();
}
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
if (property == VideoContentProperty::CROP) {
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
dcp::Size const vcs = video_container_size();
boost::mutex::scoped_lock lm (_mutex);
for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
}
}
} else {
- if (type == CHANGE_TYPE_PENDING) {
+ if (type == ChangeType::PENDING) {
/* The player content is probably about to change, so we can't carry on
until that has happened and we've rebuilt our pieces. Stop pass()
and seek() from working until then.
*/
++_suspended;
- } else if (type == CHANGE_TYPE_DONE) {
+ } else if (type == ChangeType::DONE) {
/* A change in our content has gone through. Re-build our pieces. */
setup_pieces ();
--_suspended;
- } else if (type == CHANGE_TYPE_CANCELLED) {
+ } else if (type == ChangeType::CANCELLED) {
--_suspended;
}
}
void
Player::set_video_container_size (dcp::Size s)
{
- Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+ Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
{
boost::mutex::scoped_lock lm (_mutex);
if (s == _video_container_size) {
lm.unlock ();
- Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+ Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
return;
}
_black_image->make_black ();
}
- Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+ Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
void
Player::playlist_change (ChangeType type)
{
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
setup_pieces ();
}
Change (type, PlayerProperty::PLAYLIST, false);
/* Pieces contain a FrameRateChange which contains the DCP frame rate,
so we need new pieces here.
*/
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
setup_pieces ();
}
Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
} else if (p == Film::AUDIO_PROCESSOR) {
- if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
+ if (type == ChangeType::DONE && _film->audio_processor ()) {
boost::mutex::scoped_lock lm (_mutex);
_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
}
} else if (p == Film::AUDIO_CHANNELS) {
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
boost::mutex::scoped_lock lm (_mutex);
_audio_merger.clear ();
}
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
- return shared_ptr<PlayerVideo> (
- new PlayerVideo (
- shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
- Crop (),
- optional<double> (),
- _video_container_size,
- _video_container_size,
- eyes,
- PART_WHOLE,
- PresetColourConversion::all().front().conversion,
- VideoRange::FULL,
- std::weak_ptr<Content>(),
- boost::optional<Frame>(),
- false
- )
+ return std::make_shared<PlayerVideo> (
+ std::make_shared<const RawImageProxy>(_black_image),
+ Crop(),
+ optional<double>(),
+ _video_container_size,
+ _video_container_size,
+ eyes,
+ Part::WHOLE,
+ PresetColourConversion::all().front().conversion,
+ VideoRange::FULL,
+ std::weak_ptr<Content>(),
+ boost::optional<Frame>(),
+ false
);
}
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
/* See comment in dcp_to_content_video */
- DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
+ auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
return d + piece->content->position();
}
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
- DCPTime s = t - piece->content->position ();
+ auto s = t - piece->content->position ();
s = min (piece->content->length_after_trim(_film), s);
/* See notes in dcp_to_content_video */
return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
- DCPTime s = t - piece->content->position ();
+ auto s = t - piece->content->position ();
s = min (piece->content->length_after_trim(_film), s);
return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
- return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
+ return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
vector<FontData>
/* XXX: things may go wrong if there are duplicate font IDs
with different font files.
*/
- vector<FontData> f = i->decoder->fonts ();
+ auto f = i->decoder->fonts ();
copy (f.begin(), f.end(), back_inserter(fonts));
}
maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
}
- if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
+ if (j->reference_text (TextType::OPEN_SUBTITLE)) {
maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
}
- if (j->reference_text (TEXT_CLOSED_CAPTION)) {
+ if (j->reference_text (TextType::CLOSED_CAPTION)) {
for (auto l: k->closed_captions()) {
maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
}
if (_playback_length == DCPTime()) {
/* Special; just give one black frame */
- emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
+ emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
return true;
}
}
case BLACK:
LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
- emit_video (black_player_video_frame(EYES_BOTH), _black.position());
+ emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
_black.set_position (_black.position() + one_video_frame());
break;
case SILENT:
for (
auto j:
- _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
+ _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
) {
/* Bitmap subtitles */
LastVideoMap::const_iterator last = _last_video.find (wp);
if (_film->three_d()) {
Eyes fill_to_eyes = video.eyes;
- if (fill_to_eyes == EYES_BOTH) {
- fill_to_eyes = EYES_LEFT;
+ if (fill_to_eyes == Eyes::BOTH) {
+ fill_to_eyes = Eyes::LEFT;
}
if (fill_to == piece->content->end(_film)) {
/* Don't fill after the end of the content */
- fill_to_eyes = EYES_LEFT;
+ fill_to_eyes = Eyes::LEFT;
}
DCPTime j = fill_from;
- Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
- if (eyes == EYES_BOTH) {
- eyes = EYES_LEFT;
+ Eyes eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
+ if (eyes == Eyes::BOTH) {
+ eyes = Eyes::LEFT;
}
while (j < fill_to || eyes != fill_to_eyes) {
if (last != _last_video.end()) {
LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
emit_video (black_player_video_frame(eyes), j);
}
- if (eyes == EYES_RIGHT) {
+ if (eyes == Eyes::RIGHT) {
j += one_video_frame();
}
eyes = increment_eyes (eyes);
if (last != _last_video.end()) {
emit_video (last->second, j);
} else {
- emit_video (black_player_video_frame(EYES_BOTH), j);
+ emit_video (black_player_video_frame(Eyes::BOTH), j);
}
}
}
ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
DCPTime from (content_time_to_dcp (piece, subtitle.from()));
- _active_texts[text->type()].add_from (wc, ps, from);
+ _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}
void
ps.add_fonts (text->fonts ());
}
- _active_texts[text->type()].add_from (wc, ps, from);
+ _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}
void
return;
}
- if (!_active_texts[text->type()].have(wc)) {
+ if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
return;
}
return;
}
- pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
+ pair<PlayerText, DCPTime> from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
- bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
+ bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
if (text->use() && !always && !text->burn()) {
Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
}
}
_audio_merger.clear ();
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
_active_texts[i].clear ();
}
if (accurate) {
_last_video_time = time;
- _last_video_eyes = EYES_LEFT;
+ _last_video_eyes = Eyes::LEFT;
_last_audio_time = time;
} else {
_last_video_time = optional<DCPTime>();
*/
_delay.push_back (make_pair (pv, time));
- if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
+ if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
_last_video_time = time + one_video_frame();
}
_last_video_eyes = increment_eyes (pv->eyes());
void
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
- if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
_active_texts[i].clear_before (time);
}
}
- optional<PositionImage> subtitles = open_subtitles_for_frame (time);
+ auto subtitles = open_subtitles_for_frame (time);
if (subtitles) {
pv->set_text (subtitles.get ());
}
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
- Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
+ Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
{
boost::mutex::scoped_lock lm (_mutex);
if (reduction == _dcp_decode_reduction) {
lm.unlock ();
- Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
+ Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
return;
}
setup_pieces_unlocked ();
}
- Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
+ Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
optional<DCPTime>
}
/* We couldn't find this content; perhaps things are being changed over */
- return optional<DCPTime>();
+ return {};
}
std::shared_ptr<Image> _black_image;
/** true if the player should ignore all video; i.e. never produce any */
- bool _ignore_video;
- bool _ignore_audio;
+ bool _ignore_video = false;
+ bool _ignore_audio = false;
/** true if the player should ignore all text; i.e. never produce any */
- bool _ignore_text;
- bool _always_burn_open_subtitles;
+ bool _ignore_text = false;
+ bool _always_burn_open_subtitles = false;
/** true if we should try to be fast rather than high quality */
- bool _fast;
+ bool _fast = false;
/** true if we should keep going in the face of `survivable' errors */
- bool _tolerant;
+ bool _tolerant = false;
/** true if we should `play' (i.e output) referenced DCP data (e.g. for preview) */
- bool _play_referenced;
+ bool _play_referenced = false;
/** Time just after the last video frame we emitted, or the time of the last accurate seek */
boost::optional<dcpomatic::DCPTime> _last_video_time;
LastVideoMap _last_video;
AudioMerger _audio_merger;
- Shuffler* _shuffler;
+ Shuffler* _shuffler = nullptr;
std::list<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime> > _delay;
class StreamState
Empty _black;
Empty _silent;
- ActiveText _active_texts[TEXT_COUNT];
+ ActiveText _active_texts[static_cast<int>(TextType::COUNT)];
std::shared_ptr<AudioProcessor> _audio_processor;
dcpomatic::DCPTime _playback_length;
/*
- Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
*/
-#include "player_video.h"
#include "content.h"
-#include "video_content.h"
+#include "film.h"
#include "image.h"
#include "image_proxy.h"
#include "j2k_image_proxy.h"
-#include "film.h"
#include "player.h"
+#include "player_video.h"
+#include "video_content.h"
#include <dcp/raw_convert.h>
extern "C" {
#include <libavutil/pixfmt.h>
#include <libxml++/libxml++.h>
#include <iostream>
-using std::string;
using std::cout;
+using std::dynamic_pointer_cast;
+using std::make_shared;
using std::pair;
using std::shared_ptr;
+using std::string;
using std::weak_ptr;
-using std::dynamic_pointer_cast;
using boost::optional;
using boost::function;
using dcp::Data;
_inter_size = dcp::Size (node->number_child<int> ("InterWidth"), node->number_child<int> ("InterHeight"));
_out_size = dcp::Size (node->number_child<int> ("OutWidth"), node->number_child<int> ("OutHeight"));
- _eyes = (Eyes) node->number_child<int> ("Eyes");
- _part = (Part) node->number_child<int> ("Part");
- _video_range = (VideoRange) node->number_child<int>("VideoRange");
+ _eyes = static_cast<Eyes>(node->number_child<int>("Eyes"));
+ _part = static_cast<Part>(node->number_child<int>("Part"));
+ _video_range = static_cast<VideoRange>(node->number_child<int>("VideoRange"));
_error = node->optional_bool_child("Error").get_value_or (false);
/* Assume that the ColourConversion uses the current state version */
_colour_conversion = ColourConversion::from_xml (node, Film::current_state_version);
- _in = image_proxy_factory (node->node_child ("In"), socket);
+ _in = image_proxy_factory (node->node_child("In"), socket);
- if (node->optional_number_child<int> ("SubtitleX")) {
+ if (node->optional_number_child<int>("SubtitleX")) {
- shared_ptr<Image> image (
- new Image (AV_PIX_FMT_BGRA, dcp::Size (node->number_child<int> ("SubtitleWidth"), node->number_child<int> ("SubtitleHeight")), true)
+ auto image = make_shared<Image> (
+ AV_PIX_FMT_BGRA, dcp::Size(node->number_child<int>("SubtitleWidth"), node->number_child<int>("SubtitleHeight")), true
);
image->read_from_socket (socket);
- _text = PositionImage (image, Position<int> (node->number_child<int> ("SubtitleX"), node->number_child<int> ("SubtitleY")));
+ _text = PositionImage (image, Position<int>(node->number_child<int>("SubtitleX"), node->number_child<int>("SubtitleY")));
}
}
_image_out_size = _out_size;
_image_fade = _fade;
- ImageProxy::Result prox = _in->image (_inter_size);
+ auto prox = _in->image (_inter_size);
_error = prox.error;
- Crop total_crop = _crop;
+ auto total_crop = _crop;
switch (_part) {
- case PART_LEFT_HALF:
+ case Part::LEFT_HALF:
total_crop.right += prox.image->size().width / 2;
break;
- case PART_RIGHT_HALF:
+ case Part::RIGHT_HALF:
total_crop.left += prox.image->size().width / 2;
break;
- case PART_TOP_HALF:
+ case Part::TOP_HALF:
total_crop.bottom += prox.image->size().height / 2;
break;
- case PART_BOTTOM_HALF:
+ case Part::BOTTOM_HALF:
total_crop.top += prox.image->size().height / 2;
break;
default:
{
/* XXX: maybe other things */
- shared_ptr<const J2KImageProxy> j2k = dynamic_pointer_cast<const J2KImageProxy> (_in);
+ auto j2k = dynamic_pointer_cast<const J2KImageProxy> (_in);
if (!j2k) {
return false;
}
- return _crop == Crop () && _out_size == j2k->size() && !_text && !_fade && !_colour_conversion;
+ return _crop == Crop() && _out_size == j2k->size() && !_text && !_fade && !_colour_conversion;
}
shared_ptr<const dcp::Data>
PlayerVideo::j2k () const
{
- shared_ptr<const J2KImageProxy> j2k = dynamic_pointer_cast<const J2KImageProxy> (_in);
+ auto j2k = dynamic_pointer_cast<const J2KImageProxy> (_in);
DCPOMATIC_ASSERT (j2k);
return j2k->j2k ();
}
shared_ptr<PlayerVideo>
PlayerVideo::shallow_copy () const
{
- return shared_ptr<PlayerVideo>(
- new PlayerVideo(
- _in,
- _crop,
- _fade,
- _inter_size,
- _out_size,
- _eyes,
- _part,
- _colour_conversion,
- _video_range,
- _content,
- _video_frame,
- _error
- )
+ return std::make_shared<PlayerVideo>(
+ _in,
+ _crop,
+ _fade,
+ _inter_size,
+ _out_size,
+ _eyes,
+ _part,
+ _colour_conversion,
+ _video_range,
+ _content,
+ _video_frame,
+ _error
);
}
bool
PlayerVideo::reset_metadata (shared_ptr<const Film> film, dcp::Size player_video_container_size)
{
- shared_ptr<Content> content = _content.lock();
+ auto content = _content.lock();
if (!content || !_video_frame) {
return false;
}
void
Playlist::content_change (weak_ptr<const Film> weak_film, ChangeType type, weak_ptr<Content> content, int property, bool frequent)
{
- shared_ptr<const Film> film = weak_film.lock ();
+ auto film = weak_film.lock ();
DCPOMATIC_ASSERT (film);
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
if (
property == ContentProperty::TRIM_START ||
property == ContentProperty::TRIM_END ||
continue;
}
- if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
+ if (i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
i->set_position (film, next_right);
next_right = i->end(film);
} else {
boost::mutex::scoped_lock lm (_mutex);
for (auto i: node->node_children ("Content")) {
- shared_ptr<Content> content = content_factory (i, version, notes);
+ auto content = content_factory (i, version, notes);
/* See if this content should be nudged to start on a video frame */
- DCPTime const old_pos = content->position();
+ auto const old_pos = content->position();
content->set_position(film, old_pos);
if (old_pos != content->position()) {
string note = _("Your project contains video content that was not aligned to a frame boundary.");
void
Playlist::add (shared_ptr<const Film> film, shared_ptr<Content> c)
{
- Change (CHANGE_TYPE_PENDING);
+ Change (ChangeType::PENDING);
{
boost::mutex::scoped_lock lm (_mutex);
reconnect (film);
}
- Change (CHANGE_TYPE_DONE);
+ Change (ChangeType::DONE);
LengthChange ();
}
void
Playlist::remove (shared_ptr<Content> c)
{
- Change (CHANGE_TYPE_PENDING);
+ Change (ChangeType::PENDING);
bool cancelled = false;
{
boost::mutex::scoped_lock lm (_mutex);
- ContentList::iterator i = _content.begin ();
+ auto i = _content.begin ();
while (i != _content.end() && *i != c) {
++i;
}
}
if (cancelled) {
- Change (CHANGE_TYPE_CANCELLED);
+ Change (ChangeType::CANCELLED);
} else {
- Change (CHANGE_TYPE_DONE);
+ Change (ChangeType::DONE);
}
/* This won't change order, so it does not need a sort */
void
Playlist::remove (ContentList c)
{
- Change (CHANGE_TYPE_PENDING);
+ Change (ChangeType::PENDING);
{
boost::mutex::scoped_lock lm (_mutex);
}
}
- Change (CHANGE_TYPE_DONE);
+ Change (ChangeType::DONE);
/* This won't change order, so it does not need a sort */
int
Playlist::best_video_frame_rate () const
{
- list<int> const allowed_dcp_frame_rates = Config::instance()->allowed_dcp_frame_rates ();
+ auto const allowed_dcp_frame_rates = Config::instance()->allowed_dcp_frame_rates ();
/* Work out what rates we could manage, including those achieved by using skip / repeat */
list<FrameRateCandidate> candidates;
/* Pick the best one */
float error = std::numeric_limits<float>::max ();
optional<FrameRateCandidate> best;
- list<FrameRateCandidate>::iterator i = candidates.begin();
+ auto i = candidates.begin();
while (i != candidates.end()) {
float this_error = 0;
optional<DCPTime>
Playlist::start () const
{
- ContentList cont = content ();
+ auto cont = content ();
if (cont.empty()) {
- return optional<DCPTime> ();
+ return {};
}
- DCPTime start = DCPTime::max ();
+ auto start = DCPTime::max ();
for (auto i: cont) {
start = min (start, i->position ());
}
range.second = max (range.second, i->end(film));
}
- Change (CHANGE_TYPE_PENDING);
+ Change (ChangeType::PENDING);
{
boost::mutex::scoped_lock lm (_mutex);
DCPTime pos = range.second;
for (int i = 0; i < n; ++i) {
for (auto j: c) {
- shared_ptr<Content> copy = j->clone ();
+ auto copy = j->clone ();
copy->set_position (film, pos + copy->position() - range.first);
_content.push_back (copy);
}
reconnect (film);
}
- Change (CHANGE_TYPE_DONE);
+ Change (ChangeType::DONE);
}
void
Playlist::move_earlier (shared_ptr<const Film> film, shared_ptr<Content> c)
{
- ContentList cont = content ();
- ContentList::iterator previous = cont.end();
- ContentList::iterator i = cont.begin();
+ auto cont = content ();
+ auto previous = cont.end();
+ auto i = cont.begin();
while (i != cont.end() && *i != c) {
previous = i;
++i;
return;
}
- shared_ptr<Content> previous_c = *previous;
+ auto previous_c = *previous;
- DCPTime const p = previous_c->position ();
+ auto const p = previous_c->position ();
previous_c->set_position (film, p + c->length_after_trim(film));
c->set_position (film, p);
}
void
Playlist::move_later (shared_ptr<const Film> film, shared_ptr<Content> c)
{
- ContentList cont = content ();
- ContentList::iterator i = cont.begin();
+ auto cont = content ();
+ auto i = cont.begin();
while (i != cont.end() && *i != c) {
++i;
}
return;
}
- shared_ptr<Content> next_c = *next;
+ auto next_c = *next;
next_c->set_position (film, c->position());
c->set_position (film, c->position() + next_c->length_after_trim(film));
int64_t audio = uint64_t (audio_channels * audio_frame_rate * 3) * length(film).seconds();
for (auto i: content()) {
- shared_ptr<DCPContent> d = dynamic_pointer_cast<DCPContent> (i);
+ auto d = dynamic_pointer_cast<DCPContent> (i);
if (d) {
if (d->reference_video()) {
video -= uint64_t (j2k_bandwidth / 8) * d->length_after_trim(film).seconds();
int best_score = -1;
for (auto i: content()) {
int score = 0;
- optional<DCPTimePeriod> const o = DCPTimePeriod(i->position(), i->end(film)).overlap (period);
+ auto const o = DCPTimePeriod(i->position(), i->end(film)).overlap (period);
if (o) {
score += 100 * o.get().duration().get() / period.duration().get();
}
mxf_metadata ()
{
dcp::MXFMetadata meta;
- Config* config = Config::instance();
+ auto config = Config::instance();
if (!config->dcp_company_name().empty()) {
meta.company_name = config->dcp_company_name ();
}
output. We will hard-link it into the DCP later.
*/
- dcp::Standard const standard = film()->interop() ? dcp::Standard::INTEROP : dcp::Standard::SMPTE;
+ auto const standard = film()->interop() ? dcp::Standard::INTEROP : dcp::Standard::SMPTE;
boost::filesystem::path const asset =
film()->internal_video_asset_dir() / film()->internal_video_asset_filename(_period);
/* We already have a complete picture asset that we can just re-use */
/* XXX: what about if the encryption key changes? */
if (film()->three_d()) {
- _picture_asset.reset (new dcp::StereoPictureAsset(asset));
+ _picture_asset = make_shared<dcp::StereoPictureAsset>(asset);
} else {
- _picture_asset.reset (new dcp::MonoPictureAsset(asset));
+ _picture_asset = make_shared<dcp::MonoPictureAsset>(asset);
}
}
if (film()->audio_channels()) {
- _sound_asset.reset (
- new dcp::SoundAsset (dcp::Fraction(film()->video_frame_rate(), 1), film()->audio_frame_rate(), film()->audio_channels(), film()->audio_language(), standard)
+ _sound_asset = make_shared<dcp::SoundAsset> (
+ dcp::Fraction(film()->video_frame_rate(), 1), film()->audio_frame_rate(), film()->audio_channels(), film()->audio_language(), standard
);
_sound_asset->set_metadata (mxf_metadata());
void
ReelWriter::write_frame_info (Frame frame, Eyes eyes, dcp::FrameInfo info) const
{
- shared_ptr<InfoFileHandle> handle = film()->info_file_handle(_period, false);
+ auto handle = film()->info_file_handle(_period, false);
dcpomatic_fseek (handle->get(), frame_info_position(frame, eyes), SEEK_SET);
checked_fwrite (&info.offset, sizeof(info.offset), handle->get(), handle->file());
checked_fwrite (&info.size, sizeof (info.size), handle->get(), handle->file());
ReelWriter::frame_info_position (Frame frame, Eyes eyes) const
{
switch (eyes) {
- case EYES_BOTH:
+ case Eyes::BOTH:
return frame * _info_size;
- case EYES_LEFT:
+ case Eyes::LEFT:
return frame * _info_size * 2;
- case EYES_RIGHT:
+ case Eyes::RIGHT:
return frame * _info_size * 2 + _info_size;
default:
DCPOMATIC_ASSERT (false);
Frame
ReelWriter::check_existing_picture_asset (boost::filesystem::path asset)
{
- shared_ptr<Job> job = _job.lock ();
+ auto job = _job.lock ();
if (job) {
job->sub (_("Checking existing image data"));
}
/* Try to open the existing asset */
- FILE* asset_file = fopen_boost (asset, "rb");
+ auto asset_file = fopen_boost (asset, "rb");
if (!asset_file) {
LOG_GENERAL ("Could not open existing asset at %1 (errno=%2)", asset.string(), errno);
return 0;
return;
}
- dcp::FrameInfo fin = _picture_asset_writer->write (encoded->data(), encoded->size());
+ auto fin = _picture_asset_writer->write (encoded->data(), encoded->size());
write_frame_info (frame, eyes, fin);
- _last_written[eyes] = encoded;
+ _last_written[static_cast<int>(eyes)] = encoded;
}
return;
}
- dcp::FrameInfo fin = _picture_asset_writer->write (
- _last_written[eyes]->data(),
- _last_written[eyes]->size()
+ auto fin = _picture_asset_writer->write (
+ _last_written[static_cast<int>(eyes)]->data(),
+ _last_written[static_cast<int>(eyes)]->size()
);
write_frame_info (frame, eyes, fin);
}
boost::filesystem::create_hard_link (video_from, video_to, ec);
if (ec) {
LOG_WARNING_NC ("Hard-link failed; copying instead");
- shared_ptr<Job> job = _job.lock ();
+ auto job = _job.lock ();
if (job) {
job->sub (_("Copying video file into DCP"));
try {
/* Move the audio asset into the DCP */
if (_sound_asset) {
boost::filesystem::path audio_to = output_dcp;
- string const aaf = audio_asset_filename (_sound_asset, _reel_index, _reel_count, _content_summary);
+ auto const aaf = audio_asset_filename (_sound_asset, _reel_index, _reel_count, _content_summary);
audio_to /= aaf;
boost::system::error_code ec;
if (_atmos_asset) {
_atmos_asset_writer->finalize ();
boost::filesystem::path atmos_to = output_dcp;
- string const aaf = atmos_asset_filename (_atmos_asset, _reel_index, _reel_count, _content_summary);
+ auto const aaf = atmos_asset_filename (_atmos_asset, _reel_index, _reel_count, _content_summary);
atmos_to /= aaf;
boost::system::error_code ec;
}
if (dynamic_pointer_cast<dcp::InteropSubtitleAsset> (asset)) {
- boost::filesystem::path directory = output_dcp / asset->id ();
+ auto directory = output_dcp / asset->id ();
boost::filesystem::create_directories (directory);
asset->write (directory / ("sub_" + asset->id() + ".xml"));
} else {
);
}
- reel_asset.reset (
- new T (
- asset,
- dcp::Fraction (film->video_frame_rate(), 1),
- picture_duration,
- 0
- )
+ reel_asset = make_shared<T> (
+ asset,
+ dcp::Fraction(film->video_frame_rate(), 1),
+ picture_duration,
+ 0
);
} else {
/* We don't have a subtitle asset of our own; hopefully we have one to reference */
for (auto j: refs) {
- shared_ptr<T> k = dynamic_pointer_cast<T> (j.asset);
+ auto k = dynamic_pointer_cast<T> (j.asset);
if (k && j.period == period) {
reel_asset = k;
/* If we have a hash for this asset in the CPL, assume that it is correct */
if (_picture_asset) {
/* We have made a picture asset of our own. Put it into the reel */
- shared_ptr<dcp::MonoPictureAsset> mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (_picture_asset);
+ auto mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (_picture_asset);
if (mono) {
- reel_asset.reset (new dcp::ReelMonoPictureAsset (mono, 0));
+ reel_asset = make_shared<dcp::ReelMonoPictureAsset>(mono, 0);
}
- shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (_picture_asset);
+ auto stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (_picture_asset);
if (stereo) {
- reel_asset.reset (new dcp::ReelStereoPictureAsset (stereo, 0));
+ reel_asset = make_shared<dcp::ReelStereoPictureAsset>(stereo, 0);
}
} else {
LOG_GENERAL ("no picture asset of our own; look through %1", refs.size());
/* We don't have a picture asset of our own; hopefully we have one to reference */
for (auto j: refs) {
- shared_ptr<dcp::ReelPictureAsset> k = dynamic_pointer_cast<dcp::ReelPictureAsset> (j.asset);
+ auto k = dynamic_pointer_cast<dcp::ReelPictureAsset> (j.asset);
if (k) {
LOG_GENERAL ("candidate picture asset period is %1-%2", j.period.from.get(), j.period.to.get());
}
if (_sound_asset) {
/* We have made a sound asset of our own. Put it into the reel */
- reel_asset.reset (new dcp::ReelSoundAsset(_sound_asset, 0));
+ reel_asset = make_shared<dcp::ReelSoundAsset>(_sound_asset, 0);
} else {
LOG_GENERAL ("no sound asset of our own; look through %1", refs.size());
/* We don't have a sound asset of our own; hopefully we have one to reference */
for (auto j: refs) {
- shared_ptr<dcp::ReelSoundAsset> k = dynamic_pointer_cast<dcp::ReelSoundAsset> (j.asset);
+ auto k = dynamic_pointer_cast<dcp::ReelSoundAsset> (j.asset);
if (k) {
LOG_GENERAL ("candidate sound asset period is %1-%2", j.period.from.get(), j.period.to.get());
}
}
}
- Frame const period_duration = _period.duration().frames_round(film()->video_frame_rate());
+ auto const period_duration = _period.duration().frames_round(film()->video_frame_rate());
DCPOMATIC_ASSERT (reel_asset);
if (reel_asset->actual_duration() != period_duration) {
set<DCPTextTrack> ensure_closed_captions
) const
{
- shared_ptr<dcp::ReelSubtitleAsset> subtitle = maybe_add_text<dcp::ReelSubtitleAsset> (
+ auto subtitle = maybe_add_text<dcp::ReelSubtitleAsset> (
_subtitle_asset, duration, reel, refs, fonts, _default_font, film(), _period, output_dcp, _text_only
);
} else if (ensure_subtitles) {
/* We had no subtitle asset, but we've been asked to make sure there is one */
subtitle = maybe_add_text<dcp::ReelSubtitleAsset>(
- empty_text_asset(TEXT_OPEN_SUBTITLE, optional<DCPTextTrack>()),
+ empty_text_asset(TextType::OPEN_SUBTITLE, optional<DCPTextTrack>()),
duration,
reel,
refs,
);
}
- for (map<DCPTextTrack, shared_ptr<dcp::SubtitleAsset> >::const_iterator i = _closed_caption_assets.begin(); i != _closed_caption_assets.end(); ++i) {
- shared_ptr<dcp::ReelClosedCaptionAsset> a = maybe_add_text<dcp::ReelClosedCaptionAsset> (
- i->second, duration, reel, refs, fonts, _default_font, film(), _period, output_dcp, _text_only
+ for (auto const& i: _closed_caption_assets) {
+ auto a = maybe_add_text<dcp::ReelClosedCaptionAsset> (
+ i.second, duration, reel, refs, fonts, _default_font, film(), _period, output_dcp, _text_only
);
DCPOMATIC_ASSERT (a);
- a->set_annotation_text (i->first.name);
- if (!i->first.language.empty()) {
- a->set_language (dcp::LanguageTag(i->first.language));
+ a->set_annotation_text (i.first.name);
+ if (!i.first.language.empty()) {
+ a->set_language (dcp::LanguageTag(i.first.language));
}
- ensure_closed_captions.erase (i->first);
+ ensure_closed_captions.erase (i.first);
}
/* Make empty tracks for anything we've been asked to ensure but that we haven't added */
for (auto i: ensure_closed_captions) {
- shared_ptr<dcp::ReelClosedCaptionAsset> a = maybe_add_text<dcp::ReelClosedCaptionAsset> (
- empty_text_asset(TEXT_CLOSED_CAPTION, i), duration, reel, refs, fonts, _default_font, film(), _period, output_dcp, _text_only
+ auto a = maybe_add_text<dcp::ReelClosedCaptionAsset> (
+ empty_text_asset(TextType::CLOSED_CAPTION, i), duration, reel, refs, fonts, _default_font, film(), _period, output_dcp, _text_only
);
DCPOMATIC_ASSERT (a);
a->set_annotation_text (i.name);
void
ReelWriter::create_reel_markers (shared_ptr<dcp::Reel> reel) const
{
- Film::Markers markers = film()->markers();
+ auto markers = film()->markers();
film()->add_ffoc_lfoc(markers);
Film::Markers reel_markers;
- for (Film::Markers::const_iterator i = markers.begin(); i != markers.end(); ++i) {
- if (_period.contains(i->second)) {
- reel_markers[i->first] = i->second;
+ for (auto const& i: markers) {
+ if (_period.contains(i.second)) {
+ reel_markers[i.first] = i.second;
}
}
if (!reel_markers.empty ()) {
auto ma = make_shared<dcp::ReelMarkersAsset>(dcp::Fraction(film()->video_frame_rate(), 1), reel->duration(), 0);
- for (map<dcp::Marker, DCPTime>::const_iterator i = reel_markers.begin(); i != reel_markers.end(); ++i) {
+ for (auto const& i: reel_markers) {
int h, m, s, f;
- DCPTime relative = i->second - _period.from;
+ DCPTime relative = i.second - _period.from;
relative.split (film()->video_frame_rate(), h, m, s, f);
- ma->set (i->first, dcp::Time(h, m, s, f, film()->video_frame_rate()));
+ ma->set (i.first, dcp::Time(h, m, s, f, film()->video_frame_rate()));
}
reel->add (ma);
}
{
LOG_GENERAL ("create_reel for %1-%2; %3 of %4", _period.from.get(), _period.to.get(), _reel_index, _reel_count);
- shared_ptr<dcp::Reel> reel (new dcp::Reel());
+ auto reel = make_shared<dcp::Reel>();
/* This is a bit of a hack; in the strange `_text_only' mode we have no picture, so we don't know
* how long the subtitle / CCAP assets should be. However, since we're only writing them to see
*/
int64_t duration = 0;
if (!_text_only) {
- shared_ptr<dcp::ReelPictureAsset> reel_picture_asset = create_reel_picture (reel, refs);
+ auto reel_picture_asset = create_reel_picture (reel, refs);
duration = reel_picture_asset->actual_duration ();
create_reel_sound (reel, refs);
create_reel_markers (reel);
create_reel_text (reel, refs, fonts, duration, output_dcp, ensure_subtitles, ensure_closed_captions);
if (_atmos_asset) {
- reel->add (shared_ptr<dcp::ReelAtmosAsset>(new dcp::ReelAtmosAsset(_atmos_asset, 0)));
+ reel->add (make_shared<dcp::ReelAtmosAsset>(_atmos_asset, 0));
}
return reel;
{
shared_ptr<dcp::SubtitleAsset> asset;
- vector<dcp::LanguageTag> lang = film()->subtitle_languages();
+ auto lang = film()->subtitle_languages();
if (film()->interop()) {
- shared_ptr<dcp::InteropSubtitleAsset> s (new dcp::InteropSubtitleAsset ());
+ auto s = make_shared<dcp::InteropSubtitleAsset>();
s->set_movie_title (film()->name());
- if (type == TEXT_OPEN_SUBTITLE) {
+ if (type == TextType::OPEN_SUBTITLE) {
s->set_language (lang.empty() ? "Unknown" : lang.front().to_string());
} else if (!track->language.empty()) {
s->set_language (track->language);
shared_ptr<dcp::SMPTESubtitleAsset> s (new dcp::SMPTESubtitleAsset ());
s->set_content_title_text (film()->name());
s->set_metadata (mxf_metadata());
- if (type == TEXT_OPEN_SUBTITLE && !lang.empty()) {
+ if (type == TextType::OPEN_SUBTITLE && !lang.empty()) {
s->set_language (lang.front());
} else if (track && !track->language.empty()) {
s->set_language (dcp::LanguageTag(track->language));
shared_ptr<dcp::SubtitleAsset> asset;
switch (type) {
- case TEXT_OPEN_SUBTITLE:
+ case TextType::OPEN_SUBTITLE:
asset = _subtitle_asset;
break;
- case TEXT_CLOSED_CAPTION:
+ case TextType::CLOSED_CAPTION:
DCPOMATIC_ASSERT (track);
asset = _closed_caption_assets[*track];
break;
}
switch (type) {
- case TEXT_OPEN_SUBTITLE:
+ case TextType::OPEN_SUBTITLE:
_subtitle_asset = asset;
break;
- case TEXT_CLOSED_CAPTION:
+ case TextType::CLOSED_CAPTION:
DCPOMATIC_ASSERT (track);
_closed_caption_assets[*track] = asset;
break;
for (auto i: subs.bitmap) {
asset->add (
- shared_ptr<dcp::Subtitle>(
- new dcp::SubtitleImage(
- i.image->as_png(),
- dcp::Time(period.from.seconds() - _period.from.seconds(), film()->video_frame_rate()),
- dcp::Time(period.to.seconds() - _period.from.seconds(), film()->video_frame_rate()),
- i.rectangle.x, dcp::HAlign::LEFT, i.rectangle.y, dcp::VAlign::TOP,
- dcp::Time(), dcp::Time()
- )
+ make_shared<dcp::SubtitleImage>(
+ i.image->as_png(),
+ dcp::Time(period.from.seconds() - _period.from.seconds(), film()->video_frame_rate()),
+ dcp::Time(period.to.seconds() - _period.from.seconds(), film()->video_frame_rate()),
+ i.rectangle.x, dcp::HAlign::LEFT, i.rectangle.y, dcp::VAlign::TOP,
+ dcp::Time(), dcp::Time()
)
);
}
/* Read the data from the info file; for 3D we just check the left
frames until we find a good one.
*/
- dcp::FrameInfo const info = read_frame_info (info_file, frame, film()->three_d() ? EYES_LEFT : EYES_BOTH);
+ auto const info = read_frame_info (info_file, frame, film()->three_d() ? Eyes::LEFT : Eyes::BOTH);
bool ok = true;
/** the first picture frame index that does not already exist in our MXF */
int _first_nonexistant_frame;
/** the data of the last written frame, if there is one */
- std::shared_ptr<const dcp::Data> _last_written[EYES_COUNT];
+ std::shared_ptr<const dcp::Data> _last_written[static_cast<int>(Eyes::COUNT)];
/** index of this reel within the DCP (starting from 0) */
int _reel_index;
/** number of reels in the DCP */
string body = _summary + "\n\n";
- body += "Version: " + string (dcpomatic_version) + " " + string (dcpomatic_git_commit) + "\n\n";
+ body += "Version: " + string(dcpomatic_version) + " " + string(dcpomatic_git_commit) + "\n\n";
for (auto i: environment_info ()) {
body += i + "\n";
body += "---<8----\n";
}
- list<string> to;
- to.push_back ("carl@dcpomatic.com");
-
- Emailer emailer (_from, to, "DCP-o-matic problem report", body);
- emailer.send ("main.carlh.net", 2525, EMAIL_PROTOCOL_STARTTLS);
+ Emailer emailer (_from, {"carl@dcpomatic.com"}, "DCP-o-matic problem report", body);
+ emailer.send ("main.carlh.net", 2525, EmailProtocol::STARTTLS);
set_progress (1);
set_state (FINISHED_OK);
{
LOG_DEBUG_THREE_D ("Shuffler::video frame=%1 eyes=%2 part=%3", video.frame, static_cast<int>(video.eyes), static_cast<int>(video.part));
- if (video.eyes != EYES_LEFT && video.eyes != EYES_RIGHT) {
+ if (video.eyes != Eyes::LEFT && video.eyes != Eyes::RIGHT) {
/* Pass through anything that we don't care about */
Video (weak_piece, video);
return;
shared_ptr<Piece> piece = weak_piece.lock ();
DCPOMATIC_ASSERT (piece);
- if (!_last && video.eyes == EYES_LEFT) {
+ if (!_last && video.eyes == Eyes::LEFT) {
LOG_DEBUG_THREE_D_NC ("Shuffler first after clear");
/* We haven't seen anything since the last clear() and we have some eyes-left so assume everything is OK */
Video (weak_piece, video);
!_store.empty() &&
_last &&
(
- (_store.front().second.frame == _last->frame && _store.front().second.eyes == EYES_RIGHT && _last->eyes == EYES_LEFT) ||
- (_store.front().second.frame >= (_last->frame + 1) && _store.front().second.eyes == EYES_LEFT && _last->eyes == EYES_RIGHT)
+ (_store.front().second.frame == _last->frame && _store.front().second.eyes == Eyes::RIGHT && _last->eyes == Eyes::LEFT) ||
+ (_store.front().second.frame >= (_last->frame + 1) && _store.front().second.eyes == Eyes::LEFT && _last->eyes == Eyes::RIGHT)
);
if (!store_front_in_sequence) {
using std::string;
using std::cout;
using std::shared_ptr;
+using std::make_shared;
using boost::optional;
using dcp::raw_convert;
using namespace dcpomatic;
StringTextFileContent::StringTextFileContent (boost::filesystem::path path)
: Content (path)
{
- text.push_back (shared_ptr<TextContent> (new TextContent (this, TEXT_OPEN_SUBTITLE, TEXT_UNKNOWN)));
+ text.push_back (shared_ptr<TextContent> (new TextContent (this, TextType::OPEN_SUBTITLE, TextType::UNKNOWN)));
}
StringTextFileContent::StringTextFileContent (cxml::ConstNodePtr node, int version)
boost::mutex::scoped_lock lm (_mutex);
_length = s.length ();
- only_text()->add_font (shared_ptr<Font> (new Font (TEXT_FONT_ID)));
+ only_text()->add_font (make_shared<Font>(TEXT_FONT_ID));
}
string
#include "i18n.h"
-using std::string;
using std::make_pair;
+using std::make_shared;
using std::pair;
-using std::vector;
using std::shared_ptr;
+using std::string;
+using std::vector;
using boost::optional;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
void
SubtitleEncoder::text (PlayerText subs, TextType type, optional<DCPTextTrack> track, dcpomatic::DCPTimePeriod period)
{
- if (type != TEXT_OPEN_SUBTITLE) {
+ if (type != TextType::OPEN_SUBTITLE) {
return;
}
shared_ptr<dcp::SubtitleAsset> asset;
vector<dcp::LanguageTag> lang = _film->subtitle_languages ();
if (_film->interop ()) {
- shared_ptr<dcp::InteropSubtitleAsset> s (new dcp::InteropSubtitleAsset());
+ auto s = make_shared<dcp::InteropSubtitleAsset>();
s->set_movie_title (_film->name());
if (!lang.empty()) {
s->set_language (lang.front().to_string());
s->set_reel_number (raw_convert<string>(_reel_index + 1));
_assets[_reel_index].first = s;
} else {
- shared_ptr<dcp::SMPTESubtitleAsset> s (new dcp::SMPTESubtitleAsset());
+ auto s = make_shared<dcp::SMPTESubtitleAsset>();
s->set_content_title_text (_film->name());
if (!lang.empty()) {
s->set_language (lang.front());
, _y_offset (0)
, _x_scale (1)
, _y_scale (1)
- , _line_spacing (node->optional_number_child<double>("LineSpacing").get_value_or (1))
- , _outline_width (node->optional_number_child<int>("OutlineWidth").get_value_or (4))
- , _type (TEXT_OPEN_SUBTITLE)
- , _original_type (TEXT_OPEN_SUBTITLE)
+ , _line_spacing (node->optional_number_child<double>("LineSpacing").get_value_or(1))
+ , _outline_width (node->optional_number_child<int>("OutlineWidth").get_value_or(4))
+ , _type (TextType::OPEN_SUBTITLE)
+ , _original_type (TextType::OPEN_SUBTITLE)
{
if (version >= 37) {
_use = node->bool_child ("Use");
resolution_to_string (Resolution r)
{
switch (r) {
- case RESOLUTION_2K:
+ case Resolution::TWO_K:
return "2K";
- case RESOLUTION_4K:
+ case Resolution::FOUR_K:
return "4K";
}
string_to_resolution (string s)
{
if (s == "2K") {
- return RESOLUTION_2K;
+ return Resolution::TWO_K;
}
if (s == "4K") {
- return RESOLUTION_4K;
+ return Resolution::FOUR_K;
}
DCPOMATIC_ASSERT (false);
- return RESOLUTION_2K;
+ return Resolution::TWO_K;
}
Crop::Crop (shared_ptr<cxml::Node> node)
string_to_text_type (string s)
{
if (s == "unknown") {
- return TEXT_UNKNOWN;
+ return TextType::UNKNOWN;
} else if (s == "open-subtitle") {
- return TEXT_OPEN_SUBTITLE;
+ return TextType::OPEN_SUBTITLE;
} else if (s == "closed-caption") {
- return TEXT_CLOSED_CAPTION;
+ return TextType::CLOSED_CAPTION;
} else {
throw MetadataError (String::compose ("Unknown text type %1", s));
}
text_type_to_string (TextType t)
{
switch (t) {
- case TEXT_UNKNOWN:
+ case TextType::UNKNOWN:
return "unknown";
- case TEXT_OPEN_SUBTITLE:
+ case TextType::OPEN_SUBTITLE:
return "open-subtitle";
- case TEXT_CLOSED_CAPTION:
+ case TextType::CLOSED_CAPTION:
return "closed-caption";
default:
DCPOMATIC_ASSERT (false);
text_type_to_name (TextType t)
{
switch (t) {
- case TEXT_UNKNOWN:
+ case TextType::UNKNOWN:
return _("Timed text");
- case TEXT_OPEN_SUBTITLE:
+ case TextType::OPEN_SUBTITLE:
return _("Open subtitles");
- case TEXT_CLOSED_CAPTION:
+ case TextType::CLOSED_CAPTION:
return _("Closed captions");
default:
DCPOMATIC_ASSERT (false);
video_frame_type_to_string (VideoFrameType t)
{
switch (t) {
- case VIDEO_FRAME_TYPE_2D:
+ case VideoFrameType::TWO_D:
return "2d";
- case VIDEO_FRAME_TYPE_3D:
+ case VideoFrameType::THREE_D:
return "3d";
- case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+ case VideoFrameType::THREE_D_LEFT_RIGHT:
return "3d-left-right";
- case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+ case VideoFrameType::THREE_D_TOP_BOTTOM:
return "3d-top-bottom";
- case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+ case VideoFrameType::THREE_D_ALTERNATE:
return "3d-alternate";
- case VIDEO_FRAME_TYPE_3D_LEFT:
+ case VideoFrameType::THREE_D_LEFT:
return "3d-left";
- case VIDEO_FRAME_TYPE_3D_RIGHT:
+ case VideoFrameType::THREE_D_RIGHT:
return "3d-right";
default:
DCPOMATIC_ASSERT (false);
string_to_video_frame_type (string s)
{
if (s == "2d") {
- return VIDEO_FRAME_TYPE_2D;
+ return VideoFrameType::TWO_D;
} else if (s == "3d") {
- return VIDEO_FRAME_TYPE_3D;
+ return VideoFrameType::THREE_D;
} else if (s == "3d-left-right") {
- return VIDEO_FRAME_TYPE_3D_LEFT_RIGHT;
+ return VideoFrameType::THREE_D_LEFT_RIGHT;
} else if (s == "3d-top-bottom") {
- return VIDEO_FRAME_TYPE_3D_TOP_BOTTOM;
+ return VideoFrameType::THREE_D_TOP_BOTTOM;
} else if (s == "3d-alternate") {
- return VIDEO_FRAME_TYPE_3D_ALTERNATE;
+ return VideoFrameType::THREE_D_ALTERNATE;
} else if (s == "3d-left") {
- return VIDEO_FRAME_TYPE_3D_LEFT;
+ return VideoFrameType::THREE_D_LEFT;
} else if (s == "3d-right") {
- return VIDEO_FRAME_TYPE_3D_RIGHT;
+ return VideoFrameType::THREE_D_RIGHT;
}
DCPOMATIC_ASSERT (false);
/*
- Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
/** Port on which player listens for play requests */
#define PLAYER_PLAY_PORT (Config::instance()->server_port_base()+5)
-typedef std::vector<std::shared_ptr<Content> > ContentList;
-typedef std::vector<std::shared_ptr<FFmpegContent> > FFmpegContentList;
+typedef std::vector<std::shared_ptr<Content>> ContentList;
+typedef std::vector<std::shared_ptr<FFmpegContent>> FFmpegContentList;
typedef int64_t Frame;
-enum VideoFrameType
+enum class VideoFrameType
{
- VIDEO_FRAME_TYPE_2D,
+ TWO_D,
/** `True' 3D content, e.g. 3D DCPs */
- VIDEO_FRAME_TYPE_3D,
- VIDEO_FRAME_TYPE_3D_LEFT_RIGHT,
- VIDEO_FRAME_TYPE_3D_TOP_BOTTOM,
- VIDEO_FRAME_TYPE_3D_ALTERNATE,
+ THREE_D,
+ THREE_D_LEFT_RIGHT,
+ THREE_D_TOP_BOTTOM,
+ THREE_D_ALTERNATE,
/** This content is all the left frames of some 3D */
- VIDEO_FRAME_TYPE_3D_LEFT,
+ THREE_D_LEFT,
/** This content is all the right frames of some 3D */
- VIDEO_FRAME_TYPE_3D_RIGHT
+ THREE_D_RIGHT
};
std::string video_frame_type_to_string (VideoFrameType);
VideoFrameType string_to_video_frame_type (std::string);
-enum Eyes
+enum class Eyes
{
- EYES_BOTH,
- EYES_LEFT,
- EYES_RIGHT,
- EYES_COUNT
+ BOTH,
+ LEFT,
+ RIGHT,
+ COUNT
};
-enum Part
+enum class Part
{
- PART_LEFT_HALF,
- PART_RIGHT_HALF,
- PART_TOP_HALF,
- PART_BOTTOM_HALF,
- PART_WHOLE
+ LEFT_HALF,
+ RIGHT_HALF,
+ TOP_HALF,
+ BOTTOM_HALF,
+ WHOLE
};
-enum ReelType
+enum class ReelType
{
- REELTYPE_SINGLE,
- REELTYPE_BY_VIDEO_CONTENT,
- REELTYPE_BY_LENGTH
+ SINGLE,
+ BY_VIDEO_CONTENT,
+ BY_LENGTH
};
-enum ChangeType
+enum class ChangeType
{
- CHANGE_TYPE_PENDING,
- CHANGE_TYPE_DONE,
- CHANGE_TYPE_CANCELLED
+ PENDING,
+ DONE,
+ CANCELLED
};
* There is some use of the word `subtitle' in the code which may mean
* caption in some contexts.
*/
-enum TextType
+enum class TextType
{
- TEXT_UNKNOWN,
- TEXT_OPEN_SUBTITLE,
- TEXT_CLOSED_CAPTION,
- TEXT_COUNT
+ UNKNOWN,
+ OPEN_SUBTITLE,
+ CLOSED_CAPTION,
+ COUNT
};
extern std::string text_type_to_string (TextType t);
extern std::string text_type_to_name (TextType t);
extern TextType string_to_text_type (std::string s);
-enum ExportFormat
+enum class ExportFormat
{
- EXPORT_FORMAT_PRORES,
- EXPORT_FORMAT_H264_AAC,
- EXPORT_FORMAT_H264_PCM,
- EXPORT_FORMAT_SUBTITLES_DCP
+ PRORES,
+ H264_AAC,
+ H264_PCM,
+ SUBTITLES_DCP
};
/** @struct Crop
time_t last_write_time;
};
-enum Resolution {
- RESOLUTION_2K,
- RESOLUTION_4K
+enum class Resolution {
+ TWO_K,
+ FOUR_K
};
std::string resolution_to_string (Resolution);
Resolution string_to_resolution (std::string);
-enum FileTransferProtocol {
- FILE_TRANSFER_PROTOCOL_SCP,
- FILE_TRANSFER_PROTOCOL_FTP
+enum class FileTransferProtocol {
+ SCP,
+ FTP
};
-enum EmailProtocol {
- EMAIL_PROTOCOL_AUTO,
- EMAIL_PROTOCOL_PLAIN,
- EMAIL_PROTOCOL_STARTTLS,
- EMAIL_PROTOCOL_SSL
+enum class EmailProtocol {
+ AUTO,
+ PLAIN,
+ STARTTLS,
+ SSL
};
LOG_GENERAL_NC (N_("Upload job starting"));
scoped_ptr<Uploader> uploader;
- switch (Config::instance()->tms_protocol ()) {
- case FILE_TRANSFER_PROTOCOL_SCP:
- uploader.reset (new SCPUploader (bind (&UploadJob::set_status, this, _1), bind (&UploadJob::set_progress, this, _1, false)));
+ switch (Config::instance()->tms_protocol()) {
+ case FileTransferProtocol::SCP:
+ uploader.reset (new SCPUploader(bind(&UploadJob::set_status, this, _1), bind(&UploadJob::set_progress, this, _1, false)));
break;
- case FILE_TRANSFER_PROTOCOL_FTP:
- uploader.reset (new CurlUploader (bind (&UploadJob::set_status, this, _1), bind (&UploadJob::set_progress, this, _1, false)));
+ case FileTransferProtocol::FTP:
+ uploader.reset (new CurlUploader(bind(&UploadJob::set_status, this, _1), bind(&UploadJob::set_progress, this, _1, false)));
break;
}
- uploader->upload (_film->dir (_film->dcp_name ()));
+ uploader->upload (_film->dir(_film->dcp_name()));
set_progress (1);
set_status (N_(""));
Eyes
increment_eyes (Eyes e)
{
- if (e == EYES_LEFT) {
- return EYES_RIGHT;
+ if (e == Eyes::LEFT) {
+ return Eyes::RIGHT;
}
- return EYES_LEFT;
+ return Eyes::LEFT;
}
void
using std::list;
using std::pair;
using std::shared_ptr;
+using std::make_shared;
using boost::optional;
using std::dynamic_pointer_cast;
using dcp::raw_convert;
: ContentPart (parent)
, _use (true)
, _length (0)
- , _frame_type (VIDEO_FRAME_TYPE_2D)
+ , _frame_type (VideoFrameType::TWO_D)
, _yuv (true)
, _fade_in (0)
, _fade_out (0)
VideoContent::from_xml (Content* parent, cxml::ConstNodePtr node, int version)
{
if (!node->optional_number_child<int> ("VideoWidth")) {
- return shared_ptr<VideoContent> ();
+ return {};
}
- return shared_ptr<VideoContent> (new VideoContent (parent, node, version));
+ return make_shared<VideoContent>(parent, node, version);
}
VideoContent::VideoContent (Content* parent, cxml::ConstNodePtr node, int version)
/* Snapshot of the VideoFrameType enum at version 34 */
switch (node->number_child<int> ("VideoFrameType")) {
case 0:
- _frame_type = VIDEO_FRAME_TYPE_2D;
+ _frame_type = VideoFrameType::TWO_D;
break;
case 1:
- _frame_type = VIDEO_FRAME_TYPE_3D_LEFT_RIGHT;
+ _frame_type = VideoFrameType::THREE_D_LEFT_RIGHT;
break;
case 2:
- _frame_type = VIDEO_FRAME_TYPE_3D_TOP_BOTTOM;
+ _frame_type = VideoFrameType::THREE_D_TOP_BOTTOM;
break;
case 3:
- _frame_type = VIDEO_FRAME_TYPE_3D_ALTERNATE;
+ _frame_type = VideoFrameType::THREE_D_ALTERNATE;
break;
case 4:
- _frame_type = VIDEO_FRAME_TYPE_3D_LEFT;
+ _frame_type = VideoFrameType::THREE_D_LEFT;
break;
case 5:
- _frame_type = VIDEO_FRAME_TYPE_3D_RIGHT;
+ _frame_type = VideoFrameType::THREE_D_RIGHT;
break;
}
} else {
_crop.bottom = node->number_child<int> ("BottomCrop");
if (version <= 7) {
- optional<string> r = node->optional_string_child ("Ratio");
+ auto r = node->optional_string_child ("Ratio");
if (r) {
_legacy_ratio = Ratio::from_id(r.get())->ratio();
}
{
auto const s = size ();
switch (frame_type ()) {
- case VIDEO_FRAME_TYPE_2D:
- case VIDEO_FRAME_TYPE_3D:
- case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- case VIDEO_FRAME_TYPE_3D_LEFT:
- case VIDEO_FRAME_TYPE_3D_RIGHT:
+ case VideoFrameType::TWO_D:
+ case VideoFrameType::THREE_D:
+ case VideoFrameType::THREE_D_ALTERNATE:
+ case VideoFrameType::THREE_D_LEFT:
+ case VideoFrameType::THREE_D_RIGHT:
return s;
- case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+ case VideoFrameType::THREE_D_LEFT_RIGHT:
return dcp::Size (s.width / 2, s.height);
- case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+ case VideoFrameType::THREE_D_TOP_BOTTOM:
return dcp::Size (s.width, s.height / 2);
}
{
public:
explicit VideoContent (Content* parent);
+ VideoContent (Content* parent, cxml::ConstNodePtr, int);
VideoContent (Content* parent, std::vector<std::shared_ptr<Content> >);
void as_xml (xmlpp::Node *) const;
Frame length_after_3d_combine () const {
boost::mutex::scoped_lock lm (_mutex);
- if (_frame_type == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+ if (_frame_type == VideoFrameType::THREE_D_ALTERNATE) {
return _length / 2;
}
friend struct scaled_size_test2;
friend struct scaled_size_legacy_test;
- VideoContent (Content* parent, cxml::ConstNodePtr, int);
void setup_default_colour_conversion ();
bool _use;
*/
if (_frame_interval_checker) {
_frame_interval_checker->feed (frame_time, afr);
- if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VIDEO_FRAME_TYPE_3D) {
+ if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VideoFrameType::THREE_D) {
boost::throw_exception (
DecodeError(
String::compose(
}
Frame frame;
- Eyes eyes = EYES_BOTH;
+ Eyes eyes = Eyes::BOTH;
if (!_position) {
/* This is the first data we have received since initialisation or seek. Set
the position based on the frame that was given. After this first time
If we drop the frame with the duplicated timestamp we obviously lose sync.
*/
_position = ContentTime::from_frames (decoder_frame, afr);
- if (vft == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+ if (vft == VideoFrameType::THREE_D_ALTERNATE) {
frame = decoder_frame / 2;
- _last_emitted_eyes = EYES_RIGHT;
+ _last_emitted_eyes = Eyes::RIGHT;
} else {
frame = decoder_frame;
}
} else {
- if (vft == VIDEO_FRAME_TYPE_3D || vft == VIDEO_FRAME_TYPE_3D_ALTERNATE) {
+ if (vft == VideoFrameType::THREE_D || vft == VideoFrameType::THREE_D_ALTERNATE) {
DCPOMATIC_ASSERT (_last_emitted_eyes);
- if (_last_emitted_eyes.get() == EYES_RIGHT) {
+ if (_last_emitted_eyes.get() == Eyes::RIGHT) {
frame = _position->frames_round(afr) + 1;
- eyes = EYES_LEFT;
+ eyes = Eyes::LEFT;
} else {
frame = _position->frames_round(afr);
- eyes = EYES_RIGHT;
+ eyes = Eyes::RIGHT;
}
} else {
frame = _position->frames_round(afr) + 1;
}
switch (vft) {
- case VIDEO_FRAME_TYPE_2D:
- Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
+ case VideoFrameType::TWO_D:
+ Data (ContentVideo (image, frame, Eyes::BOTH, Part::WHOLE));
break;
- case VIDEO_FRAME_TYPE_3D:
+ case VideoFrameType::THREE_D:
{
- Data (ContentVideo (image, frame, eyes, PART_WHOLE));
+ Data (ContentVideo (image, frame, eyes, Part::WHOLE));
_last_emitted_frame = frame;
_last_emitted_eyes = eyes;
break;
}
- case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+ case VideoFrameType::THREE_D_ALTERNATE:
{
- Data (ContentVideo (image, frame, eyes, PART_WHOLE));
+ Data (ContentVideo (image, frame, eyes, Part::WHOLE));
_last_emitted_eyes = eyes;
break;
}
- case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
- Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
- Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+ case VideoFrameType::THREE_D_LEFT_RIGHT:
+ Data (ContentVideo (image, frame, Eyes::LEFT, Part::LEFT_HALF));
+ Data (ContentVideo (image, frame, Eyes::RIGHT, Part::RIGHT_HALF));
break;
- case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
- Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
- Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+ case VideoFrameType::THREE_D_TOP_BOTTOM:
+ Data (ContentVideo (image, frame, Eyes::LEFT, Part::TOP_HALF));
+ Data (ContentVideo (image, frame, Eyes::RIGHT, Part::BOTTOM_HALF));
break;
- case VIDEO_FRAME_TYPE_3D_LEFT:
- Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+ case VideoFrameType::THREE_D_LEFT:
+ Data (ContentVideo (image, frame, Eyes::LEFT, Part::WHOLE));
break;
- case VIDEO_FRAME_TYPE_3D_RIGHT:
- Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+ case VideoFrameType::THREE_D_RIGHT:
+ Data (ContentVideo (image, frame, Eyes::RIGHT, Part::WHOLE));
break;
default:
DCPOMATIC_ASSERT (false);
qi.reel = video_reel (frame);
qi.frame = frame - _reels[qi.reel].start ();
- if (film()->three_d() && eyes == EYES_BOTH) {
+ if (film()->three_d() && eyes == Eyes::BOTH) {
/* 2D material in a 3D DCP; fake the 3D */
- qi.eyes = EYES_LEFT;
+ qi.eyes = Eyes::LEFT;
_queue.push_back (qi);
++_queued_full_in_memory;
- qi.eyes = EYES_RIGHT;
+ qi.eyes = Eyes::RIGHT;
_queue.push_back (qi);
++_queued_full_in_memory;
} else {
qi.type = QueueItem::REPEAT;
qi.reel = video_reel (frame);
qi.frame = frame - _reels[qi.reel].start ();
- if (film()->three_d() && eyes == EYES_BOTH) {
- qi.eyes = EYES_LEFT;
+ if (film()->three_d() && eyes == Eyes::BOTH) {
+ qi.eyes = Eyes::LEFT;
_queue.push_back (qi);
- qi.eyes = EYES_RIGHT;
+ qi.eyes = Eyes::RIGHT;
_queue.push_back (qi);
} else {
qi.eyes = eyes;
qi.reel = reel;
qi.frame = frame_in_reel;
- if (film()->three_d() && eyes == EYES_BOTH) {
- qi.eyes = EYES_LEFT;
+ if (film()->three_d() && eyes == Eyes::BOTH) {
+ qi.eyes = Eyes::LEFT;
_queue.push_back (qi);
- qi.eyes = EYES_RIGHT;
+ qi.eyes = Eyes::RIGHT;
_queue.push_back (qi);
} else {
qi.eyes = eyes;
bool
Writer::LastWritten::next (QueueItem qi) const
{
- if (qi.eyes == EYES_BOTH) {
+ if (qi.eyes == Eyes::BOTH) {
/* 2D */
return qi.frame == (_frame + 1);
}
/* 3D */
- if (_eyes == EYES_LEFT && qi.frame == _frame && qi.eyes == EYES_RIGHT) {
+ if (_eyes == Eyes::LEFT && qi.frame == _frame && qi.eyes == Eyes::RIGHT) {
return true;
}
- if (_eyes == EYES_RIGHT && qi.frame == (_frame + 1) && qi.eyes == EYES_LEFT) {
+ if (_eyes == Eyes::RIGHT && qi.frame == (_frame + 1) && qi.eyes == Eyes::LEFT) {
return true;
}
return (frame != 0 && frame < reel.first_nonexistant_frame());
}
-/** @param track Closed caption track if type == TEXT_CLOSED_CAPTION */
+/** @param track Closed caption track if type == TextType::CLOSED_CAPTION */
void
Writer::write (PlayerText text, TextType type, optional<DCPTextTrack> track, DCPTimePeriod period)
{
- vector<ReelWriter>::iterator* reel = 0;
+ vector<ReelWriter>::iterator* reel = nullptr;
switch (type) {
- case TEXT_OPEN_SUBTITLE:
+ case TextType::OPEN_SUBTITLE:
reel = &_subtitle_reel;
_have_subtitles = true;
break;
- case TEXT_CLOSED_CAPTION:
+ case TextType::CLOSED_CAPTION:
DCPOMATIC_ASSERT (track);
DCPOMATIC_ASSERT (_caption_reels.find(*track) != _caption_reels.end());
reel = &_caption_reels[*track];
size_t
Writer::video_reel (int frame) const
{
- DCPTime t = DCPTime::from_frames (frame, film()->video_frame_rate());
+ auto t = DCPTime::from_frames (frame, film()->video_frame_rate());
size_t i = 0;
while (i < _reels.size() && !_reels[i].period().contains (t)) {
++i;
_digest_progresses[boost::this_thread::get_id()] = progress;
float min_progress = FLT_MAX;
- for (map<boost::thread::id, float>::const_iterator i = _digest_progresses.begin(); i != _digest_progresses.end(); ++i) {
- min_progress = min (min_progress, i->second);
+ for (auto const& i: _digest_progresses) {
+ min_progress = min (min_progress, i.second);
}
job->set_progress (min_progress);
: size (0)
, reel (0)
, frame (0)
- , eyes (EYES_BOTH)
+ , eyes (Eyes::BOTH)
{}
enum Type {
public:
LastWritten()
: _frame(-1)
- , _eyes(EYES_RIGHT)
+ , _eyes(Eyes::RIGHT)
{}
/** @return true if qi is the next item after this one */
void film_change (ChangeType type)
{
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
set_menu_sensitivity ();
}
}
}
list<string> to = { "carl@dcpomatic.com" };
Emailer emailer (d->email(), to, "DCP-o-matic translations", body);
- emailer.send ("main.carlh.net", 2525, EMAIL_PROTOCOL_STARTTLS);
+ emailer.send ("main.carlh.net", 2525, EmailProtocol::STARTTLS);
}
d->Destroy ();
print_dump (shared_ptr<Film> film)
{
cout << film->dcp_name (true) << "\n"
- << film->container()->container_nickname() << " at " << ((film->resolution() == RESOLUTION_2K) ? "2K" : "4K") << "\n"
+ << film->container()->container_nickname() << " at " << ((film->resolution() == Resolution::TWO_K) ? "2K" : "4K") << "\n"
<< (film->j2k_bandwidth() / 1000000) << "Mbit/s" << "\n"
<< "Output " << film->video_frame_rate() << "fps " << (film->three_d() ? "3D" : "2D") << " " << (film->audio_frame_rate() / 1000) << "kHz\n"
<< (film->interop() ? "Inter-Op" : "SMPTE") << " " << (film->encrypted() ? "encrypted" : "unencrypted") << "\n";
film->set_encrypted (cc.encrypt);
film->set_three_d (cc.threed);
if (cc.fourk) {
- film->set_resolution (RESOLUTION_4K);
+ film->set_resolution (Resolution::FOUR_K);
}
if (cc.j2k_bandwidth) {
film->set_j2k_bandwidth (*cc.j2k_bandwidth);
void film_changed (ChangeType type, Film::Property property)
{
- if (type != CHANGE_TYPE_DONE || property != Film::CONTENT) {
+ if (type != ChangeType::DONE || property != Film::CONTENT) {
return;
}
_film->set_container (Ratio::from_id("185"));
for (auto i: _film->content()) {
- shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(i);
+ auto dcp = dynamic_pointer_cast<DCPContent>(i);
for (auto j: i->text) {
j->set_use (true);
}
if (i->video) {
- Ratio const * r = Ratio::nearest_from_ratio(i->video->size().ratio());
+ auto const r = Ratio::nearest_from_ratio(i->video->size().ratio());
if (r->id() == "239") {
/* Any scope content means we use scope */
_film->set_container(r);
}
/* Any 3D content means we use 3D mode */
- if (i->video && i->video->frame_type() != VIDEO_FRAME_TYPE_2D) {
+ if (i->video && i->video->frame_type() != VideoFrameType::TWO_D) {
_film->set_three_d (true);
}
}
set_menu_sensitivity ();
- wxMenuItemList old = _cpl_menu->GetMenuItems();
- for (wxMenuItemList::iterator i = old.begin(); i != old.end(); ++i) {
- _cpl_menu->Remove (*i);
+ auto old = _cpl_menu->GetMenuItems();
+ for (auto const& i: old) {
+ _cpl_menu->Remove (i);
}
if (_film->content().size() == 1) {
/* Offer a CPL menu */
- shared_ptr<DCPContent> first = dynamic_pointer_cast<DCPContent>(_film->content().front());
+ auto first = dynamic_pointer_cast<DCPContent>(_film->content().front());
if (first) {
DCPExaminer ex (first, true);
int id = ID_view_cpl;
for (auto i: ex.cpls()) {
- wxMenuItem* j = _cpl_menu->AppendRadioItem(
+ auto j = _cpl_menu->AppendRadioItem(
id,
wxString::Format("%s (%s)", std_to_wx(i->annotation_text().get_value_or("")).data(), std_to_wx(i->id()).data())
);
bool report_errors_from_last_job (wxWindow* parent) const
{
- JobManager* jm = JobManager::instance ();
+ auto jm = JobManager::instance ();
DCPOMATIC_ASSERT (!jm->get().empty());
- shared_ptr<Job> last = jm->get().back();
+ auto last = jm->get().back();
if (last->finished_in_error()) {
error_dialog(parent, wxString::Format(_("Could not load DCP.\n\n%s."), std_to_wx(last->error_summary()).data()), std_to_wx(last->error_details()));
return false;
void
process_video (shared_ptr<PlayerVideo> pvf)
{
- shared_ptr<DCPVideo> local (new DCPVideo (pvf, frame_count, film->video_frame_rate(), 250000000, RESOLUTION_2K));
- shared_ptr<DCPVideo> remote (new DCPVideo (pvf, frame_count, film->video_frame_rate(), 250000000, RESOLUTION_2K));
+ shared_ptr<DCPVideo> local (new DCPVideo (pvf, frame_count, film->video_frame_rate(), 250000000, Resolution::TWO_K));
+ shared_ptr<DCPVideo> remote (new DCPVideo (pvf, frame_count, film->video_frame_rate(), 250000000, Resolution::TWO_K));
cout << "Frame " << frame_count << ": ";
cout.flush ();
void
AudioDialog::film_change (ChangeType type, int p)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
if (p == Film::AUDIO_CHANNELS) {
- shared_ptr<Film> film = _film.lock ();
+ auto film = _film.lock ();
if (film) {
_channels = film->audio_channels ();
try_to_load_analysis ();
void
AudioDialog::content_change (ChangeType type, int p)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
return;
}
- shared_ptr<Film> film = _film.lock ();
+ auto film = _film.lock ();
if (!film) {
return;
}
- pair<AudioAnalysis::PeakTime, int> const peak = _analysis->overall_sample_peak ();
+ auto const peak = _analysis->overall_sample_peak ();
float const peak_dB = linear_to_db(peak.first.peak) + _analysis->gain_correction(_playlist);
_sample_peak->SetLabel (
wxString::Format (
/* XXX: check whether it's ok to add dB gain to these quantities */
- if (static_cast<bool>(_analysis->integrated_loudness ())) {
+ if (static_cast<bool>(_analysis->integrated_loudness())) {
_integrated_loudness->SetLabel (
wxString::Format (
_("Integrated loudness %.2f LUFS"),
);
}
- if (static_cast<bool>(_analysis->loudness_range ())) {
+ if (static_cast<bool>(_analysis->loudness_range())) {
_loudness_range->SetLabel (
wxString::Format (
_("Loudness range %.2f LU"),
return;
}
- shared_ptr<Film> film = _film.lock();
+ auto film = _film.lock();
DCPOMATIC_ASSERT (film);
_cursor->SetLabel (wxString::Format (_("Cursor: %.1fdB at %s"), *db, time->timecode(film->video_frame_rate())));
}
for (auto i: film->content()) {
for (auto j: i->text) {
- if (j->use() && j->type() == TEXT_CLOSED_CAPTION && j->dcp_track()) {
+ if (j->use() && j->type() == TextType::CLOSED_CAPTION && j->dcp_track()) {
if (find(_tracks.begin(), _tracks.end(), j->dcp_track()) == _tracks.end()) {
_tracks.push_back (*j->dcp_track());
}
, _ignore_deselect (false)
, _no_check_selection (false)
{
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
_text_panel[i] = 0;
}
bool have_video = false;
bool have_audio = false;
- bool have_text[TEXT_COUNT] = { false, false };
+ bool have_text[static_cast<int>(TextType::COUNT)] = { false, false };
for (auto i: selected()) {
if (i->video) {
have_video = true;
have_audio = true;
}
for (auto j: i->text) {
- have_text[j->original_type()] = true;
+ have_text[static_cast<int>(j->original_type())] = true;
}
}
++off;
}
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
if (have_text[i] && !_text_panel[i]) {
_text_panel[i] = new TextPanel (this, static_cast<TextType>(i));
_notebook->InsertPage (off, _text_panel[i], _text_panel[i]->name());
if (_audio_panel) {
_audio_panel->Enable (_generally_sensitive && audio_selection.size() > 0);
}
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
if (_text_panel[i]) {
_text_panel[i]->Enable (_generally_sensitive && selection.size() == 1 && !selection.front()->text.empty());
}
if (_audio_panel) {
p.push_back (_audio_panel);
}
- for (int i = 0; i < TEXT_COUNT; ++i) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
if (_text_panel[i]) {
p.push_back (_text_panel[i]);
}
wxButton* _timeline;
VideoPanel* _video_panel;
AudioPanel* _audio_panel;
- TextPanel* _text_panel[TEXT_COUNT];
+ TextPanel* _text_panel[static_cast<int>(TextType::COUNT)];
TimingPanel* _timing_panel;
ContentMenu* _menu;
TimelineDialog* _timeline_dialog;
void button_clicked ()
{
U const v = boost::bind (_model_getter, _part(_content.front().get()).get())();
- for (typename List::iterator i = _content.begin (); i != _content.end(); ++i) {
- boost::bind (_model_setter, _part(i->get()).get(), v) ();
+ for (auto const& i: _content) {
+ boost::bind (_model_setter, _part(i.get()).get(), v)();
}
}
void model_changed (ChangeType type, int property)
{
- if (type == CHANGE_TYPE_DONE && property == _property && !_ignore_model_changes) {
+ if (type == ChangeType::DONE && property == _property && !_ignore_model_changes) {
update_from_model ();
}
}
void
Controls::eye_changed ()
{
- _viewer->set_eyes (_eye->GetSelection() == 0 ? EYES_LEFT : EYES_RIGHT);
+ _viewer->set_eyes (_eye->GetSelection() == 0 ? Eyes::LEFT : Eyes::RIGHT);
}
void
void
Controls::film_change (ChangeType type, Film::Property p)
{
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
if (p == Film::CONTENT) {
setup_sensitivity ();
update_position_label ();
return;
}
- _film->set_resolution (_resolution->GetSelection() == 0 ? RESOLUTION_2K : RESOLUTION_4K);
+ _film->set_resolution (_resolution->GetSelection() == 0 ? Resolution::TWO_K : Resolution::FOUR_K);
}
void
checked_set (_encrypted, _film->encrypted ());
break;
case Film::RESOLUTION:
- checked_set (_resolution, _film->resolution() == RESOLUTION_2K ? 0 : 1);
+ checked_set (_resolution, _film->resolution() == Resolution::TWO_K ? 0 : 1);
setup_container ();
setup_dcp_name ();
break;
film_changed (Film::AUDIO_CHANNELS);
break;
case Film::REEL_TYPE:
- checked_set (_reel_type, _film->reel_type ());
- _reel_length->Enable (_film->reel_type() == REELTYPE_BY_LENGTH);
+ checked_set (_reel_type, static_cast<int>(_film->reel_type()));
+ _reel_length->Enable (_film->reel_type() == ReelType::BY_LENGTH);
break;
case Film::REEL_LENGTH:
checked_set (_reel_length, _film->reel_length() / 1000000000LL);
_copy_isdcf_name_button->Enable (_generally_sensitive);
_encrypted->Enable (_generally_sensitive);
_reel_type->Enable (_generally_sensitive && _film && !_film->references_dcp_video() && !_film->references_dcp_audio());
- _reel_length->Enable (_generally_sensitive && _film && _film->reel_type() == REELTYPE_BY_LENGTH);
+ _reel_length->Enable (_generally_sensitive && _film && _film->reel_type() == ReelType::BY_LENGTH);
_markers->Enable (_generally_sensitive && _film && !_film->interop());
_metadata->Enable (_generally_sensitive);
_frame_rate_choice->Enable (_generally_sensitive && _film && !_film->references_dcp_video() && !_film->contains_atmos_content());
};
ExportFormat formats[] = {
- EXPORT_FORMAT_PRORES,
- EXPORT_FORMAT_H264_AAC,
+ ExportFormat::PRORES,
+ ExportFormat::H264_AAC,
};
ExportVideoFileDialog::ExportVideoFileDialog (wxWindow* parent, string name)
void
FilmEditor::film_change (ChangeType type, Film::Property p)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
void
FilmEditor::film_content_change (ChangeType type, int property)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
_film->LengthChange.connect (boost::bind(&FilmViewer::film_length_change, this));
_player->Change.connect (boost::bind (&FilmViewer::player_change, this, _1, _2, _3));
- film_change (CHANGE_TYPE_DONE, Film::VIDEO_FRAME_RATE);
- film_change (CHANGE_TYPE_DONE, Film::THREE_D);
+ film_change (ChangeType::DONE, Film::VIDEO_FRAME_RATE);
+ film_change (ChangeType::DONE, Film::THREE_D);
film_length_change ();
/* Keep about 1 second's worth of history samples */
void
FilmViewer::player_change (ChangeType type, int property, bool frequent)
{
- if (type != CHANGE_TYPE_DONE || frequent) {
+ if (type != ChangeType::DONE || frequent) {
return;
}
void
FilmViewer::film_change (ChangeType type, Film::Property p)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
if (!c) {
for (auto i: _pending_player_changes) {
- player_change (CHANGE_TYPE_DONE, i, false);
+ player_change (ChangeType::DONE, i, false);
}
_pending_player_changes.clear ();
}
void
FilmViewer::content_change (ChangeType type, int property)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
Config* config = Config::instance ();
checked_set (_upload, config->upload_after_make_dcp());
- checked_set (_tms_protocol, config->tms_protocol ());
+ checked_set (_tms_protocol, static_cast<int>(config->tms_protocol()));
checked_set (_tms_ip, config->tms_ip ());
checked_set (_tms_path, config->tms_path ());
checked_set (_tms_user, config->tms_user ());
void config_changed ()
{
- Config* config = Config::instance ();
+ auto config = Config::instance ();
checked_set (_server, config->mail_server ());
checked_set (_port, config->mail_port ());
switch (config->mail_protocol()) {
- case EMAIL_PROTOCOL_AUTO:
+ case EmailProtocol::AUTO:
checked_set (_protocol, 0);
break;
- case EMAIL_PROTOCOL_PLAIN:
+ case EmailProtocol::PLAIN:
checked_set (_protocol, 1);
break;
- case EMAIL_PROTOCOL_STARTTLS:
+ case EmailProtocol::STARTTLS:
checked_set (_protocol, 2);
break;
- case EMAIL_PROTOCOL_SSL:
+ case EmailProtocol::SSL:
checked_set (_protocol, 3);
break;
}
{
switch (_protocol->GetSelection()) {
case 0:
- Config::instance()->set_mail_protocol(EMAIL_PROTOCOL_AUTO);
+ Config::instance()->set_mail_protocol(EmailProtocol::AUTO);
break;
case 1:
- Config::instance()->set_mail_protocol(EMAIL_PROTOCOL_PLAIN);
+ Config::instance()->set_mail_protocol(EmailProtocol::PLAIN);
break;
case 2:
- Config::instance()->set_mail_protocol(EMAIL_PROTOCOL_STARTTLS);
+ Config::instance()->set_mail_protocol(EmailProtocol::STARTTLS);
break;
case 3:
- Config::instance()->set_mail_protocol(EMAIL_PROTOCOL_SSL);
+ Config::instance()->set_mail_protocol(EmailProtocol::SSL);
break;
}
}
_film_content_change_connection = locked_film->ContentChange.connect (boost::bind (&HintsDialog::film_content_change, this, _1));
}
- film_change (CHANGE_TYPE_DONE);
+ film_change (ChangeType::DONE);
}
void
HintsDialog::film_change (ChangeType type)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
_text->Clear ();
_current.clear ();
- std::shared_ptr<Film> film = _film.lock ();
+ auto film = _film.lock ();
if (!film) {
return;
}
_film_changed_connection = film()->Change.connect(boost::bind(&SMPTEMetadataDialog::film_changed, this, _1, _2));
- film_changed (CHANGE_TYPE_DONE, Film::NAME_LANGUAGE);
- film_changed (CHANGE_TYPE_DONE, Film::RELEASE_TERRITORY);
- film_changed (CHANGE_TYPE_DONE, Film::VERSION_NUMBER);
- film_changed (CHANGE_TYPE_DONE, Film::STATUS);
- film_changed (CHANGE_TYPE_DONE, Film::CHAIN);
- film_changed (CHANGE_TYPE_DONE, Film::DISTRIBUTOR);
- film_changed (CHANGE_TYPE_DONE, Film::FACILITY);
- film_changed (CHANGE_TYPE_DONE, Film::CONTENT_VERSIONS);
- film_changed (CHANGE_TYPE_DONE, Film::LUMINANCE);
- film_changed (CHANGE_TYPE_DONE, Film::SUBTITLE_LANGUAGES);
+ film_changed (ChangeType::DONE, Film::NAME_LANGUAGE);
+ film_changed (ChangeType::DONE, Film::RELEASE_TERRITORY);
+ film_changed (ChangeType::DONE, Film::VERSION_NUMBER);
+ film_changed (ChangeType::DONE, Film::STATUS);
+ film_changed (ChangeType::DONE, Film::CHAIN);
+ film_changed (ChangeType::DONE, Film::DISTRIBUTOR);
+ film_changed (ChangeType::DONE, Film::FACILITY);
+ film_changed (ChangeType::DONE, Film::CONTENT_VERSIONS);
+ film_changed (ChangeType::DONE, Film::LUMINANCE);
+ film_changed (ChangeType::DONE, Film::SUBTITLE_LANGUAGES);
setup_sensitivity ();
}
void
SMPTEMetadataDialog::film_changed (ChangeType type, Film::Property property)
{
- if (type != CHANGE_TYPE_DONE || film()->interop()) {
+ if (type != ChangeType::DONE || film()->interop()) {
return;
}
, _content (content)
, _caption (caption)
{
- shared_ptr<FFmpegContent> ff = dynamic_pointer_cast<FFmpegContent> (content);
+ auto ff = dynamic_pointer_cast<FFmpegContent> (content);
if (ff) {
_stream = ff->subtitle_stream ();
/* XXX: assuming that all FFmpeg streams have bitmap subs */
_overall_sizer->Add (restore, 0, wxALL, DCPOMATIC_SIZER_X_GAP);
}
- wxSizer* buttons = CreateSeparatedButtonSizer (wxOK);
+ auto buttons = CreateSeparatedButtonSizer (wxOK);
if (buttons) {
_overall_sizer->Add (buttons, wxSizerFlags().Expand().DoubleBorder());
}
_effect->SetSelection (NONE);
}
- optional<dcp::Colour> effect_colour = _caption->effect_colour();
+ auto effect_colour = _caption->effect_colour();
_force_effect_colour->SetValue (static_cast<bool>(effect_colour));
if (effect_colour) {
_effect_colour->SetColour (wxColour (effect_colour->r, effect_colour->g, effect_colour->b));
_effect_colour->SetColour (wxColour (0, 0, 0));
}
- optional<ContentTime> fade_in = _caption->fade_in();
+ auto fade_in = _caption->fade_in();
_force_fade_in->SetValue (static_cast<bool>(fade_in));
if (fade_in) {
_fade_in->set (*fade_in, _content->active_video_frame_rate(film));
_fade_in->set (ContentTime(), _content->active_video_frame_rate(film));
}
- optional<ContentTime> fade_out = _caption->fade_out();
+ auto fade_out = _caption->fade_out();
_force_fade_out->SetValue (static_cast<bool>(fade_out));
if (fade_out) {
_fade_out->set (*fade_out, _content->active_video_frame_rate(film));
void
SubtitleAppearanceDialog::content_change (ChangeType type)
{
- if (type == CHANGE_TYPE_DONE) {
+ if (type == ChangeType::DONE) {
setup_sensitivity ();
}
}
wxCheckBox*
SubtitleAppearanceDialog::set_to (wxWindow* w, int& r)
{
- wxSizer* s = new wxBoxSizer (wxHORIZONTAL);
- wxCheckBox* set_to = new CheckBox (this, _("Set to"));
+ auto s = new wxBoxSizer (wxHORIZONTAL);
+ auto set_to = new CheckBox (this, _("Set to"));
s->Add (set_to, 0, wxRIGHT | wxALIGN_CENTER_VERTICAL, 8);
s->Add (w, 0, wxALIGN_CENTER_VERTICAL);
_table->Add (s, wxGBPosition (r, 1));
void
SubtitleAppearanceDialog::apply ()
{
- shared_ptr<const Film> film = _film.lock ();
+ auto film = _film.lock ();
if (_force_colour->GetValue ()) {
- wxColour const c = _colour->GetColour ();
+ auto const c = _colour->GetColour ();
_caption->set_colour (dcp::Colour (c.Red(), c.Green(), c.Blue()));
} else {
_caption->unset_colour ();
_caption->unset_effect ();
}
if (_force_effect_colour->GetValue ()) {
- wxColour const ec = _effect_colour->GetColour ();
+ auto const ec = _effect_colour->GetColour ();
_caption->set_effect_colour (dcp::Colour (ec.Red(), ec.Green(), ec.Blue()));
} else {
_caption->unset_effect_colour ();
_caption->set_outline_width (_outline_width->GetValue ());
if (_stream) {
- for (map<RGBA, RGBAColourPicker*>::const_iterator i = _pickers.begin(); i != _pickers.end(); ++i) {
- _stream->set_colour (i->first, i->second->colour ());
+ for (auto const& i: _pickers) {
+ _stream->set_colour (i.first, i.second->colour());
}
}
- shared_ptr<FFmpegContent> fc = dynamic_pointer_cast<FFmpegContent> (_content);
+ auto fc = dynamic_pointer_cast<FFmpegContent> (_content);
if (fc) {
fc->signal_subtitle_stream_changed ();
}
void
SubtitleAppearanceDialog::restore ()
{
- for (map<RGBA, RGBAColourPicker*>::const_iterator i = _pickers.begin(); i != _pickers.end(); ++i) {
- i->second->set (i->first);
+ for (auto const& i: _pickers) {
+ i.second->set (i.first);
}
}
void
SubtitleAppearanceDialog::add_colours ()
{
- map<RGBA, RGBA> colours = _stream->colours ();
- for (map<RGBA, RGBA>::const_iterator i = colours.begin(); i != colours.end(); ++i) {
- wxPanel* from = new wxPanel (_colours_panel, wxID_ANY);
- from->SetBackgroundColour (wxColour (i->first.r, i->first.g, i->first.b, i->first.a));
+ auto colours = _stream->colours ();
+ for (auto const& i: colours) {
+ auto from = new wxPanel(_colours_panel, wxID_ANY);
+ from->SetBackgroundColour(wxColour(i.first.r, i.first.g, i.first.b, i.first.a));
_colour_table->Add (from, 1, wxEXPAND);
- RGBAColourPicker* to = new RGBAColourPicker (_colours_panel, i->second);
+ auto to = new RGBAColourPicker(_colours_panel, i.second);
_colour_table->Add (to, 1, wxEXPAND);
- _pickers[i->first] = to;
+ _pickers[i.first] = to;
}
}
, _loading_analysis (false)
{
wxString refer = _("Use this DCP's subtitle as OV and make VF");
- if (t == TEXT_CLOSED_CAPTION) {
+ if (t == TextType::CLOSED_CAPTION) {
refer = _("Use this DCP's closed caption as OV and make VF");
}
TextPanel::setup_visibility ()
{
switch (current_type()) {
- case TEXT_OPEN_SUBTITLE:
+ case TextType::OPEN_SUBTITLE:
if (_dcp_track_label) {
_dcp_track_label->Destroy ();
_dcp_track_label = 0;
}
break;
- case TEXT_CLOSED_CAPTION:
+ case TextType::CLOSED_CAPTION:
if (!_dcp_track_label) {
_dcp_track_label = create_label (this, _("CCAP track"), true);
add_label_to_sizer (_grid, _dcp_track_label, true, wxGBPosition(_ccap_track_row, 0));
if (track) {
for (auto i: _parent->selected_text()) {
shared_ptr<TextContent> t = i->text_of_original_type(_original_type);
- if (t && t->type() == TEXT_CLOSED_CAPTION) {
+ if (t && t->type() == TextType::CLOSED_CAPTION) {
t->set_dcp_track(*track);
}
}
} else if (property == TextContentProperty::TYPE) {
if (text) {
switch (text->type()) {
- case TEXT_OPEN_SUBTITLE:
+ case TextType::OPEN_SUBTITLE:
_type->SetSelection (0);
break;
- case TEXT_CLOSED_CAPTION:
+ case TextType::CLOSED_CAPTION:
_type->SetSelection (1);
break;
default:
{
switch (_type->GetSelection()) {
case 0:
- return TEXT_OPEN_SUBTITLE;
+ return TextType::OPEN_SUBTITLE;
case 1:
- return TEXT_CLOSED_CAPTION;
+ return TextType::CLOSED_CAPTION;
}
- return TEXT_UNKNOWN;
+ return TextType::UNKNOWN;
}
void
}
switch (type) {
- case TEXT_OPEN_SUBTITLE:
+ case TextType::OPEN_SUBTITLE:
_type->SetSelection (0);
break;
- case TEXT_CLOSED_CAPTION:
+ case TextType::CLOSED_CAPTION:
if (_type->GetCount() > 1) {
_type->SetSelection (1);
}
_use->Enable (!reference && any_subs > 0);
bool const use = _use->GetValue ();
if (_outline_subtitles) {
- _outline_subtitles->Enable (!_loading_analysis && any_subs && use && type == TEXT_OPEN_SUBTITLE);
+ _outline_subtitles->Enable (!_loading_analysis && any_subs && use && type == TextType::OPEN_SUBTITLE);
}
_type->Enable (!reference && any_subs > 0 && use);
- _burn->Enable (!reference && any_subs > 0 && use && type == TEXT_OPEN_SUBTITLE);
- _x_offset->Enable (!reference && any_subs > 0 && use && type == TEXT_OPEN_SUBTITLE);
- _y_offset->Enable (!reference && any_subs > 0 && use && type == TEXT_OPEN_SUBTITLE);
- _x_scale->Enable (!reference && any_subs > 0 && use && type == TEXT_OPEN_SUBTITLE);
- _y_scale->Enable (!reference && any_subs > 0 && use && type == TEXT_OPEN_SUBTITLE);
- _line_spacing->Enable (!reference && use && type == TEXT_OPEN_SUBTITLE && dcp_subs < any_subs);
+ _burn->Enable (!reference && any_subs > 0 && use && type == TextType::OPEN_SUBTITLE);
+ _x_offset->Enable (!reference && any_subs > 0 && use && type == TextType::OPEN_SUBTITLE);
+ _y_offset->Enable (!reference && any_subs > 0 && use && type == TextType::OPEN_SUBTITLE);
+ _x_scale->Enable (!reference && any_subs > 0 && use && type == TextType::OPEN_SUBTITLE);
+ _y_scale->Enable (!reference && any_subs > 0 && use && type == TextType::OPEN_SUBTITLE);
+ _line_spacing->Enable (!reference && use && type == TextType::OPEN_SUBTITLE && dcp_subs < any_subs);
_stream->Enable (!reference && ffmpeg_subs == 1);
/* Ideally we would check here to see if the FFmpeg content has "string" subs (i.e. not bitmaps) */
_text_view_button->Enable (!reference && any_subs > 0 && ffmpeg_subs == 0);
- _fonts_dialog_button->Enable (!reference && any_subs > 0 && ffmpeg_subs == 0 && type == TEXT_OPEN_SUBTITLE);
- _appearance_dialog_button->Enable (!reference && any_subs > 0 && use && type == TEXT_OPEN_SUBTITLE);
+ _fonts_dialog_button->Enable (!reference && any_subs > 0 && ffmpeg_subs == 0 && type == TextType::OPEN_SUBTITLE);
+ _appearance_dialog_button->Enable (!reference && any_subs > 0 && use && type == TextType::OPEN_SUBTITLE);
}
void
TextPanel::stream_changed ()
{
- FFmpegContentList fc = _parent->selected_ffmpeg ();
+ auto fc = _parent->selected_ffmpeg ();
if (fc.size() != 1) {
return;
}
- shared_ptr<FFmpegContent> fcs = fc.front ();
+ auto fcs = fc.front ();
- vector<shared_ptr<FFmpegSubtitleStream> > a = fcs->subtitle_streams ();
- vector<shared_ptr<FFmpegSubtitleStream> >::iterator i = a.begin ();
- string const s = string_client_data (_stream->GetClientObject (_stream->GetSelection ()));
+ auto a = fcs->subtitle_streams ();
+ auto i = a.begin ();
+ auto const s = string_client_data (_stream->GetClientObject (_stream->GetSelection ()));
while (i != a.end() && (*i)->identifier () != s) {
++i;
}
void
TextPanel::x_scale_changed ()
{
- ContentList c = _parent->selected_text ();
+ auto c = _parent->selected_text ();
if (c.size() == 1) {
c.front()->text_of_original_type(_original_type)->set_x_scale (_x_scale->GetValue() / 100.0);
}
{
if (_fonts_dialog) {
_fonts_dialog->Destroy ();
- _fonts_dialog = 0;
+ _fonts_dialog = nullptr;
}
- ContentList c = _parent->selected_text ();
+ auto c = _parent->selected_text ();
DCPOMATIC_ASSERT (c.size() == 1);
_fonts_dialog = new FontsDialog (this, c.front(), c.front()->text_of_original_type(_original_type));
void
TextPanel::reference_clicked ()
{
- ContentList c = _parent->selected ();
+ auto c = _parent->selected ();
if (c.size() != 1) {
return;
}
- shared_ptr<DCPContent> d = dynamic_pointer_cast<DCPContent> (c.front ());
+ auto d = dynamic_pointer_cast<DCPContent> (c.front ());
if (!d) {
return;
}
void
TextPanel::appearance_dialog_clicked ()
{
- ContentList c = _parent->selected_text ();
+ auto c = _parent->selected_text ();
DCPOMATIC_ASSERT (c.size() == 1);
- SubtitleAppearanceDialog* d = new SubtitleAppearanceDialog (this, _parent->film(), c.front(), c.front()->text_of_original_type(_original_type));
+ auto d = new SubtitleAppearanceDialog (this, _parent->film(), c.front(), c.front()->text_of_original_type(_original_type));
if (d->ShowModal () == wxID_OK) {
d->apply ();
}
setup_sensitivity ();
_analysis.reset ();
- shared_ptr<Content> content = _analysis_content.lock ();
+ auto content = _analysis_content.lock ();
if (!content) {
_loading_analysis = false;
setup_sensitivity ();
return;
}
- boost::filesystem::path const path = _parent->film()->subtitle_analysis_path(content);
+ auto const path = _parent->film()->subtitle_analysis_path(content);
if (!boost::filesystem::exists(path)) {
for (auto i: JobManager::instance()->get()) {
void
TextPanel::update_outline_subtitles_in_viewer ()
{
- shared_ptr<FilmViewer> fv = _parent->film_viewer().lock();
+ auto fv = _parent->film_viewer().lock();
if (!fv) {
return;
}
if (_analysis) {
- optional<dcpomatic::Rect<double> > rect = _analysis->bounding_box ();
+ auto rect = _analysis->bounding_box ();
if (rect) {
- shared_ptr<Content> content = _analysis_content.lock ();
+ auto content = _analysis_content.lock ();
DCPOMATIC_ASSERT (content);
rect->x += content->text.front()->x_offset();
rect->y += content->text.front()->y_offset();
void
TextPanel::analysis_finished ()
{
- shared_ptr<Content> content = _analysis_content.lock ();
+ auto content = _analysis_content.lock ();
if (!content) {
_loading_analysis = false;
setup_sensitivity ();
_main_canvas->Bind (wxEVT_SCROLLWIN_PAGEDOWN, boost::bind (&Timeline::scrolled, this, _1));
_main_canvas->Bind (wxEVT_SCROLLWIN_THUMBTRACK, boost::bind (&Timeline::scrolled, this, _1));
- film_change (CHANGE_TYPE_DONE, Film::CONTENT);
+ film_change (ChangeType::DONE, Film::CONTENT);
SetMinSize (wxSize (640, 4 * pixels_per_track() + 96));
void
Timeline::film_change (ChangeType type, Film::Property p)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
void
Timeline::film_content_change (ChangeType type, int property, bool frequent)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
}
/* Video on tracks 0 and maybe 1 (left and right eye) */
- if (cv->content()->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
+ if (cv->content()->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
cv->set_track (1);
_tracks = max (_tracks, 2);
have_3d = true;
{
DCPOMATIC_ASSERT (_track);
- shared_ptr<const Film> film = _timeline.film ();
- shared_ptr<const Content> content = _content.lock ();
+ auto film = _timeline.film ();
+ auto content = _content.lock ();
if (!film || !content) {
- return dcpomatic::Rect<int> ();
+ return {};
}
return dcpomatic::Rect<int> (
void
TimelineContentView::unset_track ()
{
- _track = boost::optional<int> ();
+ _track = boost::optional<int>();
}
boost::optional<int>
{
DCPOMATIC_ASSERT (_track);
- shared_ptr<const Film> film = _timeline.film ();
- shared_ptr<const Content> cont = content ();
+ auto film = _timeline.film ();
+ auto cont = content ();
if (!film || !cont) {
return;
}
}
/* Outline */
- wxGraphicsPath path = gc->CreatePath ();
+ auto path = gc->CreatePath ();
path.MoveToPoint (time_x (position) + 2, y_pos (_track.get()) + 4);
path.AddLineToPoint (time_x (position + len) - 1, y_pos (_track.get()) + 4);
path.AddLineToPoint (time_x (position + len) - 1, y_pos (_track.get() + 1) - 4);
/* Overlaps */
gc->SetBrush (*wxTheBrushList->FindOrCreateBrush (foreground_colour(), wxBRUSHSTYLE_CROSSDIAG_HATCH));
- for (list<dcpomatic::Rect<int> >::const_iterator i = overlaps.begin(); i != overlaps.end(); ++i) {
- gc->DrawRectangle (i->x, i->y + 4, i->width, i->height - 8);
+ for (auto const& i: overlaps) {
+ gc->DrawRectangle (i.x, i.y + 4, i.width, i.height - 8);
}
/* Label text */
- wxString lab = label ();
+ auto lab = label ();
wxDouble lab_width;
wxDouble lab_height;
wxDouble lab_descent;
void
TimelineContentView::content_change (ChangeType type, int p)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
, _film (film)
, _timeline (this, cp, film, viewer)
{
- wxBoxSizer* sizer = new wxBoxSizer (wxVERTICAL);
+ auto sizer = new wxBoxSizer (wxVERTICAL);
wxBitmap select (bitmap_path("select"), wxBITMAP_TYPE_PNG);
wxBitmap zoom (bitmap_path("zoom"), wxBITMAP_TYPE_PNG);
sizer->SetSizeHints (this);
_toolbar->ToggleTool ((int) Timeline::SNAP, _timeline.snap ());
- film_change (CHANGE_TYPE_DONE, Film::SEQUENCE);
+ film_change (ChangeType::DONE, Film::SEQUENCE);
_film_changed_connection = film->Change.connect (bind (&TimelineDialog::film_change, this, _1, _2));
}
void
TimelineDialog::film_change (ChangeType type, Film::Property p)
{
- if (type != CHANGE_TYPE_DONE) {
+ if (type != ChangeType::DONE) {
return;
}
- shared_ptr<Film> film = _film.lock ();
+ auto film = _film.lock ();
if (!film) {
return;
}
if (t == Timeline::SNAP) {
_timeline.set_snap (_toolbar->GetToolState ((int) t));
} else if (t == Timeline::SEQUENCE) {
- shared_ptr<Film> film = _film.lock ();
+ auto film = _film.lock ();
if (film) {
film->set_sequence (_toolbar->GetToolState ((int) t));
}
: _viewer (viewer)
, _state_timer ("viewer")
, _video_frame_rate (0)
- , _eyes (EYES_LEFT)
+ , _eyes (Eyes::LEFT)
, _three_d (false)
, _dropped (0)
, _errored (0)
_player_video.first &&
_three_d &&
_eyes != _player_video.first->eyes() &&
- _player_video.first->eyes() != EYES_BOTH
+ _player_video.first->eyes() != Eyes::BOTH
);
if (_player_video.first && _player_video.first->error()) {
return optional<int>();
}
- dcpomatic::DCPTime const next = position() + one_video_frame();
- dcpomatic::DCPTime const time = _viewer->audio_time().get_value_or(position());
+ auto const next = position() + one_video_frame();
+ auto const time = _viewer->audio_time().get_value_or(position());
if (next < time) {
return 0;
}
bool
VideoView::reset_metadata (shared_ptr<const Film> film, dcp::Size player_video_container_size)
{
- pair<shared_ptr<PlayerVideo>, dcpomatic::DCPTime> pv = player_video ();
+ auto pv = player_video ();
if (!pv.first) {
return false;
}
LogSwitcher ls (film->log());
film->set_name ("4k_test");
auto c = make_shared<FFmpegContent>("test/data/test.mp4");
- film->set_resolution (RESOLUTION_4K);
+ film->set_resolution (Resolution::FOUR_K);
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("FTR"));
film->set_container (Ratio::from_id ("185"));
film->examine_and_add_content (c);
using std::list;
using std::shared_ptr;
+using std::weak_ptr;
+using std::make_shared;
using boost::thread;
using boost::optional;
-using std::weak_ptr;
using dcp::ArrayData;
void
BOOST_AUTO_TEST_CASE (client_server_test_rgb)
{
- shared_ptr<Image> image (new Image (AV_PIX_FMT_RGB24, dcp::Size (1998, 1080), true));
+ auto image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (1998, 1080), true);
uint8_t* p = image->data()[0];
for (int y = 0; y < 1080; ++y) {
p += image->stride()[0];
}
- shared_ptr<Image> sub_image (new Image (AV_PIX_FMT_BGRA, dcp::Size (100, 200), true));
+ auto sub_image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (100, 200), true);
p = sub_image->data()[0];
for (int y = 0; y < 200; ++y) {
uint8_t* q = p;
p += sub_image->stride()[0];
}
- LogSwitcher ls (shared_ptr<Log>(new FileLog("build/test/client_server_test_rgb.log")));
-
- shared_ptr<PlayerVideo> pvf (
- new PlayerVideo (
- shared_ptr<ImageProxy> (new RawImageProxy (image)),
- Crop (),
- optional<double> (),
- dcp::Size (1998, 1080),
- dcp::Size (1998, 1080),
- EYES_BOTH,
- PART_WHOLE,
- ColourConversion(),
- VideoRange::FULL,
- weak_ptr<Content>(),
- optional<Frame>(),
- false
- )
+ LogSwitcher ls (make_shared<FileLog>("build/test/client_server_test_rgb.log"));
+
+ auto pvf = std::make_shared<PlayerVideo>(
+ make_shared<RawImageProxy>(image),
+ Crop (),
+ optional<double> (),
+ dcp::Size (1998, 1080),
+ dcp::Size (1998, 1080),
+ Eyes::BOTH,
+ Part::WHOLE,
+ ColourConversion(),
+ VideoRange::FULL,
+ weak_ptr<Content>(),
+ optional<Frame>(),
+ false
);
- pvf->set_text (PositionImage (sub_image, Position<int> (50, 60)));
+ pvf->set_text (PositionImage(sub_image, Position<int>(50, 60)));
- shared_ptr<DCPVideo> frame (
- new DCPVideo (
- pvf,
- 0,
- 24,
- 200000000,
- RESOLUTION_2K
- )
+ auto frame = make_shared<DCPVideo> (
+ pvf,
+ 0,
+ 24,
+ 200000000,
+ Resolution::TWO_K
);
- ArrayData locally_encoded = frame->encode_locally ();
+ auto locally_encoded = frame->encode_locally ();
- EncodeServer* server = new EncodeServer (true, 2);
+ auto server = new EncodeServer (true, 2);
- thread* server_thread = new thread (boost::bind (&EncodeServer::run, server));
+ auto server_thread = new thread (boost::bind(&EncodeServer::run, server));
/* Let the server get itself ready */
dcpomatic_sleep_seconds (1);
threads.push_back (new thread (boost::bind (do_remote_encode, frame, description, locally_encoded)));
}
- for (list<thread*>::iterator i = threads.begin(); i != threads.end(); ++i) {
- (*i)->join ();
+ for (auto i: threads) {
+ i->join ();
}
- for (list<thread*>::iterator i = threads.begin(); i != threads.end(); ++i) {
- delete *i;
+ for (auto i: threads) {
+ delete i;
}
server->stop ();
BOOST_AUTO_TEST_CASE (client_server_test_yuv)
{
- shared_ptr<Image> image (new Image (AV_PIX_FMT_YUV420P, dcp::Size (1998, 1080), true));
+ auto image = make_shared<Image>(AV_PIX_FMT_YUV420P, dcp::Size (1998, 1080), true);
for (int i = 0; i < image->planes(); ++i) {
uint8_t* p = image->data()[i];
}
}
- shared_ptr<Image> sub_image (new Image (AV_PIX_FMT_BGRA, dcp::Size (100, 200), true));
+ auto sub_image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (100, 200), true);
uint8_t* p = sub_image->data()[0];
for (int y = 0; y < 200; ++y) {
uint8_t* q = p;
p += sub_image->stride()[0];
}
- LogSwitcher ls (shared_ptr<Log>(new FileLog("build/test/client_server_test_yuv.log")));
-
- shared_ptr<PlayerVideo> pvf (
- new PlayerVideo (
- shared_ptr<ImageProxy> (new RawImageProxy (image)),
- Crop (),
- optional<double> (),
- dcp::Size (1998, 1080),
- dcp::Size (1998, 1080),
- EYES_BOTH,
- PART_WHOLE,
- ColourConversion(),
- VideoRange::FULL,
- weak_ptr<Content>(),
- optional<Frame>(),
- false
- )
+ LogSwitcher ls (make_shared<FileLog>("build/test/client_server_test_yuv.log"));
+
+ auto pvf = std::make_shared<PlayerVideo>(
+ std::make_shared<RawImageProxy>(image),
+ Crop(),
+ optional<double>(),
+ dcp::Size(1998, 1080),
+ dcp::Size(1998, 1080),
+ Eyes::BOTH,
+ Part::WHOLE,
+ ColourConversion(),
+ VideoRange::FULL,
+ weak_ptr<Content>(),
+ optional<Frame>(),
+ false
);
- pvf->set_text (PositionImage (sub_image, Position<int> (50, 60)));
+ pvf->set_text (PositionImage(sub_image, Position<int>(50, 60)));
- shared_ptr<DCPVideo> frame (
- new DCPVideo (
- pvf,
- 0,
- 24,
- 200000000,
- RESOLUTION_2K
- )
+ auto frame = make_shared<DCPVideo>(
+ pvf,
+ 0,
+ 24,
+ 200000000,
+ Resolution::TWO_K
);
- ArrayData locally_encoded = frame->encode_locally ();
+ auto locally_encoded = frame->encode_locally ();
- EncodeServer* server = new EncodeServer (true, 2);
+ auto server = new EncodeServer (true, 2);
- thread* server_thread = new thread (boost::bind (&EncodeServer::run, server));
+ auto server_thread = new thread(boost::bind(&EncodeServer::run, server));
/* Let the server get itself ready */
dcpomatic_sleep_seconds (1);
list<thread*> threads;
for (int i = 0; i < 8; ++i) {
- threads.push_back (new thread (boost::bind (do_remote_encode, frame, description, locally_encoded)));
+ threads.push_back (new thread(boost::bind(do_remote_encode, frame, description, locally_encoded)));
}
- for (list<thread*>::iterator i = threads.begin(); i != threads.end(); ++i) {
- (*i)->join ();
+ for (auto i: threads) {
+ i->join ();
}
- for (list<thread*>::iterator i = threads.begin(); i != threads.end(); ++i) {
- delete *i;
+ for (auto i: threads) {
+ delete i;
}
server->stop ();
BOOST_AUTO_TEST_CASE (client_server_test_j2k)
{
- shared_ptr<Image> image (new Image (AV_PIX_FMT_YUV420P, dcp::Size (1998, 1080), true));
+ auto image = make_shared<Image>(AV_PIX_FMT_YUV420P, dcp::Size (1998, 1080), true);
for (int i = 0; i < image->planes(); ++i) {
uint8_t* p = image->data()[i];
}
}
- LogSwitcher ls (shared_ptr<Log>(new FileLog("build/test/client_server_test_j2k.log")));
-
- shared_ptr<PlayerVideo> raw_pvf (
- new PlayerVideo (
- shared_ptr<ImageProxy> (new RawImageProxy (image)),
- Crop (),
- optional<double> (),
- dcp::Size (1998, 1080),
- dcp::Size (1998, 1080),
- EYES_BOTH,
- PART_WHOLE,
- ColourConversion(),
- VideoRange::FULL,
- weak_ptr<Content>(),
- optional<Frame>(),
- false
- )
+ LogSwitcher ls (make_shared<FileLog>("build/test/client_server_test_j2k.log"));
+
+ auto raw_pvf = std::make_shared<PlayerVideo> (
+ std::make_shared<RawImageProxy>(image),
+ Crop(),
+ optional<double>(),
+ dcp::Size(1998, 1080),
+ dcp::Size(1998, 1080),
+ Eyes::BOTH,
+ Part::WHOLE,
+ ColourConversion(),
+ VideoRange::FULL,
+ weak_ptr<Content>(),
+ optional<Frame>(),
+ false
);
- shared_ptr<DCPVideo> raw_frame (
- new DCPVideo (
- raw_pvf,
- 0,
- 24,
- 200000000,
- RESOLUTION_2K
- )
+ auto raw_frame = make_shared<DCPVideo> (
+ raw_pvf,
+ 0,
+ 24,
+ 200000000,
+ Resolution::TWO_K
);
- ArrayData raw_locally_encoded = raw_frame->encode_locally ();
-
- shared_ptr<PlayerVideo> j2k_pvf (
- new PlayerVideo (
- shared_ptr<ImageProxy> (new J2KImageProxy (raw_locally_encoded, dcp::Size (1998, 1080), AV_PIX_FMT_XYZ12LE)),
- Crop (),
- optional<double> (),
- dcp::Size (1998, 1080),
- dcp::Size (1998, 1080),
- EYES_BOTH,
- PART_WHOLE,
- PresetColourConversion::all().front().conversion,
- VideoRange::FULL,
- weak_ptr<Content>(),
- optional<Frame>(),
- false
- )
+ auto raw_locally_encoded = raw_frame->encode_locally ();
+
+ auto j2k_pvf = std::make_shared<PlayerVideo> (
+ std::make_shared<J2KImageProxy>(raw_locally_encoded, dcp::Size(1998, 1080), AV_PIX_FMT_XYZ12LE),
+ Crop(),
+ optional<double>(),
+ dcp::Size(1998, 1080),
+ dcp::Size(1998, 1080),
+ Eyes::BOTH,
+ Part::WHOLE,
+ PresetColourConversion::all().front().conversion,
+ VideoRange::FULL,
+ weak_ptr<Content>(),
+ optional<Frame>(),
+ false
);
- shared_ptr<DCPVideo> j2k_frame (
- new DCPVideo (
- j2k_pvf,
- 0,
- 24,
- 200000000,
- RESOLUTION_2K
- )
+ auto j2k_frame = make_shared<DCPVideo> (
+ j2k_pvf,
+ 0,
+ 24,
+ 200000000,
+ Resolution::TWO_K
);
- ArrayData j2k_locally_encoded = j2k_frame->encode_locally ();
+ auto j2k_locally_encoded = j2k_frame->encode_locally ();
- EncodeServer* server = new EncodeServer (true, 2);
+ auto server = new EncodeServer (true, 2);
- thread* server_thread = new thread (boost::bind (&EncodeServer::run, server));
+ auto server_thread = new thread (boost::bind (&EncodeServer::run, server));
/* Let the server get itself ready */
dcpomatic_sleep_seconds (1);
list<thread*> threads;
for (int i = 0; i < 8; ++i) {
- threads.push_back (new thread (boost::bind (do_remote_encode, j2k_frame, description, j2k_locally_encoded)));
+ threads.push_back (new thread(boost::bind(do_remote_encode, j2k_frame, description, j2k_locally_encoded)));
}
- for (list<thread*>::iterator i = threads.begin(); i != threads.end(); ++i) {
- (*i)->join ();
+ for (auto i: threads) {
+ i->join ();
}
- for (list<thread*>::iterator i = threads.begin(); i != threads.end(); ++i) {
- delete *i;
+ for (auto i: threads) {
+ delete i;
}
server->stop ();
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- content->only_text()->set_type (TEXT_CLOSED_CAPTION);
+ content->only_text()->set_type (TextType::CLOSED_CAPTION);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
film->examine_and_add_content (content3);
BOOST_REQUIRE (!wait_for_jobs ());
- content1->only_text()->set_type (TEXT_CLOSED_CAPTION);
+ content1->only_text()->set_type (TextType::CLOSED_CAPTION);
content1->only_text()->set_dcp_track (DCPTextTrack("First track", "fr-FR"));
- content2->only_text()->set_type (TEXT_CLOSED_CAPTION);
+ content2->only_text()->set_type (TextType::CLOSED_CAPTION);
content2->only_text()->set_dcp_track (DCPTextTrack("Second track", "de-DE"));
- content3->only_text()->set_type (TEXT_CLOSED_CAPTION);
+ content3->only_text()->set_type (TextType::CLOSED_CAPTION);
content3->only_text()->set_dcp_track (DCPTextTrack("Third track", "it-IT"));
film->make_dcp ();
BOOST_CHECK_EQUAL (*cc.output_dir, "flaps");
BOOST_REQUIRE_EQUAL (cc.content.size(), 3U);
BOOST_CHECK_EQUAL (cc.content[0].path, "fred");
- BOOST_CHECK_EQUAL (cc.content[0].frame_type, VIDEO_FRAME_TYPE_2D);
+ BOOST_CHECK_EQUAL (cc.content[0].frame_type, VideoFrameType::TWO_D);
BOOST_CHECK_EQUAL (cc.content[1].path, "jim");
- BOOST_CHECK_EQUAL (cc.content[1].frame_type, VIDEO_FRAME_TYPE_2D);
+ BOOST_CHECK_EQUAL (cc.content[1].frame_type, VideoFrameType::TWO_D);
BOOST_CHECK_EQUAL (cc.content[2].path, "sheila");
- BOOST_CHECK_EQUAL (cc.content[2].frame_type, VIDEO_FRAME_TYPE_2D);
+ BOOST_CHECK_EQUAL (cc.content[2].frame_type, VideoFrameType::TWO_D);
cc = run ("dcpomatic2_create --left-eye left.mp4 --right-eye right.mp4");
BOOST_REQUIRE_EQUAL (cc.content.size(), 2U);
BOOST_CHECK_EQUAL (cc.content[0].path, "left.mp4");
- BOOST_CHECK_EQUAL (cc.content[0].frame_type, VIDEO_FRAME_TYPE_3D_LEFT);
+ BOOST_CHECK_EQUAL (cc.content[0].frame_type, VideoFrameType::THREE_D_LEFT);
BOOST_CHECK_EQUAL (cc.content[1].path, "right.mp4");
- BOOST_CHECK_EQUAL (cc.content[1].frame_type, VIDEO_FRAME_TYPE_3D_RIGHT);
+ BOOST_CHECK_EQUAL (cc.content[1].frame_type, VideoFrameType::THREE_D_RIGHT);
BOOST_CHECK_EQUAL (cc.fourk, false);
cc = run ("dcpomatic2_create --fourk foo.mp4");
film->examine_and_add_content (r);
film->examine_and_add_content (g);
film->examine_and_add_content (b);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
BOOST_REQUIRE (!wait_for_jobs());
BOOST_CHECK (Config::instance()->master_encoding_threads() > 1);
string name = "ffmpeg_encoder_";
string extension;
switch (format) {
- case EXPORT_FORMAT_H264_AAC:
+ case ExportFormat::H264_AAC:
name += "h264";
extension = "mp4";
break;
- case EXPORT_FORMAT_PRORES:
+ case ExportFormat::PRORES:
name += "prores";
extension = "mov";
break;
- case EXPORT_FORMAT_H264_PCM:
- case EXPORT_FORMAT_SUBTITLES_DCP:
+ case ExportFormat::H264_PCM:
+ case ExportFormat::SUBTITLES_DCP:
BOOST_REQUIRE (false);
}
/** Red / green / blue MP4 -> Prores */
BOOST_AUTO_TEST_CASE (ffmpeg_encoder_prores_test1)
{
- ffmpeg_content_test (1, "test/data/test.mp4", EXPORT_FORMAT_PRORES);
+ ffmpeg_content_test (1, "test/data/test.mp4", ExportFormat::PRORES);
}
/** Dolby Aurora trailer VOB -> Prores */
BOOST_AUTO_TEST_CASE (ffmpeg_encoder_prores_test2)
{
- ffmpeg_content_test (2, TestPaths::private_data() / "dolby_aurora.vob", EXPORT_FORMAT_PRORES);
+ ffmpeg_content_test (2, TestPaths::private_data() / "dolby_aurora.vob", ExportFormat::PRORES);
}
/** Sintel trailer -> Prores */
BOOST_AUTO_TEST_CASE (ffmpeg_encoder_prores_test3)
{
- ffmpeg_content_test (3, TestPaths::private_data() / "Sintel_Trailer1.480p.DivX_Plus_HD.mkv", EXPORT_FORMAT_PRORES);
+ ffmpeg_content_test (3, TestPaths::private_data() / "Sintel_Trailer1.480p.DivX_Plus_HD.mkv", ExportFormat::PRORES);
}
/** Big Buck Bunny trailer -> Prores */
BOOST_AUTO_TEST_CASE (ffmpeg_encoder_prores_test4)
{
- ffmpeg_content_test (4, TestPaths::private_data() / "big_buck_bunny_trailer_480p.mov", EXPORT_FORMAT_PRORES);
+ ffmpeg_content_test (4, TestPaths::private_data() / "big_buck_bunny_trailer_480p.mov", ExportFormat::PRORES);
}
/** Still image -> Prores */
film->write_metadata ();
shared_ptr<Job> job (new TranscodeJob (film));
- FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_prores_test5.mov", EXPORT_FORMAT_PRORES, false, false, false, 23);
+ FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_prores_test5.mov", ExportFormat::PRORES, false, false, false, 23);
encoder.go ();
}
film->write_metadata();
shared_ptr<Job> job (new TranscodeJob (film));
- FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_prores_test6.mov", EXPORT_FORMAT_PRORES, false, false, false, 23);
+ FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_prores_test6.mov", ExportFormat::PRORES, false, false, false, 23);
encoder.go ();
}
s->only_text()->set_effect_colour (dcp::Colour (0, 255, 255));
shared_ptr<Job> job (new TranscodeJob (film));
- FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_prores_test7.mov", EXPORT_FORMAT_PRORES, false, false, false, 23);
+ FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_prores_test7.mov", ExportFormat::PRORES, false, false, false, 23);
encoder.go ();
}
/** Red / green / blue MP4 -> H264 */
BOOST_AUTO_TEST_CASE (ffmpeg_encoder_h264_test1)
{
- ffmpeg_content_test(1, "test/data/test.mp4", EXPORT_FORMAT_H264_AAC);
+ ffmpeg_content_test(1, "test/data/test.mp4", ExportFormat::H264_AAC);
}
/** Just subtitles -> H264 */
film->write_metadata();
shared_ptr<Job> job (new TranscodeJob (film));
- FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_h264_test2.mp4", EXPORT_FORMAT_H264_AAC, false, false, false, 23);
+ FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_h264_test2.mp4", ExportFormat::H264_AAC, false, false, false, 23);
encoder.go ();
}
film->write_metadata();
shared_ptr<Job> job (new TranscodeJob (film));
- FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_h264_test3.mp4", EXPORT_FORMAT_H264_AAC, false, false, false, 23);
+ FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_h264_test3.mp4", ExportFormat::H264_AAC, false, false, false, 23);
encoder.go ();
}
film->set_container(Ratio::from_id("185"));
shared_ptr<Job> job(new TranscodeJob(film));
- FFmpegEncoder encoder(film, job, "build/test/ffmpeg_encoder_h264_test4.mp4", EXPORT_FORMAT_H264_AAC, false, false, false, 23);
+ FFmpegEncoder encoder(film, job, "build/test/ffmpeg_encoder_h264_test4.mp4", ExportFormat::H264_AAC, false, false, false, 23);
encoder.go();
}
Rs->audio->set_mapping (map);
shared_ptr<Job> job (new TranscodeJob (film));
- FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_h264_test5.mp4", EXPORT_FORMAT_H264_AAC, true, false, false, 23);
+ FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_h264_test5.mp4", ExportFormat::H264_AAC, true, false, false, 23);
encoder.go ();
check_ffmpeg ("build/test/ffmpeg_encoder_h264_test5.mp4", "test/data/ffmpeg_encoder_h264_test5.mp4", 1);
}
shared_ptr<Job> job (new TranscodeJob (film2));
- FFmpegEncoder encoder (film2, job, "build/test/ffmpeg_encoder_h264_test6_vf.mp4", EXPORT_FORMAT_H264_AAC, true, false, false, 23);
+ FFmpegEncoder encoder (film2, job, "build/test/ffmpeg_encoder_h264_test6_vf.mp4", ExportFormat::H264_AAC, true, false, false, 23);
encoder.go ();
}
shared_ptr<Content> R (shared_ptr<ImageContent>(new ImageContent(TestPaths::private_data() / "bbc405.png")));
film->examine_and_add_content (R);
BOOST_REQUIRE (!wait_for_jobs());
- L->video->set_frame_type (VIDEO_FRAME_TYPE_3D_LEFT);
+ L->video->set_frame_type (VideoFrameType::THREE_D_LEFT);
L->set_position (film, DCPTime());
- R->video->set_frame_type (VIDEO_FRAME_TYPE_3D_RIGHT);
+ R->video->set_frame_type (VideoFrameType::THREE_D_RIGHT);
R->set_position (film, DCPTime());
film->set_three_d (true);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs());
shared_ptr<Job> job (new TranscodeJob (film2));
- FFmpegEncoder encoder (film2, job, "build/test/ffmpeg_encoder_h264_test7.mp4", EXPORT_FORMAT_H264_AAC, true, false, false, 23);
+ FFmpegEncoder encoder (film2, job, "build/test/ffmpeg_encoder_h264_test7.mp4", ExportFormat::H264_AAC, true, false, false, 23);
encoder.go ();
}
film->set_audio_channels (2);
shared_ptr<Job> job(new TranscodeJob(film));
- FFmpegEncoder encoder(film, job, "build/test/ffmpeg_encoder_h264_test8.mp4", EXPORT_FORMAT_H264_AAC, true, false, false, 23);
+ FFmpegEncoder encoder(film, job, "build/test/ffmpeg_encoder_h264_test8.mp4", ExportFormat::H264_AAC, true, false, false, 23);
encoder.go();
}
film->write_metadata ();
shared_ptr<Job> job (new TranscodeJob (film));
- FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_prores_test9.mov", EXPORT_FORMAT_H264_AAC, false, false, false, 23);
+ FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_prores_test9.mov", ExportFormat::H264_AAC, false, false, false, 23);
encoder.go ();
}
b->set_video_frame_rate (24);
b->video->set_length (24);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
film->write_metadata ();
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs());
b->set_video_frame_rate (24);
b->video->set_length (24);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs());
BOOST_AUTO_TEST_CASE (hint_closed_caption_too_long)
{
check (
- TEXT_CLOSED_CAPTION,
+ TextType::CLOSED_CAPTION,
"hint_closed_caption_too_long",
String::compose("At least one of your closed caption lines has more than %1 characters. It is advisable to make each line %1 characters at most in length.", MAX_CLOSED_CAPTION_LENGTH, MAX_CLOSED_CAPTION_LENGTH)
);
BOOST_AUTO_TEST_CASE (hint_many_closed_caption_lines)
{
check (
- TEXT_CLOSED_CAPTION,
+ TextType::CLOSED_CAPTION,
"hint_many_closed_caption_lines",
String::compose("Some of your closed captions span more than %1 lines, so they will be truncated.", MAX_CLOSED_CAPTION_LINES)
);
BOOST_AUTO_TEST_CASE (hint_subtitle_too_early)
{
check (
- TEXT_OPEN_SUBTITLE,
+ TextType::OPEN_SUBTITLE,
"hint_subtitle_too_early",
string("It is advisable to put your first subtitle at least 4 seconds after the start of the DCP to make sure it is seen.")
);
BOOST_AUTO_TEST_CASE (hint_short_subtitles)
{
check (
- TEXT_OPEN_SUBTITLE,
+ TextType::OPEN_SUBTITLE,
"hint_short_subtitles",
string("At least one of your subtitles lasts less than 15 frames. It is advisable to make each subtitle at least 15 frames long.")
);
BOOST_AUTO_TEST_CASE (hint_subtitles_too_close)
{
check (
- TEXT_OPEN_SUBTITLE,
+ TextType::OPEN_SUBTITLE,
"hint_subtitles_too_close",
string("At least one of your subtitles starts less than 2 frames after the previous one. It is advisable to make the gap between subtitles at least 2 frames.")
);
BOOST_AUTO_TEST_CASE (hint_many_subtitle_lines)
{
check (
- TEXT_OPEN_SUBTITLE,
+ TextType::OPEN_SUBTITLE,
"hint_many_subtitle_lines",
string("At least one of your subtitles has more than 3 lines. It is advisable to use no more than 3 lines.")
);
BOOST_AUTO_TEST_CASE (hint_subtitle_too_long)
{
check (
- TEXT_OPEN_SUBTITLE,
+ TextType::OPEN_SUBTITLE,
"hint_subtitle_too_long",
string("At least one of your subtitle lines has more than 52 characters. It is advisable to make each line 52 characters at most in length.")
);
shared_ptr<Film> film = new_test_film2 (name);
shared_ptr<Content> content = content_factory("test/data/" + name + ".srt").front();
- content->text.front()->set_type (TEXT_OPEN_SUBTITLE);
+ content->text.front()->set_type (TextType::OPEN_SUBTITLE);
for (int i = 1; i < 512; ++i) {
shared_ptr<dcpomatic::Font> font(new dcpomatic::Font(String::compose("font_%1", i)));
font->set_file ("test/data/LiberationSans-Regular.ttf");
fclose (ccap);
shared_ptr<Content> content = content_factory("build/test/" + name + ".srt").front();
- content->text.front()->set_type (TEXT_CLOSED_CAPTION);
+ content->text.front()->set_type (TextType::CLOSED_CAPTION);
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs());
vector<string> hints = get_hints (film);
film->set_container (Ratio::from_id ("239"));
film->_isdcf_date = boost::gregorian::date (2014, boost::gregorian::Jul, 4);
film->set_audio_channels (1);
- film->set_resolution (RESOLUTION_4K);
+ film->set_resolution (Resolution::FOUR_K);
film->set_subtitle_language (dcp::LanguageTag("fr-FR"));
shared_ptr<Content> text = content_factory("test/data/subrip.srt").front();
BOOST_REQUIRE_EQUAL (text->text.size(), 1U);
shared_ptr<Content> content = content_factory("test/data/flat_red.png").front ();
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- content->video->set_frame_type (VIDEO_FRAME_TYPE_3D_LEFT_RIGHT);
+ content->video->set_frame_type (VideoFrameType::THREE_D_LEFT_RIGHT);
film->set_three_d (true);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
shared_ptr<Content> text = content_factory("test/data/subrip.srt").front();
film->examine_and_add_content (text);
BOOST_REQUIRE (!wait_for_jobs());
- text->only_text()->set_type (TEXT_CLOSED_CAPTION);
+ text->only_text()->set_type (TextType::CLOSED_CAPTION);
text->only_text()->set_use (true);
shared_ptr<Player> player (new Player(film));
film->examine_and_add_content (right);
BOOST_REQUIRE (!wait_for_jobs());
- left->video->set_frame_type (VIDEO_FRAME_TYPE_3D_LEFT);
+ left->video->set_frame_type (VideoFrameType::THREE_D_LEFT);
left->set_position (film, DCPTime());
- right->video->set_frame_type (VIDEO_FRAME_TYPE_3D_RIGHT);
+ right->video->set_frame_type (VideoFrameType::THREE_D_RIGHT);
right->set_position (film, DCPTime());
film->set_three_d (true);
film->examine_and_add_content (right);
BOOST_REQUIRE (!wait_for_jobs());
- left->video->set_frame_type (VIDEO_FRAME_TYPE_3D_LEFT);
+ left->video->set_frame_type (VideoFrameType::THREE_D_LEFT);
left->set_position (film, DCPTime());
- right->video->set_frame_type (VIDEO_FRAME_TYPE_3D_RIGHT);
+ right->video->set_frame_type (VideoFrameType::THREE_D_RIGHT);
right->set_position (film, DCPTime());
film->set_three_d (true);
#include <iostream>
using std::cout;
-using std::string;
+using std::make_shared;
using std::shared_ptr;
+using std::string;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
#endif
BOOST_AUTO_TEST_CASE (recover_test_3d, * boost::unit_test::depends_on("recover_test_2d"))
{
- shared_ptr<Film> film = new_test_film ("recover_test_3d");
+ auto film = new_test_film ("recover_test_3d");
film->set_interop (false);
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("FTR"));
film->set_container (Ratio::from_id ("185"));
film->set_name ("recover_test");
film->set_three_d (true);
- shared_ptr<ImageContent> content (new ImageContent("test/data/3d_test"));
- content->video->set_frame_type (VIDEO_FRAME_TYPE_3D_LEFT_RIGHT);
+ auto content = make_shared<ImageContent>("test/data/3d_test");
+ content->video->set_frame_type (VideoFrameType::THREE_D_LEFT_RIGHT);
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs());
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs());
- shared_ptr<dcp::StereoPictureAsset> A (new dcp::StereoPictureAsset ("build/test/recover_test_3d/original.mxf"));
- shared_ptr<dcp::StereoPictureAsset> B (new dcp::StereoPictureAsset (video));
+ auto A = make_shared<dcp::StereoPictureAsset>("build/test/recover_test_3d/original.mxf");
+ auto B = make_shared<dcp::StereoPictureAsset>(video);
dcp::EqualityOptions eq;
BOOST_CHECK (A->equals (B, eq, boost::bind (&note, _1, _2)));
BOOST_AUTO_TEST_CASE (recover_test_2d_encrypted, * boost::unit_test::depends_on("recover_test_3d"))
{
- shared_ptr<Film> film = new_test_film ("recover_test_2d_encrypted");
+ auto film = new_test_film ("recover_test_2d_encrypted");
film->set_interop (false);
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("FTR"));
film->set_container (Ratio::from_id ("185"));
film->set_encrypted (true);
film->_key = dcp::Key("eafcb91c9f5472edf01f3a2404c57258");
- shared_ptr<FFmpegContent> content (new FFmpegContent("test/data/count300bd24.m2ts"));
+ auto content = make_shared<FFmpegContent>("test/data/count300bd24.m2ts");
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs());
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs());
- shared_ptr<dcp::MonoPictureAsset> A (new dcp::MonoPictureAsset ("build/test/recover_test_2d_encrypted/original.mxf"));
+ auto A = make_shared<dcp::MonoPictureAsset>("build/test/recover_test_2d_encrypted/original.mxf");
A->set_key (film->key ());
- shared_ptr<dcp::MonoPictureAsset> B (new dcp::MonoPictureAsset (video));
+ auto B = make_shared<dcp::MonoPictureAsset>(video);
B->set_key (film->key ());
dcp::EqualityOptions eq;
BOOST_AUTO_TEST_CASE (write_frame_info_test)
{
- shared_ptr<Film> film = new_test_film2 ("write_frame_info_test");
+ auto film = new_test_film2 ("write_frame_info_test");
dcpomatic::DCPTimePeriod const period (dcpomatic::DCPTime(0), dcpomatic::DCPTime(96000));
ReelWriter writer (film, period, shared_ptr<Job>(), 0, 1, false);
/* Write the first one */
dcp::FrameInfo info1(0, 123, "12345678901234567890123456789012");
- writer.write_frame_info (0, EYES_LEFT, info1);
+ writer.write_frame_info (0, Eyes::LEFT, info1);
{
- shared_ptr<InfoFileHandle> file = film->info_file_handle(period, true);
- BOOST_CHECK (equal(info1, writer, file, 0, EYES_LEFT));
+ auto file = film->info_file_handle(period, true);
+ BOOST_CHECK (equal(info1, writer, file, 0, Eyes::LEFT));
}
/* Write some more */
dcp::FrameInfo info2(596, 14921, "123acb789f1234ae782012n456339522");
- writer.write_frame_info (5, EYES_RIGHT, info2);
+ writer.write_frame_info (5, Eyes::RIGHT, info2);
{
- shared_ptr<InfoFileHandle> file = film->info_file_handle(period, true);
- BOOST_CHECK (equal(info1, writer, file, 0, EYES_LEFT));
- BOOST_CHECK (equal(info2, writer, file, 5, EYES_RIGHT));
+ auto file = film->info_file_handle(period, true);
+ BOOST_CHECK (equal(info1, writer, file, 0, Eyes::LEFT));
+ BOOST_CHECK (equal(info2, writer, file, 5, Eyes::RIGHT));
}
dcp::FrameInfo info3(12494, 99157123, "xxxxyyyyabc12356ffsfdsf456339522");
- writer.write_frame_info (10, EYES_LEFT, info3);
+ writer.write_frame_info (10, Eyes::LEFT, info3);
{
- shared_ptr<InfoFileHandle> file = film->info_file_handle(period, true);
- BOOST_CHECK (equal(info1, writer, file, 0, EYES_LEFT));
- BOOST_CHECK (equal(info2, writer, file, 5, EYES_RIGHT));
- BOOST_CHECK (equal(info3, writer, file, 10, EYES_LEFT));
+ auto file = film->info_file_handle(period, true);
+ BOOST_CHECK (equal(info1, writer, file, 0, Eyes::LEFT));
+ BOOST_CHECK (equal(info2, writer, file, 5, Eyes::RIGHT));
+ BOOST_CHECK (equal(info3, writer, file, 10, Eyes::LEFT));
}
/* Overwrite one */
dcp::FrameInfo info4(55512494, 123599157123, "ABCDEFGyabc12356ffsfdsf4563395ZZ");
- writer.write_frame_info (5, EYES_RIGHT, info4);
+ writer.write_frame_info (5, Eyes::RIGHT, info4);
{
- shared_ptr<InfoFileHandle> file = film->info_file_handle(period, true);
- BOOST_CHECK (equal(info1, writer, file, 0, EYES_LEFT));
- BOOST_CHECK (equal(info4, writer, file, 5, EYES_RIGHT));
- BOOST_CHECK (equal(info3, writer, file, 10, EYES_LEFT));
+ auto file = film->info_file_handle(period, true);
+ BOOST_CHECK (equal(info1, writer, file, 0, Eyes::LEFT));
+ BOOST_CHECK (equal(info4, writer, file, 5, Eyes::RIGHT));
+ BOOST_CHECK (equal(info3, writer, file, 10, Eyes::LEFT));
}
}
BOOST_AUTO_TEST_CASE (reel_reuse_video_test)
{
/* Make a DCP */
- shared_ptr<Film> film = new_test_film2 ("reel_reuse_video_test");
- shared_ptr<Content> video = content_factory("test/data/flat_red.png").front();
+ auto film = new_test_film2 ("reel_reuse_video_test");
+ auto video = content_factory("test/data/flat_red.png").front();
film->examine_and_add_content (video);
BOOST_REQUIRE (!wait_for_jobs());
- shared_ptr<Content> audio = content_factory("test/data/white.wav").front();
+ auto audio = content_factory("test/data/white.wav").front();
film->examine_and_add_content (audio);
BOOST_REQUIRE (!wait_for_jobs());
film->make_dcp ();
using std::vector;
using std::string;
using std::shared_ptr;
+using std::make_shared;
using boost::function;
using namespace dcpomatic;
/** Test Film::reels() */
BOOST_AUTO_TEST_CASE (reels_test1)
{
- shared_ptr<Film> film = new_test_film ("reels_test1");
+ auto film = new_test_film ("reels_test1");
film->set_container (Ratio::from_id ("185"));
- shared_ptr<FFmpegContent> A (new FFmpegContent("test/data/test.mp4"));
+ auto A = make_shared<FFmpegContent>("test/data/test.mp4");
film->examine_and_add_content (A);
- shared_ptr<FFmpegContent> B (new FFmpegContent("test/data/test.mp4"));
+ auto B = make_shared<FFmpegContent>("test/data/test.mp4");
film->examine_and_add_content (B);
BOOST_REQUIRE (!wait_for_jobs());
BOOST_CHECK_EQUAL (A->full_length(film).get(), 288000);
- film->set_reel_type (REELTYPE_SINGLE);
- list<DCPTimePeriod> r = film->reels ();
+ film->set_reel_type (ReelType::SINGLE);
+ auto r = film->reels ();
BOOST_CHECK_EQUAL (r.size(), 1U);
BOOST_CHECK_EQUAL (r.front().from.get(), 0);
BOOST_CHECK_EQUAL (r.front().to.get(), 288000 * 2);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
r = film->reels ();
BOOST_CHECK_EQUAL (r.size(), 2U);
BOOST_CHECK_EQUAL (r.front().from.get(), 0);
BOOST_CHECK_EQUAL (r.back().to.get(), 288000 * 2);
film->set_j2k_bandwidth (100000000);
- film->set_reel_type (REELTYPE_BY_LENGTH);
+ film->set_reel_type (ReelType::BY_LENGTH);
/* This is just over 2.5s at 100Mbit/s; should correspond to 60 frames */
film->set_reel_length (31253154);
r = film->reels ();
BOOST_CHECK_EQUAL (r.size(), 3U);
- list<DCPTimePeriod>::const_iterator i = r.begin ();
+ auto i = r.begin ();
BOOST_CHECK_EQUAL (i->from.get(), 0);
BOOST_CHECK_EQUAL (i->to.get(), DCPTime::from_frames(60, 24).get());
++i;
*/
BOOST_AUTO_TEST_CASE (reels_test2)
{
- shared_ptr<Film> film = new_test_film ("reels_test2");
+ auto film = new_test_film ("reels_test2");
film->set_name ("reels_test2");
film->set_container (Ratio::from_id ("185"));
film->set_interop (false);
c->video->set_length (24);
}
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
BOOST_CHECK_EQUAL (film->reels().size(), 3U);
BOOST_REQUIRE (!wait_for_jobs());
film2->set_name ("reels_test2b");
film2->set_container (Ratio::from_id ("185"));
film2->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
- film2->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film2->set_reel_type (ReelType::BY_VIDEO_CONTENT);
- shared_ptr<DCPContent> c (new DCPContent(film->dir(film->dcp_name())));
+ auto c = make_shared<DCPContent>(film->dir(film->dcp_name()));
film2->examine_and_add_content (c);
BOOST_REQUIRE (!wait_for_jobs ());
- list<DCPTimePeriod> r = film2->reels ();
+ auto r = film2->reels ();
BOOST_CHECK_EQUAL (r.size(), 3U);
- list<DCPTimePeriod>::const_iterator i = r.begin ();
+ auto i = r.begin ();
BOOST_CHECK_EQUAL (i->from.get(), 0);
BOOST_CHECK_EQUAL (i->to.get(), 96000);
++i;
BOOST_REQUIRE (!wait_for_jobs());
}
-/** Check that REELTYPE_BY_VIDEO_CONTENT adds an extra reel, if necessary, at the end
+/** Check that ReelType::BY_VIDEO_CONTENT adds an extra reel, if necessary, at the end
* of all the video content to mop up anything afterward.
*/
BOOST_AUTO_TEST_CASE (reels_test3)
{
- shared_ptr<Film> film = new_test_film ("reels_test3");
+ auto film = new_test_film ("reels_test3");
film->set_name ("reels_test3");
film->set_container (Ratio::from_id ("185"));
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
- shared_ptr<Content> dcp (new DCPContent("test/data/reels_test2"));
+ auto dcp = make_shared<DCPContent>("test/data/reels_test2");
film->examine_and_add_content (dcp);
- shared_ptr<Content> sub (new StringTextFileContent("test/data/subrip.srt"));
+ auto sub = make_shared<StringTextFileContent>("test/data/subrip.srt");
film->examine_and_add_content (sub);
BOOST_REQUIRE (!wait_for_jobs());
- list<DCPTimePeriod> reels = film->reels();
+ auto reels = film->reels();
BOOST_REQUIRE_EQUAL (reels.size(), 4U);
- list<DCPTimePeriod>::const_iterator i = reels.begin ();
+ auto i = reels.begin ();
BOOST_CHECK_EQUAL (i->from.get(), 0);
BOOST_CHECK_EQUAL (i->to.get(), 96000);
++i;
*/
BOOST_AUTO_TEST_CASE (reels_test4)
{
- shared_ptr<Film> film = new_test_film ("reels_test4");
+ auto film = new_test_film ("reels_test4");
film->set_name ("reels_test4");
film->set_container (Ratio::from_id ("185"));
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
film->set_interop (false);
/* 4 piece of 1s-long content */
film->examine_and_add_content (subs);
BOOST_REQUIRE (!wait_for_jobs());
- list<DCPTimePeriod> reels = film->reels();
+ auto reels = film->reels();
BOOST_REQUIRE_EQUAL (reels.size(), 4U);
- list<DCPTimePeriod>::const_iterator i = reels.begin ();
+ auto i = reels.begin ();
BOOST_CHECK_EQUAL (i->from.get(), 0);
BOOST_CHECK_EQUAL (i->to.get(), 96000);
++i;
BOOST_AUTO_TEST_CASE (reels_test5)
{
- shared_ptr<Film> film = new_test_film ("reels_test5");
+ auto film = new_test_film ("reels_test5");
film->set_sequence (false);
shared_ptr<DCPContent> dcp (new DCPContent("test/data/reels_test4"));
film->examine_and_add_content (dcp);
dcp->set_position(film, DCPTime(2123));
{
- list<DCPTimePeriod> p = dcp->reels (film);
+ auto p = dcp->reels (film);
BOOST_REQUIRE_EQUAL (p.size(), 4U);
- list<DCPTimePeriod>::const_iterator i = p.begin();
+ auto i = p.begin();
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 0), DCPTime(4000 + 96000)));
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 96000), DCPTime(4000 + 192000)));
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 192000), DCPTime(4000 + 288000)));
{
dcp->set_trim_start (ContentTime::from_seconds (0.5));
- list<DCPTimePeriod> p = dcp->reels (film);
+ auto p = dcp->reels (film);
BOOST_REQUIRE_EQUAL (p.size(), 4U);
- list<DCPTimePeriod>::const_iterator i = p.begin();
+ auto i = p.begin();
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 0), DCPTime(4000 + 48000)));
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 48000), DCPTime(4000 + 144000)));
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 144000), DCPTime(4000 + 240000)));
{
dcp->set_trim_end (ContentTime::from_seconds (0.5));
- list<DCPTimePeriod> p = dcp->reels (film);
+ auto p = dcp->reels (film);
BOOST_REQUIRE_EQUAL (p.size(), 4U);
- list<DCPTimePeriod>::const_iterator i = p.begin();
+ auto i = p.begin();
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 0), DCPTime(4000 + 48000)));
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 48000), DCPTime(4000 + 144000)));
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 144000), DCPTime(4000 + 240000)));
{
dcp->set_trim_start (ContentTime::from_seconds (1.5));
- list<DCPTimePeriod> p = dcp->reels (film);
+ auto p = dcp->reels (film);
BOOST_REQUIRE_EQUAL (p.size(), 3U);
- list<DCPTimePeriod>::const_iterator i = p.begin();
+ auto i = p.begin();
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 0), DCPTime(4000 + 48000)));
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 48000), DCPTime(4000 + 144000)));
BOOST_CHECK (*i++ == DCPTimePeriod (DCPTime(4000 + 144000), DCPTime(4000 + 192000)));
/** Check reel split with a muxed video/audio source */
BOOST_AUTO_TEST_CASE (reels_test6)
{
- shared_ptr<Film> film = new_test_film ("reels_test6");
+ auto film = new_test_film ("reels_test6");
film->set_name ("reels_test6");
film->set_container (Ratio::from_id ("185"));
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
BOOST_REQUIRE (!wait_for_jobs ());
film->set_j2k_bandwidth (100000000);
- film->set_reel_type (REELTYPE_BY_LENGTH);
+ film->set_reel_type (ReelType::BY_LENGTH);
/* This is just over 2.5s at 100Mbit/s; should correspond to 60 frames */
film->set_reel_length (31253154);
film->make_dcp ();
}
/** Check the case where the last bit of audio hangs over the end of the video
- * and we are using REELTYPE_BY_VIDEO_CONTENT.
+ * and we are using ReelType::BY_VIDEO_CONTENT.
*/
BOOST_AUTO_TEST_CASE (reels_test7)
{
- shared_ptr<Film> film = new_test_film ("reels_test7");
+ auto film = new_test_film ("reels_test7");
film->set_name ("reels_test7");
film->set_container (Ratio::from_id ("185"));
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
- shared_ptr<Content> A = content_factory("test/data/flat_red.png").front();
+ auto A = content_factory("test/data/flat_red.png").front();
film->examine_and_add_content (A);
BOOST_REQUIRE (!wait_for_jobs ());
- shared_ptr<Content> B = content_factory("test/data/awkward_length.wav").front();
+ auto B = content_factory("test/data/awkward_length.wav").front();
film->examine_and_add_content (B);
BOOST_REQUIRE (!wait_for_jobs ());
film->set_video_frame_rate (24);
A->video->set_length (2 * 24);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
BOOST_REQUIRE_EQUAL (film->reels().size(), 2U);
BOOST_CHECK (film->reels().front() == DCPTimePeriod(DCPTime(0), DCPTime::from_frames(2 * 24, 24)));
BOOST_CHECK (film->reels().back() == DCPTimePeriod(DCPTime::from_frames(2 * 24, 24), DCPTime::from_frames(3 * 24 + 1, 24)));
film->set_name ("reels_test8");
film->set_container (Ratio::from_id ("185"));
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
- shared_ptr<FFmpegContent> A (new FFmpegContent("test/data/test2.mp4"));
+ auto A = make_shared<FFmpegContent>("test/data/test2.mp4");
film->examine_and_add_content (A);
BOOST_REQUIRE (!wait_for_jobs ());
B->set_reference_video(true);
B->set_reference_audio(true);
BOOST_REQUIRE(!wait_for_jobs());
- film2->set_reel_type(REELTYPE_BY_VIDEO_CONTENT);
+ film2->set_reel_type(ReelType::BY_VIDEO_CONTENT);
film2->write_metadata();
film2->make_dcp();
BOOST_REQUIRE(!wait_for_jobs());
BOOST_REQUIRE (!wait_for_jobs());
B->video->set_length (5 * 24);
- ov->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ ov->set_reel_type (ReelType::BY_VIDEO_CONTENT);
ov->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs());
ov->write_metadata ();
shared_ptr<DCPContent> ov_dcp(new DCPContent(ov->dir(ov->dcp_name())));
vf->examine_and_add_content (ov_dcp);
BOOST_REQUIRE (!wait_for_jobs());
- vf->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ vf->set_reel_type (ReelType::BY_VIDEO_CONTENT);
ov_dcp->set_reference_video (true);
ov_dcp->set_reference_audio (true);
vf->examine_and_add_content (content_factory("test/data/15s.srt").front());
vf->write_metadata ();
}
-/** Another reels error; REELTYPE_BY_VIDEO_CONTENT when the first content is not
+/** Another reels error; ReelType::BY_VIDEO_CONTENT when the first content is not
* at time 0.
*/
BOOST_AUTO_TEST_CASE (reels_test11)
A->video->set_length (240);
A->set_video_frame_rate (24);
A->set_position (film, DCPTime::from_seconds(1));
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs());
BOOST_CHECK_EQUAL (A->position().get(), DCPTime::from_seconds(1).get());
{
shared_ptr<Film> film = new_test_film2 ("reels_test12");
film->set_video_frame_rate (24);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
film->set_sequence (false);
shared_ptr<FFmpegContent> A(new FFmpegContent("test/data/flat_red.png"));
}
/** Leaving less than 1 second's gap between two pieces of content with
- * REELTYPE_BY_VIDEO_CONTENT should not make a <1s reel.
+ * ReelType::BY_VIDEO_CONTENT should not make a <1s reel.
*/
BOOST_AUTO_TEST_CASE (reels_should_not_be_short2)
{
shared_ptr<Film> film = new_test_film2 ("reels_should_not_be_short2");
film->set_video_frame_rate (24);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
shared_ptr<FFmpegContent> A(new FFmpegContent("test/data/flat_red.png"));
film->examine_and_add_content (A);
BOOST_REQUIRE (notes.empty());
}
-/** Setting REELTYPE_BY_LENGTH and using a small length value should not make
+/** Setting ReelType::BY_LENGTH and using a small length value should not make
* <1s reels.
*/
BOOST_AUTO_TEST_CASE (reels_should_not_be_short3)
{
shared_ptr<Film> film = new_test_film2 ("reels_should_not_be_short3");
film->set_video_frame_rate (24);
- film->set_reel_type (REELTYPE_BY_LENGTH);
+ film->set_reel_type (ReelType::BY_LENGTH);
film->set_reel_length (1024 * 1024 * 10);
shared_ptr<FFmpegContent> A(new FFmpegContent("test/data/flat_red.png"));
BOOST_REQUIRE (notes.empty());
}
-/** Having one piece of content less than 1s long in REELTYPE_BY_VIDEO_CONTENT
+/** Having one piece of content less than 1s long in ReelType::BY_VIDEO_CONTENT
* should not make a reel less than 1s long.
*/
BOOST_AUTO_TEST_CASE (reels_should_not_be_short4)
{
shared_ptr<Film> film = new_test_film2 ("reels_should_not_be_short4");
film->set_video_frame_rate (24);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
shared_ptr<FFmpegContent> A(new FFmpegContent("test/data/flat_red.png"));
film->examine_and_add_content (A);
*/
#include "lib/content_factory.h"
-#include "lib/film.h"
#include "lib/dcp_content.h"
+#include "lib/film.h"
#include "test.h"
#include <boost/test/unit_test.hpp>
+using std::make_shared;
using std::shared_ptr;
using std::dynamic_pointer_cast;
BOOST_AUTO_TEST_CASE (required_disk_space_test)
{
- shared_ptr<Film> film = new_test_film ("required_disk_space_test");
+ auto film = new_test_film ("required_disk_space_test");
film->set_j2k_bandwidth (100000000);
film->set_audio_channels (6);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
- shared_ptr<Content> content_a = content_factory("test/data/flat_blue.png").front();
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
+ auto content_a = content_factory("test/data/flat_blue.png").front();
BOOST_REQUIRE (content_a);
film->examine_and_add_content (content_a);
- shared_ptr<DCPContent> content_b (new DCPContent("test/data/burnt_subtitle_test_dcp"));
+ auto content_b = make_shared<DCPContent>("test/data/burnt_subtitle_test_dcp");
film->examine_and_add_content (content_b);
BOOST_REQUIRE (!wait_for_jobs());
film->write_metadata ();
s.Video.connect (boost::bind (&receive, _1, _2));
for (int i = 0; i < 10; ++i) {
- push (s, i, EYES_LEFT);
- push (s, i, EYES_RIGHT);
- check (i, EYES_LEFT, __LINE__);
- check (i, EYES_RIGHT, __LINE__);
+ push (s, i, Eyes::LEFT);
+ push (s, i, Eyes::RIGHT);
+ check (i, Eyes::LEFT, __LINE__);
+ check (i, Eyes::RIGHT, __LINE__);
}
}
s.Video.connect (boost::bind (&receive, _1, _2));
for (int i = 0; i < 10; i += 2) {
- push (s, i, EYES_LEFT);
- push (s, i + 1, EYES_LEFT);
- push (s, i, EYES_RIGHT);
- push (s, i + 1, EYES_RIGHT);
- check (i, EYES_LEFT, __LINE__);
- check (i, EYES_RIGHT, __LINE__);
- check (i + 1, EYES_LEFT, __LINE__);
- check (i + 1, EYES_RIGHT, __LINE__);
+ push (s, i, Eyes::LEFT);
+ push (s, i + 1, Eyes::LEFT);
+ push (s, i, Eyes::RIGHT);
+ push (s, i + 1, Eyes::RIGHT);
+ check (i, Eyes::LEFT, __LINE__);
+ check (i, Eyes::RIGHT, __LINE__);
+ check (i + 1, Eyes::LEFT, __LINE__);
+ check (i + 1, Eyes::RIGHT, __LINE__);
}
}
Shuffler s;
s.Video.connect (boost::bind (&receive, _1, _2));
- push (s, 0, EYES_LEFT);
- check (0, EYES_LEFT, __LINE__);
- push (s, 0, EYES_RIGHT);
- check (0, EYES_RIGHT, __LINE__);
- push (s, 1, EYES_LEFT);
- check (1, EYES_LEFT, __LINE__);
- push (s, 1, EYES_RIGHT);
- check (1, EYES_RIGHT, __LINE__);
- push (s, 2, EYES_RIGHT);
- push (s, 3, EYES_LEFT);
- push (s, 3, EYES_RIGHT);
- push (s, 4, EYES_LEFT);
- push (s, 4, EYES_RIGHT);
+ push (s, 0, Eyes::LEFT);
+ check (0, Eyes::LEFT, __LINE__);
+ push (s, 0, Eyes::RIGHT);
+ check (0, Eyes::RIGHT, __LINE__);
+ push (s, 1, Eyes::LEFT);
+ check (1, Eyes::LEFT, __LINE__);
+ push (s, 1, Eyes::RIGHT);
+ check (1, Eyes::RIGHT, __LINE__);
+ push (s, 2, Eyes::RIGHT);
+ push (s, 3, Eyes::LEFT);
+ push (s, 3, Eyes::RIGHT);
+ push (s, 4, Eyes::LEFT);
+ push (s, 4, Eyes::RIGHT);
s.flush ();
- check (2, EYES_RIGHT, __LINE__);
- check (3, EYES_LEFT, __LINE__);
- check (3, EYES_RIGHT, __LINE__);
- check (4, EYES_LEFT, __LINE__);
- check (4, EYES_RIGHT, __LINE__);
+ check (2, Eyes::RIGHT, __LINE__);
+ check (3, Eyes::LEFT, __LINE__);
+ check (3, Eyes::RIGHT, __LINE__);
+ check (4, Eyes::LEFT, __LINE__);
+ check (4, Eyes::RIGHT, __LINE__);
}
/** One missing right eye image */
Shuffler s;
s.Video.connect (boost::bind (&receive, _1, _2));
- push (s, 0, EYES_LEFT);
- check (0, EYES_LEFT, __LINE__);
- push (s, 0, EYES_RIGHT);
- check (0, EYES_RIGHT, __LINE__);
- push (s, 1, EYES_LEFT);
- check (1, EYES_LEFT, __LINE__);
- push (s, 1, EYES_RIGHT);
- check (1, EYES_RIGHT, __LINE__);
- push (s, 2, EYES_LEFT);
- push (s, 3, EYES_LEFT);
- push (s, 3, EYES_RIGHT);
- push (s, 4, EYES_LEFT);
- push (s, 4, EYES_RIGHT);
+ push (s, 0, Eyes::LEFT);
+ check (0, Eyes::LEFT, __LINE__);
+ push (s, 0, Eyes::RIGHT);
+ check (0, Eyes::RIGHT, __LINE__);
+ push (s, 1, Eyes::LEFT);
+ check (1, Eyes::LEFT, __LINE__);
+ push (s, 1, Eyes::RIGHT);
+ check (1, Eyes::RIGHT, __LINE__);
+ push (s, 2, Eyes::LEFT);
+ push (s, 3, Eyes::LEFT);
+ push (s, 3, Eyes::RIGHT);
+ push (s, 4, Eyes::LEFT);
+ push (s, 4, Eyes::RIGHT);
s.flush ();
- check (2, EYES_LEFT, __LINE__);
- check (3, EYES_LEFT, __LINE__);
- check (3, EYES_RIGHT, __LINE__);
- check (4, EYES_LEFT, __LINE__);
- check (4, EYES_RIGHT, __LINE__);
+ check (2, Eyes::LEFT, __LINE__);
+ check (3, Eyes::LEFT, __LINE__);
+ check (3, Eyes::RIGHT, __LINE__);
+ check (4, Eyes::LEFT, __LINE__);
+ check (4, Eyes::RIGHT, __LINE__);
}
/** Only one eye */
s.Video.connect (boost::bind (&receive, _1, _2));
/* One left should come out straight away */
- push (s, 0, EYES_LEFT);
- check (0, EYES_LEFT, __LINE__);
+ push (s, 0, Eyes::LEFT);
+ check (0, Eyes::LEFT, __LINE__);
/* More lefts should be kept in the shuffler in the hope that some rights arrive */
for (int i = 0; i < s._max_size; ++i) {
- push (s, i + 1, EYES_LEFT);
+ push (s, i + 1, Eyes::LEFT);
}
BOOST_CHECK (pending_cv.empty ());
/* If enough lefts come the shuffler should conclude that there's no rights and start
giving out the lefts.
*/
- push (s, s._max_size + 1, EYES_LEFT);
- check (1, EYES_LEFT, __LINE__);
+ push (s, s._max_size + 1, Eyes::LEFT);
+ check (1, Eyes::LEFT, __LINE__);
}
/** One complete frame (L+R) missing.
Shuffler s;
s.Video.connect (boost::bind (&receive, _1, _2));
- push (s, 0, EYES_LEFT);
- check (0, EYES_LEFT, __LINE__);
- push (s, 0, EYES_RIGHT);
- check (0, EYES_RIGHT, __LINE__);
+ push (s, 0, Eyes::LEFT);
+ check (0, Eyes::LEFT, __LINE__);
+ push (s, 0, Eyes::RIGHT);
+ check (0, Eyes::RIGHT, __LINE__);
- push (s, 2, EYES_LEFT);
- push (s, 2, EYES_RIGHT);
- check (2, EYES_LEFT, __LINE__);
- check (2, EYES_RIGHT, __LINE__);
+ push (s, 2, Eyes::LEFT);
+ push (s, 2, Eyes::RIGHT);
+ check (2, Eyes::LEFT, __LINE__);
+ check (2, Eyes::RIGHT, __LINE__);
- push (s, 3, EYES_LEFT);
- check (3, EYES_LEFT, __LINE__);
- push (s, 3, EYES_RIGHT);
- check (3, EYES_RIGHT, __LINE__);
+ push (s, 3, Eyes::LEFT);
+ check (3, Eyes::LEFT, __LINE__);
+ push (s, 3, Eyes::RIGHT);
+ check (3, Eyes::RIGHT, __LINE__);
}
BOOST_REQUIRE (!wait_for_jobs ());
content->only_text()->set_use (true);
content->only_text()->set_burn (false);
- film->set_reel_type (REELTYPE_BY_LENGTH);
+ film->set_reel_type (ReelType::BY_LENGTH);
film->set_interop (true);
film->set_reel_length (1024 * 1024 * 512);
film->make_dcp ();
dcp::DCP dcp ("build/test/subtitle_reel_number_test/" + film->dcp_name());
dcp.read ();
BOOST_REQUIRE_EQUAL (dcp.cpls().size(), 1U);
- shared_ptr<dcp::CPL> cpl = dcp.cpls().front();
+ auto cpl = dcp.cpls()[0];
BOOST_REQUIRE_EQUAL (cpl->reels().size(), 6U);
int n = 1;
for (auto i: cpl->reels()) {
if (i->main_subtitle()) {
- shared_ptr<dcp::InteropSubtitleAsset> ass = dynamic_pointer_cast<dcp::InteropSubtitleAsset>(i->main_subtitle()->asset());
+ auto ass = dynamic_pointer_cast<dcp::InteropSubtitleAsset>(i->main_subtitle()->asset());
BOOST_REQUIRE (ass);
BOOST_CHECK_EQUAL (ass->reel_number(), dcp::raw_convert<string>(n));
++n;
using std::list;
using std::string;
-using boost::optional;
using std::shared_ptr;
+using std::make_shared;
+using boost::optional;
/* Check that timings are done correctly for multi-reel DCPs with PNG subs */
BOOST_AUTO_TEST_CASE (subtitle_reel_test)
{
- shared_ptr<Film> film = new_test_film2 ("subtitle_reel_test");
+ auto film = new_test_film2 ("subtitle_reel_test");
film->set_interop (true);
- shared_ptr<ImageContent> red_a (new ImageContent("test/data/flat_red.png"));
- shared_ptr<ImageContent> red_b (new ImageContent("test/data/flat_red.png"));
- shared_ptr<DCPSubtitleContent> sub_a (new DCPSubtitleContent("test/data/png_subs/subs.xml"));
- shared_ptr<DCPSubtitleContent> sub_b (new DCPSubtitleContent("test/data/png_subs/subs.xml"));
+ auto red_a = make_shared<ImageContent>("test/data/flat_red.png");
+ auto red_b = make_shared<ImageContent>("test/data/flat_red.png");
+ auto sub_a = make_shared<DCPSubtitleContent>("test/data/png_subs/subs.xml");
+ auto sub_b = make_shared<DCPSubtitleContent>("test/data/png_subs/subs.xml");
film->examine_and_add_content (red_a);
film->examine_and_add_content (red_b);
red_b->video->set_length (240);
sub_b->set_position (film, dcpomatic::DCPTime::from_seconds(10));
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs());
*/
BOOST_AUTO_TEST_CASE (subtitle_in_all_reels_test)
{
- shared_ptr<Film> film = new_test_film2 ("subtitle_in_all_reels_test");
+ auto film = new_test_film2 ("subtitle_in_all_reels_test");
film->set_interop (false);
film->set_sequence (false);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
for (int i = 0; i < 3; ++i) {
- shared_ptr<Content> video = content_factory("test/data/flat_red.png").front();
+ auto video = content_factory("test/data/flat_red.png").front();
film->examine_and_add_content (video);
BOOST_REQUIRE (!wait_for_jobs());
video->video->set_length (15 * 24);
video->set_position (film, dcpomatic::DCPTime::from_seconds(15 * i));
}
- shared_ptr<Content> subs = content_factory("test/data/15s.srt").front();
+ auto subs = content_factory("test/data/15s.srt").front();
film->examine_and_add_content (subs);
BOOST_REQUIRE (!wait_for_jobs());
film->make_dcp ();
dcp::DCP dcp ("build/test/subtitle_in_all_reels_test/" + film->dcp_name());
dcp.read ();
BOOST_REQUIRE_EQUAL (dcp.cpls().size(), 1U);
- shared_ptr<dcp::CPL> cpl = dcp.cpls().front();
+ auto cpl = dcp.cpls()[0];
BOOST_REQUIRE_EQUAL (cpl->reels().size(), 3U);
for (auto i: cpl->reels()) {
*/
BOOST_AUTO_TEST_CASE (closed_captions_in_all_reels_test)
{
- shared_ptr<Film> film = new_test_film2 ("closed_captions_in_all_reels_test");
+ auto film = new_test_film2 ("closed_captions_in_all_reels_test");
film->set_interop (false);
film->set_sequence (false);
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
for (int i = 0; i < 3; ++i) {
- shared_ptr<Content> video = content_factory("test/data/flat_red.png").front();
+ auto video = content_factory("test/data/flat_red.png").front();
film->examine_and_add_content (video);
BOOST_REQUIRE (!wait_for_jobs());
video->video->set_length (15 * 24);
video->set_position (film, dcpomatic::DCPTime::from_seconds(15 * i));
}
- shared_ptr<Content> ccap1 = content_factory("test/data/15s.srt").front();
+ auto ccap1 = content_factory("test/data/15s.srt").front();
film->examine_and_add_content (ccap1);
BOOST_REQUIRE (!wait_for_jobs());
- ccap1->text.front()->set_type (TEXT_CLOSED_CAPTION);
+ ccap1->text.front()->set_type (TextType::CLOSED_CAPTION);
ccap1->text.front()->set_dcp_track (DCPTextTrack("Test", "de-DE"));
- shared_ptr<Content> ccap2 = content_factory("test/data/15s.srt").front();
+ auto ccap2 = content_factory("test/data/15s.srt").front();
film->examine_and_add_content (ccap2);
BOOST_REQUIRE (!wait_for_jobs());
- ccap2->text.front()->set_type (TEXT_CLOSED_CAPTION);
+ ccap2->text.front()->set_type (TextType::CLOSED_CAPTION);
ccap2->text.front()->set_dcp_track (DCPTextTrack("Other", "en-GB"));
film->make_dcp ();
dcp::DCP dcp ("build/test/closed_captions_in_all_reels_test/" + film->dcp_name());
dcp.read ();
BOOST_REQUIRE_EQUAL (dcp.cpls().size(), 1U);
- shared_ptr<dcp::CPL> cpl = dcp.cpls().front();
+ auto cpl = dcp.cpls().front();
BOOST_REQUIRE_EQUAL (cpl->reels().size(), 3U);
for (auto i: cpl->reels()) {
BOOST_REQUIRE_EQUAL (i->closed_captions().size(), 2U);
- optional<string> first = i->closed_captions().front()->language();
- optional<string> second = i->closed_captions().back()->language();
+ auto first = i->closed_captions().front()->language();
+ auto second = i->closed_captions().back()->language();
BOOST_REQUIRE (first);
BOOST_REQUIRE (second);
BOOST_CHECK (
}
std::ostream&
-dcp::operator<< (std::ostream& s, Standard t)
+dcp::operator<< (std::ostream& s, dcp::Standard t)
{
switch (t) {
case Standard::INTEROP:
return s;
}
+std::ostream&
+operator<< (std::ostream& s, VideoFrameType f)
+{
+ s << video_frame_type_to_string(f);
+ return s;
+}
+
#include "lib/warnings.h"
-#include <dcp/types.h>
+#include "lib/types.h"
#include <boost/filesystem.hpp>
std::shared_ptr<Log> _old;
};
+
namespace dcp {
std::ostream& operator<< (std::ostream& s, dcp::Size i);
-std::ostream& operator<< (std::ostream& s, Standard t);
+std::ostream& operator<< (std::ostream& s, dcp::Standard t);
}
+
+std::ostream& operator<< (std::ostream& s, VideoFrameType f);
using std::cout;
using std::shared_ptr;
-/** Basic sanity check of 3D_LEFT_RIGHT */
+/** Basic sanity check of THREE_D_LEFT_RIGHT */
BOOST_AUTO_TEST_CASE (threed_test1)
{
shared_ptr<Film> film = new_test_film ("threed_test1");
film->examine_and_add_content (c);
BOOST_REQUIRE (!wait_for_jobs());
- c->video->set_frame_type (VIDEO_FRAME_TYPE_3D_LEFT_RIGHT);
+ c->video->set_frame_type (VideoFrameType::THREE_D_LEFT_RIGHT);
film->set_container (Ratio::from_id ("185"));
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
BOOST_REQUIRE (!wait_for_jobs ());
}
-/** Basic sanity check of 3D_ALTERNATE; at the moment this is just to make sure
+/** Basic sanity check of THREE_D_ALTERNATE; at the moment this is just to make sure
* that such a transcode completes without error.
*/
BOOST_AUTO_TEST_CASE (threed_test2)
film->examine_and_add_content (c);
BOOST_REQUIRE (!wait_for_jobs());
- c->video->set_frame_type (VIDEO_FRAME_TYPE_3D_ALTERNATE);
+ c->video->set_frame_type (VideoFrameType::THREE_D_ALTERNATE);
film->set_container (Ratio::from_id ("185"));
film->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
BOOST_REQUIRE (!wait_for_jobs ());
}
-/** Basic sanity check of 3D_LEFT and 3D_RIGHT; at the moment this is just to make sure
+/** Basic sanity check of THREE_D_LEFT and THREE_D_RIGHT; at the moment this is just to make sure
* that such a transcode completes without error.
*/
BOOST_AUTO_TEST_CASE (threed_test3)
film->examine_and_add_content (R);
BOOST_REQUIRE (!wait_for_jobs());
- L->video->set_frame_type (VIDEO_FRAME_TYPE_3D_LEFT);
- R->video->set_frame_type (VIDEO_FRAME_TYPE_3D_RIGHT);
+ L->video->set_frame_type (VideoFrameType::THREE_D_LEFT);
+ R->video->set_frame_type (VideoFrameType::THREE_D_RIGHT);
film->set_three_d (true);
film->make_dcp ();
film->examine_and_add_content (R);
BOOST_REQUIRE (!wait_for_jobs());
- L->video->set_frame_type (VIDEO_FRAME_TYPE_3D_LEFT);
- R->video->set_frame_type (VIDEO_FRAME_TYPE_3D_RIGHT);
+ L->video->set_frame_type (VideoFrameType::THREE_D_LEFT);
+ R->video->set_frame_type (VideoFrameType::THREE_D_RIGHT);
/* There doesn't seem much point in encoding the whole input, especially as we're only
* checking for errors during the encode and not the result. Also decoding these files
* (4K HQ Prores) is very slow.
film->examine_and_add_content (R);
BOOST_REQUIRE (!wait_for_jobs());
- L->video->set_frame_type (VIDEO_FRAME_TYPE_3D_LEFT);
- R->video->set_frame_type (VIDEO_FRAME_TYPE_3D_RIGHT);
+ L->video->set_frame_type (VideoFrameType::THREE_D_LEFT);
+ R->video->set_frame_type (VideoFrameType::THREE_D_RIGHT);
/* There doesn't seem much point in encoding the whole input, especially as we're only
* checking for errors during the encode and not the result.
*/
film->examine_and_add_content (R);
BOOST_REQUIRE (!wait_for_jobs());
- L->video->set_frame_type (VIDEO_FRAME_TYPE_3D_LEFT);
- R->video->set_frame_type (VIDEO_FRAME_TYPE_3D_RIGHT);
+ L->video->set_frame_type (VideoFrameType::THREE_D_LEFT);
+ R->video->set_frame_type (VideoFrameType::THREE_D_RIGHT);
film->set_three_d (true);
film->make_dcp ();
film->examine_and_add_content (c);
BOOST_REQUIRE (!wait_for_jobs());
- c->video->set_frame_type (VIDEO_FRAME_TYPE_3D);
+ c->video->set_frame_type (VideoFrameType::THREE_D);
film->set_three_d (true);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs());
/* Multi-reel DCP can't be referenced if we are using a single reel for the project */
- film->set_reel_type (REELTYPE_SINGLE);
+ film->set_reel_type (ReelType::SINGLE);
string why_not;
BOOST_CHECK (!dcp->can_reference_video(film, why_not));
BOOST_CHECK (!dcp->can_reference_audio(film, why_not));
- BOOST_CHECK (!dcp->can_reference_text(film, TEXT_OPEN_SUBTITLE, why_not));
- BOOST_CHECK (!dcp->can_reference_text(film, TEXT_CLOSED_CAPTION, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(film, TextType::OPEN_SUBTITLE, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(film, TextType::CLOSED_CAPTION, why_not));
/* Multi-reel DCP can be referenced if we are using by-video-content */
- film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ film->set_reel_type (ReelType::BY_VIDEO_CONTENT);
BOOST_CHECK (dcp->can_reference_video(film, why_not));
BOOST_CHECK (dcp->can_reference_audio(film, why_not));
/* (but reels_test2 has no texts to reference) */
- BOOST_CHECK (!dcp->can_reference_text(film, TEXT_OPEN_SUBTITLE, why_not));
- BOOST_CHECK (!dcp->can_reference_text(film, TEXT_CLOSED_CAPTION, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(film, TextType::OPEN_SUBTITLE, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(film, TextType::CLOSED_CAPTION, why_not));
shared_ptr<FFmpegContent> other (new FFmpegContent("test/data/test.mp4"));
film->examine_and_add_content (other);
BOOST_CHECK (dcp->can_reference_video(film, why_not));
BOOST_CHECK (dcp->can_reference_audio(film, why_not));
/* (reels_test2 has no texts to reference) */
- BOOST_CHECK (!dcp->can_reference_text(film, TEXT_OPEN_SUBTITLE, why_not));
- BOOST_CHECK (!dcp->can_reference_text(film, TEXT_CLOSED_CAPTION, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(film, TextType::OPEN_SUBTITLE, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(film, TextType::CLOSED_CAPTION, why_not));
}
/** Make a OV with video and audio and a VF referencing the OV and adding subs */
shared_ptr<Film> vf = new_test_film ("vf_test2_vf");
vf->set_name ("vf_test2_vf");
vf->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
- vf->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ vf->set_reel_type (ReelType::BY_VIDEO_CONTENT);
shared_ptr<DCPContent> dcp (new DCPContent(ov->dir (ov->dcp_name ())));
BOOST_REQUIRE (dcp);
vf->examine_and_add_content (dcp);
shared_ptr<Film> vf = new_test_film ("vf_test3_vf");
vf->set_name ("vf_test3_vf");
vf->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
- vf->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ vf->set_reel_type (ReelType::BY_VIDEO_CONTENT);
shared_ptr<DCPContent> dcp (new DCPContent(ov->dir(ov->dcp_name())));
BOOST_REQUIRE (dcp);
dcp->set_trim_start (ContentTime::from_seconds (1));
shared_ptr<Film> vf = new_test_film ("vf_test4_vf");
vf->set_name ("vf_test4_vf");
vf->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
- vf->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ vf->set_reel_type (ReelType::BY_VIDEO_CONTENT);
vf->set_sequence (false);
shared_ptr<DCPContent> dcp (new DCPContent(ov->dir(ov->dcp_name())));
BOOST_REQUIRE (dcp);
/* Make the OV */
shared_ptr<Film> ov = new_test_film ("vf_test5_ov");
ov->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
- ov->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ ov->set_reel_type (ReelType::BY_VIDEO_CONTENT);
for (int i = 0; i < 3; ++i) {
shared_ptr<Content> video = content_factory("test/data/flat_red.png").front();
ov->examine_and_add_content (video);
shared_ptr<Film> vf = new_test_film ("vf_test5_vf");
vf->set_name ("vf_test5_vf");
vf->set_dcp_content_type (DCPContentType::from_isdcf_name ("TST"));
- vf->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ vf->set_reel_type (ReelType::BY_VIDEO_CONTENT);
vf->set_sequence (false);
shared_ptr<DCPContent> dcp (new DCPContent(ov->dir(ov->dcp_name())));
BOOST_REQUIRE (dcp);
/* Make the OV */
shared_ptr<Film> ov = new_test_film ("vf_test6_ov");
ov->set_dcp_content_type (DCPContentType::from_isdcf_name("TST"));
- ov->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ ov->set_reel_type (ReelType::BY_VIDEO_CONTENT);
shared_ptr<Content> video = content_factory("test/data/flat_red.png").front();
ov->examine_and_add_content (video);
BOOST_REQUIRE (!wait_for_jobs());
shared_ptr<Film> vf = new_test_film ("vf_test6_vf");
vf->set_name ("vf_test6_vf");
vf->set_dcp_content_type (DCPContentType::from_isdcf_name("TST"));
- vf->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ vf->set_reel_type (ReelType::BY_VIDEO_CONTENT);
vf->set_sequence (false);
shared_ptr<DCPContent> dcp (new DCPContent(ov->dir(ov->dcp_name())));
BOOST_REQUIRE (dcp);
shared_ptr<DCPContent> ov2_dcp (new DCPContent(ov1->dir(ov1->dcp_name())));
vf->examine_and_add_content (ov2_dcp);
BOOST_REQUIRE (!wait_for_jobs());
- vf->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
+ vf->set_reel_type (ReelType::BY_VIDEO_CONTENT);
ov1_dcp->set_reference_video (true);
ov2_dcp->set_reference_video (true);
ov1_dcp->set_position (vf, DCPTime::from_seconds(1));
using std::pair;
using std::string;
using std::dynamic_pointer_cast;
+using std::make_shared;
using boost::optional;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
shared_ptr<Image>
grey_image (dcp::Size size, uint8_t pixel)
{
- shared_ptr<Image> grey(new Image(AV_PIX_FMT_RGB24, size, true));
+ auto grey = make_shared<Image>(AV_PIX_FMT_RGB24, size, true);
for (int y = 0; y < size.height; ++y) {
uint8_t* p = grey->data()[0] + y * grey->stride()[0];
for (int x = 0; x < size.width; ++x) {
shared_ptr<TranscodeJob> job (new TranscodeJob(film));
job->set_encoder (
shared_ptr<FFmpegEncoder>(
- new FFmpegEncoder (film, job, film->file("export.mov"), EXPORT_FORMAT_PRORES, true, false, false, 23)
+ new FFmpegEncoder (film, job, film->file("export.mov"), ExportFormat::PRORES, true, false, false, 23)
)
);
JobManager::instance()->add (job);
BOOST_REQUIRE (!wait_for_jobs());
/* This is a bit of a hack; add the exported file into the project so we can decode it */
- shared_ptr<FFmpegContent> content(new FFmpegContent(film->file("export.mov")));
+ auto content = make_shared<FFmpegContent>(film->file("export.mov"));
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs());