summaryrefslogtreecommitdiff
path: root/src/lib
diff options
context:
space:
mode:
authorCarl Hetherington <cth@carlh.net>2021-01-21 02:44:11 +0100
committerCarl Hetherington <cth@carlh.net>2021-01-21 20:15:14 +0100
commit28111007e2e6fd62f5810be780706ae1618bd33f (patch)
treed99fe830ba961b174d3f024d2b5671a9821ed8a9 /src/lib
parentc7d77490382d6ddb625340c05b57487cde244f96 (diff)
Adapt for libdcp use of enum class.
Diffstat (limited to 'src/lib')
-rw-r--r--src/lib/audio_mapping.cc33
-rw-r--r--src/lib/audio_mapping.h6
-rw-r--r--src/lib/colour_conversion.cc2
-rw-r--r--src/lib/config.cc16
-rw-r--r--src/lib/copy_dcp_details_to_film.cc2
-rw-r--r--src/lib/create_cli.cc4
-rw-r--r--src/lib/dcp.cc2
-rw-r--r--src/lib/dcp_content.cc12
-rw-r--r--src/lib/dcp_content_type.cc24
-rw-r--r--src/lib/dcp_decoder.cc4
-rw-r--r--src/lib/dcp_examiner.cc8
-rw-r--r--src/lib/dkdm_recipient.cc2
-rw-r--r--src/lib/ffmpeg_encoder.cc26
-rw-r--r--src/lib/film.cc16
-rw-r--r--src/lib/hints.cc2
-rw-r--r--src/lib/image.cc24
-rw-r--r--src/lib/j2k_image_proxy.cc2
-rw-r--r--src/lib/log.cc17
-rw-r--r--src/lib/player.cc2
-rw-r--r--src/lib/player_video.cc2
-rw-r--r--src/lib/reel_writer.cc4
-rw-r--r--src/lib/render_text.cc20
-rw-r--r--src/lib/spl_entry.cc4
-rw-r--r--src/lib/text_content.cc20
-rw-r--r--src/lib/text_decoder.cc26
-rw-r--r--src/lib/types.cc2
-rw-r--r--src/lib/util.cc50
-rw-r--r--src/lib/verify_dcp_job.cc2
-rw-r--r--src/lib/video_mxf_decoder.cc4
-rw-r--r--src/lib/writer.cc12
30 files changed, 190 insertions, 160 deletions
diff --git a/src/lib/audio_mapping.cc b/src/lib/audio_mapping.cc
index 0f330dc2b..7734d168a 100644
--- a/src/lib/audio_mapping.cc
+++ b/src/lib/audio_mapping.cc
@@ -134,7 +134,7 @@ AudioMapping::make_default (AudioProcessor const * processor, optional<boost::fi
if (!guessed) {
/* If we have no idea, just put it on centre */
- set (0, static_cast<int>(dcp::CENTRE), 1);
+ set (0, static_cast<int>(dcp::Channel::CENTRE), 1);
}
} else {
/* 1:1 mapping */
@@ -156,14 +156,14 @@ AudioMapping::AudioMapping (cxml::ConstNodePtr node, int state_version)
if (state_version <= 5) {
/* Old-style: on/off mapping */
for (auto i: node->node_children ("Map")) {
- set (i->number_child<int>("ContentIndex"), static_cast<dcp::Channel>(i->number_child<int>("DCP")), 1);
+ set (i->number_child<int>("ContentIndex"), i->number_child<int>("DCP"), 1);
}
} else {
for (auto i: node->node_children("Gain")) {
if (state_version < 32) {
set (
i->number_attribute<int>("Content"),
- static_cast<dcp::Channel>(i->number_attribute<int>("DCP")),
+ i->number_attribute<int>("DCP"),
raw_convert<float>(i->content())
);
} else {
@@ -177,6 +177,21 @@ AudioMapping::AudioMapping (cxml::ConstNodePtr node, int state_version)
}
}
+
+void
+AudioMapping::set (dcp::Channel input_channel, int output_channel, float g)
+{
+ set (static_cast<int>(input_channel), output_channel, g);
+}
+
+
+void
+AudioMapping::set (int input_channel, dcp::Channel output_channel, float g)
+{
+ set (input_channel, static_cast<int>(output_channel), g);
+}
+
+
void
AudioMapping::set (int input_channel, int output_channel, float g)
{
@@ -185,6 +200,14 @@ AudioMapping::set (int input_channel, int output_channel, float g)
_gain[input_channel][output_channel] = g;
}
+
+float
+AudioMapping::get (int input_channel, dcp::Channel output_channel) const
+{
+ return get (input_channel, static_cast<int>(output_channel));
+}
+
+
float
AudioMapping::get (int input_channel, int output_channel) const
{
@@ -236,8 +259,8 @@ AudioMapping::mapped_output_channels () const
for (auto const& i: _gain) {
for (auto j: dcp::used_audio_channels()) {
- if (abs(i[j]) > minus_96_db) {
- mapped.push_back (j);
+ if (abs(i[static_cast<int>(j)]) > minus_96_db) {
+ mapped.push_back (static_cast<int>(j));
}
}
}
diff --git a/src/lib/audio_mapping.h b/src/lib/audio_mapping.h
index 8add0ec83..6bac0a7d8 100644
--- a/src/lib/audio_mapping.h
+++ b/src/lib/audio_mapping.h
@@ -1,5 +1,5 @@
/*
- Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
@@ -25,6 +25,7 @@
#ifndef DCPOMATIC_AUDIO_MAPPING_H
#define DCPOMATIC_AUDIO_MAPPING_H
+#include <dcp/types.h>
#include <libcxml/cxml.h>
#include <vector>
@@ -51,8 +52,11 @@ public:
void make_zero ();
void make_default (AudioProcessor const * processor, boost::optional<boost::filesystem::path> filename = boost::optional<boost::filesystem::path>());
+ void set (dcp::Channel input_channel, int output_channel, float);
void set (int input_channel, int output_channel, float);
+ void set (int input_channel, dcp::Channel output_channel, float);
float get (int input_channel, int output_channel) const;
+ float get (int input_channel, dcp::Channel output_channel) const;
int input_channels () const {
return _input_channels;
diff --git a/src/lib/colour_conversion.cc b/src/lib/colour_conversion.cc
index 57e73a5b5..6cf4ed646 100644
--- a/src/lib/colour_conversion.cc
+++ b/src/lib/colour_conversion.cc
@@ -95,7 +95,7 @@ ColourConversion::ColourConversion (cxml::NodePtr node, int version)
}
}
- _yuv_to_rgb = static_cast<dcp::YUVToRGB> (node->optional_number_child<int>("YUVToRGB").get_value_or (dcp::YUV_TO_RGB_REC601));
+ _yuv_to_rgb = static_cast<dcp::YUVToRGB>(node->optional_number_child<int>("YUVToRGB").get_value_or(static_cast<int>(dcp::YUVToRGB::REC601)));
auto m = node->node_children ("Matrix");
if (!m.empty ()) {
diff --git a/src/lib/config.cc b/src/lib/config.cc
index eae57cc06..7040c5127 100644
--- a/src/lib/config.cc
+++ b/src/lib/config.cc
@@ -1376,14 +1376,14 @@ Config::audio_mapping (int output_channels)
Map so that Lt = L(-3dB) + Ls(-3dB) + C(-6dB) + Lfe(-10dB)
Rt = R(-3dB) + Rs(-3dB) + C(-6dB) + Lfe(-10dB)
*/
- _audio_mapping->set (dcp::LEFT, 0, 1 / sqrt(2)); // L -> Lt
- _audio_mapping->set (dcp::RIGHT, 1, 1 / sqrt(2)); // R -> Rt
- _audio_mapping->set (dcp::CENTRE, 0, 1 / 2.0); // C -> Lt
- _audio_mapping->set (dcp::CENTRE, 1, 1 / 2.0); // C -> Rt
- _audio_mapping->set (dcp::LFE, 0, 1 / sqrt(10)); // Lfe -> Lt
- _audio_mapping->set (dcp::LFE, 1, 1 / sqrt(10)); // Lfe -> Rt
- _audio_mapping->set (dcp::LS, 0, 1 / sqrt(2)); // Ls -> Lt
- _audio_mapping->set (dcp::RS, 1, 1 / sqrt(2)); // Rs -> Rt
+ _audio_mapping->set (dcp::Channel::LEFT, 0, 1 / sqrt(2)); // L -> Lt
+ _audio_mapping->set (dcp::Channel::RIGHT, 1, 1 / sqrt(2)); // R -> Rt
+ _audio_mapping->set (dcp::Channel::CENTRE, 0, 1 / 2.0); // C -> Lt
+ _audio_mapping->set (dcp::Channel::CENTRE, 1, 1 / 2.0); // C -> Rt
+ _audio_mapping->set (dcp::Channel::LFE, 0, 1 / sqrt(10)); // Lfe -> Lt
+ _audio_mapping->set (dcp::Channel::LFE, 1, 1 / sqrt(10)); // Lfe -> Rt
+ _audio_mapping->set (dcp::Channel::LS, 0, 1 / sqrt(2)); // Ls -> Lt
+ _audio_mapping->set (dcp::Channel::RS, 1, 1 / sqrt(2)); // Rs -> Rt
} else {
/* 1:1 mapping */
for (int i = 0; i < min (MAX_DCP_AUDIO_CHANNELS, output_channels); ++i) {
diff --git a/src/lib/copy_dcp_details_to_film.cc b/src/lib/copy_dcp_details_to_film.cc
index 9bd88a422..64a38c335 100644
--- a/src/lib/copy_dcp_details_to_film.cc
+++ b/src/lib/copy_dcp_details_to_film.cc
@@ -47,7 +47,7 @@ copy_dcp_details_to_film (shared_ptr<const DCPContent> dcp, shared_ptr<Film> fil
}
film->set_encrypted (dcp->encrypted());
film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
- film->set_interop (dcp->standard() == dcp::INTEROP);
+ film->set_interop (dcp->standard() == dcp::Standard::INTEROP);
film->set_three_d (dcp->three_d());
if (dcp->video) {
diff --git a/src/lib/create_cli.cc b/src/lib/create_cli.cc
index 7acd5756f..de26a7412 100644
--- a/src/lib/create_cli.cc
+++ b/src/lib/create_cli.cc
@@ -79,7 +79,7 @@ CreateCLI::CreateCLI (int argc, char* argv[])
, dcp_content_type (0)
, container_ratio (0)
, still_length (10)
- , standard (dcp::SMPTE)
+ , standard (dcp::Standard::SMPTE)
, no_use_isdcf_name (false)
, fourk (false)
{
@@ -192,7 +192,7 @@ CreateCLI::CreateCLI (int argc, char* argv[])
}
if (standard_string == "interop") {
- standard = dcp::INTEROP;
+ standard = dcp::Standard::INTEROP;
}
if (content.empty()) {
diff --git a/src/lib/dcp.cc b/src/lib/dcp.cc
index 05b71557e..06e3e15d5 100644
--- a/src/lib/dcp.cc
+++ b/src/lib/dcp.cc
@@ -54,7 +54,7 @@ DCP::cpls () const
if (!_tolerant) {
/** We accept and ignore EMPTY_ASSET_PATH and EXTERNAL_ASSET but everything else is bad */
for (auto j: notes) {
- if (j.code() == dcp::VerificationNote::EMPTY_ASSET_PATH || j.code() == dcp::VerificationNote::EXTERNAL_ASSET) {
+ if (j.code() == dcp::VerificationNote::Code::EMPTY_ASSET_PATH || j.code() == dcp::VerificationNote::Code::EXTERNAL_ASSET) {
LOG_WARNING("Empty path in ASSETMAP of %1", i.string());
} else {
boost::throw_exception(dcp::ReadError(dcp::note_to_string(j)));
diff --git a/src/lib/dcp_content.cc b/src/lib/dcp_content.cc
index e43d88a34..022fd223c 100644
--- a/src/lib/dcp_content.cc
+++ b/src/lib/dcp_content.cc
@@ -136,9 +136,9 @@ DCPContent::DCPContent (cxml::ConstNodePtr node, int version)
if (node->optional_string_child("Standard")) {
string const s = node->optional_string_child("Standard").get();
if (s == "Interop") {
- _standard = dcp::INTEROP;
+ _standard = dcp::Standard::INTEROP;
} else if (s == "SMPTE") {
- _standard = dcp::SMPTE;
+ _standard = dcp::Standard::SMPTE;
} else {
DCPOMATIC_ASSERT (false);
}
@@ -373,10 +373,10 @@ DCPContent::as_xml (xmlpp::Node* node, bool with_paths) const
node->add_child("ReferenceClosedCaption")->add_child_text(_reference_text[TEXT_CLOSED_CAPTION] ? "1" : "0");
if (_standard) {
switch (_standard.get ()) {
- case dcp::INTEROP:
+ case dcp::Standard::INTEROP:
node->add_child("Standard")->add_child_text ("Interop");
break;
- case dcp::SMPTE:
+ case dcp::Standard::SMPTE:
node->add_child("Standard")->add_child_text ("SMPTE");
break;
default:
@@ -595,11 +595,11 @@ DCPContent::can_reference (shared_ptr<const Film> film, function<bool (shared_pt
{
/* We must be using the same standard as the film */
if (_standard) {
- if (_standard.get() == dcp::INTEROP && !film->interop()) {
+ if (_standard.get() == dcp::Standard::INTEROP && !film->interop()) {
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
why_not = _("it is Interop and the film is set to SMPTE.");
return false;
- } else if (_standard.get() == dcp::SMPTE && film->interop()) {
+ } else if (_standard.get() == dcp::Standard::SMPTE && film->interop()) {
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
why_not = _("it is SMPTE and the film is set to Interop.");
return false;
diff --git a/src/lib/dcp_content_type.cc b/src/lib/dcp_content_type.cc
index c31c507b0..e1b05852c 100644
--- a/src/lib/dcp_content_type.cc
+++ b/src/lib/dcp_content_type.cc
@@ -42,18 +42,18 @@ DCPContentType::DCPContentType (string p, dcp::ContentKind k, string d)
void
DCPContentType::setup_dcp_content_types ()
{
- _dcp_content_types.push_back (new DCPContentType (_("Feature"), dcp::FEATURE, N_("FTR")));
- _dcp_content_types.push_back (new DCPContentType (_("Short"), dcp::SHORT, N_("SHR")));
- _dcp_content_types.push_back (new DCPContentType (_("Trailer"), dcp::TRAILER, N_("TLR")));
- _dcp_content_types.push_back (new DCPContentType (_("Test"), dcp::TEST, N_("TST")));
- _dcp_content_types.push_back (new DCPContentType (_("Transitional"), dcp::TRANSITIONAL, N_("XSN")));
- _dcp_content_types.push_back (new DCPContentType (_("Rating"), dcp::RATING, N_("RTG")));
- _dcp_content_types.push_back (new DCPContentType (_("Teaser"), dcp::TEASER, N_("TSR")));
- _dcp_content_types.push_back (new DCPContentType (_("Policy"), dcp::POLICY, N_("POL")));
- _dcp_content_types.push_back (new DCPContentType (_("Public Service Announcement"), dcp::PUBLIC_SERVICE_ANNOUNCEMENT, N_("PSA")));
- _dcp_content_types.push_back (new DCPContentType (_("Advertisement"), dcp::ADVERTISEMENT, N_("ADV")));
- _dcp_content_types.push_back (new DCPContentType (_("Episode"), dcp::EPISODE, N_("EPS")));
- _dcp_content_types.push_back (new DCPContentType (_("Promo"), dcp::PROMO, N_("PRO")));
+ _dcp_content_types.push_back (new DCPContentType(_("Feature"), dcp::ContentKind::FEATURE, N_("FTR")));
+ _dcp_content_types.push_back (new DCPContentType(_("Short"), dcp::ContentKind::SHORT, N_("SHR")));
+ _dcp_content_types.push_back (new DCPContentType(_("Trailer"), dcp::ContentKind::TRAILER, N_("TLR")));
+ _dcp_content_types.push_back (new DCPContentType(_("Test"), dcp::ContentKind::TEST, N_("TST")));
+ _dcp_content_types.push_back (new DCPContentType(_("Transitional"), dcp::ContentKind::TRANSITIONAL, N_("XSN")));
+ _dcp_content_types.push_back (new DCPContentType(_("Rating"), dcp::ContentKind::RATING, N_("RTG")));
+ _dcp_content_types.push_back (new DCPContentType(_("Teaser"), dcp::ContentKind::TEASER, N_("TSR")));
+ _dcp_content_types.push_back (new DCPContentType(_("Policy"), dcp::ContentKind::POLICY, N_("POL")));
+ _dcp_content_types.push_back (new DCPContentType(_("Public Service Announcement"), dcp::ContentKind::PUBLIC_SERVICE_ANNOUNCEMENT, N_("PSA")));
+ _dcp_content_types.push_back (new DCPContentType(_("Advertisement"), dcp::ContentKind::ADVERTISEMENT, N_("ADV")));
+ _dcp_content_types.push_back (new DCPContentType(_("Episode"), dcp::ContentKind::EPISODE, N_("EPS")));
+ _dcp_content_types.push_back (new DCPContentType(_("Promo"), dcp::ContentKind::PROMO, N_("PRO")));
}
DCPContentType const *
diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc
index 4bc090bf4..0491f5af7 100644
--- a/src/lib/dcp_decoder.cc
+++ b/src/lib/dcp_decoder.cc
@@ -178,7 +178,7 @@ DCPDecoder::pass ()
new J2KImageProxy (
_stereo_reader->get_frame (entry_point + frame),
picture_asset->size(),
- dcp::EYE_LEFT,
+ dcp::Eye::LEFT,
AV_PIX_FMT_XYZ12LE,
_forced_reduction
)
@@ -192,7 +192,7 @@ DCPDecoder::pass ()
new J2KImageProxy (
_stereo_reader->get_frame (entry_point + frame),
picture_asset->size(),
- dcp::EYE_RIGHT,
+ dcp::Eye::RIGHT,
AV_PIX_FMT_XYZ12LE,
_forced_reduction
)
diff --git a/src/lib/dcp_examiner.cc b/src/lib/dcp_examiner.cc
index d0c3d1021..5ea568359 100644
--- a/src/lib/dcp_examiner.cc
+++ b/src/lib/dcp_examiner.cc
@@ -225,14 +225,14 @@ DCPExaminer::DCPExaminer (shared_ptr<const DCPContent> content, bool tolerant)
/* Check that we can read the first picture, sound and subtitle frames of each reel */
try {
for (auto i: cpl->reels()) {
- shared_ptr<dcp::PictureAsset> pic = i->main_picture()->asset ();
- shared_ptr<dcp::MonoPictureAsset> mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (pic);
- shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (pic);
+ auto pic = i->main_picture()->asset ();
+ auto mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (pic);
+ auto stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (pic);
if (mono) {
mono->start_read()->get_frame(0)->xyz_image ();
} else {
- stereo->start_read()->get_frame(0)->xyz_image (dcp::EYE_LEFT);
+ stereo->start_read()->get_frame(0)->xyz_image(dcp::Eye::LEFT);
}
if (i->main_sound()) {
diff --git a/src/lib/dkdm_recipient.cc b/src/lib/dkdm_recipient.cc
index 22da53f3b..8704f627a 100644
--- a/src/lib/dkdm_recipient.cc
+++ b/src/lib/dkdm_recipient.cc
@@ -78,7 +78,7 @@ kdm_for_dkdm_recipient (
cpl,
begin,
end,
- dcp::MODIFIED_TRANSITIONAL_1,
+ dcp::Formulation::MODIFIED_TRANSITIONAL_1,
true,
0
);
diff --git a/src/lib/ffmpeg_encoder.cc b/src/lib/ffmpeg_encoder.cc
index cc4f18bb6..9504b51b8 100644
--- a/src/lib/ffmpeg_encoder.cc
+++ b/src/lib/ffmpeg_encoder.cc
@@ -78,21 +78,21 @@ FFmpegEncoder::FFmpegEncoder (
float const overall_gain = 2 / (4 + sqrt(2));
float const minus_3dB = 1 / sqrt(2);
if (ch == 2) {
- map.set (dcp::LEFT, 0, 1);
- map.set (dcp::RIGHT, 1, 1);
+ map.set (dcp::Channel::LEFT, 0, 1);
+ map.set (dcp::Channel::RIGHT, 1, 1);
} else if (ch == 4) {
- map.set (dcp::LEFT, 0, overall_gain);
- map.set (dcp::RIGHT, 1, overall_gain);
- map.set (dcp::CENTRE, 0, overall_gain * minus_3dB);
- map.set (dcp::CENTRE, 1, overall_gain * minus_3dB);
- map.set (dcp::LS, 0, overall_gain);
+ map.set (dcp::Channel::LEFT, 0, overall_gain);
+ map.set (dcp::Channel::RIGHT, 1, overall_gain);
+ map.set (dcp::Channel::CENTRE, 0, overall_gain * minus_3dB);
+ map.set (dcp::Channel::CENTRE, 1, overall_gain * minus_3dB);
+ map.set (dcp::Channel::LS, 0, overall_gain);
} else if (ch >= 6) {
- map.set (dcp::LEFT, 0, overall_gain);
- map.set (dcp::RIGHT, 1, overall_gain);
- map.set (dcp::CENTRE, 0, overall_gain * minus_3dB);
- map.set (dcp::CENTRE, 1, overall_gain * minus_3dB);
- map.set (dcp::LS, 0, overall_gain);
- map.set (dcp::RS, 1, overall_gain);
+ map.set (dcp::Channel::LEFT, 0, overall_gain);
+ map.set (dcp::Channel::RIGHT, 1, overall_gain);
+ map.set (dcp::Channel::CENTRE, 0, overall_gain * minus_3dB);
+ map.set (dcp::Channel::CENTRE, 1, overall_gain * minus_3dB);
+ map.set (dcp::Channel::LS, 0, overall_gain);
+ map.set (dcp::Channel::RS, 1, overall_gain);
}
/* XXX: maybe we should do something better for >6 channel DCPs */
} else {
diff --git a/src/lib/film.cc b/src/lib/film.cc
index a0f25d84b..aa5dfdea9 100644
--- a/src/lib/film.cc
+++ b/src/lib/film.cc
@@ -172,8 +172,8 @@ Film::Film (optional<boost::filesystem::path> dir)
, _audio_language (dcp::LanguageTag("en-US"))
, _release_territory (dcp::LanguageTag::RegionSubtag("US"))
, _version_number (1)
- , _status (dcp::FINAL)
- , _luminance (dcp::Luminance(4.5, dcp::Luminance::FOOT_LAMBERT))
+ , _status (dcp::Status::FINAL)
+ , _luminance (dcp::Luminance(4.5, dcp::Luminance::Unit::FOOT_LAMBERT))
, _state_version (current_state_version)
, _dirty (false)
, _tolerant (false)
@@ -770,10 +770,10 @@ Film::mapped_audio_channels () const
mapped.push_back (i);
}
} else {
- for (auto i: content ()) {
+ for (auto i: content()) {
if (i->audio) {
- auto c = i->audio->mapping().mapped_output_channels ();
- copy (c.begin(), c.end(), back_inserter (mapped));
+ auto c = i->audio->mapping().mapped_output_channels();
+ copy (c.begin(), c.end(), back_inserter(mapped));
}
}
@@ -879,7 +879,7 @@ Film::isdcf_name (bool if_created_now) const
/* XXX: this uses the first bit of content only */
/* Interior aspect ratio. The standard says we don't do this for trailers, for some strange reason */
- if (dcp_content_type() && dcp_content_type()->libdcp_kind() != dcp::TRAILER) {
+ if (dcp_content_type() && dcp_content_type()->libdcp_kind() != dcp::ContentKind::TRAILER) {
Ratio const* content_ratio = nullptr;
for (auto i: content ()) {
if (i->video) {
@@ -952,10 +952,10 @@ Film::isdcf_name (bool if_created_now) const
d += String::compose("_%1%2", ch.first, ch.second);
}
- if (audio_channels() > static_cast<int>(dcp::HI) && find(mapped.begin(), mapped.end(), dcp::HI) != mapped.end()) {
+ if (audio_channels() > static_cast<int>(dcp::Channel::HI) && find(mapped.begin(), mapped.end(), static_cast<int>(dcp::Channel::HI)) != mapped.end()) {
d += "-HI";
}
- if (audio_channels() > static_cast<int>(dcp::VI) && find(mapped.begin(), mapped.end(), dcp::VI) != mapped.end()) {
+ if (audio_channels() > static_cast<int>(dcp::Channel::VI) && find(mapped.begin(), mapped.end(), static_cast<int>(dcp::Channel::VI)) != mapped.end()) {
d += "-VI";
}
diff --git a/src/lib/hints.cc b/src/lib/hints.cc
index 58d33204c..ebc42bc91 100644
--- a/src/lib/hints.cc
+++ b/src/lib/hints.cc
@@ -534,7 +534,7 @@ void
Hints::check_ffec_and_ffmc_in_smpte_feature ()
{
shared_ptr<const Film> f = film();
- if (!f->interop() && f->dcp_content_type()->libdcp_kind() == dcp::FEATURE && (!f->marker(dcp::Marker::FFEC) || !f->marker(dcp::Marker::FFMC))) {
+ if (!f->interop() && f->dcp_content_type()->libdcp_kind() == dcp::ContentKind::FEATURE && (!f->marker(dcp::Marker::FFEC) || !f->marker(dcp::Marker::FFMC))) {
hint (_("SMPTE DCPs with the type FTR (feature) should have markers for the first frame of end credits (FFEC) and the first frame of moving credits (FFMC). You should add these markers using the 'Markers' button in the DCP tab."));
}
}
diff --git a/src/lib/image.cc b/src/lib/image.cc
index a31874e18..c0916df89 100644
--- a/src/lib/image.cc
+++ b/src/lib/image.cc
@@ -215,8 +215,8 @@ Image::crop_scale_window (
throw runtime_error (N_("Could not allocate SwsContext"));
}
- DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
- int const lut[dcp::YUV_TO_RGB_COUNT] = {
+ DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
+ int const lut[static_cast<int>(dcp::YUVToRGB::COUNT)] = {
SWS_CS_ITU601,
SWS_CS_ITU709
};
@@ -234,8 +234,8 @@ Image::crop_scale_window (
*/
sws_setColorspaceDetails (
scale_context,
- sws_getCoefficients (lut[yuv_to_rgb]), video_range == VIDEO_RANGE_VIDEO ? 0 : 1,
- sws_getCoefficients (lut[yuv_to_rgb]), out_video_range == VIDEO_RANGE_VIDEO ? 0 : 1,
+ sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), video_range == VIDEO_RANGE_VIDEO ? 0 : 1,
+ sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), out_video_range == VIDEO_RANGE_VIDEO ? 0 : 1,
0, 1 << 16, 1 << 16
);
@@ -312,8 +312,8 @@ Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_fo
(fast ? SWS_FAST_BILINEAR : SWS_BICUBIC) | SWS_ACCURATE_RND, 0, 0, 0
);
- DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
- int const lut[dcp::YUV_TO_RGB_COUNT] = {
+ DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
+ int const lut[static_cast<int>(dcp::YUVToRGB::COUNT)] = {
SWS_CS_ITU601,
SWS_CS_ITU709
};
@@ -331,8 +331,8 @@ Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_fo
*/
sws_setColorspaceDetails (
scale_context,
- sws_getCoefficients (lut[yuv_to_rgb]), 0,
- sws_getCoefficients (lut[yuv_to_rgb]), 0,
+ sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), 0,
+ sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), 0,
0, 1 << 16, 1 << 16
);
@@ -680,7 +680,7 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
}
case AV_PIX_FMT_YUV420P:
{
- shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
+ shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, false, false);
dcp::Size const ts = size();
dcp::Size const os = yuv->size();
for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
@@ -715,7 +715,7 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
}
case AV_PIX_FMT_YUV420P10:
{
- shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
+ shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, false, false);
dcp::Size const ts = size();
dcp::Size const os = yuv->size();
for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
@@ -750,7 +750,7 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
}
case AV_PIX_FMT_YUV422P10LE:
{
- shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
+ shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, false, false);
dcp::Size const ts = size();
dcp::Size const os = yuv->size();
for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
@@ -1333,7 +1333,7 @@ Image::as_png () const
DCPOMATIC_ASSERT (bytes_per_pixel(0) == 4);
DCPOMATIC_ASSERT (planes() == 1);
if (pixel_format() != AV_PIX_FMT_RGBA) {
- return convert_pixel_format(dcp::YUV_TO_RGB_REC709, AV_PIX_FMT_RGBA, true, false)->as_png();
+ return convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGBA, true, false)->as_png();
}
/* error handling? */
diff --git a/src/lib/j2k_image_proxy.cc b/src/lib/j2k_image_proxy.cc
index 13305b7f3..98b859529 100644
--- a/src/lib/j2k_image_proxy.cc
+++ b/src/lib/j2k_image_proxy.cc
@@ -85,7 +85,7 @@ J2KImageProxy::J2KImageProxy (
AVPixelFormat pixel_format,
optional<int> forced_reduction
)
- : _data (eye ? frame->left() : frame->right())
+ : _data (eye == dcp::Eye::LEFT ? frame->left() : frame->right())
, _size (size)
, _eye (eye)
, _pixel_format (pixel_format)
diff --git a/src/lib/log.cc b/src/lib/log.cc
index e1716f1c6..f84cfd3a5 100644
--- a/src/lib/log.cc
+++ b/src/lib/log.cc
@@ -31,9 +31,12 @@
#include "i18n.h"
+
using std::string;
using std::cout;
using std::shared_ptr;
+using std::make_shared;
+
Log::Log ()
: _types (0)
@@ -63,7 +66,7 @@ Log::log (string message, int type)
return;
}
- shared_ptr<StringLogEntry> e (new StringLogEntry (type, message));
+ auto e = make_shared<StringLogEntry>(type, message);
do_log (e);
}
@@ -72,14 +75,14 @@ void
Log::dcp_log (dcp::NoteType type, string m)
{
switch (type) {
- case dcp::DCP_PROGRESS:
- do_log (shared_ptr<const LogEntry> (new StringLogEntry (LogEntry::TYPE_GENERAL, m)));
+ case dcp::NoteType::PROGRESS:
+ do_log (make_shared<StringLogEntry>(LogEntry::TYPE_GENERAL, m));
break;
- case dcp::DCP_ERROR:
- do_log (shared_ptr<const LogEntry> (new StringLogEntry (LogEntry::TYPE_ERROR, m)));
+ case dcp::NoteType::ERROR:
+ do_log (make_shared<StringLogEntry>(LogEntry::TYPE_ERROR, m));
break;
- case dcp::DCP_NOTE:
- do_log (shared_ptr<const LogEntry> (new StringLogEntry (LogEntry::TYPE_WARNING, m)));
+ case dcp::NoteType::NOTE:
+ do_log (make_shared<StringLogEntry>(LogEntry::TYPE_WARNING, m));
break;
}
}
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 503452b59..5b449ccc1 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -1011,7 +1011,7 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
}
dcp::Size scaled_size (width, height);
- ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
+ ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
DCPTime from (content_time_to_dcp (piece, subtitle.from()));
_active_texts[text->type()].add_from (wc, ps, from);
diff --git a/src/lib/player_video.cc b/src/lib/player_video.cc
index 683fc27fc..ce552f20f 100644
--- a/src/lib/player_video.cc
+++ b/src/lib/player_video.cc
@@ -166,7 +166,7 @@ PlayerVideo::make_image (function<AVPixelFormat (AVPixelFormat)> pixel_format, V
total_crop.bottom /= r;
}
- dcp::YUVToRGB yuv_to_rgb = dcp::YUV_TO_RGB_REC601;
+ dcp::YUVToRGB yuv_to_rgb = dcp::YUVToRGB::REC601;
if (_colour_conversion) {
yuv_to_rgb = _colour_conversion.get().yuv_to_rgb();
}
diff --git a/src/lib/reel_writer.cc b/src/lib/reel_writer.cc
index 8be31d09b..1d2ec557f 100644
--- a/src/lib/reel_writer.cc
+++ b/src/lib/reel_writer.cc
@@ -114,7 +114,7 @@ ReelWriter::ReelWriter (
output. We will hard-link it into the DCP later.
*/
- dcp::Standard const standard = film()->interop() ? dcp::INTEROP : dcp::SMPTE;
+ dcp::Standard const standard = film()->interop() ? dcp::Standard::INTEROP : dcp::Standard::SMPTE;
boost::filesystem::path const asset =
film()->internal_video_asset_dir() / film()->internal_video_asset_filename(_period);
@@ -861,7 +861,7 @@ ReelWriter::write (PlayerText subs, TextType type, optional<DCPTextTrack> track,
i.image->as_png(),
dcp::Time(period.from.seconds() - _period.from.seconds(), film()->video_frame_rate()),
dcp::Time(period.to.seconds() - _period.from.seconds(), film()->video_frame_rate()),
- i.rectangle.x, dcp::HALIGN_LEFT, i.rectangle.y, dcp::VALIGN_TOP,
+ i.rectangle.x, dcp::HAlign::LEFT, i.rectangle.y, dcp::VAlign::TOP,
dcp::Time(), dcp::Time()
)
)
diff --git a/src/lib/render_text.cc b/src/lib/render_text.cc
index 3dc1e9ff0..2272d3506 100644
--- a/src/lib/render_text.cc
+++ b/src/lib/render_text.cc
@@ -224,15 +224,15 @@ x_position (StringText const& first, int target_width, int layout_width)
{
int x = 0;
switch (first.h_align ()) {
- case dcp::HALIGN_LEFT:
+ case dcp::HAlign::LEFT:
/* h_position is distance between left of frame and left of subtitle */
x = first.h_position() * target_width;
break;
- case dcp::HALIGN_CENTER:
+ case dcp::HAlign::CENTER:
/* h_position is distance between centre of frame and centre of subtitle */
x = (0.5 + first.h_position()) * target_width - layout_width / 2;
break;
- case dcp::HALIGN_RIGHT:
+ case dcp::HAlign::RIGHT:
/* h_position is distance between right of frame and right of subtitle */
x = (1.0 - first.h_position()) * target_width - layout_width;
break;
@@ -248,20 +248,20 @@ y_position (StringText const& first, int target_height, int layout_height)
{
int y = 0;
switch (first.v_align ()) {
- case dcp::VALIGN_TOP:
+ case dcp::VAlign::TOP:
/* SMPTE says that v_position is the distance between top
of frame and top of subtitle, but this doesn't always seem to be
the case in practice; Gunnar Ásgeirsson's Dolby server appears
- to put VALIGN_TOP subs with v_position as the distance between top
+ to put VAlign::TOP subs with v_position as the distance between top
of frame and bottom of subtitle.
*/
y = first.v_position() * target_height - layout_height;
break;
- case dcp::VALIGN_CENTER:
+ case dcp::VAlign::CENTER:
/* v_position is distance between centre of frame and centre of subtitle */
y = (0.5 + first.v_position()) * target_height - layout_height / 2;
break;
- case dcp::VALIGN_BOTTOM:
+ case dcp::VAlign::BOTTOM:
/* v_position is distance between bottom of frame and bottom of subtitle */
y = (1.0 - first.v_position()) * target_height - layout_height;
break;
@@ -333,7 +333,7 @@ render_line (list<StringText> subtitles, list<shared_ptr<Font> > fonts, dcp::Siz
}
}
- float const border_width = first.effect() == dcp::BORDER ? (first.outline_width * target.width / 2048.0) : 0;
+ float const border_width = first.effect() == dcp::Effect::BORDER ? (first.outline_width * target.width / 2048.0) : 0;
size.width += 2 * ceil (border_width);
size.height += 2 * ceil (border_width);
@@ -356,7 +356,7 @@ render_line (list<StringText> subtitles, list<shared_ptr<Font> > fonts, dcp::Siz
context->scale (x_scale, y_scale);
layout->update_from_cairo_context (context);
- if (first.effect() == dcp::SHADOW) {
+ if (first.effect() == dcp::Effect::SHADOW) {
/* Drop-shadow effect */
set_source_rgba (context, first.effect_colour(), fade_factor);
context->move_to (x_offset + 4, y_offset + 4);
@@ -364,7 +364,7 @@ render_line (list<StringText> subtitles, list<shared_ptr<Font> > fonts, dcp::Siz
context->fill ();
}
- if (first.effect() == dcp::BORDER) {
+ if (first.effect() == dcp::Effect::BORDER) {
/* Border effect */
set_source_rgba (context, first.effect_colour(), fade_factor);
context->set_line_width (border_width);
diff --git a/src/lib/spl_entry.cc b/src/lib/spl_entry.cc
index 083ce0296..440f9861b 100644
--- a/src/lib/spl_entry.cc
+++ b/src/lib/spl_entry.cc
@@ -44,11 +44,11 @@ SPLEntry::construct (shared_ptr<Content> c)
name = dcp->name ();
DCPOMATIC_ASSERT (dcp->cpl());
id = *dcp->cpl();
- kind = dcp->content_kind().get_value_or(dcp::FEATURE);
+ kind = dcp->content_kind().get_value_or(dcp::ContentKind::FEATURE);
encrypted = dcp->encrypted ();
} else {
name = content->path(0).filename().string();
- kind = dcp::FEATURE;
+ kind = dcp::ContentKind::FEATURE;
}
}
diff --git a/src/lib/text_content.cc b/src/lib/text_content.cc
index a9ed86083..86fd85b68 100644
--- a/src/lib/text_content.cc
+++ b/src/lib/text_content.cc
@@ -144,21 +144,21 @@ TextContent::TextContent (Content* parent, cxml::ConstNodePtr node, int version)
}
if (node->optional_bool_child("Outline").get_value_or(false)) {
- _effect = dcp::BORDER;
+ _effect = dcp::Effect::BORDER;
} else if (node->optional_bool_child("Shadow").get_value_or(false)) {
- _effect = dcp::SHADOW;
+ _effect = dcp::Effect::SHADOW;
} else {
- _effect = dcp::NONE;
+ _effect = dcp::Effect::NONE;
}
auto effect = node->optional_string_child("Effect");
if (effect) {
if (*effect == "none") {
- _effect = dcp::NONE;
+ _effect = dcp::Effect::NONE;
} else if (*effect == "outline") {
- _effect = dcp::BORDER;
+ _effect = dcp::Effect::BORDER;
} else if (*effect == "shadow") {
- _effect = dcp::SHADOW;
+ _effect = dcp::Effect::SHADOW;
}
}
@@ -341,13 +341,13 @@ TextContent::as_xml (xmlpp::Node* root) const
}
if (_effect) {
switch (*_effect) {
- case dcp::NONE:
+ case dcp::Effect::NONE:
text->add_child("Effect")->add_child_text("none");
break;
- case dcp::BORDER:
+ case dcp::Effect::BORDER:
text->add_child("Effect")->add_child_text("outline");
break;
- case dcp::SHADOW:
+ case dcp::Effect::SHADOW:
text->add_child("Effect")->add_child_text("shadow");
break;
}
@@ -389,7 +389,7 @@ TextContent::identifier () const
+ "_" + raw_convert<string> (fade_out().get_value_or(ContentTime()).get())
+ "_" + raw_convert<string> (outline_width())
+ "_" + raw_convert<string> (colour().get_value_or(dcp::Colour(255, 255, 255)).to_argb_string())
- + "_" + raw_convert<string> (dcp::effect_to_string(effect().get_value_or(dcp::NONE)))
+ + "_" + raw_convert<string> (dcp::effect_to_string(effect().get_value_or(dcp::Effect::NONE)))
+ "_" + raw_convert<string> (effect_colour().get_value_or(dcp::Colour(0, 0, 0)).to_argb_string())
+ "_" + raw_convert<string> (_parent->video_frame_rate().get_value_or(0));
diff --git a/src/lib/text_decoder.cc b/src/lib/text_decoder.cc
index a2241c50b..099e3ee74 100644
--- a/src/lib/text_decoder.cc
+++ b/src/lib/text_decoder.cc
@@ -147,16 +147,16 @@ TextDecoder::emit_plain_start (ContentTime from, sub::Subtitle const & subtitle)
v_position = 1.015 -
(1 + bottom_line.get() - i.vertical_position.line.get()) * multiplier;
- v_align = dcp::VALIGN_TOP;
+ v_align = dcp::VAlign::TOP;
break;
case sub::TOP_OF_SCREEN:
/* This 0.1 is another fudge factor to bring the top line away from the top of the screen a little */
v_position = 0.12 + i.vertical_position.line.get() * multiplier;
- v_align = dcp::VALIGN_TOP;
+ v_align = dcp::VAlign::TOP;
break;
case sub::VERTICAL_CENTRE_OF_SCREEN:
v_position = i.vertical_position.line.get() * multiplier;
- v_align = dcp::VALIGN_CENTER;
+ v_align = dcp::VAlign::CENTER;
break;
}
} else {
@@ -176,16 +176,16 @@ TextDecoder::emit_plain_start (ContentTime from, sub::Subtitle const & subtitle)
switch (i.vertical_position.reference.get()) {
case sub::TOP_OF_SCREEN:
- v_align = dcp::VALIGN_TOP;
+ v_align = dcp::VAlign::TOP;
break;
case sub::VERTICAL_CENTRE_OF_SCREEN:
- v_align = dcp::VALIGN_CENTER;
+ v_align = dcp::VAlign::CENTER;
break;
case sub::BOTTOM_OF_SCREEN:
- v_align = dcp::VALIGN_BOTTOM;
+ v_align = dcp::VAlign::BOTTOM;
break;
default:
- v_align = dcp::VALIGN_TOP;
+ v_align = dcp::VAlign::TOP;
break;
}
}
@@ -194,18 +194,18 @@ TextDecoder::emit_plain_start (ContentTime from, sub::Subtitle const & subtitle)
float h_position = i.horizontal_position.proportional;
switch (i.horizontal_position.reference) {
case sub::LEFT_OF_SCREEN:
- h_align = dcp::HALIGN_LEFT;
+ h_align = dcp::HAlign::LEFT;
h_position = max(h_position, 0.05f);
break;
case sub::HORIZONTAL_CENTRE_OF_SCREEN:
- h_align = dcp::HALIGN_CENTER;
+ h_align = dcp::HAlign::CENTER;
break;
case sub::RIGHT_OF_SCREEN:
- h_align = dcp::HALIGN_RIGHT;
+ h_align = dcp::HAlign::RIGHT;
h_position = max(h_position, 0.05f);
break;
default:
- h_align = dcp::HALIGN_CENTER;
+ h_align = dcp::HAlign::CENTER;
break;
}
@@ -230,9 +230,9 @@ TextDecoder::emit_plain_start (ContentTime from, sub::Subtitle const & subtitle)
h_align,
v_position,
v_align,
- dcp::DIRECTION_LTR,
+ dcp::Direction::LTR,
j.text,
- dcp::NONE,
+ dcp::Effect::NONE,
j.effect_colour.get_value_or(sub::Colour(0, 0, 0)).dcp(),
/* Hack: we should use subtitle.fade_up and subtitle.fade_down here
but the times of these often don't have a frame rate associated
diff --git a/src/lib/types.cc b/src/lib/types.cc
index 9aba915e8..ac7920a2c 100644
--- a/src/lib/types.cc
+++ b/src/lib/types.cc
@@ -201,7 +201,7 @@ CPLSummary::CPLSummary (boost::filesystem::path p)
vector<dcp::VerificationNote> notes;
dcp.read (&notes);
for (auto i: notes) {
- if (i.code() != dcp::VerificationNote::EXTERNAL_ASSET) {
+ if (i.code() != dcp::VerificationNote::Code::EXTERNAL_ASSET) {
/* It's not just a warning about this DCP being a VF */
throw dcp::ReadError(dcp::note_to_string(i));
}
diff --git a/src/lib/util.cc b/src/lib/util.cc
index 0aa7e7a28..78fac8b3a 100644
--- a/src/lib/util.cc
+++ b/src/lib/util.cc
@@ -395,7 +395,7 @@ DCPOMATIC_ENABLE_WARNINGS
/* Render something to fontconfig to create its cache */
list<StringText> subs;
dcp::SubtitleString ss(
- optional<string>(), false, false, false, dcp::Colour(), 42, 1, dcp::Time(), dcp::Time(), 0, dcp::HALIGN_CENTER, 0, dcp::VALIGN_CENTER, dcp::DIRECTION_LTR,
- "Hello dolly", dcp::NONE, dcp::Colour(), dcp::Time(), dcp::Time()
+ optional<string>(), false, false, false, dcp::Colour(), 42, 1, dcp::Time(), dcp::Time(), 0, dcp::HAlign::CENTER, 0, dcp::VAlign::CENTER, dcp::Direction::LTR,
+ "Hello dolly", dcp::Effect::NONE, dcp::Colour(), dcp::Time(), dcp::Time()
);
subs.push_back (StringText(ss, 0));
@@ -818,24 +818,24 @@ audio_channel_types (list<int> mapped, int channels)
}
switch (static_cast<dcp::Channel>(i)) {
- case dcp::LFE:
+ case dcp::Channel::LFE:
++lfe;
break;
- case dcp::LEFT:
- case dcp::RIGHT:
- case dcp::CENTRE:
- case dcp::LS:
- case dcp::RS:
- case dcp::BSL:
- case dcp::BSR:
+ case dcp::Channel::LEFT:
+ case dcp::Channel::RIGHT:
+ case dcp::Channel::CENTRE:
+ case dcp::Channel::LS:
+ case dcp::Channel::RS:
+ case dcp::Channel::BSL:
+ case dcp::Channel::BSR:
++non_lfe;
break;
- case dcp::HI:
- case dcp::VI:
- case dcp::MOTION_DATA:
- case dcp::SYNC_SIGNAL:
- case dcp::SIGN_LANGUAGE:
- case dcp::CHANNEL_COUNT:
+ case dcp::Channel::HI:
+ case dcp::Channel::VI:
+ case dcp::Channel::MOTION_DATA:
+ case dcp::Channel::SYNC_SIGNAL:
+ case dcp::Channel::SIGN_LANGUAGE:
+ case dcp::Channel::CHANNEL_COUNT:
break;
}
}
@@ -853,12 +853,12 @@ remap (shared_ptr<const AudioBuffers> input, int output_channels, AudioMapping m
for (int i = 0; i < to_do; ++i) {
for (int j = 0; j < mapped->channels(); ++j) {
- if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
- mapped->accumulate_channel (
+ if (map.get(i, j) > 0) {
+ mapped->accumulate_channel(
input.get(),
i,
- static_cast<dcp::Channel> (j),
- map.get (i, static_cast<dcp::Channel> (j))
+ j,
+ map.get(i, j)
);
}
}
@@ -962,25 +962,25 @@ emit_subtitle_image (ContentTimePeriod period, dcp::SubtitleImage sub, dcp::Size
/* add in position */
switch (sub.h_align()) {
- case dcp::HALIGN_LEFT:
+ case dcp::HAlign::LEFT:
rect.x += sub.h_position();
break;
- case dcp::HALIGN_CENTER:
+ case dcp::HAlign::CENTER:
rect.x += 0.5 + sub.h_position() - rect.width / 2;
break;
- case dcp::HALIGN_RIGHT:
+ case dcp::HAlign::RIGHT:
rect.x += 1 - sub.h_position() - rect.width;
break;
}
switch (sub.v_align()) {
- case dcp::VALIGN_TOP:
+ case dcp::VAlign::TOP:
rect.y += sub.v_position();
break;
- case dcp::VALIGN_CENTER:
+ case dcp::VAlign::CENTER:
rect.y += 0.5 + sub.v_position() - rect.height / 2;
break;
- case dcp::VALIGN_BOTTOM:
+ case dcp::VAlign::BOTTOM:
rect.y += 1 - sub.v_position() - rect.height;
break;
}
diff --git a/src/lib/verify_dcp_job.cc b/src/lib/verify_dcp_job.cc
index bbcce7208..bd5634669 100644
--- a/src/lib/verify_dcp_job.cc
+++ b/src/lib/verify_dcp_job.cc
@@ -72,7 +72,7 @@ VerifyDCPJob::run ()
bool failed = false;
for (auto i: _notes) {
- if (i.type() == dcp::VerificationNote::VERIFY_ERROR) {
+ if (i.type() == dcp::VerificationNote::Type::ERROR) {
failed = true;
}
}
diff --git a/src/lib/video_mxf_decoder.cc b/src/lib/video_mxf_decoder.cc
index d2b057972..6e194a6df 100644
--- a/src/lib/video_mxf_decoder.cc
+++ b/src/lib/video_mxf_decoder.cc
@@ -92,14 +92,14 @@ VideoMXFDecoder::pass ()
video->emit (
film(),
shared_ptr<ImageProxy> (
- new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::EYE_LEFT, AV_PIX_FMT_XYZ12LE, optional<int>())
+ new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::Eye::LEFT, AV_PIX_FMT_XYZ12LE, optional<int>())
),
frame
);
video->emit (
film(),
shared_ptr<ImageProxy> (
- new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::EYE_RIGHT, AV_PIX_FMT_XYZ12LE, optional<int>())
+ new J2KImageProxy (_stereo_reader->get_frame(frame), _size, dcp::Eye::RIGHT, AV_PIX_FMT_XYZ12LE, optional<int>())
),
frame
);
diff --git a/src/lib/writer.cc b/src/lib/writer.cc
index 8c5a82415..a67430d61 100644
--- a/src/lib/writer.cc
+++ b/src/lib/writer.cc
@@ -623,15 +623,15 @@ Writer::finish (boost::filesystem::path output_dcp)
cpl->set_facility (film()->facility());
cpl->set_luminance (film()->luminance());
- list<int> ac = film()->mapped_audio_channels();
+ auto ac = film()->mapped_audio_channels();
dcp::MCASoundField field = (
- find(ac.begin(), ac.end(), static_cast<int>(dcp::BSL)) != ac.end() ||
- find(ac.begin(), ac.end(), static_cast<int>(dcp::BSR)) != ac.end()
- ) ? dcp::SEVEN_POINT_ONE : dcp::FIVE_POINT_ONE;
+ find(ac.begin(), ac.end(), static_cast<int>(dcp::Channel::BSL)) != ac.end() ||
+ find(ac.begin(), ac.end(), static_cast<int>(dcp::Channel::BSR)) != ac.end()
+ ) ? dcp::MCASoundField::SEVEN_POINT_ONE : dcp::MCASoundField::FIVE_POINT_ONE;
dcp::MainSoundConfiguration msc (field, film()->audio_channels());
for (auto i: ac) {
- if (i < film()->audio_channels()) {
+ if (static_cast<int>(i) < film()->audio_channels()) {
msc.set_mapping (i, static_cast<dcp::Channel>(i));
}
}
@@ -655,7 +655,7 @@ Writer::finish (boost::filesystem::path output_dcp)
}
dcp.write_xml (
- film()->interop() ? dcp::INTEROP : dcp::SMPTE,
+ film()->interop() ? dcp::Standard::INTEROP : dcp::Standard::SMPTE,
issuer,
creator,
dcp::LocalTime().as_string(),