using std::shared_ptr;
using std::weak_ptr;
using std::dynamic_pointer_cast;
+using std::make_shared;
using boost::optional;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
void
DCPEncoder::go ()
{
- _writer.reset (new Writer (_film, _job));
+ _writer = make_shared<Writer>(_film, _job);
_writer->start ();
- _j2k_encoder.reset (new J2KEncoder (_film, _writer));
+ _j2k_encoder = make_shared<J2KEncoder>(_film, _writer);
_j2k_encoder->begin ();
{
- shared_ptr<Job> job = _job.lock ();
+ auto job = _job.lock ();
DCPOMATIC_ASSERT (job);
job->sub (_("Encoding"));
}
if (_non_burnt_subtitles) {
- vector<FontData> fonts = _player->get_subtitle_fonts ();
+ auto fonts = _player->get_subtitle_fonts ();
if (fonts.size() > 1 && _film->interop()) {
/* Interop will ignore second and subsequent <LoadFont>s so don't even
write them as they upset some validators.
*/
- FontData first = fonts.front ();
+ auto first = fonts.front();
fonts.clear ();
fonts.push_back (first);
}
{
_writer->write (data, time);
- shared_ptr<Job> job = _job.lock ();
+ auto job = _job.lock ();
DCPOMATIC_ASSERT (job);
job->set_progress (float(time.get()) / _film->length().get());
}
DCPEncoder::current_rate () const
{
if (!_j2k_encoder) {
- return optional<float>();
+ return {};
}
return _j2k_encoder->current_encoding_rate ();
using std::cerr;
using std::fixed;
using std::shared_ptr;
+using std::make_shared;
using boost::thread;
using boost::bind;
using boost::scoped_array;
socket->read (reinterpret_cast<uint8_t*> (buffer.get()), length);
string s (buffer.get());
- shared_ptr<cxml::Document> xml (new cxml::Document ("EncodingRequest"));
+ auto xml = make_shared<cxml::Document>("EncodingRequest");
xml->read_string (s);
/* This is a double-check; the server shouldn't even be on the candidate list
if it is the wrong version, but it doesn't hurt to make sure here.
return -1;
}
- shared_ptr<PlayerVideo> pvf (new PlayerVideo (xml, socket));
+ auto pvf = make_shared<PlayerVideo>(xml, socket);
if (!ds.check()) {
throw NetworkError ("Checksums do not match");
gettimeofday (&after_read, 0);
- ArrayData encoded = dcp_video_frame.encode_locally ();
+ auto encoded = dcp_video_frame.encode_locally ();
gettimeofday (&after_encode, 0);
return;
}
- shared_ptr<Socket> socket = _queue.front ();
+ auto socket = _queue.front ();
_queue.pop_front ();
lock.unlock ();
struct timeval end;
gettimeofday (&end, 0);
- shared_ptr<EncodedLogEntry> e (
- new EncodedLogEntry (
- frame, ip,
- seconds(after_read) - seconds(start),
- seconds(after_encode) - seconds(after_read),
- seconds(end) - seconds(after_encode)
- )
+ auto e = make_shared<EncodedLogEntry>(
+ frame, ip,
+ seconds(after_read) - seconds(start),
+ seconds(after_encode) - seconds(after_read),
+ seconds(end) - seconds(after_encode)
);
if (_verbose) {
EncodeServer::broadcast_thread ()
try
{
- boost::asio::ip::address address = boost::asio::ip::address_v4::any ();
+ auto address = boost::asio::ip::address_v4::any ();
boost::asio::ip::udp::endpoint listen_endpoint (address, HELLO_PORT);
_broadcast.socket = new boost::asio::ip::udp::socket (_broadcast.io_service);
if (strcmp (_broadcast.buffer, DCPOMATIC_HELLO) == 0) {
/* Reply to the client saying what we can do */
xmlpp::Document doc;
- xmlpp::Element* root = doc.create_root_node ("ServerAvailable");
+ auto root = doc.create_root_node ("ServerAvailable");
root->add_child("Threads")->add_child_text (raw_convert<string> (_worker_threads.size ()));
root->add_child("Version")->add_child_text (raw_convert<string> (SERVER_LINK_VERSION));
- string xml = doc.write_to_string ("UTF-8");
+ auto xml = doc.write_to_string ("UTF-8");
if (_verbose) {
cout << "Offering services to master " << _broadcast.send_endpoint.address().to_string () << "\n";
}
try {
- shared_ptr<Socket> socket (new Socket);
+ auto socket = make_shared<Socket>();
socket->connect (boost::asio::ip::tcp::endpoint (_broadcast.send_endpoint.address(), MAIN_SERVER_PRESENCE_PORT));
socket->write (xml.length() + 1);
socket->write ((uint8_t *) xml.c_str(), xml.length() + 1);
}
try {
- shared_ptr<Socket> socket (new Socket);
+ auto socket = make_shared<Socket>();
socket->connect (boost::asio::ip::tcp::endpoint (_broadcast.send_endpoint.address(), BATCH_SERVER_PRESENCE_PORT));
socket->write (xml.length() + 1);
socket->write ((uint8_t *) xml.c_str(), xml.length() + 1);
/*
- Copyright (C) 2012-2020 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
using std::find;
using std::shared_ptr;
using std::weak_ptr;
+using std::make_shared;
using std::dynamic_pointer_cast;
using boost::optional;
using boost::is_any_of;
boost::filesystem::path p (boost::filesystem::system_complete (dir.get()));
boost::filesystem::path result;
- for (boost::filesystem::path::iterator i = p.begin(); i != p.end(); ++i) {
- if (*i == "..") {
+ for (auto const& i: p) {
+ if (i == "..") {
boost::system::error_code ec;
if (boost::filesystem::is_symlink(result, ec) || result.filename() == "..") {
- result /= *i;
+ result /= i;
} else {
result = result.parent_path ();
}
- } else if (*i != ".") {
- result /= *i;
+ } else if (i != ".") {
+ result /= i;
}
}
}
if (_directory) {
- _log.reset (new FileLog (file ("log")));
+ _log = make_shared<FileLog>(file("log"));
} else {
- _log.reset (new NullLog);
+ _log = make_shared<NullLog>();
}
_playlist->set_sequence (_sequence);
boost::filesystem::path
Film::audio_analysis_path (shared_ptr<const Playlist> playlist) const
{
- boost::filesystem::path p = dir ("analysis");
+ auto p = dir ("analysis");
Digester digester;
for (auto i: playlist->content ()) {
boost::filesystem::path
Film::subtitle_analysis_path (shared_ptr<const Content> content) const
{
- boost::filesystem::path p = dir ("analysis");
+ auto p = dir ("analysis");
Digester digester;
digester.add (content->digest());
if (!content->text.empty()) {
- shared_ptr<TextContent> tc = content->text.front();
+ auto tc = content->text.front();
digester.add (tc->x_scale());
digester.add (tc->y_scale());
for (auto i: tc->fonts()) {
digester.add (tc->outline_width());
}
- shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent>(content);
+ auto fc = dynamic_pointer_cast<const FFmpegContent>(content);
if (fc) {
digester.add (fc->subtitle_stream()->identifier());
}
throw BadSettingError (_("name"), _("Cannot contain slashes"));
}
- if (container() == 0) {
+ if (container() == nullptr) {
throw MissingSettingError (_("container"));
}
throw runtime_error (_("The DCP is empty, perhaps because all the content has zero length."));
}
- if (dcp_content_type() == 0) {
+ if (dcp_content_type() == nullptr) {
throw MissingSettingError (_("content type"));
}
if (!i->paths_valid()) {
throw runtime_error (_("some of your content is missing"));
}
- shared_ptr<const DCPContent> dcp = dynamic_pointer_cast<const DCPContent> (i);
+ auto dcp = dynamic_pointer_cast<const DCPContent>(i);
if (dcp && dcp->needs_kdm()) {
throw runtime_error (_("Some of your content needs a KDM"));
}
}
LOG_GENERAL ("J2K bandwidth %1", j2k_bandwidth());
- shared_ptr<TranscodeJob> tj (new TranscodeJob (shared_from_this()));
- tj->set_encoder (shared_ptr<Encoder> (new DCPEncoder (shared_from_this(), tj)));
+ auto tj = make_shared<TranscodeJob>(shared_from_this());
+ tj->set_encoder (make_shared<DCPEncoder>(shared_from_this(), tj));
if (check) {
- shared_ptr<CheckContentChangeJob> cc (new CheckContentChangeJob(shared_from_this(), tj, gui));
+ auto cc = make_shared<CheckContentChangeJob>(shared_from_this(), tj, gui);
JobManager::instance()->add (cc);
} else {
JobManager::instance()->add (tj);
void
Film::send_dcp_to_tms ()
{
- shared_ptr<Job> j (new UploadJob (shared_from_this()));
- JobManager::instance()->add (j);
+ JobManager::instance()->add(make_shared<UploadJob>(shared_from_this()));
}
shared_ptr<xmlpp::Document>
Film::metadata (bool with_content_paths) const
{
- shared_ptr<xmlpp::Document> doc (new xmlpp::Document);
- xmlpp::Element* root = doc->create_root_node ("Metadata");
+ auto doc = make_shared<xmlpp::Document>();
+ auto root = doc->create_root_node ("Metadata");
root->add_child("Version")->add_child_text (raw_convert<string> (current_state_version));
root->add_child("Name")->add_child_text (_name);
root->add_child("ReencodeJ2K")->add_child_text (_reencode_j2k ? "1" : "0");
root->add_child("UserExplicitVideoFrameRate")->add_child_text(_user_explicit_video_frame_rate ? "1" : "0");
for (map<dcp::Marker, DCPTime>::const_iterator i = _markers.begin(); i != _markers.end(); ++i) {
- xmlpp::Element* m = root->add_child("Marker");
+ auto m = root->add_child("Marker");
m->set_attribute("Type", dcp::marker_to_string(i->first));
m->add_child_text(raw_convert<string>(i->second.get()));
}
void
Film::write_metadata (boost::filesystem::path path) const
{
- shared_ptr<xmlpp::Document> doc = metadata ();
- doc->write_to_file_formatted (path.string());
+ metadata()->write_to_file_formatted(path.string());
}
/** Write state to our `metadata' file */
{
DCPOMATIC_ASSERT (directory());
boost::filesystem::create_directories (directory().get());
- shared_ptr<xmlpp::Document> doc = metadata ();
- doc->write_to_file_formatted (file(metadata_file).string ());
+ metadata()->write_to_file_formatted(file(metadata_file).string());
_dirty = false;
}
{
boost::filesystem::create_directories (path.parent_path());
- shared_ptr<xmlpp::Document> doc = metadata (false);
- doc->write_to_file_formatted (path.string ());
+ metadata(false)->write_to_file_formatted(path.string());
}
/** Read state from our metadata file.
throw runtime_error (_("This film was created with a newer version of DCP-o-matic, and it cannot be loaded into this version. Sorry!"));
} else if (_state_version < current_state_version) {
/* This is an older version; save a copy (if we haven't already) */
- boost::filesystem::path const older = path->parent_path() / String::compose("metadata.%1.xml", _state_version);
+ auto const older = path->parent_path() / String::compose("metadata.%1.xml", _state_version);
if (!boost::filesystem::is_regular_file(older)) {
try {
boost::filesystem::copy_file(*path, older);
{
- optional<string> c = f.optional_string_child ("DCPContentType");
+ auto c = f.optional_string_child("DCPContentType");
if (c) {
_dcp_content_type = DCPContentType::from_isdcf_name (c.get ());
}
}
{
- optional<string> c = f.optional_string_child ("Container");
+ auto c = f.optional_string_child("Container");
if (c) {
_container = Ratio::from_id (c.get ());
}
}
if (_audio_processor && !Config::instance()->show_experimental_audio_processors()) {
- list<AudioProcessor const *> ap = AudioProcessor::visible();
+ auto ap = AudioProcessor::visible();
if (find(ap.begin(), ap.end(), _audio_processor) == ap.end()) {
Config::instance()->set_show_experimental_audio_processors(true);
}
_content_versions.push_back (i->content());
}
- optional<string> name_language = f.optional_string_child("NameLanguage");
+ auto name_language = f.optional_string_child("NameLanguage");
if (name_language) {
_name_language = dcp::LanguageTag (*name_language);
}
- optional<string> audio_language = f.optional_string_child("AudioLanguage");
+ auto audio_language = f.optional_string_child("AudioLanguage");
if (audio_language) {
_audio_language = dcp::LanguageTag (*audio_language);
}
- optional<string> release_territory = f.optional_string_child("ReleaseTerritory");
+ auto release_territory = f.optional_string_child("ReleaseTerritory");
if (release_territory) {
_release_territory = dcp::LanguageTag::RegionSubtag (*release_territory);
}
_version_number = f.optional_number_child<int>("VersionNumber").get_value_or(0);
- optional<string> status = f.optional_string_child("Status");
+ auto status = f.optional_string_child("Status");
if (status) {
_status = dcp::string_to_status (*status);
}
_distributor = f.optional_string_child("Distributor").get_value_or("");
_facility = f.optional_string_child("Facility").get_value_or("");
- float value = f.optional_number_child<float>("LuminanceValue").get_value_or(4.5);
- optional<string> unit = f.optional_string_child("LuminanceUnit");
+ auto value = f.optional_number_child<float>("LuminanceValue").get_value_or(4.5);
+ auto unit = f.optional_string_child("LuminanceUnit");
if (unit) {
_luminance = dcp::Luminance (value, dcp::Luminance::string_to_unit(*unit));
}
optional<dcp::LanguageTag> found_language;
for (auto i: f.node_child("Playlist")->node_children("Content")) {
- cxml::ConstNodePtr text = i->optional_node_child("Text");
+ auto text = i->optional_node_child("Text");
if (text && text->optional_string_child("Language") && !found_language) {
try {
found_language = dcp::LanguageTag(text->string_child("Language"));
}
if (_state_version >= 9) {
- optional<string> isdcf_language = f.node_child("ISDCFMetadata")->optional_string_child("SubtitleLanguage");
+ auto isdcf_language = f.node_child("ISDCFMetadata")->optional_string_child("SubtitleLanguage");
if (isdcf_language && !found_language) {
try {
found_language = dcp::LanguageTag(*isdcf_language);
} else {
for (auto i: content ()) {
if (i->audio) {
- list<int> c = i->audio->mapping().mapped_output_channels ();
+ auto c = i->audio->mapping().mapped_output_channels ();
copy (c.begin(), c.end(), back_inserter (mapped));
}
}
{
string d;
- string raw_name = name ();
+ auto raw_name = name ();
/* Split the raw name up into words */
vector<string> words;
string fixed_name;
/* Add each word to fixed_name */
- for (vector<string>::const_iterator i = words.begin(); i != words.end(); ++i) {
- string w = *i;
+ for (auto w: words) {
/* First letter is always capitalised */
w[0] = toupper (w[0]);
/* Count caps in w */
size_t caps = 0;
- for (size_t i = 0; i < w.size(); ++i) {
- if (isupper (w[i])) {
+ for (size_t j = 0; j < w.size(); ++j) {
+ if (isupper (w[j])) {
++caps;
}
}
leave it alone.
*/
if (caps == w.size ()) {
- for (size_t i = 1; i < w.size(); ++i) {
- w[i] = tolower (w[i]);
+ for (size_t j = 1; j < w.size(); ++j) {
+ w[j] = tolower (w[j]);
}
}
- for (size_t i = 0; i < w.size(); ++i) {
- fixed_name += w[i];
+ for (size_t j = 0; j < w.size(); ++j) {
+ fixed_name += w[j];
}
}
d += "-" + raw_convert<string>(isdcf_metadata().content_version);
}
- ISDCFMetadata const dm = isdcf_metadata ();
+ auto const dm = isdcf_metadata ();
if (dm.temp_version) {
d += "-Temp";
/* Interior aspect ratio. The standard says we don't do this for trailers, for some strange reason */
if (dcp_content_type() && dcp_content_type()->libdcp_kind() != dcp::TRAILER) {
- Ratio const * content_ratio = 0;
+ Ratio const* content_ratio = nullptr;
for (auto i: content ()) {
if (i->video) {
/* Here's the first piece of video content */
for now I'm just appending -CCAP if we have any closed captions.
*/
- bool burnt_in = true;
- bool ccap = false;
+ auto burnt_in = true;
+ auto ccap = false;
for (auto i: content()) {
for (auto j: i->text) {
if (j->type() == TEXT_OPEN_SUBTITLE && j->use() && !j->burn()) {
}
if (!_subtitle_languages.empty()) {
- string lang = _subtitle_languages.front().language().get_value_or("en").subtag();
+ auto lang = _subtitle_languages.front().language().get_value_or("en").subtag();
if (burnt_in) {
transform (lang.begin(), lang.end(), lang.begin(), ::tolower);
} else {
/* Count mapped audio channels */
- list<int> mapped = mapped_audio_channels ();
+ auto mapped = mapped_audio_channels ();
- pair<int, int> ch = audio_channel_types (mapped, audio_channels());
+ auto ch = audio_channel_types (mapped, audio_channels());
if (!ch.first && !ch.second) {
d += "_MOS";
} else if (ch.first) {
d += "-3D";
}
- bool vf = false;
+ auto vf = false;
for (auto i: content()) {
- shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
+ auto dc = dynamic_pointer_cast<const DCPContent>(i);
if (!dc) {
continue;
}
vector<CPLSummary> out;
- boost::filesystem::path const dir = directory().get();
- for (boost::filesystem::directory_iterator i = boost::filesystem::directory_iterator(dir); i != boost::filesystem::directory_iterator(); ++i) {
+ auto const dir = directory().get();
+ for (auto const& i: boost::filesystem::directory_iterator(dir)) {
if (
- boost::filesystem::is_directory (*i) &&
- i->path().leaf() != "j2c" && i->path().leaf() != "video" && i->path().leaf() != "info" && i->path().leaf() != "analysis"
+ boost::filesystem::is_directory (i) &&
+ i.path().leaf() != "j2c" && i.path().leaf() != "video" && i.path().leaf() != "info" && i.path().leaf() != "analysis"
) {
try {
- out.push_back (CPLSummary(*i));
+ out.push_back (CPLSummary(i));
} catch (...) {
}
run_ffprobe (content->path(0), file("ffprobe.log"));
}
- shared_ptr<Job> j (new ExamineContentJob (shared_from_this(), content));
+ auto j = make_shared<ExamineContentJob>(shared_from_this(), content);
_job_connections.push_back (
j->Finished.connect (bind (&Film::maybe_add_content, this, weak_ptr<Job>(j), weak_ptr<Content>(content), disable_audio_analysis))
void
Film::maybe_add_content (weak_ptr<Job> j, weak_ptr<Content> c, bool disable_audio_analysis)
{
- shared_ptr<Job> job = j.lock ();
+ auto job = j.lock ();
if (!job || !job->finished_ok ()) {
return;
}
- shared_ptr<Content> content = c.lock ();
+ auto content = c.lock ();
if (!content) {
return;
}
add_content (content);
if (Config::instance()->automatic_audio_analysis() && content->audio && !disable_audio_analysis) {
- shared_ptr<Playlist> playlist (new Playlist);
+ auto playlist = make_shared<Playlist>();
playlist->add (shared_from_this(), content);
boost::signals2::connection c;
JobManager::instance()->analyse_audio (
Film::best_video_frame_rate () const
{
/* Don't default to anything above 30fps (make the user select that explicitly) */
- int best = _playlist->best_video_frame_rate ();
+ auto best = _playlist->best_video_frame_rate ();
if (best > 30) {
best /= 2;
}
bool change_made = false;
for (auto i: content()) {
- shared_ptr<DCPContent> d = dynamic_pointer_cast<DCPContent>(i);
+ auto d = dynamic_pointer_cast<DCPContent>(i);
if (!d) {
continue;
}
dcp::Size
Film::active_area () const
{
- dcp::Size const frame = frame_size ();
+ auto const frame = frame_size ();
dcp::Size active;
for (auto i: content()) {
throw runtime_error (_("Cannot make a KDM as this project is not encrypted."));
}
- shared_ptr<const dcp::CPL> cpl (new dcp::CPL (cpl_file));
- shared_ptr<const dcp::CertificateChain> signer = Config::instance()->signer_chain ();
+ auto cpl = make_shared<dcp::CPL>(cpl_file);
+ auto signer = Config::instance()->signer_chain();
if (!signer->valid ()) {
throw InvalidSignerError ();
}
/* Find keys that have been added to imported, encrypted DCP content */
list<dcp::DecryptedKDMKey> imported_keys;
for (auto i: content()) {
- shared_ptr<DCPContent> d = dynamic_pointer_cast<DCPContent> (i);
+ auto d = dynamic_pointer_cast<DCPContent> (i);
if (d && d->kdm()) {
dcp::DecryptedKDM kdm (d->kdm().get(), Config::instance()->decryption_chain()->key().get());
- list<dcp::DecryptedKDMKey> keys = kdm.keys ();
+ auto keys = kdm.keys ();
copy (keys.begin(), keys.end(), back_inserter (imported_keys));
}
}
boost::filesystem::path test = internal_video_asset_dir() / "test";
boost::filesystem::path test2 = internal_video_asset_dir() / "test2";
can_hard_link = true;
- FILE* f = fopen_boost (test, "w");
+ auto f = fopen_boost (test, "w");
if (f) {
fclose (f);
boost::system::error_code ec;
boost::filesystem::remove (test2);
}
- boost::filesystem::space_info s = boost::filesystem::space (internal_video_asset_dir ());
+ auto s = boost::filesystem::space (internal_video_asset_dir ());
required = double (required_disk_space ()) / 1073741824.0f;
if (!can_hard_link) {
required *= 2;
Film::reels () const
{
list<DCPTimePeriod> p;
- DCPTime const len = length();
+ auto const len = length();
switch (reel_type ()) {
case REELTYPE_SINGLE:
Film::references_dcp_video () const
{
for (auto i: _playlist->content()) {
- shared_ptr<DCPContent> d = dynamic_pointer_cast<DCPContent>(i);
+ auto d = dynamic_pointer_cast<DCPContent>(i);
if (d && d->reference_video()) {
return true;
}
Film::references_dcp_audio () const
{
for (auto i: _playlist->content()) {
- shared_ptr<DCPContent> d = dynamic_pointer_cast<DCPContent>(i);
+ auto d = dynamic_pointer_cast<DCPContent>(i);
if (d && d->reference_audio()) {
return true;
}
for (auto i: content()) {
for (auto j: i->text) {
/* XXX: Empty DCPTextTrack ends up being a magic value here - the "unknown" or "not specified" track */
- DCPTextTrack dtt = j->dcp_track().get_value_or(DCPTextTrack());
+ auto dtt = j->dcp_track().get_value_or(DCPTextTrack());
if (j->type() == TEXT_CLOSED_CAPTION && find(tt.begin(), tt.end(), dtt) == tt.end()) {
tt.push_back (dtt);
}
void
Film::set_subtitle_language (dcp::LanguageTag language)
{
- vector<dcp::LanguageTag> lang;
- lang.push_back (language);
- set_subtitle_languages (lang);
+ set_subtitle_languages ({language});
}
optional<DCPTime>
Film::marker (dcp::Marker type) const
{
- map<dcp::Marker, DCPTime>::const_iterator i = _markers.find (type);
+ auto i = _markers.find (type);
if (i == _markers.end()) {
- return optional<DCPTime>();
+ return {};
}
return i->second;
}
shared_ptr<InfoFileHandle>
Film::info_file_handle (DCPTimePeriod period, bool read) const
{
- return shared_ptr<InfoFileHandle> (new InfoFileHandle(_info_file_mutex, info_file(period), read));
+ return make_shared<InfoFileHandle>(_info_file_mutex, info_file(period), read);
}
InfoFileHandle::InfoFileHandle (boost::mutex& mutex, boost::filesystem::path file, bool read)
throw OpenFileError (file, errno, OpenFileError::READ);
}
} else {
- bool const exists = boost::filesystem::exists (file);
+ auto const exists = boost::filesystem::exists (file);
if (exists) {
_handle = fopen_boost (file, "r+b");
} else {
class InfoFileHandle
{
public:
+ InfoFileHandle (boost::mutex& mutex, boost::filesystem::path file, bool read);
~InfoFileHandle ();
FILE* get () const {
private:
friend class Film;
- InfoFileHandle (boost::mutex& mutex, boost::filesystem::path file, bool read);
-
boost::mutex::scoped_lock _lock;
FILE* _handle;
boost::filesystem::path _file;
using std::exception;
using std::shared_ptr;
using std::weak_ptr;
+using std::make_shared;
using boost::optional;
using dcp::Data;
using namespace dcpomatic;
void
J2KEncoder::call_servers_list_changed (weak_ptr<J2KEncoder> encoder)
{
- shared_ptr<J2KEncoder> e = encoder.lock ();
+ auto e = encoder.lock ();
if (e) {
e->servers_list_changed ();
}
So just mop up anything left in the queue here.
*/
- for (list<shared_ptr<DCPVideo> >::iterator i = _queue.begin(); i != _queue.end(); ++i) {
- LOG_GENERAL (N_("Encode left-over frame %1"), (*i)->index ());
+ for (auto const& i: _queue) {
+ LOG_GENERAL(N_("Encode left-over frame %1"), i->index());
try {
_writer->write (
- shared_ptr<dcp::Data>(new dcp::ArrayData((*i)->encode_locally())),
- (*i)->index(),
- (*i)->eyes()
+ make_shared<dcp::ArrayData>(i->encode_locally()),
+ i->index(),
+ i->eyes()
);
frame_done ();
} catch (std::exception& e) {
*/
rethrow ();
- Frame const position = time.frames_floor(_film->video_frame_rate());
+ auto const position = time.frames_floor(_film->video_frame_rate());
if (_writer->can_fake_write (position)) {
/* We can fake-write this frame */
LOG_DEBUG_ENCODE("Frame @ %1 ENCODE", to_string(time));
/* Queue this new frame for encoding */
LOG_TIMING ("add-frame-to-queue queue=%1", _queue.size ());
- _queue.push_back (shared_ptr<DCPVideo> (
- new DCPVideo (
- pv,
- position,
- _film->video_frame_rate(),
- _film->j2k_bandwidth(),
- _film->resolution()
- )
- ));
+ _queue.push_back (make_shared<DCPVideo>(
+ pv,
+ position,
+ _film->video_frame_rate(),
+ _film->j2k_bandwidth(),
+ _film->resolution()
+ ));
/* The queue might not be empty any more, so notify anything which is
waiting on that.
}
LOG_TIMING ("encoder-wake thread=%1 queue=%2", thread_id(), _queue.size());
- shared_ptr<DCPVideo> vf = _queue.front ();
+ auto vf = _queue.front ();
/* We're about to commit to either encoding this frame or putting it back onto the queue,
so we must not be interrupted until one or other of these things have happened. This
/* We need to encode this input */
if (server) {
try {
- encoded.reset(new dcp::ArrayData(vf->encode_remotely(server.get())));
+ encoded = make_shared<dcp::ArrayData>(vf->encode_remotely(server.get()));
if (remote_backoff > 0) {
LOG_GENERAL ("%1 was lost, but now she is found; removing backoff", server->host_name ());
} else {
try {
LOG_TIMING ("start-local-encode thread=%1 frame=%2", thread_id(), vf->index());
- encoded.reset(new dcp::ArrayData(vf->encode_locally()));
+ encoded = make_shared<dcp::ArrayData>(vf->encode_locally());
LOG_TIMING ("finish-local-encode thread=%1 frame=%2", thread_id(), vf->index());
} catch (std::exception& e) {
/* This is very bad, so don't cope with it, just pass it on */
boost::mutex::scoped_lock lm (_threads_mutex);
terminate_threads ();
- _threads.reset (new boost::thread_group());
+ _threads = make_shared<boost::thread_group>();
/* XXX: could re-use threads */
if (!Config::instance()->only_servers_encode ()) {
for (int i = 0; i < Config::instance()->master_encoding_threads (); ++i) {
#ifdef DCPOMATIC_LINUX
- boost::thread* t = _threads->create_thread(boost::bind(&J2KEncoder::encoder_thread, this, optional<EncodeServerDescription>()));
+ auto t = _threads->create_thread(boost::bind(&J2KEncoder::encoder_thread, this, optional<EncodeServerDescription>()));
pthread_setname_np (t->native_handle(), "encode-worker");
#else
_threads->create_thread(boost::bind(&J2KEncoder::encoder_thread, this, optional<EncodeServerDescription>()));
/*
- Copyright (C) 2012-2020 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
using std::set_terminate;
using std::make_pair;
using std::shared_ptr;
+using std::make_shared;
using boost::thread;
using boost::optional;
using boost::lexical_cast;
LONG WINAPI
exception_handler(struct _EXCEPTION_POINTERS * info)
{
- FILE* f = fopen_boost (backtrace_file, "w");
+ auto f = fopen_boost (backtrace_file, "w");
fprintf (f, "C-style exception %d\n", info->ExceptionRecord->ExceptionCode);
fclose(f);
/* Add our library directory to the libltdl search path so that
xmlsec can find xmlsec1-openssl.
*/
- boost::filesystem::path lib = directory_containing_executable().parent_path();
+ auto lib = directory_containing_executable().parent_path();
lib /= "Frameworks";
setenv ("LTDL_LIBRARY_PATH", lib.c_str (), 1);
#endif
"Hello dolly", dcp::NONE, dcp::Colour(), dcp::Time(), dcp::Time()
);
subs.push_back (StringText(ss, 0));
- render_text (subs, list<shared_ptr<Font> >(), dcp::Size(640, 480), DCPTime(), 24);
+ render_text (subs, list<shared_ptr<Font>>(), dcp::Size(640, 480), DCPTime(), 24);
#endif
Ratio::setup_ratios ();
char* p = buffer.get ();
int i = 0;
while (i < int64_t (files.size()) && to_do > 0) {
- FILE* f = fopen_boost (files[i], "rb");
+ auto f = fopen_boost (files[i], "rb");
if (!f) {
throw OpenFileError (files[i].string(), errno, OpenFileError::READ);
}
p = buffer.get ();
i = files.size() - 1;
while (i >= 0 && to_do > 0) {
- FILE* f = fopen_boost (files[i], "rb");
+ auto f = fopen_boost (files[i], "rb");
if (!f) {
throw OpenFileError (files[i].string(), errno, OpenFileError::READ);
}
return false;
}
- string ext = f.extension().string();
+ auto ext = f.extension().string();
transform (ext.begin(), ext.end(), ext.begin(), ::tolower);
return (
ext == ".tif" || ext == ".tiff" || ext == ".jpg" || ext == ".jpeg" ||
return false;
}
- string ext = f.extension().string();
+ auto ext = f.extension().string();
transform (ext.begin(), ext.end(), ext.begin(), ::tolower);
return (ext == ".wav" || ext == ".mp3" || ext == ".aif" || ext == ".aiff");
}
bool
valid_j2k_file (boost::filesystem::path f)
{
- string ext = f.extension().string();
+ auto ext = f.extension().string();
transform (ext.begin(), ext.end(), ext.begin(), ::tolower);
return (ext == ".j2k" || ext == ".j2c" || ext == ".jp2");
}
void *
wrapped_av_malloc (size_t s)
{
- void* p = av_malloc (s);
+ auto p = av_malloc (s);
if (!p) {
throw bad_alloc ();
}
shared_ptr<AudioBuffers>
remap (shared_ptr<const AudioBuffers> input, int output_channels, AudioMapping map)
{
- shared_ptr<AudioBuffers> mapped (new AudioBuffers (output_channels, input->frames()));
+ auto mapped = make_shared<AudioBuffers>(output_channels, input->frames());
mapped->make_silent ();
int to_do = min (map.input_channels(), input->channels());
{
/* XXX: this is rather inefficient; decoding the image just to get its size */
FFmpegImageProxy proxy (sub.png_image(), VIDEO_RANGE_FULL);
- shared_ptr<Image> image = proxy.image().image;
+ auto image = proxy.image().image;
/* set up rect with height and width */
dcpomatic::Rect<double> rect(0, 0, image->size().width / double(size.width), image->size().height / double(size.height));
dcpomatic_sleep_seconds (5);
- list<shared_ptr<Job> > jobs = JobManager::instance()->get();
+ auto jobs = JobManager::instance()->get();
if (!first && progress) {
for (size_t i = 0; i < jobs.size(); ++i) {
void
copy_in_bits (boost::filesystem::path from, boost::filesystem::path to, boost::function<void (float)> progress)
{
- FILE* f = fopen_boost (from, "rb");
+ auto f = fopen_boost (from, "rb");
if (!f) {
throw OpenFileError (from, errno, OpenFileError::READ);
}
- FILE* t = fopen_boost (to, "wb");
+ auto t = fopen_boost (to, "wb");
if (!t) {
fclose (f);
throw OpenFileError (to, errno, OpenFileError::WRITE);
/* on the order of a second's worth of copying */
boost::uintmax_t const chunk = 20 * 1024 * 1024;
- uint8_t* buffer = static_cast<uint8_t*> (malloc(chunk));
+ auto buffer = static_cast<uint8_t*> (malloc(chunk));
if (!buffer) {
throw std::bad_alloc ();
}
return dcp::DecryptedKDM (kdm, Config::instance()->decryption_chain()->key().get());
} catch (dcp::KDMDecryptionError& e) {
/* Try to flesh out the error a bit */
- string const kdm_subject_name = kdm.recipient_x509_subject_name();
+ auto const kdm_subject_name = kdm.recipient_x509_subject_name();
bool on_chain = false;
- shared_ptr<const dcp::CertificateChain> dc = Config::instance()->decryption_chain();
+ auto dc = Config::instance()->decryption_chain();
for (auto i: dc->root_to_leaf()) {
if (i.subject() == kdm_subject_name) {
on_chain = true;
: wxPanel (parent)
, _set_button (0)
{
- wxSize const s = TimecodeBase::size (parent);
+ auto const s = TimecodeBase::size (parent);
wxTextValidator validator (wxFILTER_INCLUDE_CHAR_LIST);
wxArrayString list;
_sizer = new wxBoxSizer (wxHORIZONTAL);
_editable = new wxPanel (this);
- wxSizer* editable_sizer = new wxBoxSizer (wxHORIZONTAL);
+ auto editable_sizer = new wxBoxSizer (wxHORIZONTAL);
_hours = new wxTextCtrl (_editable, wxID_ANY, wxT(""), wxDefaultPosition, s, 0, validator);
_hours->SetMaxLength (2);
editable_sizer->Add (_hours);
TimecodeBase::size (wxWindow* parent)
{
wxClientDC dc (parent);
- wxSize size = dc.GetTextExtent (wxT ("9999"));
+ auto size = dc.GetTextExtent(wxT("9999"));
size.SetHeight (-1);
return size;
}
/*
- Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
using std::shared_ptr;
using std::weak_ptr;
using std::dynamic_pointer_cast;
+using std::make_shared;
using boost::bind;
using boost::optional;
using namespace dcpomatic;
_main_canvas->SetDoubleBuffered (true);
#endif
- wxSizer* sizer = new wxBoxSizer (wxHORIZONTAL);
+ auto sizer = new wxBoxSizer (wxHORIZONTAL);
sizer->Add (_labels_canvas, 0, wxEXPAND);
_labels_canvas->SetMinSize (wxSize (_labels_view->bbox().width, -1));
sizer->Add (_main_canvas, 1, wxEXPAND);
{
wxPaintDC dc (_labels_canvas);
- wxGraphicsContext* gc = wxGraphicsContext::Create (dc);
+ auto gc = wxGraphicsContext::Create (dc);
if (!gc) {
return;
}
wxPaintDC dc (_main_canvas);
_main_canvas->DoPrepareDC (dc);
- wxGraphicsContext* gc = wxGraphicsContext::Create (dc);
+ auto gc = wxGraphicsContext::Create (dc);
if (!gc) {
return;
}
for (auto i: _views) {
- shared_ptr<TimelineContentView> ic = dynamic_pointer_cast<TimelineContentView> (i);
+ auto ic = dynamic_pointer_cast<TimelineContentView> (i);
/* Find areas of overlap with other content views, so that we can plot them */
- list<dcpomatic::Rect<int> > overlaps;
+ list<dcpomatic::Rect<int>> overlaps;
for (auto j: _views) {
- shared_ptr<TimelineContentView> jc = dynamic_pointer_cast<TimelineContentView> (j);
+ auto jc = dynamic_pointer_cast<TimelineContentView> (j);
/* No overlap with non-content views, views no different tracks, audio views or non-active views */
if (!ic || !jc || i == j || ic->track() != jc->track() || ic->track().get_value_or(2) >= 2 || !ic->active() || !jc->active()) {
continue;
}
- optional<dcpomatic::Rect<int> > r = j->bbox().intersection (i->bbox());
+ auto r = j->bbox().intersection(i->bbox());
if (r) {
overlaps.push_back (r.get ());
}
/* Playhead */
- shared_ptr<FilmViewer> vp = _viewer.lock ();
+ auto vp = _viewer.lock ();
DCPOMATIC_ASSERT (vp);
gc->SetPen (*wxRED_PEN);
- wxGraphicsPath path = gc->CreatePath ();
+ auto path = gc->CreatePath ();
double const ph = vp->position().seconds() * pixels_per_second().get_value_or(0);
path.MoveToPoint (ph, 0);
path.AddLineToPoint (ph, pixels_per_track() * _tracks + 32);
void
Timeline::recreate_views ()
{
- shared_ptr<const Film> film = _film.lock ();
+ auto film = _film.lock ();
if (!film) {
return;
}
for (auto i: film->content ()) {
if (i->video) {
- _views.push_back (shared_ptr<TimelineView> (new TimelineVideoContentView (*this, i)));
+ _views.push_back (make_shared<TimelineVideoContentView>(*this, i));
}
if (i->audio && !i->audio->mapping().mapped_output_channels().empty ()) {
- _views.push_back (shared_ptr<TimelineView> (new TimelineAudioContentView (*this, i)));
+ _views.push_back (make_shared<TimelineAudioContentView>(*this, i));
}
for (auto j: i->text) {
- _views.push_back (shared_ptr<TimelineView> (new TimelineTextContentView (*this, i, j)));
+ _views.push_back (make_shared<TimelineTextContentView>(*this, i, j));
}
if (i->atmos) {
- _views.push_back (shared_ptr<TimelineView>(new TimelineAtmosContentView(*this, i)));
+ _views.push_back (make_shared<TimelineAtmosContentView>(*this, i));
}
}
continue;
}
- shared_ptr<TimelineContentView> cv = dynamic_pointer_cast<TimelineContentView> (i);
+ auto cv = dynamic_pointer_cast<TimelineContentView> (i);
int t = base;
- shared_ptr<Content> content = cv->content();
+ auto content = cv->content();
DCPTimePeriod const content_period (content->position(), content->end(film));
while (true) {
- TimelineViewList::iterator j = views.begin();
+ auto j = views.begin();
while (j != views.end()) {
- shared_ptr<T> test = dynamic_pointer_cast<T> (*j);
+ auto test = dynamic_pointer_cast<T> (*j);
if (!test) {
++j;
continue;
}
- shared_ptr<Content> test_content = test->content();
+ auto test_content = test->content();
if (
test->track() && test->track().get() == t &&
content_period.overlap(DCPTimePeriod(test_content->position(), test_content->end(film)))) {
struct AudioMappingComparator {
bool operator()(shared_ptr<TimelineView> a, shared_ptr<TimelineView> b) {
int la = -1;
- shared_ptr<TimelineAudioContentView> cva = dynamic_pointer_cast<TimelineAudioContentView>(a);
+ auto cva = dynamic_pointer_cast<TimelineAudioContentView>(a);
if (cva) {
- list<int> oc = cva->content()->audio->mapping().mapped_output_channels();
+ auto oc = cva->content()->audio->mapping().mapped_output_channels();
la = *min_element(boost::begin(oc), boost::end(oc));
}
int lb = -1;
- shared_ptr<TimelineAudioContentView> cvb = dynamic_pointer_cast<TimelineAudioContentView>(b);
+ auto cvb = dynamic_pointer_cast<TimelineAudioContentView>(b);
if (cvb) {
- list<int> oc = cvb->content()->audio->mapping().mapped_output_channels();
+ auto oc = cvb->content()->audio->mapping().mapped_output_channels();
lb = *min_element(boost::begin(oc), boost::end(oc));
}
return la < lb;
Audio N
*/
- shared_ptr<const Film> film = _film.lock ();
+ auto film = _film.lock ();
DCPOMATIC_ASSERT (film);
_tracks = 0;
- for (TimelineViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
- shared_ptr<TimelineContentView> c = dynamic_pointer_cast<TimelineContentView> (*i);
+ for (auto i: _views) {
+ auto c = dynamic_pointer_cast<TimelineContentView>(i);
if (c) {
c->unset_track ();
}
bool have_3d = false;
for (auto i: _views) {
- shared_ptr<TimelineVideoContentView> cv = dynamic_pointer_cast<TimelineVideoContentView> (i);
+ auto cv = dynamic_pointer_cast<TimelineVideoContentView>(i);
if (!cv) {
continue;
}
bool have_atmos = false;
for (auto i: _views) {
- shared_ptr<TimelineAtmosContentView> cv = dynamic_pointer_cast<TimelineAtmosContentView>(i);
+ auto cv = dynamic_pointer_cast<TimelineAtmosContentView>(i);
if (cv) {
cv->set_track (_tracks);
have_atmos = true;
DCP channel index.
*/
- TimelineViewList views = _views;
+ auto views = _views;
sort(views.begin(), views.end(), AudioMappingComparator());
int const audio_tracks = place<TimelineAudioContentView> (film, views, _tracks);
void
Timeline::setup_scrollbars ()
{
- shared_ptr<const Film> film = _film.lock ();
+ auto film = _film.lock ();
if (!film || !_pixels_per_second) {
return;
}
Timeline::event_to_view (wxMouseEvent& ev)
{
/* Search backwards through views so that we find the uppermost one first */
- TimelineViewList::reverse_iterator i = _views.rbegin();
+ auto i = _views.rbegin();
int vsx, vsy;
_main_canvas->GetViewStart (&vsx, &vsy);
Position<int> const p (ev.GetX() + vsx * _x_scroll_rate, ev.GetY() + vsy * _y_scroll_rate);
while (i != _views.rend() && !(*i)->bbox().contains (p)) {
- shared_ptr<TimelineContentView> cv = dynamic_pointer_cast<TimelineContentView> (*i);
++i;
}
if (i == _views.rend ()) {
- return shared_ptr<TimelineView> ();
+ return {};
}
return *i;
void
Timeline::left_down_select (wxMouseEvent& ev)
{
- shared_ptr<TimelineView> view = event_to_view (ev);
- shared_ptr<TimelineContentView> content_view = dynamic_pointer_cast<TimelineContentView> (view);
+ auto view = event_to_view (ev);
+ auto content_view = dynamic_pointer_cast<TimelineContentView>(view);
_down_view.reset ();
_down_view_position = content_view->content()->position ();
}
- for (TimelineViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
- shared_ptr<TimelineContentView> cv = dynamic_pointer_cast<TimelineContentView> (*i);
+ for (auto i: _views) {
+ auto cv = dynamic_pointer_cast<TimelineContentView>(i);
if (!cv) {
continue;
}
if (!ev.ShiftDown ()) {
- cv->set_selected (view == *i);
+ cv->set_selected (view == i);
}
}
if (_down_view) {
/* Pre-compute the points that we might snap to */
- for (TimelineViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
- shared_ptr<TimelineContentView> cv = dynamic_pointer_cast<TimelineContentView> (*i);
+ for (auto i: _views) {
+ auto cv = dynamic_pointer_cast<TimelineContentView>(i);
if (!cv || cv == _down_view || cv->content() == _down_view->content()) {
continue;
}
- shared_ptr<Film> film = _film.lock ();
+ auto film = _film.lock ();
DCPOMATIC_ASSERT (film);
_start_snaps.push_back (cv->content()->position());
return;
}
- DCPTime const time_left = DCPTime::from_seconds((top_left.x + vsx) / *_pixels_per_second);
- DCPTime const time_right = DCPTime::from_seconds((bottom_right.x + vsx) / *_pixels_per_second);
+ auto const time_left = DCPTime::from_seconds((top_left.x + vsx) / *_pixels_per_second);
+ auto const time_right = DCPTime::from_seconds((bottom_right.x + vsx) / *_pixels_per_second);
set_pixels_per_second (double(GetSize().GetWidth()) / (time_right.seconds() - time_left.seconds()));
double const tracks_top = double(top_left.y - tracks_y_offset()) / _pixels_per_track;
void
Timeline::right_down_select (wxMouseEvent& ev)
{
- shared_ptr<TimelineView> view = event_to_view (ev);
- shared_ptr<TimelineContentView> cv = dynamic_pointer_cast<TimelineContentView> (view);
+ auto view = event_to_view (ev);
+ auto cv = dynamic_pointer_cast<TimelineContentView> (view);
if (!cv) {
return;
}
void
Timeline::maybe_snap (DCPTime a, DCPTime b, optional<DCPTime>& nearest_distance) const
{
- DCPTime const d = a - b;
+ auto const d = a - b;
if (!nearest_distance || d.abs() < nearest_distance.get().abs()) {
nearest_distance = d;
}
double const pps = _pixels_per_second.get ();
- wxPoint const p = ev.GetPosition();
+ auto const p = ev.GetPosition();
if (!_first_move) {
/* We haven't moved yet; in that case, we must move the mouse some reasonable distance
return;
}
- DCPTime new_position = _down_view_position + DCPTime::from_seconds ((p.x - _down_point.x) / pps);
+ auto new_position = _down_view_position + DCPTime::from_seconds ((p.x - _down_point.x) / pps);
- shared_ptr<Film> film = _film.lock ();
+ auto film = _film.lock ();
DCPOMATIC_ASSERT (film);
if (_snap) {
- DCPTime const new_end = new_position + _down_view->content()->length_after_trim(film);
+ auto const new_end = new_position + _down_view->content()->length_after_trim(film);
/* Signed `distance' to nearest thing (i.e. negative is left on the timeline,
positive is right).
*/
void
Timeline::clear_selection ()
{
- for (TimelineViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
- shared_ptr<TimelineContentView> cv = dynamic_pointer_cast<TimelineContentView> (*i);
+ for (auto i: _views) {
+ auto cv = dynamic_pointer_cast<TimelineContentView>(i);
if (cv) {
cv->set_selected (false);
}
{
TimelineContentViewList sel;
- for (TimelineViewList::const_iterator i = _views.begin(); i != _views.end(); ++i) {
- shared_ptr<TimelineContentView> cv = dynamic_pointer_cast<TimelineContentView> (*i);
+ for (auto i: _views) {
+ auto cv = dynamic_pointer_cast<TimelineContentView>(i);
if (cv && cv->selected()) {
sel.push_back (cv);
}
Timeline::selected_content () const
{
ContentList sel;
- TimelineContentViewList views = selected_views ();
- for (TimelineContentViewList::const_iterator i = views.begin(); i != views.end(); ++i) {
- sel.push_back ((*i)->content ());
+ for (auto i: selected_views()) {
+ sel.push_back(i->content());
}
return sel;
void
Timeline::set_selection (ContentList selection)
{
- for (TimelineViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
- shared_ptr<TimelineContentView> cv = dynamic_pointer_cast<TimelineContentView> (*i);
+ for (auto i: _views) {
+ auto cv = dynamic_pointer_cast<TimelineContentView> (i);
if (cv) {
cv->set_selected (find (selection.begin(), selection.end(), cv->content ()) != selection.end ());
}
void
Timeline::zoom_all ()
{
- shared_ptr<Film> film = _film.lock ();
+ auto film = _film.lock ();
DCPOMATIC_ASSERT (film);
set_pixels_per_second ((_main_canvas->GetSize().GetWidth() - 32) / film->length().seconds());
set_pixels_per_track ((_main_canvas->GetSize().GetHeight() - tracks_y_offset() - _time_axis_view->bbox().height - 32) / _tracks);