/*
- Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2019 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
#include "upload_job.h"
#include "null_log.h"
#include "file_log.h"
+#include "dcpomatic_log.h"
#include "exceptions.h"
#include "examine_content_job.h"
#include "config.h"
#include "screen.h"
#include "audio_content.h"
#include "video_content.h"
-#include "subtitle_content.h"
+#include "text_content.h"
#include "ffmpeg_content.h"
#include "dcp_content.h"
#include "screen_kdm.h"
#include "cinema.h"
+#include "change_signaller.h"
+#include "check_content_change_job.h"
#include <libcxml/cxml.h>
#include <dcp/cpl.h>
#include <dcp/certificate_chain.h>
using boost::is_any_of;
using dcp::raw_convert;
-#define LOG_GENERAL(...) log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
-#define LOG_GENERAL_NC(...) log()->log (__VA_ARGS__, LogEntry::TYPE_GENERAL);
-
string const Film::metadata_file = "metadata.xml";
/* 5 -> 6
* VideoFrameType in VideoContent is a string rather than an integer.
* 35 -> 36
* EffectColour rather than OutlineColour in Subtitle.
+ * 36 -> 37
+ * TextContent can be in a Caption tag, and some of the tag names
+ * have had Subtitle prefixes or suffixes removed.
*/
-int const Film::current_state_version = 36;
+int const Film::current_state_version = 37;
/** Construct a Film object in a given directory.
*
, _reel_type (REELTYPE_SINGLE)
, _reel_length (2000000000)
, _upload_after_make_dcp (Config::instance()->default_upload_after_make_dcp())
+ , _reencode_j2k (false)
+ , _user_explicit_video_frame_rate (false)
, _state_version (current_state_version)
, _dirty (false)
{
set_isdcf_date_today ();
- _playlist_changed_connection = _playlist->Changed.connect (bind (&Film::playlist_changed, this));
+ _playlist_change_connection = _playlist->Change.connect (bind (&Film::playlist_change, this, _1));
_playlist_order_changed_connection = _playlist->OrderChanged.connect (bind (&Film::playlist_order_changed, this));
- _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Film::playlist_content_changed, this, _1, _2, _3));
+ _playlist_content_change_connection = _playlist->ContentChange.connect (bind (&Film::playlist_content_change, this, _1, _2, _3, _4));
if (dir) {
/* Make state.directory a complete path without ..s (where possible)
digester.add (audio_processor()->id ());
}
+ digester.add (audio_channels());
+
p /= digester.get ();
return p;
}
Film::make_dcp ()
{
if (dcp_name().find ("/") != string::npos) {
- throw BadSettingError (_("name"), _("cannot contain slashes"));
+ throw BadSettingError (_("name"), _("Cannot contain slashes"));
}
if (container() == 0) {
}
if (content().empty()) {
- throw runtime_error (_("you must add some content to the DCP before creating it"));
+ throw runtime_error (_("You must add some content to the DCP before creating it"));
}
if (dcp_content_type() == 0) {
}
shared_ptr<const DCPContent> dcp = dynamic_pointer_cast<const DCPContent> (i);
if (dcp && dcp->needs_kdm()) {
- throw runtime_error (_("some of your content needs a KDM"));
+ throw runtime_error (_("Some of your content needs a KDM"));
}
if (dcp && dcp->needs_assets()) {
- throw runtime_error (_("some of your content needs an OV"));
+ throw runtime_error (_("Some of your content needs an OV"));
}
}
shared_ptr<TranscodeJob> tj (new TranscodeJob (shared_from_this()));
tj->set_encoder (shared_ptr<Encoder> (new DCPEncoder (shared_from_this(), tj)));
- JobManager::instance()->add (tj);
+ shared_ptr<CheckContentChangeJob> cc (new CheckContentChangeJob (shared_from_this(), tj));
+ JobManager::instance()->add (cc);
}
/** Start a job to send our DCP to the configured TMS */
root->add_child("ReelType")->add_child_text (raw_convert<string> (static_cast<int> (_reel_type)));
root->add_child("ReelLength")->add_child_text (raw_convert<string> (_reel_length));
root->add_child("UploadAfterMakeDCP")->add_child_text (_upload_after_make_dcp ? "1" : "0");
+ root->add_child("ReencodeJ2K")->add_child_text (_reencode_j2k ? "1" : "0");
+ root->add_child("UserExplicitVideoFrameRate")->add_child_text(_user_explicit_video_frame_rate ? "1" : "0");
_playlist->as_xml (root->add_child ("Playlist"), with_content_paths);
return doc;
}
+void
+Film::write_metadata (boost::filesystem::path path) const
+{
+ shared_ptr<xmlpp::Document> doc = metadata ();
+ doc->write_to_file_formatted (path.string());
+}
+
/** Write state to our `metadata' file */
void
Film::write_metadata () const
_reel_type = static_cast<ReelType> (f.optional_number_child<int>("ReelType").get_value_or (static_cast<int>(REELTYPE_SINGLE)));
_reel_length = f.optional_number_child<int64_t>("ReelLength").get_value_or (2000000000);
_upload_after_make_dcp = f.optional_bool_child("UploadAfterMakeDCP").get_value_or (false);
+ _reencode_j2k = f.optional_bool_child("ReencodeJ2K").get_value_or(false);
+ _user_explicit_video_frame_rate = f.optional_bool_child("UserExplicitVideoFrameRate").get_value_or(false);
list<string> notes;
/* This method is the only one that can return notes (so far) */
d += "_" + dm.audio_language;
if (!dm.subtitle_language.empty()) {
- bool burnt_in = true;
- BOOST_FOREACH (shared_ptr<Content> i, content ()) {
- if (!i->subtitle) {
- continue;
- }
+ /* I'm not clear on the precise details of the convention for CCAP labelling;
+ for now I'm just appending -CCAP if we have any closed captions.
+ */
- if (i->subtitle->use() && !i->subtitle->burn()) {
- burnt_in = false;
+ bool burnt_in = true;
+ bool ccap = false;
+ BOOST_FOREACH (shared_ptr<Content> i, content()) {
+ BOOST_FOREACH (shared_ptr<TextContent> j, i->text) {
+ if (j->type() == TEXT_OPEN_SUBTITLE && j->use() && !j->burn()) {
+ burnt_in = false;
+ } else if (j->type() == TEXT_CLOSED_CAPTION) {
+ ccap = true;
+ }
}
}
}
d += "-" + language;
+ if (ccap) {
+ d += "-CCAP";
+ }
} else {
d += "-XX";
}
/* Count mapped audio channels */
pair<int, int> ch = audio_channel_types (mapped_audio_channels(), audio_channels());
- if (ch.first) {
+ if (!ch.first && !ch.second) {
+ d += "_MOS";
+ } else if (ch.first) {
d += String::compose("_%1%2", ch.first, ch.second);
}
bool vf = false;
BOOST_FOREACH (shared_ptr<Content> i, content ()) {
shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
- if (dc && (dc->reference_video() || dc->reference_audio() || dc->reference_subtitle())) {
+ if (!dc) {
+ continue;
+ }
+
+ bool any_text = false;
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ if (dc->reference_text(static_cast<TextType>(i))) {
+ any_text = true;
+ }
+ }
+ if (dc->reference_video() || dc->reference_audio() || any_text) {
vf = true;
}
}
void
Film::set_name (string n)
{
+ ChangeSignaller<Film> ch (this, NAME);
_name = n;
- signal_changed (NAME);
}
void
Film::set_use_isdcf_name (bool u)
{
+ ChangeSignaller<Film> ch (this, USE_ISDCF_NAME);
_use_isdcf_name = u;
- signal_changed (USE_ISDCF_NAME);
}
void
Film::set_dcp_content_type (DCPContentType const * t)
{
+ ChangeSignaller<Film> ch (this, DCP_CONTENT_TYPE);
_dcp_content_type = t;
- signal_changed (DCP_CONTENT_TYPE);
}
void
Film::set_container (Ratio const * c)
{
+ ChangeSignaller<Film> ch (this, CONTAINER);
_container = c;
- signal_changed (CONTAINER);
}
void
Film::set_resolution (Resolution r)
{
+ ChangeSignaller<Film> ch (this, RESOLUTION);
_resolution = r;
- signal_changed (RESOLUTION);
}
void
Film::set_j2k_bandwidth (int b)
{
+ ChangeSignaller<Film> ch (this, J2K_BANDWIDTH);
_j2k_bandwidth = b;
- signal_changed (J2K_BANDWIDTH);
}
void
Film::set_isdcf_metadata (ISDCFMetadata m)
{
+ ChangeSignaller<Film> ch (this, ISDCF_METADATA);
_isdcf_metadata = m;
- signal_changed (ISDCF_METADATA);
}
+/** @param f New frame rate.
+ * @param user_explicit true if this comes from a direct user instruction, false if it is from
+ * DCP-o-matic being helpful.
+ */
void
-Film::set_video_frame_rate (int f)
+Film::set_video_frame_rate (int f, bool user_explicit)
{
+ ChangeSignaller<Film> ch (this, VIDEO_FRAME_RATE);
_video_frame_rate = f;
- signal_changed (VIDEO_FRAME_RATE);
+ if (user_explicit) {
+ _user_explicit_video_frame_rate = true;
+ }
}
void
Film::set_audio_channels (int c)
{
+ ChangeSignaller<Film> ch (this, AUDIO_CHANNELS);
_audio_channels = c;
- signal_changed (AUDIO_CHANNELS);
}
void
Film::set_three_d (bool t)
{
+ ChangeSignaller<Film> ch (this, THREE_D);
_three_d = t;
- signal_changed (THREE_D);
if (_three_d && _isdcf_metadata.two_d_version_of_three_d) {
+ ChangeSignaller<Film> ch (this, ISDCF_METADATA);
_isdcf_metadata.two_d_version_of_three_d = false;
- signal_changed (ISDCF_METADATA);
}
}
void
Film::set_interop (bool i)
{
+ ChangeSignaller<Film> ch (this, INTEROP);
_interop = i;
- signal_changed (INTEROP);
}
void
Film::set_audio_processor (AudioProcessor const * processor)
{
+ ChangeSignaller<Film> ch1 (this, AUDIO_PROCESSOR);
+ ChangeSignaller<Film> ch2 (this, AUDIO_CHANNELS);
_audio_processor = processor;
- signal_changed (AUDIO_PROCESSOR);
- signal_changed (AUDIO_CHANNELS);
}
void
Film::set_reel_type (ReelType t)
{
+ ChangeSignaller<Film> ch (this, REEL_TYPE);
_reel_type = t;
- signal_changed (REEL_TYPE);
}
/** @param r Desired reel length in bytes */
void
Film::set_reel_length (int64_t r)
{
+ ChangeSignaller<Film> ch (this, REEL_LENGTH);
_reel_length = r;
- signal_changed (REEL_LENGTH);
}
void
Film::set_upload_after_make_dcp (bool u)
{
+ ChangeSignaller<Film> ch (this, UPLOAD_AFTER_MAKE_DCP);
_upload_after_make_dcp = u;
- signal_changed (UPLOAD_AFTER_MAKE_DCP);
}
void
-Film::signal_changed (Property p)
+Film::set_reencode_j2k (bool r)
{
- _dirty = true;
+ ChangeSignaller<Film> ch (this, REENCODE_J2K);
+ _reencode_j2k = r;
+}
- switch (p) {
- case Film::CONTENT:
- set_video_frame_rate (_playlist->best_video_frame_rate ());
- break;
- case Film::VIDEO_FRAME_RATE:
- case Film::SEQUENCE:
- _playlist->maybe_sequence ();
- break;
- default:
- break;
- }
+void
+Film::signal_change (ChangeType type, int p)
+{
+ signal_change (type, static_cast<Property>(p));
+}
+
+void
+Film::signal_change (ChangeType type, Property p)
+{
+ if (type == CHANGE_TYPE_DONE) {
+ _dirty = true;
+
+ if (p == Film::CONTENT) {
+ if (!_user_explicit_video_frame_rate) {
+ set_video_frame_rate (best_video_frame_rate());
+ }
+ }
+
+ emit (boost::bind (boost::ref (Change), type, p));
- emit (boost::bind (boost::ref (Changed), p));
+ if (p == Film::VIDEO_FRAME_RATE || p == Film::SEQUENCE) {
+ /* We want to call Playlist::maybe_sequence but this must happen after the
+ main signal emission (since the butler will see that emission and un-suspend itself).
+ */
+ emit (boost::bind(&Playlist::maybe_sequence, _playlist.get(), shared_from_this()));
+ }
+ } else {
+ Change (type, p);
+ }
}
void
void
Film::set_signed (bool s)
{
+ ChangeSignaller<Film> ch (this, SIGNED);
_signed = s;
- signal_changed (SIGNED);
}
void
Film::set_encrypted (bool e)
{
+ ChangeSignaller<Film> ch (this, ENCRYPTED);
_encrypted = e;
- signal_changed (ENCRYPTED);
}
void
Film::set_key (dcp::Key key)
{
+ ChangeSignaller<Film> ch (this, KEY);
_key = key;
- signal_changed (KEY);
}
ContentList
Film::examine_and_add_content (shared_ptr<Content> content, bool disable_audio_analysis)
{
if (dynamic_pointer_cast<FFmpegContent> (content) && _directory) {
- run_ffprobe (content->path(0), file ("ffprobe.log"), _log);
+ run_ffprobe (content->path(0), file("ffprobe.log"));
}
shared_ptr<Job> j (new ExamineContentJob (shared_from_this(), content));
if (Config::instance()->automatic_audio_analysis() && content->audio && !disable_audio_analysis) {
shared_ptr<Playlist> playlist (new Playlist);
- playlist->add (content);
+ playlist->add (shared_from_this(), content);
boost::signals2::connection c;
JobManager::instance()->analyse_audio (
- shared_from_this (), playlist, c, bind (&Film::audio_analysis_finished, this)
+ shared_from_this(), playlist, false, c, bind (&Film::audio_analysis_finished, this)
);
_audio_analysis_connections.push_back (c);
}
{
/* Add {video,text} content after any existing {video,text} content */
if (c->video) {
- c->set_position (_playlist->video_end ());
- } else if (c->subtitle) {
- c->set_position (_playlist->subtitle_end ());
+ c->set_position (shared_from_this(), _playlist->video_end(shared_from_this()));
+ } else if (!c->text.empty()) {
+ c->set_position (shared_from_this(), _playlist->text_end(shared_from_this()));
}
if (_template_film) {
/* Take settings from the first piece of content of c's type in _template */
BOOST_FOREACH (shared_ptr<Content> i, _template_film->content()) {
- if (typeid(i.get()) == typeid(c.get())) {
- c->take_settings_from (i);
- }
+ c->take_settings_from (i);
}
}
- _playlist->add (c);
+ _playlist->add (shared_from_this(), c);
}
void
void
Film::move_content_earlier (shared_ptr<Content> c)
{
- _playlist->move_earlier (c);
+ _playlist->move_earlier (shared_from_this(), c);
}
void
Film::move_content_later (shared_ptr<Content> c)
{
- _playlist->move_later (c);
+ _playlist->move_later (shared_from_this(), c);
}
/** @return length of the film from time 0 to the last thing on the playlist */
DCPTime
Film::length () const
{
- return _playlist->length().ceil(video_frame_rate());
+ return _playlist->length(shared_from_this()).ceil(video_frame_rate());
}
int
Film::best_video_frame_rate () const
{
- return _playlist->best_video_frame_rate ();
+ /* Don't default to anything above 30fps (make the user select that explicitly) */
+ int best = _playlist->best_video_frame_rate ();
+ if (best > 30) {
+ best /= 2;
+ }
+ return best;
}
FrameRateChange
}
void
-Film::playlist_content_changed (weak_ptr<Content> c, int p, bool frequent)
+Film::playlist_content_change (ChangeType type, weak_ptr<Content> c, int p, bool frequent)
{
- _dirty = true;
-
if (p == ContentProperty::VIDEO_FRAME_RATE) {
- set_video_frame_rate (_playlist->best_video_frame_rate ());
+ signal_change (type, Film::CONTENT);
} else if (p == AudioContentProperty::STREAMS) {
- signal_changed (NAME);
+ signal_change (type, Film::NAME);
}
- emit (boost::bind (boost::ref (ContentChanged), c, p, frequent));
+ if (type == CHANGE_TYPE_DONE) {
+ emit (boost::bind (boost::ref (ContentChange), type, c, p, frequent));
+ } else {
+ ContentChange (type, c, p, frequent);
+ }
}
void
-Film::playlist_changed ()
+Film::playlist_change (ChangeType type)
{
- signal_changed (CONTENT);
- signal_changed (NAME);
+ signal_change (type, CONTENT);
+ signal_change (type, NAME);
}
void
Film::playlist_order_changed ()
{
- signal_changed (CONTENT_ORDER);
+ /* XXX: missing PENDING */
+ signal_change (CHANGE_TYPE_DONE, CONTENT_ORDER);
}
int
Film::audio_frame_rate () const
{
- BOOST_FOREACH (shared_ptr<Content> i, content ()) {
- if (i->audio && i->audio->has_rate_above_48k ()) {
- return 96000;
- }
- }
-
+ /* It seems that nobody makes 96kHz DCPs at the moment, so let's avoid them.
+ See #1436.
+ */
return 48000;
}
void
Film::set_sequence (bool s)
{
+ if (s == _sequence) {
+ return;
+ }
+
+ ChangeSignaller<Film> cc (this, SEQUENCE);
_sequence = s;
_playlist->set_sequence (s);
- signal_changed (SEQUENCE);
}
/** @return Size of the largest possible image in whatever resolution we are using */
}
/** @param recipient KDM recipient certificate.
- * @param trusted_devices Certificates of other trusted devices (can be empty).
+ * @param trusted_devices Certificate thumbprints of other trusted devices (can be empty).
* @param cpl_file CPL filename.
* @param from KDM from time expressed as a local time with an offset from UTC.
* @param until KDM to time expressed as a local time with an offset from UTC.
* @param formulation KDM formulation to use.
+ * @param disable_forensic_marking_picture true to disable forensic marking of picture.
+ * @param disable_forensic_marking_audio if not set, don't disable forensic marking of audio. If set to 0,
+ * disable all forensic marking; if set above 0, disable forensic marking above that channel.
*/
dcp::EncryptedKDM
Film::make_kdm (
dcp::Certificate recipient,
- vector<dcp::Certificate> trusted_devices,
+ vector<string> trusted_devices,
boost::filesystem::path cpl_file,
dcp::LocalTime from,
dcp::LocalTime until,
- dcp::Formulation formulation
+ dcp::Formulation formulation,
+ bool disable_forensic_marking_picture,
+ optional<int> disable_forensic_marking_audio
) const
{
if (!_encrypted) {
return dcp::DecryptedKDM (
cpl->id(), keys, from, until, cpl->content_title_text(), cpl->content_title_text(), dcp::LocalTime().as_string()
- ).encrypt (signer, recipient, trusted_devices, formulation);
+ ).encrypt (signer, recipient, trusted_devices, formulation, disable_forensic_marking_picture, disable_forensic_marking_audio);
}
/** @param screens Screens to make KDMs for.
* @param from KDM from time expressed as a local time in the time zone of the Screen's Cinema.
* @param until KDM to time expressed as a local time in the time zone of the Screen's Cinema.
* @param formulation KDM formulation to use.
+ * @param disable_forensic_marking_picture true to disable forensic marking of picture.
+ * @param disable_forensic_marking_audio if not set, don't disable forensic marking of audio. If set to 0,
+ * disable all forensic marking; if set above 0, disable forensic marking above that channel.
*/
list<ScreenKDM>
Film::make_kdms (
boost::filesystem::path cpl_file,
boost::posix_time::ptime from,
boost::posix_time::ptime until,
- dcp::Formulation formulation
+ dcp::Formulation formulation,
+ bool disable_forensic_marking_picture,
+ optional<int> disable_forensic_marking_audio
) const
{
list<ScreenKDM> kdms;
if (i->recipient) {
dcp::EncryptedKDM const kdm = make_kdm (
i->recipient.get(),
- i->trusted_devices,
+ i->trusted_device_thumbprints(),
cpl_file,
- dcp::LocalTime (from, i->cinema->utc_offset_hour(), i->cinema->utc_offset_minute()),
- dcp::LocalTime (until, i->cinema->utc_offset_hour(), i->cinema->utc_offset_minute()),
- formulation
+ dcp::LocalTime (from, i->cinema ? i->cinema->utc_offset_hour() : 0, i->cinema ? i->cinema->utc_offset_minute() : 0),
+ dcp::LocalTime (until, i->cinema ? i->cinema->utc_offset_hour() : 0, i->cinema ? i->cinema->utc_offset_minute() : 0),
+ formulation,
+ disable_forensic_marking_picture,
+ disable_forensic_marking_audio
);
kdms.push_back (ScreenKDM (i, kdm));
uint64_t
Film::required_disk_space () const
{
- return _playlist->required_disk_space (j2k_bandwidth(), audio_channels(), audio_frame_rate());
+ return _playlist->required_disk_space (shared_from_this(), j2k_bandwidth(), audio_channels(), audio_frame_rate());
}
/** This method checks the disk that the Film is on and tries to decide whether or not
* there will be enough space to make a DCP for it. If so, true is returned; if not,
* false is returned and required and available are filled in with the amount of disk space
- * required and available respectively (in Gb).
+ * required and available respectively (in GB).
*
* Note: the decision made by this method isn't, of course, 100% reliable.
*/
{
set<string> languages;
- ContentList cl = content ();
- BOOST_FOREACH (shared_ptr<Content>& c, cl) {
- if (c->subtitle) {
- languages.insert (c->subtitle->language ());
+ BOOST_FOREACH (shared_ptr<Content> i, content()) {
+ BOOST_FOREACH (shared_ptr<TextContent> j, i->text) {
+ languages.insert (j->language ());
}
}
return all;
}
-/** Change the gains of the supplied AudioMapping to make it a default
- * for this film. The defaults are guessed based on what processor (if any)
- * is in use, the number of input channels and any filename supplied.
- */
-void
-Film::make_audio_mapping_default (AudioMapping& mapping, optional<boost::filesystem::path> filename) const
-{
- static string const regex[] = {
- ".*[\\._-]L[\\._-].*",
- ".*[\\._-]R[\\._-].*",
- ".*[\\._-]C[\\._-].*",
- ".*[\\._-]Lfe[\\._-].*",
- ".*[\\._-]Ls[\\._-].*",
- ".*[\\._-]Rs[\\._-].*"
- };
-
- static int const regexes = sizeof(regex) / sizeof(*regex);
-
- if (audio_processor ()) {
- audio_processor()->make_audio_mapping_default (mapping);
- } else {
- mapping.make_zero ();
- if (mapping.input_channels() == 1) {
- bool guessed = false;
-
- /* See if we can guess where this stream should go */
- if (filename) {
- for (int i = 0; i < regexes; ++i) {
- boost::regex e (regex[i], boost::regex::icase);
- if (boost::regex_match (filename->string(), e) && i < mapping.output_channels()) {
- mapping.set (0, i, 1);
- guessed = true;
- }
- }
- }
-
- if (!guessed) {
- /* If we have no idea, just put it on centre */
- mapping.set (0, static_cast<int> (dcp::CENTRE), 1);
- }
- } else {
- /* 1:1 mapping */
- for (int i = 0; i < min (mapping.input_channels(), mapping.output_channels()); ++i) {
- mapping.set (i, i, 1);
- }
- }
- }
-}
-
/** @return The names of the channels that audio contents' outputs are passed into;
 * this is either the DCP or an AudioProcessor.
*/
void
Film::repeat_content (ContentList c, int n)
{
- _playlist->repeat (c, n);
+ _playlist->repeat (shared_from_this(), c, n);
}
void
shared_ptr<Content> last_video;
BOOST_FOREACH (shared_ptr<Content> c, content ()) {
if (c->video) {
- BOOST_FOREACH (DCPTime t, c->reel_split_points()) {
+ BOOST_FOREACH (DCPTime t, c->reel_split_points(shared_from_this())) {
if (last_split) {
p.push_back (DCPTimePeriod (last_split.get(), t));
}
}
}
- DCPTime video_end = last_video ? last_video->end() : DCPTime(0);
+ DCPTime video_end = last_video ? last_video->end(shared_from_this()) : DCPTime(0);
if (last_split) {
/* Definitely go from the last split to the end of the video content */
p.push_back (DCPTimePeriod (last_split.get(), video_end));
string
Film::content_summary (DCPTimePeriod period) const
{
- return _playlist->content_summary (period);
+ return _playlist->content_summary (shared_from_this(), period);
}
void
_reel_type = _template_film->_reel_type;
_reel_length = _template_film->_reel_length;
_upload_after_make_dcp = _template_film->_upload_after_make_dcp;
+ _isdcf_metadata = _template_film->_isdcf_metadata;
}
pair<double, double>
return false;
}
+
+list<DCPTextTrack>
+Film::closed_caption_tracks () const
+{
+ list<DCPTextTrack> tt;
+ BOOST_FOREACH (shared_ptr<Content> i, content()) {
+ BOOST_FOREACH (shared_ptr<TextContent> j, i->text) {
+ /* XXX: Empty DCPTextTrack ends up being a magic value here */
+ DCPTextTrack dtt = j->dcp_track().get_value_or(DCPTextTrack());
+ if (j->type() == TEXT_CLOSED_CAPTION && find(tt.begin(), tt.end(), dtt) == tt.end()) {
+ tt.push_back (dtt);
+ }
+ }
+ }
+
+ return tt;
+}