#include "upload_job.h"
#include "null_log.h"
#include "file_log.h"
+#include "dcpomatic_log.h"
#include "exceptions.h"
#include "examine_content_job.h"
#include "config.h"
using boost::is_any_of;
using dcp::raw_convert;
-#define LOG_GENERAL(...) log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
-#define LOG_GENERAL_NC(...) log()->log (__VA_ARGS__, LogEntry::TYPE_GENERAL);
-
string const Film::metadata_file = "metadata.xml";
/* 5 -> 6
digester.add (audio_processor()->id ());
}
+ digester.add (audio_channels());
+
p /= digester.get ();
return p;
}
/* We want to call Playlist::maybe_sequence but this must happen after the
main signal emission (since the butler will see that emission and un-suspend itself).
*/
- emit (boost::bind(&Playlist::maybe_sequence, _playlist.get()));
+ emit (boost::bind(&Playlist::maybe_sequence, _playlist.get(), shared_from_this()));
}
} else {
Change (type, p);
Film::examine_and_add_content (shared_ptr<Content> content, bool disable_audio_analysis)
{
if (dynamic_pointer_cast<FFmpegContent> (content) && _directory) {
- run_ffprobe (content->path(0), file ("ffprobe.log"), _log);
+ run_ffprobe (content->path(0), file("ffprobe.log"));
}
shared_ptr<Job> j (new ExamineContentJob (shared_from_this(), content));
if (Config::instance()->automatic_audio_analysis() && content->audio && !disable_audio_analysis) {
shared_ptr<Playlist> playlist (new Playlist);
- playlist->add (content);
+ playlist->add (shared_from_this(), content);
boost::signals2::connection c;
JobManager::instance()->analyse_audio (
shared_from_this(), playlist, false, c, bind (&Film::audio_analysis_finished, this)
{
/* Add {video,subtitle} content after any existing {video,subtitle} content */
if (c->video) {
- c->set_position (_playlist->video_end());
+ c->set_position (shared_from_this(), _playlist->video_end(shared_from_this()));
} else if (!c->text.empty()) {
- c->set_position (_playlist->text_end());
+ c->set_position (shared_from_this(), _playlist->text_end(shared_from_this()));
}
if (_template_film) {
}
}
- _playlist->add (c);
+ _playlist->add (shared_from_this(), c);
}
void
/** Move a piece of content one step earlier in the playlist ordering.
 *  @param c Content to move.
 */
void
Film::move_content_earlier (shared_ptr<Content> c)
{
	/* Playlist methods now take the Film explicitly rather than
	   holding a back-pointer, hence shared_from_this().
	*/
	_playlist->move_earlier (shared_from_this(), c);
}
/** Move a piece of content one step later in the playlist ordering.
 *  @param c Content to move.
 */
void
Film::move_content_later (shared_ptr<Content> c)
{
	/* Playlist methods now take the Film explicitly rather than
	   holding a back-pointer, hence shared_from_this().
	*/
	_playlist->move_later (shared_from_this(), c);
}
/** @return length of the film from time 0 to the last thing on the playlist,
 *  rounded up to a whole video frame (hence the .ceil() on the playlist length).
 */
DCPTime
Film::length () const
{
	return _playlist->length(shared_from_this()).ceil(video_frame_rate());
}
int
/** @return the audio sampling rate (in Hz) that will be used in the DCP.
 *  Always 48kHz; the old behaviour of switching to 96kHz when any content
 *  had a higher rate has been removed (see below).
 */
int
Film::audio_frame_rate () const
{
	/* It seems that nobody makes 96kHz DCPs at the moment, so let's avoid them.
	   See #1436.
	*/
	return 48000;
}
}
/** @param recipient KDM recipient certificate.
- * @param trusted_devices Certificates of other trusted devices (can be empty).
+ * @param trusted_devices Certificate thumbprints of other trusted devices (can be empty).
* @param cpl_file CPL filename.
* @param from KDM from time expressed as a local time with an offset from UTC.
* @param until KDM to time expressed as a local time with an offset from UTC.
dcp::EncryptedKDM
Film::make_kdm (
dcp::Certificate recipient,
- vector<dcp::Certificate> trusted_devices,
+ vector<string> trusted_devices,
boost::filesystem::path cpl_file,
dcp::LocalTime from,
dcp::LocalTime until,
if (i->recipient) {
dcp::EncryptedKDM const kdm = make_kdm (
i->recipient.get(),
- i->trusted_devices,
+ i->trusted_device_thumbprints(),
cpl_file,
- dcp::LocalTime (from, i->cinema->utc_offset_hour(), i->cinema->utc_offset_minute()),
- dcp::LocalTime (until, i->cinema->utc_offset_hour(), i->cinema->utc_offset_minute()),
+ dcp::LocalTime (from, i->cinema ? i->cinema->utc_offset_hour() : 0, i->cinema ? i->cinema->utc_offset_minute() : 0),
+ dcp::LocalTime (until, i->cinema ? i->cinema->utc_offset_hour() : 0, i->cinema ? i->cinema->utc_offset_minute() : 0),
formulation,
disable_forensic_marking_picture,
disable_forensic_marking_audio
/** @return an estimate of the disk space required to make the DCP for this
 *  film (presumably in bytes, given the uint64_t return -- TODO confirm
 *  against Playlist::required_disk_space).
 */
uint64_t
Film::required_disk_space () const
{
	return _playlist->required_disk_space (shared_from_this(), j2k_bandwidth(), audio_channels(), audio_frame_rate());
}
/** This method checks the disk that the Film is on and tries to decide whether or not
* there will be enough space to make a DCP for it. If so, true is returned; if not,
* false is returned and required and available are filled in with the amount of disk space
- * required and available respectively (in Gb).
+ * required and available respectively (in GB).
*
* Note: the decision made by this method isn't, of course, 100% reliable.
*/
return all;
}
-/** Change the gains of the supplied AudioMapping to make it a default
- * for this film. The defaults are guessed based on what processor (if any)
- * is in use, the number of input channels and any filename supplied.
- */
-void
-Film::make_audio_mapping_default (AudioMapping& mapping, optional<boost::filesystem::path> filename) const
-{
- static string const regex[] = {
- ".*[\\._-]L[\\._-].*",
- ".*[\\._-]R[\\._-].*",
- ".*[\\._-]C[\\._-].*",
- ".*[\\._-]Lfe[\\._-].*",
- ".*[\\._-]Ls[\\._-].*",
- ".*[\\._-]Rs[\\._-].*"
- };
-
- static int const regexes = sizeof(regex) / sizeof(*regex);
-
- if (audio_processor ()) {
- audio_processor()->make_audio_mapping_default (mapping);
- } else {
- mapping.make_zero ();
- if (mapping.input_channels() == 1) {
- bool guessed = false;
-
- /* See if we can guess where this stream should go */
- if (filename) {
- for (int i = 0; i < regexes; ++i) {
- boost::regex e (regex[i], boost::regex::icase);
- if (boost::regex_match (filename->string(), e) && i < mapping.output_channels()) {
- mapping.set (0, i, 1);
- guessed = true;
- }
- }
- }
-
- if (!guessed) {
- /* If we have no idea, just put it on centre */
- mapping.set (0, static_cast<int> (dcp::CENTRE), 1);
- }
- } else {
- /* 1:1 mapping */
- for (int i = 0; i < min (mapping.input_channels(), mapping.output_channels()); ++i) {
- mapping.set (i, i, 1);
- }
- }
- }
-}
-
/** @return The names of the channels that audio contents' outputs are passed into;
* this is either the DCP or a AudioProcessor.
*/
/** Repeat some content in the playlist; delegates to Playlist::repeat.
 *  @param c Content to repeat.
 *  @param n Number of repeats.
 */
void
Film::repeat_content (ContentList c, int n)
{
	_playlist->repeat (shared_from_this(), c, n);
}
void
shared_ptr<Content> last_video;
BOOST_FOREACH (shared_ptr<Content> c, content ()) {
if (c->video) {
- BOOST_FOREACH (DCPTime t, c->reel_split_points()) {
+ BOOST_FOREACH (DCPTime t, c->reel_split_points(shared_from_this())) {
if (last_split) {
p.push_back (DCPTimePeriod (last_split.get(), t));
}
}
}
- DCPTime video_end = last_video ? last_video->end() : DCPTime(0);
+ DCPTime video_end = last_video ? last_video->end(shared_from_this()) : DCPTime(0);
if (last_split) {
/* Definitely go from the last split to the end of the video content */
p.push_back (DCPTimePeriod (last_split.get(), video_end));
/** @param period Period within the film.
 *  @return Summary of the content within @p period, as produced by
 *  Playlist::content_summary.
 */
string
Film::content_summary (DCPTimePeriod period) const
{
	return _playlist->content_summary (shared_from_this(), period);
}
void