{
shared_ptr<Player> player (new Player (_film, _playlist));
player->set_ignore_video ();
- player->set_ignore_caption ();
+ player->set_ignore_text ();
player->set_fast ();
player->set_play_referenced ();
player->Audio.connect (bind (&AnalyseAudioJob::analyse, this, _1, _2));
audio->take_settings_from (c->audio);
}
- list<shared_ptr<TextContent> >::iterator i = caption.begin ();
- list<shared_ptr<TextContent> >::const_iterator j = c->caption.begin ();
- while (i != caption.end() && j != c->caption.end()) {
+ list<shared_ptr<TextContent> >::iterator i = text.begin ();
+ list<shared_ptr<TextContent> >::const_iterator j = c->text.begin ();
+ while (i != text.end() && j != c->text.end()) {
(*i)->take_settings_from (*j);
++i;
++j;
}
shared_ptr<TextContent>
-Content::only_caption () const
+Content::only_text () const
{
- DCPOMATIC_ASSERT (caption.size() < 2);
- if (caption.empty ()) {
+ DCPOMATIC_ASSERT (text.size() < 2);
+ if (text.empty ()) {
return shared_ptr<TextContent> ();
}
- return caption.front ();
+ return text.front ();
}
shared_ptr<TextContent>
-Content::caption_of_original_type (TextType type) const
+Content::text_of_original_type (TextType type) const
{
- BOOST_FOREACH (shared_ptr<TextContent> i, caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> i, text) {
if (i->original_type() == type) {
return i;
}
/*
- Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
boost::shared_ptr<VideoContent> video;
boost::shared_ptr<AudioContent> audio;
- std::list<boost::shared_ptr<TextContent> > caption;
+ std::list<boost::shared_ptr<TextContent> > text;
- boost::shared_ptr<TextContent> only_caption () const;
- boost::shared_ptr<TextContent> caption_of_original_type (TextType type) const;
+ boost::shared_ptr<TextContent> only_text () const;
+ boost::shared_ptr<TextContent> text_of_original_type (TextType type) const;
void signal_changed (int);
int const DCPContentProperty::NEEDS_KDM = 601;
int const DCPContentProperty::REFERENCE_VIDEO = 602;
int const DCPContentProperty::REFERENCE_AUDIO = 603;
-int const DCPContentProperty::REFERENCE_CAPTION = 604;
+int const DCPContentProperty::REFERENCE_TEXT = 604;
int const DCPContentProperty::NAME = 605;
-int const DCPContentProperty::CAPTIONS = 606;
+int const DCPContentProperty::TEXTS = 606;
DCPContent::DCPContent (shared_ptr<const Film> film, boost::filesystem::path p)
: Content (film)
read_directory (p);
set_default_colour_conversion ();
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- _reference_caption[i] = false;
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ _reference_text[i] = false;
}
}
{
video = VideoContent::from_xml (this, node, version);
audio = AudioContent::from_xml (this, node, version);
- caption = TextContent::from_xml (this, node, version);
+ text = TextContent::from_xml (this, node, version);
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- _reference_caption[i] = false;
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ _reference_text[i] = false;
}
if (video && audio) {
_reference_video = node->optional_bool_child ("ReferenceVideo").get_value_or (false);
_reference_audio = node->optional_bool_child ("ReferenceAudio").get_value_or (false);
if (version >= 37) {
- _reference_caption[CAPTION_OPEN] = node->optional_bool_child("ReferenceOpenCaption").get_value_or(false);
- _reference_caption[CAPTION_CLOSED] = node->optional_bool_child("ReferenceClosedCaption").get_value_or(false);
+ _reference_text[TEXT_OPEN_SUBTITLE] = node->optional_bool_child("ReferenceOpenSubtitle").get_value_or(false);
+ _reference_text[TEXT_CLOSED_CAPTION] = node->optional_bool_child("ReferenceClosedCaption").get_value_or(false);
} else {
- _reference_caption[CAPTION_OPEN] = node->optional_bool_child("ReferenceSubtitle").get_value_or(false);
- _reference_caption[CAPTION_CLOSED] = false;
+ _reference_text[TEXT_OPEN_SUBTITLE] = node->optional_bool_child("ReferenceSubtitle").get_value_or(false);
+ _reference_text[TEXT_CLOSED_CAPTION] = false;
}
if (node->optional_string_child("Standard")) {
string const s = node->optional_string_child("Standard").get();
bool const needed_assets = needs_assets ();
bool const needed_kdm = needs_kdm ();
string const old_name = name ();
- int const old_captions = caption.size ();
+ int const old_texts = text.size ();
if (job) {
job->set_progress_unknown ();
signal_changed (AudioContentProperty::STREAMS);
}
- int captions = 0;
+ int texts = 0;
{
boost::mutex::scoped_lock lm (_mutex);
_name = examiner->name ();
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- if (examiner->has_caption(static_cast<TextType>(i))) {
- caption.push_back (shared_ptr<TextContent>(new TextContent(this, static_cast<TextType>(i))));
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ if (examiner->has_text(static_cast<TextType>(i))) {
+ text.push_back (shared_ptr<TextContent>(new TextContent(this, static_cast<TextType>(i))));
}
}
- captions = caption.size ();
+ texts = text.size ();
_encrypted = examiner->encrypted ();
_needs_assets = examiner->needs_assets ();
_kdm_valid = examiner->kdm_valid ();
_reel_lengths = examiner->reel_lengths ();
}
- if (old_captions != captions) {
- signal_changed (DCPContentProperty::CAPTIONS);
+ if (old_texts != texts) {
+ signal_changed (DCPContentProperty::TEXTS);
}
if (needed_assets != needs_assets ()) {
audio->stream()->mapping().as_xml (node->add_child("AudioMapping"));
}
- BOOST_FOREACH (shared_ptr<TextContent> i, caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> i, text) {
i->as_xml (node);
}
node->add_child("KDMValid")->add_child_text (_kdm_valid ? "1" : "0");
node->add_child("ReferenceVideo")->add_child_text (_reference_video ? "1" : "0");
node->add_child("ReferenceAudio")->add_child_text (_reference_audio ? "1" : "0");
- node->add_child("ReferenceOpenCaption")->add_child_text(_reference_caption[CAPTION_OPEN] ? "1" : "0");
- node->add_child("ReferenceClosedCaption")->add_child_text(_reference_caption[CAPTION_CLOSED] ? "1" : "0");
+ node->add_child("ReferenceOpenSubtitle")->add_child_text(_reference_text[TEXT_OPEN_SUBTITLE] ? "1" : "0");
+ node->add_child("ReferenceClosedCaption")->add_child_text(_reference_text[TEXT_CLOSED_CAPTION] ? "1" : "0");
if (_standard) {
switch (_standard.get ()) {
case dcp::INTEROP:
s += video->identifier() + "_";
}
- BOOST_FOREACH (shared_ptr<TextContent> i, caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> i, text) {
s += i->identifier () + " ";
}
s += string (_reference_video ? "1" : "0");
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- s += string (_reference_caption[i] ? "1" : "0");
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ s += string (_reference_text[i] ? "1" : "0");
}
return s;
}
}
void
-DCPContent::set_reference_caption (TextType type, bool r)
+DCPContent::set_reference_text (TextType type, bool r)
{
{
boost::mutex::scoped_lock lm (_mutex);
- _reference_caption[type] = r;
+ _reference_text[type] = r;
}
- signal_changed (DCPContentProperty::REFERENCE_CAPTION);
+ signal_changed (DCPContentProperty::REFERENCE_TEXT);
}
list<DCPTimePeriod>
}
static
-bool check_caption (shared_ptr<const Content> c)
+bool check_text (shared_ptr<const Content> c)
{
- return !c->caption.empty();
+ return !c->text.empty();
}
bool
-DCPContent::can_reference_caption (TextType type, string& why_not) const
+DCPContent::can_reference_text (TextType type, string& why_not) const
{
shared_ptr<DCPDecoder> decoder;
try {
}
BOOST_FOREACH (shared_ptr<dcp::Reel> i, decoder->reels()) {
- if (type == CAPTION_OPEN && !i->main_subtitle()) {
+ if (type == TEXT_OPEN_SUBTITLE && !i->main_subtitle()) {
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
- why_not = _("it does not have subtitles in all its reels.");
+ why_not = _("it does not have open subtitles in all its reels.");
return false;
}
- if (type == CAPTION_CLOSED && !i->closed_caption()) {
+ if (type == TEXT_CLOSED_CAPTION && !i->closed_caption()) {
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
why_not = _("it does not have closed captions in all its reels.");
return false;
}
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
- return can_reference (bind (&check_caption, _1), _("it overlaps other caption content; remove the other content."), why_not);
+ return can_reference (bind (&check_text, _1), _("it overlaps other text content; remove the other content."), why_not);
}
void
_reference_video = dc->_reference_video;
_reference_audio = dc->_reference_audio;
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- _reference_caption[i] = dc->_reference_caption[i];
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ _reference_text[i] = dc->_reference_text[i];
}
}
static int const NEEDS_ASSETS;
static int const REFERENCE_VIDEO;
static int const REFERENCE_AUDIO;
- static int const REFERENCE_CAPTION;
+ static int const REFERENCE_TEXT;
static int const NAME;
- static int const CAPTIONS;
+ static int const TEXTS;
};
class ContentPart;
bool can_reference_audio (std::string &) const;
- void set_reference_caption (TextType type, bool r);
+ void set_reference_text (TextType type, bool r);
- /** @param type Original type of captions in the DCP.
- * @return true if these captions are to be referenced.
+ /** @param type Original type of texts in the DCP.
+ * @return true if these texts are to be referenced.
*/
- bool reference_caption (TextType type) const {
+ bool reference_text (TextType type) const {
boost::mutex::scoped_lock lm (_mutex);
- return _reference_caption[type];
+ return _reference_text[type];
}
- bool can_reference_caption (TextType type, std::string &) const;
+ bool can_reference_text (TextType type, std::string &) const;
void set_cpl (std::string id);
* rather than by rewrapping.
*/
bool _reference_audio;
- /** true if the captions in this DCP should be included in the output by reference
- * rather than by rewrapping. The types here are the original caption types,
+ /** true if the texts in this DCP should be included in the output by reference
+ * rather than by rewrapping. The types here are the original text types,
* not what they are being used for.
*/
- bool _reference_caption[CAPTION_COUNT];
+ bool _reference_text[TEXT_COUNT];
boost::optional<dcp::Standard> _standard;
bool _three_d;
if (c->audio) {
audio.reset (new AudioDecoder (this, c->audio, log, fast));
}
- BOOST_FOREACH (shared_ptr<TextContent> i, c->caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> i, c->text) {
/* XXX: this time here should be the time of the first subtitle, not 0 */
- caption.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, i, log, ContentTime())));
+ text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, i, log, ContentTime())));
}
list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
/* Frame within the (played part of the) reel that is coming up next */
int64_t const frame = _next.frames_round (vfr);
- /* We must emit captions first as when we emit the video for this frame
- it will expect already to have the captions.
+ /* We must emit texts first, because when we emit the video for this frame
+ it will expect the texts already to be present.
*/
- pass_captions (_next);
+ pass_texts (_next);
if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
}
void
-DCPDecoder::pass_captions (ContentTime next)
+DCPDecoder::pass_texts (ContentTime next)
{
- list<shared_ptr<TextDecoder> >::const_iterator decoder = caption.begin ();
+ list<shared_ptr<TextDecoder> >::const_iterator decoder = text.begin ();
if ((*_reel)->main_subtitle()) {
- pass_captions (
- next, (*_reel)->main_subtitle()->asset(), _dcp_content->reference_caption(CAPTION_OPEN), (*_reel)->main_subtitle()->entry_point(), *decoder
+ pass_texts (
+ next, (*_reel)->main_subtitle()->asset(), _dcp_content->reference_text(TEXT_OPEN_SUBTITLE), (*_reel)->main_subtitle()->entry_point(), *decoder
);
++decoder;
}
if ((*_reel)->closed_caption()) {
- pass_captions (
- next, (*_reel)->closed_caption()->asset(), _dcp_content->reference_caption(CAPTION_CLOSED), (*_reel)->closed_caption()->entry_point(), *decoder
+ pass_texts (
+ next, (*_reel)->closed_caption()->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), (*_reel)->closed_caption()->entry_point(), *decoder
);
++decoder;
}
}
void
-DCPDecoder::pass_captions (ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder)
+DCPDecoder::pass_texts (ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder)
{
double const vfr = _dcp_content->active_video_frame_rate ();
/* Frame within the (played part of the) reel that is coming up next */
next_reel ();
}
- /* Pass captions in the pre-roll */
+ /* Pass texts in the pre-roll */
double const vfr = _dcp_content->active_video_frame_rate ();
for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
- pass_captions (pre);
+ pass_texts (pre);
pre += ContentTime::from_frames (1, vfr);
}
void next_reel ();
void get_readers ();
- void pass_captions (ContentTime next);
- void pass_captions (ContentTime next, boost::shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, boost::shared_ptr<TextDecoder> decoder);
+ void pass_texts (ContentTime next);
+ void pass_texts (ContentTime next, boost::shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, boost::shared_ptr<TextDecoder> decoder);
/** Time of next thing to return from pass relative to the start of _reel */
ContentTime _next;
{
_player_video_connection = _player->Video.connect (bind (&DCPEncoder::video, this, _1, _2));
_player_audio_connection = _player->Audio.connect (bind (&DCPEncoder::audio, this, _1, _2));
- _player_caption_connection = _player->Caption.connect (bind (&DCPEncoder::caption, this, _1, _2, _3));
+ _player_text_connection = _player->Text.connect (bind (&DCPEncoder::text, this, _1, _2, _3));
BOOST_FOREACH (shared_ptr<const Content> c, film->content ()) {
- BOOST_FOREACH (shared_ptr<TextContent> i, c->caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> i, c->text) {
if (i->use() && !i->burn()) {
_non_burnt_subtitles = true;
}
/* We must stop receiving more video data before we die */
_player_video_connection.release ();
_player_audio_connection.release ();
- _player_caption_connection.release ();
+ _player_text_connection.release ();
}
void
}
void
-DCPEncoder::caption (PlayerText data, TextType type, DCPTimePeriod period)
+DCPEncoder::text (PlayerText data, TextType type, DCPTimePeriod period)
{
- if (type == CAPTION_CLOSED || _non_burnt_subtitles) {
+ if (type == TEXT_CLOSED_CAPTION || _non_burnt_subtitles) {
_writer->write (data, type, period);
}
}
void video (boost::shared_ptr<PlayerVideo>, DCPTime);
void audio (boost::shared_ptr<AudioBuffers>, DCPTime);
- void caption (PlayerText, TextType, DCPTimePeriod);
+ void text (PlayerText, TextType, DCPTimePeriod);
boost::shared_ptr<Writer> _writer;
boost::shared_ptr<J2KEncoder> _j2k_encoder;
boost::signals2::scoped_connection _player_video_connection;
boost::signals2::scoped_connection _player_audio_connection;
- boost::signals2::scoped_connection _player_caption_connection;
+ boost::signals2::scoped_connection _player_text_connection;
};
{
shared_ptr<dcp::CPL> cpl;
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- _has_caption[i] = false;
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ _has_text[i] = false;
}
if (content->cpl ()) {
return;
}
- _has_caption[CAPTION_OPEN] = true;
+ _has_text[TEXT_OPEN_SUBTITLE] = true;
}
if (i->closed_caption ()) {
return;
}
- _has_caption[CAPTION_CLOSED] = true;
+ _has_text[TEXT_CLOSED_CAPTION] = true;
}
if (i->main_picture()) {
return _audio_frame_rate.get_value_or (48000);
}
- bool has_caption (TextType type) const {
- return _has_caption[type];
+ bool has_text (TextType type) const {
+ return _has_text[type];
}
bool kdm_valid () const {
bool _has_video;
/** true if this DCP has audio content (but false if it has unresolved references to audio content) */
bool _has_audio;
- bool _has_caption[CAPTION_COUNT];
+ bool _has_text[TEXT_COUNT];
bool _encrypted;
bool _needs_assets;
bool _kdm_valid;
DCPSubtitleContent::DCPSubtitleContent (shared_ptr<const Film> film, boost::filesystem::path path)
: Content (film, path)
{
- caption.push_back (shared_ptr<TextContent> (new TextContent (this, CAPTION_OPEN)));
+ text.push_back (shared_ptr<TextContent> (new TextContent (this, TEXT_OPEN_SUBTITLE)));
}
DCPSubtitleContent::DCPSubtitleContent (shared_ptr<const Film> film, cxml::ConstNodePtr node, int version)
: Content (film, node)
, _length (node->number_child<ContentTime::Type> ("Length"))
{
- caption = TextContent::from_xml (this, node, version);
+ text = TextContent::from_xml (this, node, version);
}
void
boost::mutex::scoped_lock lm (_mutex);
/* Default to turning these subtitles on */
- only_caption()->set_use (true);
+ only_text()->set_use (true);
if (iop) {
- only_caption()->set_language (iop->language ());
+ only_text()->set_language (iop->language ());
} else if (smpte) {
- only_caption()->set_language (smpte->language().get_value_or (""));
+ only_text()->set_language (smpte->language().get_value_or (""));
}
_length = ContentTime::from_seconds (sc->latest_subtitle_out().as_seconds ());
BOOST_FOREACH (shared_ptr<dcp::LoadFontNode> i, sc->load_font_nodes ()) {
- only_caption()->add_font (shared_ptr<Font> (new Font (i->id)));
+ only_text()->add_font (shared_ptr<Font> (new Font (i->id)));
}
}
node->add_child("Type")->add_child_text ("DCPSubtitle");
Content::as_xml (node, with_paths);
- if (only_caption()) {
- only_caption()->as_xml (node);
+ if (only_text()) {
+ only_text()->as_xml (node);
}
node->add_child("Length")->add_child_text (raw_convert<string> (_length.get ()));
if (_next != _subtitles.end()) {
first = content_time_period(*_next).from;
}
- caption.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, content->only_caption(), log, first)));
+ text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, content->only_text(), log, first)));
}
void
/* XXX: image subtitles */
}
- only_caption()->emit_plain (p, s);
+ only_text()->emit_plain (p, s);
return false;
}
pos = audio->position();
}
- BOOST_FOREACH (shared_ptr<TextDecoder> i, caption) {
+ BOOST_FOREACH (shared_ptr<TextDecoder> i, text) {
if (!i->ignore() && (!pos || i->position() < *pos)) {
pos = i->position();
}
if (audio) {
audio->seek ();
}
- BOOST_FOREACH (shared_ptr<TextDecoder> i, caption) {
+ BOOST_FOREACH (shared_ptr<TextDecoder> i, text) {
i->seek ();
}
}
shared_ptr<TextDecoder>
-Decoder::only_caption () const
+Decoder::only_text () const
{
- DCPOMATIC_ASSERT (caption.size() < 2);
- if (caption.empty ()) {
+ DCPOMATIC_ASSERT (text.size() < 2);
+ if (text.empty ()) {
return shared_ptr<TextDecoder> ();
}
- return caption.front ();
+ return text.front ();
}
boost::shared_ptr<VideoDecoder> video;
boost::shared_ptr<AudioDecoder> audio;
- std::list<boost::shared_ptr<TextDecoder> > caption;
+ std::list<boost::shared_ptr<TextDecoder> > text;
- boost::shared_ptr<TextDecoder> only_caption () const;
+ boost::shared_ptr<TextDecoder> only_text () const;
/** Do some decoding and perhaps emit video, audio or subtitle data.
* @return true if this decoder will emit no more data unless a seek() happens.
{
video = VideoContent::from_xml (this, node, version);
audio = AudioContent::from_xml (this, node, version);
- caption = TextContent::from_xml (this, node, version);
+ text = TextContent::from_xml (this, node, version);
list<cxml::NodePtr> c = node->node_children ("SubtitleStream");
for (list<cxml::NodePtr>::const_iterator i = c.begin(); i != c.end(); ++i) {
bool need_video = false;
bool need_audio = false;
- bool need_caption = false;
+ bool need_text = false;
if (i != c.end ()) {
need_video = static_cast<bool> ((*i)->video);
need_audio = static_cast<bool> ((*i)->audio);
- need_caption = !(*i)->caption.empty();
+ need_text = !(*i)->text.empty();
}
while (i != c.end ()) {
if (need_audio != static_cast<bool> ((*i)->audio)) {
throw JoinError (_("Content to be joined must all have or not have audio"));
}
- if (need_caption != !(*i)->caption.empty()) {
- throw JoinError (_("Content to be joined must all have or not have captions"));
+ if (need_text != !(*i)->text.empty()) {
+ throw JoinError (_("Content to be joined must all have or not have subtitles or captions"));
}
++i;
}
if (need_audio) {
audio.reset (new AudioContent (this, c));
}
- if (need_caption) {
- caption.push_back (shared_ptr<TextContent> (new TextContent (this, c)));
+ if (need_text) {
+ text.push_back (shared_ptr<TextContent> (new TextContent (this, c)));
}
shared_ptr<FFmpegContent> ref = dynamic_pointer_cast<FFmpegContent> (c[0]);
for (size_t i = 0; i < c.size(); ++i) {
shared_ptr<FFmpegContent> fc = dynamic_pointer_cast<FFmpegContent> (c[i]);
- if (fc->only_caption() && fc->only_caption()->use() && *(fc->_subtitle_stream.get()) != *(ref->_subtitle_stream.get())) {
+ if (fc->only_text() && fc->only_text()->use() && *(fc->_subtitle_stream.get()) != *(ref->_subtitle_stream.get())) {
throw JoinError (_("Content to be joined must use the same subtitle stream."));
}
}
}
}
- if (only_caption()) {
- only_caption()->as_xml (node);
+ if (only_text()) {
+ only_text()->as_xml (node);
}
boost::mutex::scoped_lock lm (_mutex);
_subtitle_streams = examiner->subtitle_streams ();
if (!_subtitle_streams.empty ()) {
- caption.clear ();
- caption.push_back (shared_ptr<TextContent> (new TextContent (this, CAPTION_OPEN)));
+ text.clear ();
+ text.push_back (shared_ptr<TextContent> (new TextContent (this, TEXT_OPEN_SUBTITLE)));
_subtitle_stream = _subtitle_streams.front ();
}
s += "_" + video->identifier();
}
- if (only_caption() && only_caption()->use() && only_caption()->burn()) {
- s += "_" + only_caption()->identifier();
+ if (only_text() && only_text()->use() && only_text()->burn()) {
+ s += "_" + only_text()->identifier();
}
boost::mutex::scoped_lock lm (_mutex);
audio.reset (new AudioDecoder (this, c->audio, log, fast));
}
- if (c->only_caption()) {
+ if (c->only_text()) {
/* XXX: this time here should be the time of the first subtitle, not 0 */
- caption.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_caption(), log, ContentTime())));
+ text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), log, ContentTime())));
}
_next_time.resize (_format_context->nb_streams);
if (_video_stream && si == _video_stream.get() && !video->ignore()) {
decode_video_packet ();
- } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_caption()->ignore()) {
+ } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
decode_subtitle_packet ();
} else {
decode_audio_packet ();
/* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
if (_have_current_subtitle) {
if (_current_subtitle_to) {
- only_caption()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
+ only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
} else {
- only_caption()->emit_stop (subtitle_period(sub).from + _pts_offset);
+ only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
}
_have_current_subtitle = false;
}
}
if (_current_subtitle_to) {
- only_caption()->emit_stop (*_current_subtitle_to);
+ only_text()->emit_stop (*_current_subtitle_to);
}
avsubtitle_free (&sub);
static_cast<double> (rect->h) / target_height
);
- only_caption()->emit_bitmap_start (from, image, scaled_rect);
+ only_text()->emit_bitmap_start (from, image, scaled_rect);
}
void
);
BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
- only_caption()->emit_plain_start (from, i);
+ only_text()->emit_plain_start (from, i);
}
}
break;
}
- _player->set_always_burn_open_captions ();
+ _player->set_always_burn_open_subtitles ();
_player->set_play_referenced ();
int const ch = film->audio_channels ();
bool burnt_in = true;
bool ccap = false;
BOOST_FOREACH (shared_ptr<Content> i, content()) {
- BOOST_FOREACH (shared_ptr<TextContent> j, i->caption) {
- if (j->type() == CAPTION_OPEN && j->use() && !j->burn()) {
+ BOOST_FOREACH (shared_ptr<TextContent> j, i->text) {
+ if (j->type() == TEXT_OPEN_SUBTITLE && j->use() && !j->burn()) {
burnt_in = false;
- } else if (j->type() == CAPTION_CLOSED) {
+ } else if (j->type() == TEXT_CLOSED_CAPTION) {
ccap = true;
}
}
continue;
}
- bool any_caption = false;
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- if (dc->reference_caption(static_cast<TextType>(i))) {
- any_caption = true;
+ bool any_text = false;
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ if (dc->reference_text(static_cast<TextType>(i))) {
+ any_text = true;
}
}
- if (dc->reference_video() || dc->reference_audio() || any_caption) {
+ if (dc->reference_video() || dc->reference_audio() || any_text) {
vf = true;
}
}
/* Add {video,subtitle} content after any existing {video,subtitle} content */
if (c->video) {
c->set_position (_playlist->video_end());
- } else if (!c->caption.empty()) {
- c->set_position (_playlist->caption_end());
+ } else if (!c->text.empty()) {
+ c->set_position (_playlist->text_end());
}
if (_template_film) {
set<string> languages;
BOOST_FOREACH (shared_ptr<Content> i, content()) {
- BOOST_FOREACH (shared_ptr<TextContent> j, i->caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> j, i->text) {
languages.insert (j->language ());
}
}
bool big_font_files = false;
if (film->interop ()) {
BOOST_FOREACH (shared_ptr<Content> i, content) {
- BOOST_FOREACH (shared_ptr<TextContent> j, i->caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> j, i->text) {
BOOST_FOREACH (shared_ptr<Font> k, j->fonts()) {
for (int l = 0; l < FontFiles::VARIANTS; ++l) {
optional<boost::filesystem::path> const p = k->file (static_cast<FontFiles::Variant>(l));
, _playlist (playlist)
, _have_valid_pieces (false)
, _ignore_video (false)
- , _ignore_caption (false)
+ , _ignore_text (false)
, _fast (false)
, _play_referenced (false)
, _audio_merger (_film->audio_frame_rate())
decoder->video->set_ignore (true);
}
- if (_ignore_caption) {
- BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->caption) {
+ if (_ignore_text) {
+ BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
i->set_ignore (true);
}
}
decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
}
- list<shared_ptr<TextDecoder> >::const_iterator j = decoder->caption.begin();
+ list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
- while (j != decoder->caption.end()) {
+ while (j != decoder->text.end()) {
(*j)->BitmapStart.connect (
bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
);
}
list<PositionImage>
-Player::transform_bitmap_captions (list<BitmapText> subs) const
+Player::transform_bitmap_texts (list<BitmapText> subs) const
{
list<PositionImage> all;
list<shared_ptr<Font> > fonts;
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- BOOST_FOREACH (shared_ptr<TextContent> j, i->content->caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
/* XXX: things may go wrong if there are duplicate font IDs
with different font files.
*/
}
void
-Player::set_ignore_caption ()
+Player::set_ignore_text ()
{
- _ignore_caption = true;
+ _ignore_text = true;
}
-/** Set the player to always burn open captions into the image regardless of the content settings */
+/** Set the player to always burn open subtitles into the image regardless of the content settings */
void
-Player::set_always_burn_open_captions ()
+Player::set_always_burn_open_subtitles ()
{
- _always_burn_open_captions = true;
+ _always_burn_open_subtitles = true;
}
/** Sets up the player to be faster, possibly at the expense of quality */
);
}
- if (j->reference_caption (CAPTION_OPEN)) {
+ if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
DCPOMATIC_ASSERT (ra);
ra->set_entry_point (ra->entry_point() + trim_start);
);
}
- if (j->reference_caption (CAPTION_CLOSED)) {
+ if (j->reference_text (TEXT_CLOSED_CAPTION)) {
shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
DCPOMATIC_ASSERT (ra);
ra->set_entry_point (ra->entry_point() + trim_start);
i->done = true;
} else {
- /* Given two choices at the same time, pick the one with captions so we see it before
+ /* Given two choices at the same time, pick the one with texts so we see it before
the video.
*/
- if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->caption.empty())) {
+ if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
earliest_time = t;
earliest_content = i;
}
list<PlayerText>
Player::closed_captions_for_frame (DCPTime time) const
{
- return _active_captions[CAPTION_CLOSED].get (
+ return _active_texts[TEXT_CLOSED_CAPTION].get (
DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate()))
);
}
-/** @return Open captions for the frame at the given time, converted to images */
+/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
-Player::open_captions_for_frame (DCPTime time) const
+Player::open_subtitles_for_frame (DCPTime time) const
{
list<PositionImage> captions;
int const vfr = _film->video_frame_rate();
BOOST_FOREACH (
PlayerText j,
- _active_captions[CAPTION_OPEN].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_captions)
+ _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
) {
/* Image subtitles */
- list<PositionImage> c = transform_bitmap_captions (j.image);
+ list<PositionImage> c = transform_bitmap_texts (j.image);
copy (c.begin(), c.end(), back_inserter (captions));
/* Text subtitles (rendered to an image) */
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
{
shared_ptr<Piece> piece = wp.lock ();
- shared_ptr<const TextContent> caption = wc.lock ();
- if (!piece || !caption) {
+ shared_ptr<const TextContent> text = wc.lock ();
+ if (!piece || !text) {
return;
}
/* Apply content's subtitle offsets */
- subtitle.sub.rectangle.x += caption->x_offset ();
- subtitle.sub.rectangle.y += caption->y_offset ();
+ subtitle.sub.rectangle.x += text->x_offset ();
+ subtitle.sub.rectangle.y += text->y_offset ();
/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
- subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((caption->x_scale() - 1) / 2);
- subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((caption->y_scale() - 1) / 2);
+ subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
+ subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
/* Apply content's subtitle scale */
- subtitle.sub.rectangle.width *= caption->x_scale ();
- subtitle.sub.rectangle.height *= caption->y_scale ();
+ subtitle.sub.rectangle.width *= text->x_scale ();
+ subtitle.sub.rectangle.height *= text->y_scale ();
PlayerText ps;
ps.image.push_back (subtitle.sub);
DCPTime from (content_time_to_dcp (piece, subtitle.from()));
- _active_captions[subtitle.type()].add_from (wc, ps, from);
+ _active_texts[subtitle.type()].add_from (wc, ps, from);
}
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
shared_ptr<Piece> piece = wp.lock ();
- shared_ptr<const TextContent> caption = wc.lock ();
- if (!piece || !caption) {
+ shared_ptr<const TextContent> text = wc.lock ();
+ if (!piece || !text) {
return;
}
}
BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
- s.set_h_position (s.h_position() + caption->x_offset ());
- s.set_v_position (s.v_position() + caption->y_offset ());
- float const xs = caption->x_scale();
- float const ys = caption->y_scale();
+ s.set_h_position (s.h_position() + text->x_offset ());
+ s.set_v_position (s.v_position() + text->y_offset ());
+ float const xs = text->x_scale();
+ float const ys = text->y_scale();
float size = s.size();
/* Adjust size to express the common part of the scaling;
}
s.set_in (dcp::Time(from.seconds(), 1000));
- ps.text.push_back (StringText (s, caption->outline_width()));
- ps.add_fonts (caption->fonts ());
+ ps.text.push_back (StringText (s, text->outline_width()));
+ ps.add_fonts (text->fonts ());
}
- _active_captions[subtitle.type()].add_from (wc, ps, from);
+ _active_texts[subtitle.type()].add_from (wc, ps, from);
}
void
Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
{
- if (!_active_captions[type].have (wc)) {
+ if (!_active_texts[type].have (wc)) {
return;
}
shared_ptr<Piece> piece = wp.lock ();
- shared_ptr<const TextContent> caption = wc.lock ();
- if (!piece || !caption) {
+ shared_ptr<const TextContent> text = wc.lock ();
+ if (!piece || !text) {
return;
}
return;
}
- pair<PlayerText, DCPTime> from = _active_captions[type].add_to (wc, dcp_to);
+ pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
- bool const always = type == CAPTION_OPEN && _always_burn_open_captions;
- if (caption->use() && !always && !caption->burn()) {
- Caption (from.first, type, DCPTimePeriod (from.second, dcp_to));
+ bool const always = type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles;
+ if (text->use() && !always && !text->burn()) {
+ Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
}
}
}
_audio_merger.clear ();
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- _active_captions[i].clear ();
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ _active_texts[i].clear ();
}
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- _active_captions[i].clear_before (time);
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ _active_texts[i].clear_before (time);
}
}
- optional<PositionImage> captions = open_captions_for_frame (time);
- if (captions) {
- pv->set_caption (captions.get ());
+ optional<PositionImage> subtitles = open_subtitles_for_frame (time);
+ if (subtitles) {
+ pv->set_text (subtitles.get ());
}
Video (pv, time);
void set_video_container_size (dcp::Size);
void set_ignore_video ();
- void set_ignore_caption ();
- void set_always_burn_open_captions ();
+ void set_ignore_text ();
+ void set_always_burn_open_subtitles ();
void set_fast ();
void set_play_referenced ();
void set_dcp_decode_reduction (boost::optional<int> reduction);
/** Emitted when a video frame is ready. These emissions happen in the correct order. */
boost::signals2::signal<void (boost::shared_ptr<PlayerVideo>, DCPTime)> Video;
boost::signals2::signal<void (boost::shared_ptr<AudioBuffers>, DCPTime)> Audio;
- /** Emitted when a caption is ready. This signal may be emitted considerably
+ /** Emitted when a text is ready. This signal may be emitted considerably
* after the corresponding Video.
*/
- boost::signals2::signal<void (PlayerText, TextType, DCPTimePeriod)> Caption;
+ boost::signals2::signal<void (PlayerText, TextType, DCPTimePeriod)> Text;
private:
friend class PlayerWrapper;
void film_changed (Film::Property);
void playlist_changed ();
void playlist_content_changed (boost::weak_ptr<Content>, int, bool);
- std::list<PositionImage> transform_bitmap_captions (std::list<BitmapText>) const;
+ std::list<PositionImage> transform_bitmap_texts (std::list<BitmapText>) const;
Frame dcp_to_content_video (boost::shared_ptr<const Piece> piece, DCPTime t) const;
DCPTime content_video_to_dcp (boost::shared_ptr<const Piece> piece, Frame f) const;
Frame dcp_to_resampled_audio (boost::shared_ptr<const Piece> piece, DCPTime t) const;
std::pair<boost::shared_ptr<AudioBuffers>, DCPTime> discard_audio (
boost::shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to
) const;
- boost::optional<PositionImage> open_captions_for_frame (DCPTime time) const;
+ boost::optional<PositionImage> open_subtitles_for_frame (DCPTime time) const;
void emit_video (boost::shared_ptr<PlayerVideo> pv, DCPTime time);
void do_emit_video (boost::shared_ptr<PlayerVideo> pv, DCPTime time);
void emit_audio (boost::shared_ptr<AudioBuffers> data, DCPTime time);
/** true if the player should ignore all video; i.e. never produce any */
bool _ignore_video;
- /** true if the player should ignore all captions; i.e. never produce any */
- bool _ignore_caption;
- bool _always_burn_open_captions;
+ /** true if the player should ignore all text; i.e. never produce any */
+ bool _ignore_text;
+ bool _always_burn_open_subtitles;
/** true if we should try to be fast rather than high quality */
bool _fast;
/** true if we should `play' (i.e output) referenced DCP data (e.g. for preview) */
Empty _black;
Empty _silent;
- ActiveText _active_captions[CAPTION_COUNT];
+ ActiveText _active_texts[TEXT_COUNT];
boost::shared_ptr<AudioProcessor> _audio_processor;
boost::signals2::scoped_connection _film_changed_connection;
image->read_from_socket (socket);
- _caption = PositionImage (image, Position<int> (node->number_child<int> ("SubtitleX"), node->number_child<int> ("SubtitleY")));
+ _text = PositionImage (image, Position<int> (node->number_child<int> ("SubtitleX"), node->number_child<int> ("SubtitleY")));
}
}
void
-PlayerVideo::set_caption (PositionImage image)
+PlayerVideo::set_text (PositionImage image)
{
- _caption = image;
+ _text = image;
}
/** Create an image for this frame.
total_crop, _inter_size, _out_size, yuv_to_rgb, pixel_format (_in->pixel_format()), aligned, fast
);
- if (_caption) {
- out->alpha_blend (Image::ensure_aligned (_caption->image), _caption->position);
+ if (_text) {
+ out->alpha_blend (Image::ensure_aligned (_text->image), _text->position);
}
if (_fade) {
if (_colour_conversion) {
_colour_conversion.get().as_xml (node);
}
- if (_caption) {
- node->add_child ("SubtitleWidth")->add_child_text (raw_convert<string> (_caption->image->size().width));
- node->add_child ("SubtitleHeight")->add_child_text (raw_convert<string> (_caption->image->size().height));
- node->add_child ("SubtitleX")->add_child_text (raw_convert<string> (_caption->position.x));
- node->add_child ("SubtitleY")->add_child_text (raw_convert<string> (_caption->position.y));
+ if (_text) {
+ node->add_child ("SubtitleWidth")->add_child_text (raw_convert<string> (_text->image->size().width));
+ node->add_child ("SubtitleHeight")->add_child_text (raw_convert<string> (_text->image->size().height));
+ node->add_child ("SubtitleX")->add_child_text (raw_convert<string> (_text->position.x));
+ node->add_child ("SubtitleY")->add_child_text (raw_convert<string> (_text->position.y));
}
}
PlayerVideo::send_binary (shared_ptr<Socket> socket) const
{
_in->send_binary (socket);
- if (_caption) {
- _caption->image->write_to_socket (socket);
+ if (_text) {
+ _text->image->write_to_socket (socket);
}
}
return false;
}
- return _crop == Crop () && _out_size == j2k->size() && !_caption && !_fade && !_colour_conversion;
+ return _crop == Crop () && _out_size == j2k->size() && !_text && !_fade && !_colour_conversion;
}
Data
return false;
}
- if ((!_caption && other->_caption) || (_caption && !other->_caption)) {
- /* One has a caption and the other doesn't */
+ if ((!_text && other->_text) || (_text && !other->_text)) {
+ /* One has a text and the other doesn't */
return false;
}
- if (_caption && other->_caption && !_caption->same (other->_caption.get ())) {
- /* They both have captions but they are different */
+ if (_text && other->_text && !_text->same (other->_text.get ())) {
+ /* They both have texts but they are different */
return false;
}
return _in->memory_used();
}
-/** @return Shallow copy of this; _in and _caption are shared between the original and the copy */
+/** @return Shallow copy of this; _in and _text are shared between the original and the copy */
shared_ptr<PlayerVideo>
PlayerVideo::shallow_copy () const
{
boost::shared_ptr<PlayerVideo> shallow_copy () const;
- void set_caption (PositionImage);
+ void set_text (PositionImage);
void prepare ();
boost::shared_ptr<Image> image (dcp::NoteHandler note, boost::function<AVPixelFormat (AVPixelFormat)> pixel_format, bool aligned, bool fast) const;
Eyes _eyes;
Part _part;
boost::optional<ColourConversion> _colour_conversion;
- boost::optional<PositionImage> _caption;
+ boost::optional<PositionImage> _text;
/** Content that we came from. This is so that reset_metadata() can work */
boost::weak_ptr<Content> _content;
/** Video frame that we came from. Again, this is for reset_metadata() */
DCPTime next;
BOOST_FOREACH (shared_ptr<Content> i, _content) {
- if (i->caption.empty() || find (placed.begin(), placed.end(), i) != placed.end()) {
+ if (i->text.empty() || find (placed.begin(), placed.end(), i) != placed.end()) {
continue;
}
BOOST_FOREACH (shared_ptr<const Content> i, _content) {
bool burn = false;
- BOOST_FOREACH (shared_ptr<TextContent> j, i->caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> j, i->text) {
if (j->burn()) {
burn = true;
}
}
DCPTime
-Playlist::caption_end () const
+Playlist::text_end () const
{
DCPTime end;
BOOST_FOREACH (shared_ptr<Content> i, _content) {
- if (!i->caption.empty ()) {
+ if (!i->text.empty ()) {
end = max (end, i->end ());
}
}
int best_video_frame_rate () const;
DCPTime video_end () const;
- DCPTime caption_end () const;
+ DCPTime text_end () const;
FrameRateChange active_frame_rate_change (DCPTime, int dcp_frame_rate) const;
std::string content_summary (DCPTimePeriod period) const;
std::pair<double, double> speed_up_range (int dcp_video_frame_rate) const;
}
reel->add (reel_sound_asset);
- maybe_add_captions<dcp::ReelSubtitleAsset> (_caption_asset[CAPTION_OPEN], reel_picture_asset->duration(), reel, refs, fonts, _film, _period);
- maybe_add_captions<dcp::ReelClosedCaptionAsset> (_caption_asset[CAPTION_CLOSED], reel_picture_asset->duration(), reel, refs, fonts, _film, _period);
+ maybe_add_captions<dcp::ReelSubtitleAsset> (_caption_asset[TEXT_OPEN_SUBTITLE], reel_picture_asset->duration(), reel, refs, fonts, _film, _period);
+ maybe_add_captions<dcp::ReelClosedCaptionAsset> (_caption_asset[TEXT_CLOSED_CAPTION], reel_picture_asset->duration(), reel, refs, fonts, _film, _period);
return reel;
}
boost::shared_ptr<dcp::PictureAssetWriter> _picture_asset_writer;
boost::shared_ptr<dcp::SoundAsset> _sound_asset;
boost::shared_ptr<dcp::SoundAssetWriter> _sound_asset_writer;
- boost::shared_ptr<dcp::SubtitleAsset> _caption_asset[CAPTION_COUNT];
+ boost::shared_ptr<dcp::SubtitleAsset> _caption_asset[TEXT_COUNT];
static int const _info_size;
};
StringTextFileContent::StringTextFileContent (shared_ptr<const Film> film, boost::filesystem::path path)
: Content (film, path)
{
- caption.push_back (shared_ptr<TextContent> (new TextContent (this, CAPTION_OPEN)));
+ text.push_back (shared_ptr<TextContent> (new TextContent (this, TEXT_OPEN_SUBTITLE)));
}
StringTextFileContent::StringTextFileContent (shared_ptr<const Film> film, cxml::ConstNodePtr node, int version)
: Content (film, node)
, _length (node->number_child<ContentTime::Type> ("Length"))
{
- caption = TextContent::from_xml (this, node, version);
+ text = TextContent::from_xml (this, node, version);
}
void
StringTextFile s (shared_from_this ());
/* Default to turning these subtitles on */
- only_caption()->set_use (true);
+ only_text()->set_use (true);
boost::mutex::scoped_lock lm (_mutex);
_length = s.length ();
- only_caption()->add_font (shared_ptr<Font> (new Font (TEXT_FONT_ID)));
+ only_text()->add_font (shared_ptr<Font> (new Font (TEXT_FONT_ID)));
}
string
node->add_child("Type")->add_child_text ("TextSubtitle");
Content::as_xml (node, with_paths);
- if (only_caption()) {
- only_caption()->as_xml (node);
+ if (only_text()) {
+ only_text()->as_xml (node);
}
node->add_child("Length")->add_child_text (raw_convert<string> (_length.get ()));
if (!_subtitles.empty()) {
first = content_time_period(_subtitles[0]).from;
}
- caption.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, content->only_caption(), log, first)));
+ text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, content->only_text(), log, first)));
}
void
}
ContentTimePeriod const p = content_time_period (_subtitles[_next]);
- only_caption()->emit_plain (p, _subtitles[_next]);
+ only_text()->emit_plain (p, _subtitles[_next]);
++_next;
return false;
, _y_scale (1)
, _line_spacing (node->optional_number_child<double>("LineSpacing").get_value_or (1))
, _outline_width (node->optional_number_child<int>("OutlineWidth").get_value_or (2))
- , _type (CAPTION_OPEN)
- , _original_type (CAPTION_OPEN)
+ , _type (TEXT_OPEN_SUBTITLE)
+ , _original_type (TEXT_OPEN_SUBTITLE)
{
if (version >= 37) {
_use = node->bool_child ("Use");
connect_to_fonts ();
- _type = string_to_caption_type (node->optional_string_child("Type").get_value_or("open"));
- _original_type = string_to_caption_type (node->optional_string_child("OriginalType").get_value_or("open"));
+ _type = string_to_text_type (node->optional_string_child("Type").get_value_or("open"));
+ _original_type = string_to_text_type (node->optional_string_child("OriginalType").get_value_or("open"));
}
TextContent::TextContent (Content* parent, vector<shared_ptr<Content> > c)
: ContentPart (parent)
{
/* This constructor is for join which is only supported for content types
- that have a single caption, so we can use only_caption() here.
+ that have a single text, so we can use only_text() here.
*/
- shared_ptr<TextContent> ref = c[0]->only_caption();
+ shared_ptr<TextContent> ref = c[0]->only_text();
DCPOMATIC_ASSERT (ref);
list<shared_ptr<Font> > ref_fonts = ref->fonts ();
for (size_t i = 1; i < c.size(); ++i) {
- if (c[i]->only_caption()->use() != ref->use()) {
+ if (c[i]->only_text()->use() != ref->use()) {
throw JoinError (_("Content to be joined must have the same 'use subtitles' setting."));
}
- if (c[i]->only_caption()->burn() != ref->burn()) {
+ if (c[i]->only_text()->burn() != ref->burn()) {
throw JoinError (_("Content to be joined must have the same 'burn subtitles' setting."));
}
- if (c[i]->only_caption()->x_offset() != ref->x_offset()) {
+ if (c[i]->only_text()->x_offset() != ref->x_offset()) {
throw JoinError (_("Content to be joined must have the same subtitle X offset."));
}
- if (c[i]->only_caption()->y_offset() != ref->y_offset()) {
+ if (c[i]->only_text()->y_offset() != ref->y_offset()) {
throw JoinError (_("Content to be joined must have the same subtitle Y offset."));
}
- if (c[i]->only_caption()->x_scale() != ref->x_scale()) {
+ if (c[i]->only_text()->x_scale() != ref->x_scale()) {
throw JoinError (_("Content to be joined must have the same subtitle X scale."));
}
- if (c[i]->only_caption()->y_scale() != ref->y_scale()) {
+ if (c[i]->only_text()->y_scale() != ref->y_scale()) {
throw JoinError (_("Content to be joined must have the same subtitle Y scale."));
}
- if (c[i]->only_caption()->line_spacing() != ref->line_spacing()) {
+ if (c[i]->only_text()->line_spacing() != ref->line_spacing()) {
throw JoinError (_("Content to be joined must have the same subtitle line spacing."));
}
- if ((c[i]->only_caption()->fade_in() != ref->fade_in()) || (c[i]->only_caption()->fade_out() != ref->fade_out())) {
+ if ((c[i]->only_text()->fade_in() != ref->fade_in()) || (c[i]->only_text()->fade_out() != ref->fade_out())) {
throw JoinError (_("Content to be joined must have the same subtitle fades."));
}
- if ((c[i]->only_caption()->outline_width() != ref->outline_width())) {
+ if ((c[i]->only_text()->outline_width() != ref->outline_width())) {
throw JoinError (_("Content to be joined must have the same outline width."));
}
- list<shared_ptr<Font> > fonts = c[i]->only_caption()->fonts ();
+ list<shared_ptr<Font> > fonts = c[i]->only_text()->fonts ();
if (fonts.size() != ref_fonts.size()) {
throw JoinError (_("Content to be joined must use the same fonts."));
}
{
boost::mutex::scoped_lock lm (_mutex);
- xmlpp::Element* caption = root->add_child ("Caption");
+ xmlpp::Element* text = root->add_child ("Text");
- caption->add_child("Use")->add_child_text (_use ? "1" : "0");
- caption->add_child("Burn")->add_child_text (_burn ? "1" : "0");
- caption->add_child("XOffset")->add_child_text (raw_convert<string> (_x_offset));
- caption->add_child("YOffset")->add_child_text (raw_convert<string> (_y_offset));
- caption->add_child("XScale")->add_child_text (raw_convert<string> (_x_scale));
- caption->add_child("YScale")->add_child_text (raw_convert<string> (_y_scale));
- caption->add_child("Language")->add_child_text (_language);
+ text->add_child("Use")->add_child_text (_use ? "1" : "0");
+ text->add_child("Burn")->add_child_text (_burn ? "1" : "0");
+ text->add_child("XOffset")->add_child_text (raw_convert<string> (_x_offset));
+ text->add_child("YOffset")->add_child_text (raw_convert<string> (_y_offset));
+ text->add_child("XScale")->add_child_text (raw_convert<string> (_x_scale));
+ text->add_child("YScale")->add_child_text (raw_convert<string> (_y_scale));
+ text->add_child("Language")->add_child_text (_language);
if (_colour) {
- caption->add_child("Red")->add_child_text (raw_convert<string> (_colour->r));
- caption->add_child("Green")->add_child_text (raw_convert<string> (_colour->g));
- caption->add_child("Blue")->add_child_text (raw_convert<string> (_colour->b));
+ text->add_child("Red")->add_child_text (raw_convert<string> (_colour->r));
+ text->add_child("Green")->add_child_text (raw_convert<string> (_colour->g));
+ text->add_child("Blue")->add_child_text (raw_convert<string> (_colour->b));
}
if (_effect) {
switch (*_effect) {
case dcp::NONE:
- caption->add_child("Effect")->add_child_text("none");
+ text->add_child("Effect")->add_child_text("none");
break;
case dcp::BORDER:
- caption->add_child("Effect")->add_child_text("outline");
+ text->add_child("Effect")->add_child_text("outline");
break;
case dcp::SHADOW:
- caption->add_child("Effect")->add_child_text("shadow");
+ text->add_child("Effect")->add_child_text("shadow");
break;
}
}
if (_effect_colour) {
- caption->add_child("EffectRed")->add_child_text (raw_convert<string> (_effect_colour->r));
- caption->add_child("EffectGreen")->add_child_text (raw_convert<string> (_effect_colour->g));
- caption->add_child("EffectBlue")->add_child_text (raw_convert<string> (_effect_colour->b));
+ text->add_child("EffectRed")->add_child_text (raw_convert<string> (_effect_colour->r));
+ text->add_child("EffectGreen")->add_child_text (raw_convert<string> (_effect_colour->g));
+ text->add_child("EffectBlue")->add_child_text (raw_convert<string> (_effect_colour->b));
}
- caption->add_child("LineSpacing")->add_child_text (raw_convert<string> (_line_spacing));
+ text->add_child("LineSpacing")->add_child_text (raw_convert<string> (_line_spacing));
if (_fade_in) {
- caption->add_child("FadeIn")->add_child_text (raw_convert<string> (_fade_in->get()));
+ text->add_child("FadeIn")->add_child_text (raw_convert<string> (_fade_in->get()));
}
if (_fade_out) {
- caption->add_child("FadeOut")->add_child_text (raw_convert<string> (_fade_out->get()));
+ text->add_child("FadeOut")->add_child_text (raw_convert<string> (_fade_out->get()));
}
- caption->add_child("OutlineWidth")->add_child_text (raw_convert<string> (_outline_width));
+ text->add_child("OutlineWidth")->add_child_text (raw_convert<string> (_outline_width));
for (list<shared_ptr<Font> >::const_iterator i = _fonts.begin(); i != _fonts.end(); ++i) {
- (*i)->as_xml (caption->add_child("Font"));
+ (*i)->as_xml (text->add_child("Font"));
}
- caption->add_child("Type")->add_child_text (caption_type_to_string(_type));
- caption->add_child("OriginalType")->add_child_text (caption_type_to_string(_original_type));
+ text->add_child("Type")->add_child_text (text_type_to_string(_type));
+ text->add_child("OriginalType")->add_child_text (text_type_to_string(_original_type));
}
string
}
TextType
-string_to_caption_type (string s)
+string_to_text_type (string s)
{
-	if (s == "open") {
-		return CAPTION_OPEN;
+	/* Accept the legacy spelling ("open") as well as the new one so that
+	   metadata written by earlier versions can still be loaded (the old
+	   default in TextContent's constructor is also "open").
+	*/
+	if (s == "open" || s == "open-subtitle") {
+		return TEXT_OPEN_SUBTITLE;
	} else if (s == "closed") {
-		return CAPTION_CLOSED;
+		return TEXT_CLOSED_CAPTION;
+	} else if (s == "closed-caption") {
+		/* text_type_to_string() now writes "closed-caption", so it must
+		   round-trip through this parser without throwing.
+		*/
+		return TEXT_CLOSED_CAPTION;
	} else {
-		throw MetadataError (String::compose ("Unknown caption type %1", s));
+		throw MetadataError (String::compose ("Unknown text type %1", s));
	}
}
string
-caption_type_to_string (TextType t)
+text_type_to_string (TextType t)
{
switch (t) {
- case CAPTION_OPEN:
- return "open";
- case CAPTION_CLOSED:
- return "closed";
+ case TEXT_OPEN_SUBTITLE:
+ return "open-subtitle";
+ case TEXT_CLOSED_CAPTION:
+ return "closed-caption";
default:
DCPOMATIC_ASSERT (false);
}
}
string
-caption_type_to_name (TextType t)
+text_type_to_name (TextType t)
{
switch (t) {
- case CAPTION_OPEN:
- return _("Subtitles");
- case CAPTION_CLOSED:
+ case TEXT_OPEN_SUBTITLE:
+ return _("Open subtitles");
+ case TEXT_CLOSED_CAPTION:
return _("Closed captions");
default:
DCPOMATIC_ASSERT (false);
};
/** Type of captions.
- * For better or worse DoM has uses two names for text that appears
- * with the DCP:
*
- * open captions: text that is shown to everybody on-screen (aka subtitles).
- * closed captions: text that is shown to some viewers using some other method.
+ * The generally accepted definitions seem to be:
+ * - subtitles: text for an audience who doesn't speak the film's language
+ * - captions: text for a hearing-impaired audience
+ * - open: on-screen
+ * - closed: only visible by some audience members
*
- * There is also still use of the word `subtitle' in the code; these are the
- * same as open captions in DoM.
+ * At the moment DoM supports open subtitles and closed captions.
+ *
+ * There is some use of the word `subtitle' in the code which may mean
+ * caption in some contexts.
*/
enum TextType
{
- CAPTION_OPEN,
- CAPTION_CLOSED,
- CAPTION_COUNT
+ TEXT_OPEN_SUBTITLE,
+ TEXT_CLOSED_CAPTION,
+ TEXT_COUNT
};
-extern std::string caption_type_to_string (TextType t);
-extern std::string caption_type_to_name (TextType t);
-extern TextType string_to_caption_type (std::string s);
+extern std::string text_type_to_string (TextType t);
+extern std::string text_type_to_name (TextType t);
+extern TextType string_to_text_type (std::string s);
/** @struct Crop
* @brief A description of the crop of an image or video.
and captions arrive to the Writer in sequence. This is not so for video.
*/
_audio_reel = _reels.begin ();
- for (int i = 0; i < CAPTION_COUNT; ++i) {
+ for (int i = 0; i < TEXT_COUNT; ++i) {
_caption_reel[i] = _reels.begin ();
}
boost::weak_ptr<Job> _job;
std::vector<ReelWriter> _reels;
std::vector<ReelWriter>::iterator _audio_reel;
- std::vector<ReelWriter>::iterator _caption_reel[CAPTION_COUNT];
+ std::vector<ReelWriter>::iterator _caption_reel[TEXT_COUNT];
/** our thread, or 0 */
boost::thread* _thread;
{
DCPOMATIC_ASSERT (_clipboard);
- PasteDialog* d = new PasteDialog (this, static_cast<bool>(_clipboard->video), static_cast<bool>(_clipboard->audio), !_clipboard->caption.empty());
+ PasteDialog* d = new PasteDialog (this, static_cast<bool>(_clipboard->video), static_cast<bool>(_clipboard->audio), !_clipboard->text.empty());
if (d->ShowModal() == wxID_OK) {
BOOST_FOREACH (shared_ptr<Content> i, _film_editor->content_panel()->selected()) {
if (d->video() && i->video) {
i->audio->take_settings_from (_clipboard->audio);
}
- if (d->caption()) {
- list<shared_ptr<TextContent> >::iterator j = i->caption.begin ();
- list<shared_ptr<TextContent> >::const_iterator k = _clipboard->caption.begin ();
- while (j != i->caption.end() && k != _clipboard->caption.end()) {
+ if (d->text()) {
+ list<shared_ptr<TextContent> >::iterator j = i->text.begin ();
+ list<shared_ptr<TextContent> >::const_iterator k = _clipboard->text.begin ();
+ while (j != i->text.end() && k != _clipboard->text.end()) {
(*j)->take_settings_from (*k);
++j;
++k;
void setup_from_dcp (shared_ptr<DCPContent> dcp)
{
- BOOST_FOREACH (shared_ptr<TextContent> i, dcp->caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> i, dcp->text) {
i->set_use (true);
}
_panels.push_back (_video_panel);
_audio_panel = new AudioPanel (this);
_panels.push_back (_audio_panel);
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- _caption_panel[i] = new TextPanel (this, static_cast<TextType>(i));
- _panels.push_back (_caption_panel[i]);
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ _text_panel[i] = new TextPanel (this, static_cast<TextType>(i));
+ _panels.push_back (_text_panel[i]);
}
_timing_panel = new TimingPanel (this, _film_viewer);
_panels.push_back (_timing_panel);
}
ContentList
-ContentPanel::selected_caption ()
+ContentPanel::selected_text ()
{
ContentList sc;
BOOST_FOREACH (shared_ptr<Content> i, selected ()) {
- if (!i->caption.empty()) {
+ if (!i->text.empty()) {
sc.push_back (i);
}
}
bool have_video = false;
bool have_audio = false;
- bool have_caption[CAPTION_COUNT] = { false, false };
+ bool have_text[TEXT_COUNT] = { false, false };
BOOST_FOREACH (shared_ptr<Content> i, selected()) {
if (i->video) {
have_video = true;
if (i->audio) {
have_audio = true;
}
- BOOST_FOREACH (shared_ptr<TextContent> j, i->caption) {
- have_caption[j->original_type()] = true;
+ BOOST_FOREACH (shared_ptr<TextContent> j, i->text) {
+ have_text[j->original_type()] = true;
}
}
bool video_panel = false;
bool audio_panel = false;
- bool caption_panel[CAPTION_COUNT] = { false, false };
+ bool text_panel[TEXT_COUNT] = { false, false };
for (size_t i = 0; i < _notebook->GetPageCount(); ++i) {
if (_notebook->GetPage(i) == _video_panel) {
video_panel = true;
} else if (_notebook->GetPage(i) == _audio_panel) {
audio_panel = true;
}
- for (int j = 0; j < CAPTION_COUNT; ++j) {
- if (_notebook->GetPage(i) == _caption_panel[j]) {
- caption_panel[j] = true;
+ for (int j = 0; j < TEXT_COUNT; ++j) {
+ if (_notebook->GetPage(i) == _text_panel[j]) {
+ text_panel[j] = true;
}
}
}
++off;
}
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- if (have_caption[i] != caption_panel[i]) {
- if (caption_panel[i]) {
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ if (have_text[i] != text_panel[i]) {
+ if (text_panel[i]) {
_notebook->RemovePage (off);
}
- if (have_caption[i]) {
- _notebook->InsertPage (off, _caption_panel[i], _caption_panel[i]->name());
+ if (have_text[i]) {
+ _notebook->InsertPage (off, _text_panel[i], _text_panel[i]->name());
}
}
- if (have_caption[i]) {
+ if (have_text[i]) {
++off;
}
}
_video_panel->Enable (_generally_sensitive && video_selection.size() > 0);
_audio_panel->Enable (_generally_sensitive && audio_selection.size() > 0);
- for (int i = 0; i < CAPTION_COUNT; ++i) {
- _caption_panel[i]->Enable (_generally_sensitive && selection.size() == 1 && !selection.front()->caption.empty());
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ _text_panel[i]->Enable (_generally_sensitive && selection.size() == 1 && !selection.front()->text.empty());
}
_timing_panel->Enable (_generally_sensitive);
}
ContentList selected ();
ContentList selected_video ();
ContentList selected_audio ();
- ContentList selected_caption ();
+ ContentList selected_text ();
FFmpegContentList selected_ffmpeg ();
void add_file_clicked ();
wxButton* _timeline;
ContentSubPanel* _video_panel;
AudioPanel* _audio_panel;
- TextPanel* _caption_panel[CAPTION_COUNT];
+ TextPanel* _text_panel[TEXT_COUNT];
ContentSubPanel* _timing_panel;
std::list<ContentSubPanel *> _panels;
ContentMenu* _menu;
property == VideoContentProperty::SCALE ||
property == DCPContentProperty::REFERENCE_VIDEO ||
property == DCPContentProperty::REFERENCE_AUDIO ||
- property == DCPContentProperty::REFERENCE_CAPTION) {
+ property == DCPContentProperty::REFERENCE_TEXT) {
setup_dcp_name ();
setup_sensitivity ();
}
_closed_captions_dialog->set_player (_player);
- _player->set_always_burn_open_captions ();
+ _player->set_always_burn_open_subtitles ();
_player->set_play_referenced ();
_film->Changed.connect (boost::bind (&FilmViewer::film_changed, this, _1));
#include "paste_dialog.h"
-PasteDialog::PasteDialog (wxWindow* parent, bool video, bool audio, bool caption)
+PasteDialog::PasteDialog (wxWindow* parent, bool video, bool audio, bool text)
: TableDialog (parent, _("Paste"), 1, 0, true)
{
_video = new wxCheckBox (this, wxID_ANY, _("Paste video settings"));
_audio = new wxCheckBox (this, wxID_ANY, _("Paste audio settings"));
_audio->Enable (audio);
add (_audio);
- _caption = new wxCheckBox (this, wxID_ANY, _("Paste caption settings"));
- _caption->Enable (caption);
- add (_caption);
+ _text = new wxCheckBox (this, wxID_ANY, _("Paste subtitle and caption settings"));
+ _text->Enable (text);
+ add (_text);
layout ();
}
}
bool
-PasteDialog::caption () const
+PasteDialog::text () const
{
- return _caption->GetValue ();
+ return _text->GetValue ();
}
class PasteDialog : public TableDialog
{
public:
- PasteDialog (wxWindow* parent, bool video, bool audio, bool caption);
+ PasteDialog (wxWindow* parent, bool video, bool audio, bool text);
bool video () const;
bool audio () const;
- bool caption () const;
+ bool text () const;
private:
wxCheckBox* _video;
wxCheckBox* _audio;
- wxCheckBox* _caption;
+ wxCheckBox* _text;
};
if (dcp->audio && !dcp->audio->streams().empty()) {
checked_set (_dcp[r++], wxString::Format(_("Audio channels: %d"), dcp->audio->streams().front()->channels()));
}
- if (!dcp->caption.empty()) {
+ if (!dcp->text.empty()) {
checked_set (_dcp[r++], _("Subtitles: yes"));
} else {
checked_set (_dcp[r++], _("Subtitles: no"));
using boost::dynamic_pointer_cast;
TextPanel::TextPanel (ContentPanel* p, TextType t)
- : ContentSubPanel (p, std_to_wx(caption_type_to_name(t)))
- , _caption_view (0)
+ : ContentSubPanel (p, std_to_wx(text_type_to_name(t)))
+ , _text_view (0)
, _fonts_dialog (0)
, _original_type (t)
{
_use = new wxCheckBox (this, wxID_ANY, _("Use as"));
use->Add (_use, 0, wxEXPAND | wxRIGHT, DCPOMATIC_SIZER_GAP);
_type = new wxChoice (this, wxID_ANY);
- _type->Append (_("subtitles (open captions)"));
+ _type->Append (_("open subtitles"));
_type->Append (_("closed captions"));
use->Add (_type, 1, wxEXPAND, 0);
grid->Add (use, wxGBPosition (r, 0), wxGBSpan (1, 2));
{
wxBoxSizer* s = new wxBoxSizer (wxHORIZONTAL);
- _caption_view_button = new wxButton (this, wxID_ANY, _("View..."));
- s->Add (_caption_view_button, 1, wxALL, DCPOMATIC_SIZER_GAP);
+ _text_view_button = new wxButton (this, wxID_ANY, _("View..."));
+ s->Add (_text_view_button, 1, wxALL, DCPOMATIC_SIZER_GAP);
_fonts_dialog_button = new wxButton (this, wxID_ANY, _("Fonts..."));
s->Add (_fonts_dialog_button, 1, wxALL, DCPOMATIC_SIZER_GAP);
_appearance_dialog_button = new wxButton (this, wxID_ANY, _("Appearance..."));
_reference->Bind (wxEVT_CHECKBOX, boost::bind (&TextPanel::reference_clicked, this));
_use->Bind (wxEVT_CHECKBOX, boost::bind (&TextPanel::use_toggled, this));
- _type->Bind (wxEVT_CHOICE, boost::bind (&TextPanel::type_changed, this));
+ _type->Bind (wxEVT_CHOICE, boost::bind (&TextPanel::type_changed, this));
_burn->Bind (wxEVT_CHECKBOX, boost::bind (&TextPanel::burn_toggled, this));
_x_offset->Bind (wxEVT_SPINCTRL, boost::bind (&TextPanel::x_offset_changed, this));
_y_offset->Bind (wxEVT_SPINCTRL, boost::bind (&TextPanel::y_offset_changed, this));
_line_spacing->Bind (wxEVT_SPINCTRL, boost::bind (&TextPanel::line_spacing_changed, this));
_language->Bind (wxEVT_TEXT, boost::bind (&TextPanel::language_changed, this));
_stream->Bind (wxEVT_CHOICE, boost::bind (&TextPanel::stream_changed, this));
- _caption_view_button->Bind (wxEVT_BUTTON, boost::bind (&TextPanel::caption_view_clicked, this));
+ _text_view_button->Bind (wxEVT_BUTTON, boost::bind (&TextPanel::text_view_clicked, this));
_fonts_dialog_button->Bind (wxEVT_BUTTON, boost::bind (&TextPanel::fonts_dialog_clicked, this));
_appearance_dialog_button->Bind (wxEVT_BUTTON, boost::bind (&TextPanel::appearance_dialog_clicked, this));
}
TextPanel::film_content_changed (int property)
{
FFmpegContentList fc = _parent->selected_ffmpeg ();
- ContentList sc = _parent->selected_caption ();
+ ContentList sc = _parent->selected_text ();
shared_ptr<FFmpegContent> fcs;
if (fc.size() == 1) {
scs = sc.front ();
}
- shared_ptr<TextContent> caption;
+ shared_ptr<TextContent> text;
if (scs) {
- caption = scs->caption_of_original_type(_original_type);
+ text = scs->text_of_original_type(_original_type);
}
if (property == FFmpegContentProperty::SUBTITLE_STREAMS) {
}
setup_sensitivity ();
} else if (property == TextContentProperty::USE) {
- checked_set (_use, caption ? caption->use() : false);
+ checked_set (_use, text ? text->use() : false);
setup_sensitivity ();
} else if (property == TextContentProperty::TYPE) {
- if (caption) {
- switch (caption->type()) {
- case CAPTION_OPEN:
+ if (text) {
+ switch (text->type()) {
+ case TEXT_OPEN_SUBTITLE:
_type->SetSelection (0);
break;
- case CAPTION_CLOSED:
+ case TEXT_CLOSED_CAPTION:
_type->SetSelection (1);
break;
default:
}
setup_sensitivity ();
} else if (property == TextContentProperty::BURN) {
- checked_set (_burn, caption ? caption->burn() : false);
+ checked_set (_burn, text ? text->burn() : false);
} else if (property == TextContentProperty::X_OFFSET) {
- checked_set (_x_offset, caption ? lrint (caption->x_offset() * 100) : 0);
+ checked_set (_x_offset, text ? lrint (text->x_offset() * 100) : 0);
} else if (property == TextContentProperty::Y_OFFSET) {
- checked_set (_y_offset, caption ? lrint (caption->y_offset() * 100) : 0);
+ checked_set (_y_offset, text ? lrint (text->y_offset() * 100) : 0);
} else if (property == TextContentProperty::X_SCALE) {
- checked_set (_x_scale, caption ? lrint (caption->x_scale() * 100) : 100);
+ checked_set (_x_scale, text ? lrint (text->x_scale() * 100) : 100);
} else if (property == TextContentProperty::Y_SCALE) {
- checked_set (_y_scale, caption ? lrint (caption->y_scale() * 100) : 100);
+ checked_set (_y_scale, text ? lrint (text->y_scale() * 100) : 100);
} else if (property == TextContentProperty::LINE_SPACING) {
- checked_set (_line_spacing, caption ? lrint (caption->line_spacing() * 100) : 100);
+ checked_set (_line_spacing, text ? lrint (text->line_spacing() * 100) : 100);
} else if (property == TextContentProperty::LANGUAGE) {
- checked_set (_language, caption ? caption->language() : "");
- } else if (property == DCPContentProperty::REFERENCE_CAPTION) {
+ checked_set (_language, text ? text->language() : "");
+ } else if (property == DCPContentProperty::REFERENCE_TEXT) {
if (scs) {
shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent> (scs);
- checked_set (_reference, dcp ? dcp->reference_caption(_original_type) : false);
+ checked_set (_reference, dcp ? dcp->reference_text(_original_type) : false);
} else {
checked_set (_reference, false);
}
setup_sensitivity ();
- } else if (property == DCPContentProperty::CAPTIONS) {
+ } else if (property == DCPContentProperty::TEXTS) {
setup_sensitivity ();
}
}
void
TextPanel::use_toggled ()
{
- BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption()) {
- i->caption_of_original_type(_original_type)->set_use (_use->GetValue());
+ BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_text()) {
+ i->text_of_original_type(_original_type)->set_use (_use->GetValue());
}
}
void
TextPanel::type_changed ()
{
- BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption()) {
+ BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_text()) {
switch (_type->GetSelection()) {
case 0:
- i->caption_of_original_type(_original_type)->set_type (CAPTION_OPEN);
+ i->text_of_original_type(_original_type)->set_type (TEXT_OPEN_SUBTITLE);
break;
case 1:
- i->caption_of_original_type(_original_type)->set_type (CAPTION_CLOSED);
+ i->text_of_original_type(_original_type)->set_type (TEXT_CLOSED_CAPTION);
break;
}
}
void
TextPanel::burn_toggled ()
{
- BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption_of_original_type(_original_type)->set_burn (_burn->GetValue());
+ BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_text ()) {
+ i->text_of_original_type(_original_type)->set_burn (_burn->GetValue());
}
}
{
int any_subs = 0;
int ffmpeg_subs = 0;
- ContentList sel = _parent->selected_caption ();
+ ContentList sel = _parent->selected_text ();
BOOST_FOREACH (shared_ptr<Content> i, sel) {
/* These are the content types that could include subtitles */
shared_ptr<const FFmpegContent> fc = boost::dynamic_pointer_cast<const FFmpegContent> (i);
shared_ptr<const DCPContent> dc = boost::dynamic_pointer_cast<const DCPContent> (i);
shared_ptr<const DCPSubtitleContent> dsc = boost::dynamic_pointer_cast<const DCPSubtitleContent> (i);
if (fc) {
- if (!fc->caption.empty()) {
+ if (!fc->text.empty()) {
++ffmpeg_subs;
++any_subs;
}
}
string why_not;
- bool const can_reference = dcp && dcp->can_reference_caption (_original_type, why_not);
+ bool const can_reference = dcp && dcp->can_reference_text (_original_type, why_not);
setup_refer_button (_reference, _reference_note, dcp, can_reference, why_not);
bool const reference = _reference->GetValue ();
_line_spacing->Enable (!reference && use);
_language->Enable (!reference && any_subs > 0 && use);
_stream->Enable (!reference && ffmpeg_subs == 1);
- _caption_view_button->Enable (!reference);
+ _text_view_button->Enable (!reference);
_fonts_dialog_button->Enable (!reference);
_appearance_dialog_button->Enable (!reference && any_subs > 0 && use);
}
void
TextPanel::x_offset_changed ()
{
- BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption_of_original_type(_original_type)->set_x_offset (_x_offset->GetValue() / 100.0);
+ BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_text ()) {
+ i->text_of_original_type(_original_type)->set_x_offset (_x_offset->GetValue() / 100.0);
}
}
void
TextPanel::y_offset_changed ()
{
- BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption_of_original_type(_original_type)->set_y_offset (_y_offset->GetValue() / 100.0);
+ BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_text ()) {
+ i->text_of_original_type(_original_type)->set_y_offset (_y_offset->GetValue() / 100.0);
}
}
void
TextPanel::x_scale_changed ()
{
- ContentList c = _parent->selected_caption ();
+ ContentList c = _parent->selected_text ();
if (c.size() == 1) {
- c.front()->caption_of_original_type(_original_type)->set_x_scale (_x_scale->GetValue() / 100.0);
+ c.front()->text_of_original_type(_original_type)->set_x_scale (_x_scale->GetValue() / 100.0);
}
}
void
TextPanel::y_scale_changed ()
{
- BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption_of_original_type(_original_type)->set_y_scale (_y_scale->GetValue() / 100.0);
+ BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_text ()) {
+ i->text_of_original_type(_original_type)->set_y_scale (_y_scale->GetValue() / 100.0);
}
}
void
TextPanel::line_spacing_changed ()
{
- BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption_of_original_type(_original_type)->set_line_spacing (_line_spacing->GetValue() / 100.0);
+ BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_text ()) {
+ i->text_of_original_type(_original_type)->set_line_spacing (_line_spacing->GetValue() / 100.0);
}
}
void
TextPanel::language_changed ()
{
- BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption_of_original_type(_original_type)->set_language (wx_to_std (_language->GetValue()));
+ BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_text ()) {
+ i->text_of_original_type(_original_type)->set_language (wx_to_std (_language->GetValue()));
}
}
film_content_changed (TextContentProperty::LANGUAGE);
film_content_changed (TextContentProperty::FONTS);
film_content_changed (TextContentProperty::TYPE);
- film_content_changed (DCPContentProperty::REFERENCE_CAPTION);
+ film_content_changed (DCPContentProperty::REFERENCE_TEXT);
}
void
-TextPanel::caption_view_clicked ()
+TextPanel::text_view_clicked ()
{
- if (_caption_view) {
- _caption_view->Destroy ();
- _caption_view = 0;
+ if (_text_view) {
+ _text_view->Destroy ();
+ _text_view = 0;
}
- ContentList c = _parent->selected_caption ();
+ ContentList c = _parent->selected_text ();
DCPOMATIC_ASSERT (c.size() == 1);
shared_ptr<Decoder> decoder = decoder_factory (c.front(), _parent->film()->log(), false);
if (decoder) {
- _caption_view = new TextView (this, _parent->film(), c.front(), c.front()->caption_of_original_type(_original_type), decoder, _parent->film_viewer());
- _caption_view->Show ();
+ _text_view = new TextView (this, _parent->film(), c.front(), c.front()->text_of_original_type(_original_type), decoder, _parent->film_viewer());
+ _text_view->Show ();
}
}
_fonts_dialog = 0;
}
- ContentList c = _parent->selected_caption ();
+ ContentList c = _parent->selected_text ();
DCPOMATIC_ASSERT (c.size() == 1);
- _fonts_dialog = new FontsDialog (this, c.front(), c.front()->caption_of_original_type(_original_type));
+ _fonts_dialog = new FontsDialog (this, c.front(), c.front()->text_of_original_type(_original_type));
_fonts_dialog->Show ();
}
return;
}
- d->set_reference_caption (_original_type, _reference->GetValue ());
+ d->set_reference_text (_original_type, _reference->GetValue ());
}
void
TextPanel::appearance_dialog_clicked ()
{
- ContentList c = _parent->selected_caption ();
+ ContentList c = _parent->selected_text ();
DCPOMATIC_ASSERT (c.size() == 1);
- SubtitleAppearanceDialog* d = new SubtitleAppearanceDialog (this, c.front(), c.front()->caption_of_original_type(_original_type));
+ SubtitleAppearanceDialog* d = new SubtitleAppearanceDialog (this, c.front(), c.front()->text_of_original_type(_original_type));
if (d->ShowModal () == wxID_OK) {
d->apply ();
}
void line_spacing_changed ();
void language_changed ();
void stream_changed ();
- void caption_view_clicked ();
+ void text_view_clicked ();
void fonts_dialog_clicked ();
void reference_clicked ();
void appearance_dialog_clicked ();
wxSpinCtrl* _line_spacing;
wxTextCtrl* _language;
wxChoice* _stream;
- wxButton* _caption_view_button;
- TextView* _caption_view;
+ wxButton* _text_view_button;
+ TextView* _text_view;
wxButton* _fonts_dialog_button;
FontsDialog* _fonts_dialog;
wxButton* _appearance_dialog_button;
using boost::bind;
using boost::dynamic_pointer_cast;
-TextView::TextView (wxWindow* parent, shared_ptr<Film> film, shared_ptr<Content> content, shared_ptr<TextContent> caption, shared_ptr<Decoder> decoder, FilmViewer* viewer)
+TextView::TextView (wxWindow* parent, shared_ptr<Film> film, shared_ptr<Content> content, shared_ptr<TextContent> text, shared_ptr<Decoder> decoder, FilmViewer* viewer)
: wxDialog (parent, wxID_ANY, _("Captions"), wxDefaultPosition, wxDefaultSize, wxDEFAULT_DIALOG_STYLE | wxRESIZE_BORDER)
, _content (content)
, _film_viewer (viewer)
_frc = film->active_frame_rate_change (content->position());
/* Find the decoder that is being used for our TextContent and attach to it */
- BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->caption) {
- if (i->content() == caption) {
+ BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
+ if (i->content() == text) {
i->PlainStart.connect (bind (&TextView::data_start, this, _1));
i->Stop.connect (bind (&TextView::data_stop, this, _1));
}
_views.push_back (shared_ptr<TimelineView> (new TimelineAudioContentView (*this, i)));
}
- BOOST_FOREACH (shared_ptr<TextContent> j, i->caption) {
+ BOOST_FOREACH (shared_ptr<TextContent> j, i->text) {
_views.push_back (shared_ptr<TimelineView> (new TimelineTextContentView (*this, i, j)));
}
/* Tracks are:
Video (mono or left-eye)
Video (right-eye)
- Caption 1
- Caption 2
- Caption N
+ Text 1
+ Text 2
+ Text N
Atmos
Audio 1
Audio 2
_tracks = max (_tracks, 1);
- /* Captions */
+ /* Texts */
- int const caption_tracks = place<TimelineTextContentView> (_views, _tracks);
+ int const text_tracks = place<TimelineTextContentView> (_views, _tracks);
/* Atmos */
_labels_view->set_3d (have_3d);
_labels_view->set_audio_tracks (audio_tracks);
- _labels_view->set_caption_tracks (caption_tracks);
+ _labels_view->set_text_tracks (text_tracks);
_labels_view->set_atmos (have_atmos);
_time_axis_view->set_y (tracks());
: TimelineView (tl)
, _threed (true)
, _audio_tracks (0)
- , _caption_tracks (0)
+ , _text_tracks (0)
, _atmos (true)
{
wxString labels[] = {
_("Video"),
_("Audio"),
- _("Captions"),
+ _("Subtitles/captions"),
_("Atmos")
};
gc->DrawText (_("Video"), 0, (ty + fy) / 2 - 8);
fy = ty;
- if (_caption_tracks) {
- ty = fy + _caption_tracks * h;
- gc->DrawText (_("Captions"), 0, (ty + fy) / 2 - 8);
+ if (_text_tracks) {
+ ty = fy + _text_tracks * h;
+ gc->DrawText (_("Subtitles/captions"), 0, (ty + fy) / 2 - 8);
fy = ty;
}
}
void
-TimelineLabelsView::set_caption_tracks (int n)
+TimelineLabelsView::set_text_tracks (int n)
{
- _caption_tracks = n;
+ _text_tracks = n;
}
void
void set_3d (bool s);
void set_audio_tracks (int n);
- void set_caption_tracks (int n);
+ void set_text_tracks (int n);
void set_atmos (bool s);
private:
int _width;
bool _threed;
int _audio_tracks;
- int _caption_tracks;
+ int _text_tracks;
bool _atmos;
};
++count_ac;
content = i;
}
- if (!i->caption.empty() && i->video_frame_rate()) {
+ if (!i->text.empty() && i->video_frame_rate()) {
++count_sc;
content = i;
}
)
);
- pvf->set_caption (PositionImage (sub_image, Position<int> (50, 60)));
+ pvf->set_text (PositionImage (sub_image, Position<int> (50, 60)));
shared_ptr<DCPVideo> frame (
new DCPVideo (
)
);
- pvf->set_caption (PositionImage (sub_image, Position<int> (50, 60)));
+ pvf->set_text (PositionImage (sub_image, Position<int> (50, 60)));
shared_ptr<DCPVideo> frame (
new DCPVideo (
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- content->only_caption()->set_type (CAPTION_CLOSED);
+ content->only_text()->set_type (TEXT_CLOSED_CAPTION);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
BOOST_CHECK_EQUAL (content->full_length().get(), DCPTime::from_seconds(2).get());
- content->only_caption()->set_use (true);
- content->only_caption()->set_burn (false);
+ content->only_text()->set_use (true);
+ content->only_text()->set_burn (false);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
BOOST_REQUIRE (!wait_for_jobs ());
shared_ptr<DCPDecoder> decoder (new DCPDecoder (content, film->log(), false));
- decoder->only_caption()->PlainStart.connect (bind (store, _1));
+ decoder->only_text()->PlainStart.connect (bind (store, _1));
stored = optional<ContentStringText> ();
while (!decoder->pass() && !stored) {}
BOOST_REQUIRE (!wait_for_jobs ());
shared_ptr<DCPSubtitleDecoder> decoder (new DCPSubtitleDecoder (content, film->log()));
- decoder->only_caption()->PlainStart.connect (bind (store, _1));
+ decoder->only_text()->PlainStart.connect (bind (store, _1));
stored = optional<ContentStringText> ();
while (!decoder->pass ()) {
shared_ptr<DCPSubtitleDecoder> decoder (new DCPSubtitleDecoder (content, film->log()));
stored = optional<ContentStringText> ();
while (!decoder->pass ()) {
- decoder->only_caption()->PlainStart.connect (bind (store, _1));
+ decoder->only_text()->PlainStart.connect (bind (store, _1));
if (stored && stored->from() == ContentTime::from_seconds(0.08)) {
list<dcp::SubtitleString> s = stored->subs;
list<dcp::SubtitleString>::const_iterator i = s.begin ();
film->examine_and_add_content (content2);
BOOST_REQUIRE (!wait_for_jobs ());
- content->only_caption()->add_font (shared_ptr<Font> (new Font ("font1")));
- content2->only_caption()->add_font (shared_ptr<Font> (new Font ("font2")));
+ content->only_text()->add_font (shared_ptr<Font> (new Font ("font1")));
+ content2->only_text()->add_font (shared_ptr<Font> (new Font ("font2")));
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
shared_ptr<StringTextFileContent> s (new StringTextFileContent (film, "test/data/subrip2.srt"));
film->examine_and_add_content (s);
BOOST_REQUIRE (!wait_for_jobs ());
- s->only_caption()->set_colour (dcp::Colour (255, 255, 0));
- s->only_caption()->set_effect (dcp::SHADOW);
- s->only_caption()->set_effect_colour (dcp::Colour (0, 255, 255));
+ s->only_text()->set_colour (dcp::Colour (255, 255, 0));
+ s->only_text()->set_effect (dcp::SHADOW);
+ s->only_text()->set_effect_colour (dcp::Colour (0, 255, 255));
film->write_metadata();
shared_ptr<Job> job (new TranscodeJob (film));
shared_ptr<StringTextFileContent> s (new StringTextFileContent (film, "test/data/subrip.srt"));
film->examine_and_add_content (s);
BOOST_REQUIRE (!wait_for_jobs ());
- s->only_caption()->set_colour (dcp::Colour (255, 255, 0));
- s->only_caption()->set_effect (dcp::SHADOW);
- s->only_caption()->set_effect_colour (dcp::Colour (0, 255, 255));
+ s->only_text()->set_colour (dcp::Colour (255, 255, 0));
+ s->only_text()->set_effect (dcp::SHADOW);
+ s->only_text()->set_effect_colour (dcp::Colour (0, 255, 255));
shared_ptr<Job> job (new TranscodeJob (film));
FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_prores_test7.mov", FFmpegEncoder::FORMAT_PRORES, false);
shared_ptr<StringTextFileContent> s (new StringTextFileContent (film, "test/data/subrip2.srt"));
film->examine_and_add_content (s);
BOOST_REQUIRE (!wait_for_jobs ());
- s->only_caption()->set_colour (dcp::Colour (255, 255, 0));
- s->only_caption()->set_effect (dcp::SHADOW);
- s->only_caption()->set_effect_colour (dcp::Colour (0, 255, 255));
+ s->only_text()->set_colour (dcp::Colour (255, 255, 0));
+ s->only_text()->set_effect (dcp::SHADOW);
+ s->only_text()->set_effect_colour (dcp::Colour (0, 255, 255));
film->write_metadata();
shared_ptr<Job> job (new TranscodeJob (film));
shared_ptr<StringTextFileContent> s (new StringTextFileContent (film, "test/data/subrip.srt"));
film->examine_and_add_content (s);
BOOST_REQUIRE (!wait_for_jobs ());
- s->only_caption()->set_colour (dcp::Colour (255, 255, 0));
- s->only_caption()->set_effect (dcp::SHADOW);
- s->only_caption()->set_effect_colour (dcp::Colour (0, 255, 255));
+ s->only_text()->set_colour (dcp::Colour (255, 255, 0));
+ s->only_text()->set_effect (dcp::SHADOW);
+ s->only_text()->set_effect_colour (dcp::Colour (0, 255, 255));
film->write_metadata();
shared_ptr<Job> job (new TranscodeJob (film));
shared_ptr<DCPContent> dcp (new DCPContent (film, private_data / "awkward_subs"));
film->examine_and_add_content (dcp, true);
BOOST_REQUIRE (!wait_for_jobs ());
- dcp->only_caption()->set_use (true);
+ dcp->only_text()->set_use (true);
shared_ptr<Player> player (new Player (film, film->playlist()));
player->set_fast ();
- player->set_always_burn_open_captions ();
+ player->set_always_burn_open_subtitles ();
player->set_play_referenced ();
shared_ptr<Butler> butler (new Butler (player, film->log(), AudioMapping(), 2));
shared_ptr<DCPContent> dcp (new DCPContent (film, private_data / "awkward_subs2"));
film->examine_and_add_content (dcp, true);
BOOST_REQUIRE (!wait_for_jobs ());
- dcp->only_caption()->set_use (true);
+ dcp->only_text()->set_use (true);
shared_ptr<Player> player (new Player (film, film->playlist()));
player->set_fast ();
- player->set_always_burn_open_captions ();
+ player->set_always_burn_open_subtitles ();
player->set_play_referenced ();
shared_ptr<Butler> butler (new Butler (player, film->log(), AudioMapping(), 2));
shared_ptr<FFmpegContent> content = dynamic_pointer_cast<FFmpegContent>(content_factory(film, private_data / "prophet_short_clip.mkv").front());
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- content->only_caption()->set_burn (true);
- content->only_caption()->set_use (true);
+ content->only_text()->set_burn (true);
+ content->only_text()->set_use (true);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
boost::filesystem::remove_all (film->dir (film->dcp_name(), false));
- content->only_caption()->set_use (false);
+ content->only_text()->set_use (false);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
film->examine_and_add_content (content);
wait_for_jobs ();
- content->only_caption()->set_use (true);
- content->only_caption()->set_burn (false);
+ content->only_text()->set_use (true);
+ content->only_text()->set_burn (false);
film->make_dcp ();
wait_for_jobs ();
film->examine_and_add_content (content);
wait_for_jobs ();
- content->only_caption()->set_use (true);
- content->only_caption()->set_burn (false);
+ content->only_text()->set_use (true);
+ content->only_text()->set_burn (false);
/* Use test/data/subrip2.srt as if it were a font file */
- content->only_caption()->fonts().front()->set_file (FontFiles::NORMAL, "test/data/subrip2.srt");
+ content->only_text()->fonts().front()->set_file (FontFiles::NORMAL, "test/data/subrip2.srt");
film->make_dcp ();
wait_for_jobs ();
film->examine_and_add_content (content);
wait_for_jobs ();
- content->only_caption()->set_use (true);
- content->only_caption()->set_burn (false);
+ content->only_text()->set_use (true);
+ content->only_text()->set_burn (false);
film->make_dcp ();
wait_for_jobs ();
film->set_name ("frobozz");
film->set_interop (false);
shared_ptr<StringTextFileContent> content (new StringTextFileContent (film, "test/data/subrip2.srt"));
- content->only_caption()->set_use (true);
- content->only_caption()->set_burn (false);
+ content->only_text()->set_use (true);
+ content->only_text()->set_burn (false);
film->examine_and_add_content (content);
wait_for_jobs ();
film->make_dcp ();
film->set_interop (true);
film->set_sequence (false);
shared_ptr<StringTextFileContent> content (new StringTextFileContent (film, "test/data/subrip2.srt"));
- content->only_caption()->set_use (true);
- content->only_caption()->set_burn (false);
+ content->only_text()->set_use (true);
+ content->only_text()->set_burn (false);
film->examine_and_add_content (content);
film->examine_and_add_content (content);
wait_for_jobs ();
shared_ptr<Film> film = new_test_film2 ("srt_subtitle_test6");
film->set_interop (false);
shared_ptr<StringTextFileContent> content (new StringTextFileContent (film, "test/data/frames.srt"));
- content->only_caption()->set_use (true);
- content->only_caption()->set_burn (false);
+ content->only_text()->set_use (true);
+ content->only_text()->set_burn (false);
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
film->make_dcp ();
film->examine_and_add_content (content);
wait_for_jobs ();
- content->only_caption()->set_use (true);
- content->only_caption()->set_burn (false);
+ content->only_text()->set_use (true);
+ content->only_text()->set_burn (false);
film->make_dcp ();
wait_for_jobs ();
shared_ptr<StringTextFileContent> content (new StringTextFileContent (film, "test/data/subrip5.srt"));
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- content->only_caption()->set_use (true);
- content->only_caption()->set_burn (false);
+ content->only_text()->set_use (true);
+ content->only_text()->set_burn (false);
film->set_reel_type (REELTYPE_BY_LENGTH);
film->set_interop (true);
film->set_reel_length (1024 * 1024 * 512);
string why_not;
BOOST_CHECK (!dcp->can_reference_video(why_not));
BOOST_CHECK (!dcp->can_reference_audio(why_not));
- BOOST_CHECK (!dcp->can_reference_caption(CAPTION_OPEN, why_not));
- BOOST_CHECK (!dcp->can_reference_caption(CAPTION_CLOSED, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(TEXT_OPEN_SUBTITLE, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(TEXT_CLOSED_CAPTION, why_not));
/* Multi-reel DCP can be referenced if we are using by-video-content */
film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
BOOST_CHECK (dcp->can_reference_video(why_not));
BOOST_CHECK (dcp->can_reference_audio(why_not));
- /* (but reels_test2 has no captions to reference) */
- BOOST_CHECK (!dcp->can_reference_caption(CAPTION_OPEN, why_not));
- BOOST_CHECK (!dcp->can_reference_caption(CAPTION_CLOSED, why_not));
+ /* (but reels_test2 has no texts to reference) */
+ BOOST_CHECK (!dcp->can_reference_text(TEXT_OPEN_SUBTITLE, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(TEXT_CLOSED_CAPTION, why_not));
shared_ptr<FFmpegContent> other (new FFmpegContent (film, "test/data/test.mp4"));
film->examine_and_add_content (other);
other->set_position (DCPTime (0));
BOOST_CHECK (!dcp->can_reference_video(why_not));
BOOST_CHECK (!dcp->can_reference_audio(why_not));
- BOOST_CHECK (!dcp->can_reference_caption(CAPTION_OPEN, why_not));
- BOOST_CHECK (!dcp->can_reference_caption(CAPTION_CLOSED, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(TEXT_OPEN_SUBTITLE, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(TEXT_CLOSED_CAPTION, why_not));
/* This should not be considered an overlap */
other->set_position (dcp->end ());
BOOST_CHECK (dcp->can_reference_video(why_not));
BOOST_CHECK (dcp->can_reference_audio(why_not));
- /* (reels_test2 has no captions to reference) */
- BOOST_CHECK (!dcp->can_reference_caption(CAPTION_OPEN, why_not));
- BOOST_CHECK (!dcp->can_reference_caption(CAPTION_CLOSED, why_not));
+ /* (reels_test2 has no texts to reference) */
+ BOOST_CHECK (!dcp->can_reference_text(TEXT_OPEN_SUBTITLE, why_not));
+ BOOST_CHECK (!dcp->can_reference_text(TEXT_CLOSED_CAPTION, why_not));
}
/** Make a OV with video and audio and a VF referencing the OV and adding subs */