summaryrefslogtreecommitdiff
path: root/src/lib
diff options
context:
space:
mode:
authorCarl Hetherington <cth@carlh.net>2024-08-11 12:48:43 +0200
committerCarl Hetherington <cth@carlh.net>2024-09-03 17:02:24 +0200
commitf145574466ca52e754c4febf1d79fb6d202d42d2 (patch)
tree6816a6d9891405eef56bf7481d528337381226ff /src/lib
parent0dce5e78c79eb5989c78a1cec10983406d7a4cac (diff)
Support closed subtitles and open captions.
Diffstat (limited to 'src/lib')
-rw-r--r--src/lib/dcp_content.cc16
-rw-r--r--src/lib/dcp_examiner.cc114
-rw-r--r--src/lib/dcp_examiner.h25
-rw-r--r--src/lib/film.cc4
-rw-r--r--src/lib/film.h2
-rw-r--r--src/lib/player.cc67
-rw-r--r--src/lib/player.h2
-rw-r--r--src/lib/text_type.cc13
-rw-r--r--src/lib/text_type.h3
-rw-r--r--src/lib/writer.cc4
10 files changed, 164 insertions, 86 deletions
diff --git a/src/lib/dcp_content.cc b/src/lib/dcp_content.cc
index b61138577..48d382c41 100644
--- a/src/lib/dcp_content.cc
+++ b/src/lib/dcp_content.cc
@@ -295,9 +295,23 @@ DCPContent::examine (shared_ptr<const Film> film, shared_ptr<Job> job)
new_text.push_back (c);
}
+ for (int i = 0; i < examiner->text_count(TextType::OPEN_CAPTION); ++i) {
+ auto c = make_shared<TextContent>(this, TextType::OPEN_CAPTION, TextType::OPEN_CAPTION);
+ c->set_language(examiner->open_caption_language());
+ examiner->add_fonts(c);
+ new_text.push_back(c);
+ }
+
+ for (int i = 0; i < examiner->text_count(TextType::CLOSED_SUBTITLE); ++i) {
+ auto c = make_shared<TextContent>(this, TextType::CLOSED_SUBTITLE, TextType::CLOSED_SUBTITLE);
+ c->set_dcp_track(examiner->dcp_subtitle_track(i));
+ examiner->add_fonts(c);
+ new_text.push_back(c);
+ }
+
for (int i = 0; i < examiner->text_count(TextType::CLOSED_CAPTION); ++i) {
auto c = make_shared<TextContent>(this, TextType::CLOSED_CAPTION, TextType::CLOSED_CAPTION);
- c->set_dcp_track (examiner->dcp_text_track(i));
+ c->set_dcp_track(examiner->dcp_caption_track(i));
examiner->add_fonts(c);
new_text.push_back (c);
}
diff --git a/src/lib/dcp_examiner.cc b/src/lib/dcp_examiner.cc
index 3f8b4ca90..3ee6dd99c 100644
--- a/src/lib/dcp_examiner.cc
+++ b/src/lib/dcp_examiner.cc
@@ -95,6 +95,9 @@ DCPExaminer::DCPExaminer (shared_ptr<const DCPContent> content, bool tolerant)
if (reel->main_subtitle() && !reel->main_subtitle()->asset_ref().resolved()) {
++unsatisfied;
}
+ if (reel->main_caption() && !reel->main_caption()->asset_ref().resolved()) {
+ ++unsatisfied;
+ }
if (reel->atmos() && !reel->atmos()->asset_ref().resolved()) {
++unsatisfied;
}
@@ -201,54 +204,72 @@ DCPExaminer::DCPExaminer (shared_ptr<const DCPContent> content, bool tolerant)
}
}
- if (auto sub = reel->main_subtitle()) {
- if (sub->entry_point().get_value_or(0) != 0) {
- _has_non_zero_entry_point[TextType::OPEN_SUBTITLE] = true;
- }
- if (!sub->asset_ref().resolved()) {
- LOG_GENERAL("Main subtitle %1 of reel %2 is missing", sub->id(), reel->id());
- _needs_assets = true;
- } else {
- LOG_GENERAL("Main subtitle %1 of reel %2 found", sub->id(), reel->id());
-
- _text_count[TextType::OPEN_SUBTITLE] = 1;
- _open_subtitle_language = try_to_parse_language(sub->language());
+ auto read_main_text = [this, reel, reel_index, try_to_parse_language](
+ shared_ptr<dcp::ReelTextAsset> reel_asset, TextType type, string name, boost::optional<dcp::LanguageTag>& language
+ ) {
- auto asset = sub->asset();
- for (auto const& font: asset->font_data()) {
- _fonts.push_back({reel_index, asset->id(), make_shared<dcpomatic::Font>(font.first, font.second)});
+ if (reel_asset) {
+ if (reel_asset->entry_point().get_value_or(0) != 0) {
+ _has_non_zero_entry_point[type] = true;
+ }
+ if (!reel_asset->asset_ref().resolved()) {
+ LOG_GENERAL("Main %1 %2 of reel %3 is missing", name, reel_asset->id(), reel->id());
+ _needs_assets = true;
+ } else {
+ LOG_GENERAL("Main %1 %2 of reel %3 found", name, reel_asset->id(), reel->id());
+
+ _text_count[type] = 1;
+ language = try_to_parse_language(reel_asset->language());
+
+ auto asset = reel_asset->asset();
+ for (auto const& font: asset->font_data()) {
+ _fonts.push_back({reel_index, asset->id(), make_shared<dcpomatic::Font>(font.first, font.second)});
+ }
}
}
- }
- _text_count[TextType::CLOSED_CAPTION] = std::max(_text_count[TextType::CLOSED_CAPTION], static_cast<int>(reel->closed_captions().size()));
- if (_dcp_text_tracks.size() < reel->closed_captions().size()) {
- /* We only want to add 1 DCPTextTrack to _dcp_text_tracks per closed caption. I guess it's possible that different
- * reels have different numbers of tracks (though I don't think they should) so make sure that _dcp_text_tracks ends
- * up with the maximum.
- */
- _dcp_text_tracks.clear();
- for (auto ccap: reel->closed_captions()) {
- _dcp_text_tracks.push_back(DCPTextTrack(ccap->annotation_text().get_value_or(""), try_to_parse_language(ccap->language())));
- }
- }
+ };
+
+ read_main_text(reel->main_subtitle(), TextType::OPEN_SUBTITLE, "subtitle", _open_subtitle_language);
+ read_main_text(reel->main_caption(), TextType::OPEN_CAPTION, "caption", _open_caption_language);
+
+ auto read_closed_text = [this, reel, reel_index, try_to_parse_language](
+ vector<shared_ptr<dcp::ReelTextAsset>> reel_assets, TextType type, string name, vector<DCPTextTrack>& tracks
+ ) {
- for (auto ccap: reel->closed_captions()) {
- if (ccap->entry_point().get_value_or(0) != 0) {
- _has_non_zero_entry_point[TextType::CLOSED_CAPTION] = true;
+ _text_count[type] = std::max(_text_count[type], static_cast<int>(reel_assets.size()));
+
+ if (tracks.size() < reel_assets.size()) {
+ /* We only want to add 1 DCPTextTrack to tracks per closed subtitle/caption. I guess it's possible that different
+ * reels have different numbers of tracks (though I don't think they should) so make sure that tracks ends
+ * up with the maximum.
+ */
+ tracks.clear();
+ for (auto subtitle: reel_assets) {
+ tracks.push_back(DCPTextTrack(subtitle->annotation_text().get_value_or(""), try_to_parse_language(subtitle->language())));
+ }
}
- if (!ccap->asset_ref().resolved()) {
- LOG_GENERAL("Closed caption %1 of reel %2 is missing", ccap->id(), reel->id());
- _needs_assets = true;
- } else {
- LOG_GENERAL("Closed caption %1 of reel %2 found", ccap->id(), reel->id());
- auto asset = ccap->asset();
- for (auto const& font: asset->font_data()) {
- _fonts.push_back({reel_index, asset->id(), make_shared<dcpomatic::Font>(font.first, font.second)});
+ for (auto text: reel_assets) {
+ if (text->entry_point().get_value_or(0) != 0) {
+ _has_non_zero_entry_point[type] = true;
+ }
+ if (!text->asset_ref().resolved()) {
+ LOG_GENERAL("Closed %1 %2 of reel %3 is missing", name, text->id(), reel->id());
+ _needs_assets = true;
+ } else {
+ LOG_GENERAL("Closed %1 %2 of reel %3 found", name, text->id(), reel->id());
+
+ auto asset = text->asset();
+ for (auto const& font: asset->font_data()) {
+ _fonts.push_back({reel_index, asset->id(), make_shared<dcpomatic::Font>(font.first, font.second)});
+ }
}
}
- }
+ };
+
+ read_closed_text(reel->closed_subtitles(), TextType::CLOSED_SUBTITLE, "subtitle", _dcp_subtitle_tracks);
+ read_closed_text(reel->closed_captions(), TextType::CLOSED_CAPTION, "caption", _dcp_caption_tracks);
if (reel->main_markers ()) {
auto rm = reel->main_markers()->get();
@@ -270,6 +291,10 @@ DCPExaminer::DCPExaminer (shared_ptr<const DCPContent> content, bool tolerant)
_reel_lengths.push_back(reel->main_sound()->actual_duration());
} else if (reel->main_subtitle()) {
_reel_lengths.push_back(reel->main_subtitle()->actual_duration());
+ } else if (reel->main_caption()) {
+ _reel_lengths.push_back(reel->main_caption()->actual_duration());
+ } else if (!reel->closed_subtitles().empty()) {
+ _reel_lengths.push_back(reel->closed_subtitles().front()->actual_duration());
} else if (!reel->closed_captions().empty()) {
_reel_lengths.push_back(reel->closed_captions().front()->actual_duration());
} else if (!reel->atmos()) {
@@ -345,6 +370,17 @@ DCPExaminer::DCPExaminer (shared_ptr<const DCPContent> content, bool tolerant)
sub->texts();
}
+ if (i->main_caption() && i->main_caption()->asset_ref().resolved()) {
+ auto cap = i->main_caption()->asset();
+ auto mxf_cap = dynamic_pointer_cast<dcp::MXF>(cap);
+ if (mxf_cap && mxf_cap->encrypted() && !mxf_cap->key()) {
+ _kdm_valid = false;
+ LOG_GENERAL_NC("Caption has no key");
+ break;
+ }
+ cap->texts();
+ }
+
if (i->atmos() && i->atmos()->asset_ref().resolved()) {
if (auto atmos = i->atmos()->asset()) {
if (atmos->encrypted() && !atmos->key()) {
diff --git a/src/lib/dcp_examiner.h b/src/lib/dcp_examiner.h
index 28b59ee2f..5e2a33361 100644
--- a/src/lib/dcp_examiner.h
+++ b/src/lib/dcp_examiner.h
@@ -104,8 +104,7 @@ public:
return _audio_language;
}
- /** @param type TEXT_OPEN_SUBTITLE or TEXT_CLOSED_CAPTION.
- * @return the number of "streams" of this type in the DCP.
+ /** @return the number of "streams" of @p type in the DCP.
* Reels do not affect the return value of this method: if a DCP
* has any subtitles, type=TEXT_OPEN_SUBTITLE will return 1.
*/
@@ -117,9 +116,18 @@ public:
return _open_subtitle_language;
}
- DCPTextTrack dcp_text_track (int i) const {
- DCPOMATIC_ASSERT (i >= 0 && i < static_cast<int>(_dcp_text_tracks.size()));
- return _dcp_text_tracks[i];
+ boost::optional<dcp::LanguageTag> open_caption_language() const {
+ return _open_caption_language;
+ }
+
+ DCPTextTrack dcp_subtitle_track(int i) const {
+ DCPOMATIC_ASSERT (i >= 0 && i < static_cast<int>(_dcp_subtitle_tracks.size()));
+ return _dcp_subtitle_tracks[i];
+ }
+
+ DCPTextTrack dcp_caption_track(int i) const {
+ DCPOMATIC_ASSERT (i >= 0 && i < static_cast<int>(_dcp_caption_tracks.size()));
+ return _dcp_caption_tracks[i];
}
bool kdm_valid () const {
@@ -198,8 +206,11 @@ private:
/** number of different assets of each type (open/closed subtitle/caption) */
EnumIndexedVector<int, TextType> _text_count;
boost::optional<dcp::LanguageTag> _open_subtitle_language;
- /** the DCPTextTracks for each of our CCAPs */
- std::vector<DCPTextTrack> _dcp_text_tracks;
+ boost::optional<dcp::LanguageTag> _open_caption_language;
+ /** the DCPTextTracks for each of our closed subtitles */
+ std::vector<DCPTextTrack> _dcp_subtitle_tracks;
+ /** the DCPTextTracks for each of our closed captions */
+ std::vector<DCPTextTrack> _dcp_caption_tracks;
bool _encrypted = false;
bool _needs_assets = false;
bool _kdm_valid = false;
diff --git a/src/lib/film.cc b/src/lib/film.cc
index a6db71465..2e7df9cf8 100644
--- a/src/lib/film.cc
+++ b/src/lib/film.cc
@@ -2048,14 +2048,14 @@ Film::contains_atmos_content () const
list<DCPTextTrack>
-Film::closed_caption_tracks () const
+Film::closed_text_tracks() const
{
list<DCPTextTrack> tt;
for (auto i: content()) {
for (auto text: i->text) {
/* XXX: Empty DCPTextTrack ends up being a magic value here - the "unknown" or "not specified" track */
auto dtt = text->dcp_track().get_value_or(DCPTextTrack());
- if (text->type() == TextType::CLOSED_CAPTION && find(tt.begin(), tt.end(), dtt) == tt.end()) {
+ if (!is_open(text->type()) && find(tt.begin(), tt.end(), dtt) == tt.end()) {
tt.push_back (dtt);
}
}
diff --git a/src/lib/film.h b/src/lib/film.h
index d71435566..12872caf1 100644
--- a/src/lib/film.h
+++ b/src/lib/film.h
@@ -159,7 +159,7 @@ public:
std::vector<CPLSummary> cpls () const;
- std::list<DCPTextTrack> closed_caption_tracks () const;
+ std::list<DCPTextTrack> closed_text_tracks() const;
uint64_t required_disk_space () const;
bool should_be_enough_disk_space(double& required, double& available) const;
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 20792aec4..f348f6b28 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -919,56 +919,58 @@ Player::pass ()
/** @return Open subtitles and captions for the frame at the given time, converted to images */
optional<PositionImage>
-Player::open_subtitles_for_frame (DCPTime time) const
+Player::open_texts_for_frame(DCPTime time) const
{
auto film = _film.lock();
if (!film) {
return {};
}
- list<PositionImage> captions;
+ list<PositionImage> texts;
int const vfr = film->video_frame_rate();
- for (
- auto j:
- _active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
- ) {
+ for (auto type: { TextType::OPEN_SUBTITLE, TextType::OPEN_CAPTION }) {
+ for (
+ auto const& text:
+ _active_texts[type].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
+ ) {
- /* Bitmap subtitles */
- for (auto i: j.bitmap) {
- if (!i.image || i.image->size().width == 0 || i.image->size().height == 0) {
- continue;
- }
+ /* Bitmap texts */
+ for (auto i: text.bitmap) {
+ if (!i.image || i.image->size().width == 0 || i.image->size().height == 0) {
+ continue;
+ }
- /* i.image will already have been scaled to fit _video_container_size */
- dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
+ /* i.image will already have been scaled to fit _video_container_size */
+ dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
- captions.push_back (
- PositionImage (
- i.image,
- Position<int> (
- lrint(_video_container_size.load().width * i.rectangle.x),
- lrint(_video_container_size.load().height * i.rectangle.y)
+ texts.push_back(
+ PositionImage (
+ i.image,
+ Position<int> (
+ lrint(_video_container_size.load().width * i.rectangle.x),
+ lrint(_video_container_size.load().height * i.rectangle.y)
+ )
)
- )
- );
- }
+ );
+ }
- /* String subtitles (rendered to an image) */
- if (!j.string.empty()) {
- auto s = render_text(j.string, _video_container_size, time, vfr);
- copy_if(s.begin(), s.end(), back_inserter(captions), [](PositionImage const& image) {
- return image.image->size().width && image.image->size().height;
- });
+ /* String texts (rendered to an image) */
+ if (!text.string.empty()) {
+ auto s = render_text(text.string, _video_container_size, time, vfr);
+ copy_if(s.begin(), s.end(), back_inserter(texts), [](PositionImage const& image) {
+ return image.image->size().width && image.image->size().height;
+ });
+ }
}
}
- if (captions.empty()) {
+ if (texts.empty()) {
return {};
}
- return merge (captions, _subtitle_alignment);
+ return merge(texts, _subtitle_alignment);
}
@@ -1439,9 +1441,8 @@ Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time)
std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
}
- auto subtitles = open_subtitles_for_frame (time);
- if (subtitles) {
- pv->set_text (subtitles.get ());
+ if (auto texts = open_texts_for_frame(time)) {
+ pv->set_text(texts.get());
}
Video (pv, time);
diff --git a/src/lib/player.h b/src/lib/player.h
index 314031698..f6a30230f 100644
--- a/src/lib/player.h
+++ b/src/lib/player.h
@@ -173,7 +173,7 @@ private:
std::pair<std::shared_ptr<AudioBuffers>, dcpomatic::DCPTime> discard_audio (
std::shared_ptr<const AudioBuffers> audio, dcpomatic::DCPTime time, dcpomatic::DCPTime discard_to
) const;
- boost::optional<PositionImage> open_subtitles_for_frame (dcpomatic::DCPTime time) const;
+ boost::optional<PositionImage> open_texts_for_frame(dcpomatic::DCPTime time) const;
void emit_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
void use_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
void emit_audio (std::shared_ptr<AudioBuffers> data, dcpomatic::DCPTime time);
diff --git a/src/lib/text_type.cc b/src/lib/text_type.cc
index 4f8f50ac5..33e74bdca 100644
--- a/src/lib/text_type.cc
+++ b/src/lib/text_type.cc
@@ -24,6 +24,7 @@
#include "exceptions.h"
#include "text_type.h"
#include "types.h"
+#include <dcp/text_type.h>
#include "i18n.h"
@@ -88,3 +89,15 @@ text_type_to_name(TextType t)
}
+bool
+is_open(TextType type)
+{
+ return type == TextType::OPEN_SUBTITLE || type == TextType::OPEN_CAPTION;
+}
+
+
+bool
+is_open(dcp::TextType type)
+{
+ return type == dcp::TextType::OPEN_SUBTITLE || type == dcp::TextType::OPEN_CAPTION;
+}
diff --git a/src/lib/text_type.h b/src/lib/text_type.h
index 94224af4e..1f2a5aeec 100644
--- a/src/lib/text_type.h
+++ b/src/lib/text_type.h
@@ -24,6 +24,7 @@
#include <string>
+#include <dcp/text_type.h>
/** Type of captions.
@@ -50,6 +51,8 @@ enum class TextType
extern std::string text_type_to_string(TextType t);
extern std::string text_type_to_name(TextType t);
extern TextType string_to_text_type(std::string s);
+bool is_open(TextType type);
+bool is_open(dcp::TextType type);
#endif
diff --git a/src/lib/writer.cc b/src/lib/writer.cc
index 29fc543ba..4b6f70f21 100644
--- a/src/lib/writer.cc
+++ b/src/lib/writer.cc
@@ -102,7 +102,7 @@ Writer::Writer(weak_ptr<const Film> weak_film, weak_ptr<Job> weak_job, boost::fi
*/
_audio_reel = _reels.begin ();
_subtitle_reel = _reels.begin ();
- for (auto i: film()->closed_caption_tracks()) {
+ for (auto i: film()->closed_text_tracks()) {
_caption_reels[i] = _reels.begin ();
}
_atmos_reel = _reels.begin ();
@@ -945,7 +945,7 @@ Writer::write (ReferencedReelAsset asset)
_reel_assets.push_back (asset);
if (auto text_asset = dynamic_pointer_cast<dcp::ReelTextAsset>(asset.asset)) {
- if (text_asset->type() == dcp::TextType::OPEN_SUBTITLE || text_asset->type() == dcp::TextType::OPEN_CAPTION) {
+ if (is_open(text_asset->type())) {
_have_subtitles = true;
} else {
/* This feels quite fragile. We have a referenced reel and want to know if it's