# Use distro-provided FFmpeg on Arch
deps = []
- deps.append(('libdcp', '3a328b6'))
- deps.append(('libsub', '2bbddc6'))
+ deps.append(('libdcp', '4552587'))
+ deps.append(('libsub', '9901351'))
deps.append(('leqm-nrt', '131f971'))
deps.append(('rtaudio', 'f619b76'))
# We get our OpenSSL libraries from the environment, but we
boost::mutex::scoped_lock lm (_mutex);
audio = make_shared<AudioContent>(this);
}
+ audio->set_language (examiner->audio_language());
auto as = make_shared<AudioStream>(examiner->audio_frame_rate(), examiner->audio_length(), examiner->audio_channels());
audio->set_stream (as);
auto m = as->mapping ();
}
list<shared_ptr<TextContent>> new_text;
- for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
- for (int j = 0; j < examiner->text_count(static_cast<TextType>(i)); ++j) {
- auto c = make_shared<TextContent>(this, static_cast<TextType>(i), static_cast<TextType>(i));
- if (i == static_cast<int>(TextType::CLOSED_CAPTION)) {
- c->set_dcp_track (examiner->dcp_text_track(j));
- }
- new_text.push_back (c);
- }
+
+ for (int i = 0; i < examiner->text_count(TextType::OPEN_SUBTITLE); ++i) { // one TextContent per open-subtitle asset found by the examiner
+ auto c = make_shared<TextContent>(this, TextType::OPEN_SUBTITLE, TextType::OPEN_SUBTITLE);
+ c->set_language (examiner->open_subtitle_language()); // may be unset if the language tag was absent or unparseable
+ new_text.push_back (c);
+ }
+
+ for (int i = 0; i < examiner->text_count(TextType::CLOSED_CAPTION); ++i) { // one TextContent per closed-caption asset
+ auto c = make_shared<TextContent>(this, TextType::CLOSED_CAPTION, TextType::CLOSED_CAPTION);
+ c->set_dcp_track (examiner->dcp_text_track(i)); // CCAPs (unlike OCAPs) carry a per-asset track identity
+ new_text.push_back (c);
}
{
using std::runtime_error;
using std::map;
using std::shared_ptr;
+using std::string;
using std::dynamic_pointer_cast;
+using boost::optional;
+
DCPExaminer::DCPExaminer (shared_ptr<const DCPContent> content, bool tolerant)
: DCP (content, tolerant)
_name = cpl->content_title_text ();
_content_kind = cpl->content_kind ();
+ auto try_to_parse_language = [](optional<string> lang) -> boost::optional<dcp::LanguageTag> { // best-effort RFC-5646 parse; bad/missing tags become none
+ try {
+ if (lang) {
+ return dcp::LanguageTag (*lang);
+ }
+ } catch (...) {} // dcp::LanguageTag throws on malformed tags; treat as "no language" rather than failing the examine
+ return boost::none;
+ };
+
for (auto i: cpl->reels()) {
if (i->main_picture ()) {
}
_audio_length += i->main_sound()->actual_duration();
+ _audio_language = try_to_parse_language (asset->language());
}
if (i->main_subtitle ()) {
}
_text_count[static_cast<int>(TextType::OPEN_SUBTITLE)] = 1;
+ _open_subtitle_language = try_to_parse_language (i->main_subtitle()->language());
}
for (auto j: i->closed_captions()) {
}
if (i->main_markers ()) {
- map<dcp::Marker, dcp::Time> rm = i->main_markers()->get();
+ auto rm = i->main_markers()->get();
_markers.insert (rm.begin(), rm.end());
}
return _audio_frame_rate.get_value_or (48000);
}
+ boost::optional<dcp::LanguageTag> audio_language () const { // audio language from the main sound asset, if present and parseable
+ return _audio_language;
+ }
+
/** @param type TextType::OPEN_SUBTITLE or TextType::CLOSED_CAPTION.
* @return Number of assets of this type in this DCP.
*/
return _text_count[static_cast<int>(type)];
}
+ boost::optional<dcp::LanguageTag> open_subtitle_language () const { // language of the main subtitle asset, if present and parseable
+ return _open_subtitle_language;
+ }
+
DCPTextTrack dcp_text_track (int i) const {
DCPOMATIC_ASSERT (i >= 0 && i < static_cast<int>(_dcp_text_tracks.size()));
return _dcp_text_tracks[i];
bool _has_video = false;
/** true if this DCP has audio content (but false if it has unresolved references to audio content) */
bool _has_audio = false;
+ boost::optional<dcp::LanguageTag> _audio_language;
/** number of different assets of each type (OCAP/CCAP) */
int _text_count[static_cast<int>(TextType::COUNT)];
+ boost::optional<dcp::LanguageTag> _open_subtitle_language;
/** the DCPTextTracks for each of our CCAPs */
std::vector<DCPTextTrack> _dcp_text_tracks;
bool _encrypted = false;