+ /* Examine this reel's main (open) subtitle track, if it has one. */
+ if (reel->main_subtitle()) {
+ if (!reel->main_subtitle()->asset_ref().resolved()) {
+ /* We are missing this asset so we can't continue; examination will be repeated later */
+ _needs_assets = true;
+ LOG_GENERAL("Main subtitle %1 of reel %2 is missing", reel->main_subtitle()->id(), reel->id());
+ return;
+ }
+
+ LOG_GENERAL("Main subtitle %1 of reel %2 found", reel->main_subtitle()->id(), reel->id());
+
+ /* A reel carries at most one main subtitle asset, so the open-subtitle count is just 1. */
+ _text_count[TextType::OPEN_SUBTITLE] = 1;
+ _open_subtitle_language = try_to_parse_language(reel->main_subtitle()->language());
+
+ /* Gather the fonts embedded in the subtitle asset (id -> raw font data). */
+ for (auto const& font: reel->main_subtitle()->asset()->font_data()) {
+ reel_fonts.push_back(make_shared<dcpomatic::Font>(font.first, font.second));
+ }
+ }
+
+ /* Track the largest closed-caption count seen across all reels examined so far. */
+ _text_count[TextType::CLOSED_CAPTION] = std::max(_text_count[TextType::CLOSED_CAPTION], static_cast<int>(reel->closed_captions().size()));
+ if (_dcp_text_tracks.size() < reel->closed_captions().size()) {
+ /* We only want to add 1 DCPTextTrack to _dcp_text_tracks per closed caption. I guess it's possible that different
+ * reels have different numbers of tracks (though I don't think they should) so make sure that _dcp_text_tracks ends
+ * up with the maximum.
+ */
+ _dcp_text_tracks.clear();
+ for (auto ccap: reel->closed_captions()) {
+ _dcp_text_tracks.push_back(DCPTextTrack(ccap->annotation_text().get_value_or(""), try_to_parse_language(ccap->language())));
+ }
+ }
+
+ /* Check that every closed-caption asset is present, collecting its embedded fonts as we go.
+ * NOTE(review): `auto ccap` copies a shared_ptr (atomic refcount bump) each iteration;
+ * `auto const& ccap` would avoid that — confirm and tidy in a follow-up.
+ */
+ for (auto ccap: reel->closed_captions()) {
+ if (!ccap->asset_ref().resolved()) {
+ /* We are missing this asset so we can't continue; examination will be repeated later */
+ _needs_assets = true;
+ LOG_GENERAL("Closed caption %1 of reel %2 is missing", ccap->id(), reel->id());
+ return;
+ }
+
+ LOG_GENERAL("Closed caption %1 of reel %2 found", ccap->id(), reel->id());
+
+ for (auto const& font: ccap->asset()->font_data()) {
+ reel_fonts.push_back(make_shared<dcpomatic::Font>(font.first, font.second));
+ }
+ }
+
+ /* Merge any markers carried by this reel into the accumulated marker set. */
+ if (reel->main_markers ()) {
+ auto rm = reel->main_markers()->get();
+ _markers.insert (rm.begin(), rm.end());