X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fdcp_examiner.cc;h=50b19e2fd2918b01d0a953919af4ac531f0619cc;hb=78b2c650a9249cb7165d269b4378391d31e68e8b;hp=536c9ec1ab81885a9c71f0ecfc332245a1845193;hpb=b168d211622f94a5240c945c1df03b0bed48d3bc;p=dcpomatic.git

diff --git a/src/lib/dcp_examiner.cc b/src/lib/dcp_examiner.cc
index 536c9ec1a..50b19e2fd 100644
--- a/src/lib/dcp_examiner.cc
+++ b/src/lib/dcp_examiner.cc
@@ -175,8 +175,8 @@ DCPExaminer::DCPExaminer (shared_ptr<const DCPContent> content, bool tolerant)
 				auto asset = reel->main_sound()->asset();
 
 				if (!_audio_channels) {
-					_audio_channels = asset->channels ();
-				} else if (_audio_channels.get() != asset->channels ()) {
+					_audio_channels = asset->active_channels();
+				} else if (_audio_channels.get() != asset->active_channels()) {
 					throw DCPError (_("Mismatched audio channel counts in DCP"));
 				}
 
@@ -208,6 +208,18 @@ DCPExaminer::DCPExaminer (shared_ptr<const DCPContent> content, bool tolerant)
 				}
 			}
 
+			_text_count[TextType::CLOSED_CAPTION] = std::max(_text_count[TextType::CLOSED_CAPTION], static_cast<int>(reel->closed_captions().size()));
+			if (_dcp_text_tracks.size() < reel->closed_captions().size()) {
+				/* We only want to add 1 DCPTextTrack to _dcp_text_tracks per closed caption.  I guess it's possible that different
+				 * reels have different numbers of tracks (though I don't think they should) so make sure that _dcp_text_tracks ends
+				 * up with the maximum.
+				 */
+				_dcp_text_tracks.clear();
+				for (auto ccap: reel->closed_captions()) {
+					_dcp_text_tracks.push_back(DCPTextTrack(ccap->annotation_text().get_value_or(""), try_to_parse_language(ccap->language())));
+				}
+			}
+
 			for (auto ccap: reel->closed_captions()) {
 				if (!ccap->asset_ref().resolved()) {
 					/* We are missing this asset so we can't continue; examination will be repeated later */
@@ -218,8 +230,9 @@ DCPExaminer::DCPExaminer (shared_ptr<const DCPContent> content, bool tolerant)
 
 				LOG_GENERAL("Closed caption %1 of reel %2 found", ccap->id(), reel->id());
 
-				_text_count[TextType::CLOSED_CAPTION]++;
-				_dcp_text_tracks.push_back(DCPTextTrack(ccap->annotation_text().get_value_or(""), try_to_parse_language(ccap->language())));
+				for (auto const& font: ccap->asset()->font_data()) {
+					reel_fonts.push_back(make_shared<dcpomatic::Font>(font.first, font.second));
+				}
 			}
 
 			if (reel->main_markers ()) {
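
For context on the closed-caption hunks above: the old code incremented _text_count and appended a DCPTextTrack for every closed-caption asset in every reel, so a multi-reel DCP with one track per reel was counted as having several tracks. The new code takes each reel's track count, keeps the maximum across reels, and rebuilds _dcp_text_tracks whenever a reel reports more tracks than seen so far. Below is a minimal standalone sketch of that accumulation, using hypothetical Track and Reel stand-ins rather than the real DCPTextTrack and libdcp reel asset types.

// Standalone sketch of the "keep the maximum across reels" accumulation shown in the
// diff.  Track and Reel are simplified, hypothetical types, not dcpomatic's
// DCPTextTrack or libdcp's reel closed-caption assets.
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct Track {
	std::string name;      // stands in for the asset's annotation text
	std::string language;  // stands in for the asset's language tag
};

struct Reel {
	std::vector<Track> closed_captions;
};

int main()
{
	std::vector<Reel> const reels = {
		{{ {"CCAP", "en-GB"} }},                     // reel 1: one track
		{{ {"CCAP", "en-GB"}, {"CCAP", "de-DE"} }},  // reel 2: two tracks
		{{ {"CCAP", "en-GB"} }}                      // reel 3: one track
	};

	int closed_caption_count = 0;
	std::vector<Track> dcp_text_tracks;

	for (auto const& reel: reels) {
		/* Keep the largest per-reel count, as the std::max() in the patch does */
		closed_caption_count = std::max(closed_caption_count, static_cast<int>(reel.closed_captions.size()));
		/* Rebuild the track list only when this reel has more tracks than any reel seen
		 * so far, giving one entry per track rather than one per (reel, track) pair.
		 */
		if (dcp_text_tracks.size() < reel.closed_captions.size()) {
			dcp_text_tracks = reel.closed_captions;
		}
	}

	std::cout << "closed caption tracks: " << closed_caption_count << "\n";
	for (auto const& track: dcp_text_tracks) {
		std::cout << track.name << " (" << track.language << ")\n";
	}
	return 0;
}

Run over these three example reels, the sketch reports two closed-caption tracks and lists each distinct track once, where the old per-asset increment would have counted four.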