X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fdcp_decoder.cc;h=9064627ba9cd937a4930631e17f4665837553ee6;hb=9726a58f44d52d235b027225ddd68c6acf83c733;hp=b6947211c1d8ddf05461a62e5b78605088c39121;hpb=57f112a2bd073123a686141be6c16ba997349056;p=dcpomatic.git diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc index b6947211c..9064627ba 100644 --- a/src/lib/dcp_decoder.cc +++ b/src/lib/dcp_decoder.cc @@ -1,5 +1,5 @@ /* - Copyright (C) 2014-2018 Carl Hetherington + Copyright (C) 2014-2022 Carl Hetherington This file is part of DCP-o-matic. @@ -18,74 +18,101 @@ */ -#include "dcp_decoder.h" -#include "dcp_content.h" + +#include "atmos_decoder.h" #include "audio_content.h" -#include "video_decoder.h" #include "audio_decoder.h" -#include "j2k_image_proxy.h" -#include "text_decoder.h" +#include "config.h" +#include "dcp_content.h" +#include "dcp_decoder.h" +#include "digester.h" #include "ffmpeg_image_proxy.h" +#include "frame_interval_checker.h" #include "image.h" -#include "config.h" -#include +#include "j2k_image_proxy.h" +#include "text_decoder.h" +#include "video_decoder.h" #include -#include +#include +#include #include #include -#include -#include +#include +#include +#include +#include #include #include #include -#include -#include -#include -#include +#include #include +#include +#include +#include +#include #include -#include #include #include "i18n.h" -using std::list; + using std::cout; -using boost::shared_ptr; -using boost::dynamic_pointer_cast; +using std::dynamic_pointer_cast; +using std::list; +using std::make_shared; +using std::map; +using std::shared_ptr; +using std::string; +using std::vector; using boost::optional; using namespace dcpomatic; -DCPDecoder::DCPDecoder (shared_ptr film, shared_ptr c, bool fast, shared_ptr old) - : DCP (c) - , Decoder (film) - , _decode_referenced (false) + +DCPDecoder::DCPDecoder (shared_ptr film, shared_ptr content, bool fast, bool tolerant, shared_ptr old) + : Decoder (film) + , _dcp_content (content) { - if (c->can_be_played()) { - if (c->video) { - video.reset (new VideoDecoder (this, c)); + if (content->can_be_played()) { + if (content->video) { + video = make_shared(this, content); + } + if (content->audio) { + audio = make_shared(this, content->audio, fast); } - if (c->audio) { - audio.reset (new AudioDecoder (this, c->audio, fast)); + for (auto i: content->text) { + text.push_back (make_shared(this, i)); + /* We should really call maybe_set_position() on this TextDecoder to set the time + * of the first subtitle, but it probably doesn't matter since we'll always + * have regularly occurring video (and maybe audio) content. + */ } - BOOST_FOREACH (shared_ptr i, c->text) { - /* XXX: this time here should be the time of the first subtitle, not 0 */ - text.push_back (shared_ptr (new TextDecoder (this, i, ContentTime()))); + if (content->atmos) { + atmos = make_shared(this, content); } } - if (old) { + /* We try to avoid re-scanning the DCP's files every time we make a new DCPDecoder; we do this + by re-using the _reels list. Before we do this we need to check that nothing too serious + has changed in the DCPContent. + + We do this by storing a digest of the important bits of the DCPContent and then checking that's + the same before we re-use _reels. 
+ */ + + _lazy_digest = calculate_lazy_digest (content); + + if (old && old->lazy_digest() == _lazy_digest) { _reels = old->_reels; } else { - list > cpl_list = cpls (); + auto cpl_list = dcp::find_and_resolve_cpls(content->directories(), tolerant); if (cpl_list.empty()) { throw DCPError (_("No CPLs found in DCP.")); } shared_ptr cpl; - BOOST_FOREACH (shared_ptr i, cpl_list) { + for (auto i: cpl_list) { if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) { cpl = i; } @@ -95,7 +122,11 @@ DCPDecoder::DCPDecoder (shared_ptr film, shared_ptrkdm()) { + cpl->add (decrypt_kdm_with_helpful_error(content->kdm().get())); } _reels = cpl->reels (); @@ -104,7 +135,6 @@ DCPDecoder::DCPDecoder (shared_ptr film, shared_ptr film, shared_ptrcan_be_played ()) { + if (!_dcp_content->can_be_played()) { + return true; + } + + if (_reel == _reels.end()) { + if (audio) { + audio->flush (); + } return true; } - double const vfr = _dcp_content->active_video_frame_rate (film()); + auto const vfr = _dcp_content->active_video_frame_rate (film()); /* Frame within the (played part of the) reel that is coming up next */ - int64_t const frame = _next.frames_round (vfr); + auto const frame = _next.frames_round (vfr); - shared_ptr picture_asset = (*_reel)->main_picture()->asset(); + auto picture_asset = (*_reel)->main_picture()->asset(); DCPOMATIC_ASSERT (picture_asset); /* We must emit texts first as when we emit the video for this frame @@ -130,45 +167,39 @@ DCPDecoder::pass () pass_texts (_next, picture_asset->size()); if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) { - int64_t const entry_point = (*_reel)->main_picture()->entry_point().get_value_or(0); + auto const entry_point = (*_reel)->main_picture()->entry_point().get_value_or(0); if (_mono_reader) { video->emit ( film(), - shared_ptr ( - new J2KImageProxy ( - _mono_reader->get_frame (entry_point + frame), - picture_asset->size(), - AV_PIX_FMT_XYZ12LE, - _forced_reduction - ) + std::make_shared( + _mono_reader->get_frame (entry_point + frame), + picture_asset->size(), + AV_PIX_FMT_XYZ12LE, + _forced_reduction ), _offset + frame ); } else { video->emit ( film(), - shared_ptr ( - new J2KImageProxy ( - _stereo_reader->get_frame (entry_point + frame), - picture_asset->size(), - dcp::EYE_LEFT, - AV_PIX_FMT_XYZ12LE, - _forced_reduction - ) + std::make_shared( + _stereo_reader->get_frame (entry_point + frame), + picture_asset->size(), + dcp::Eye::LEFT, + AV_PIX_FMT_XYZ12LE, + _forced_reduction ), _offset + frame ); video->emit ( film(), - shared_ptr ( - new J2KImageProxy ( - _stereo_reader->get_frame (entry_point + frame), - picture_asset->size(), - dcp::EYE_RIGHT, - AV_PIX_FMT_XYZ12LE, - _forced_reduction - ) + std::make_shared( + _stereo_reader->get_frame (entry_point + frame), + picture_asset->size(), + dcp::Eye::RIGHT, + AV_PIX_FMT_XYZ12LE, + _forced_reduction ), _offset + frame ); @@ -176,14 +207,14 @@ DCPDecoder::pass () } if (_sound_reader && (_decode_referenced || !_dcp_content->reference_audio())) { - int64_t const entry_point = (*_reel)->main_sound()->entry_point().get_value_or(0); - shared_ptr sf = _sound_reader->get_frame (entry_point + frame); - uint8_t const * from = sf->data (); + auto const entry_point = (*_reel)->main_sound()->entry_point().get_value_or(0); + auto sf = _sound_reader->get_frame (entry_point + frame); + auto from = sf->data (); int const channels = _dcp_content->audio->stream()->channels (); int const frames = sf->size() / (3 * channels); - shared_ptr data (new AudioBuffers 
(channels, frames));
-		float** data_data = data->data();
+		auto data = make_shared<AudioBuffers>(channels, frames);
+		auto data_data = data->data();
 		for (int i = 0; i < frames; ++i) {
 			for (int j = 0; j < channels; ++j) {
 				data_data[j][i] = static_cast<int>((from[0] << 8) | (from[1] << 16) | (from[2] << 24)) / static_cast<float>(INT_MAX - 256);
@@ -194,6 +225,12 @@ DCPDecoder::pass ()
 		audio->emit (film(), _dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
 	}
 
+	if (_atmos_reader) {
+		DCPOMATIC_ASSERT (_atmos_metadata);
+		auto const entry_point = (*_reel)->atmos()->entry_point().get_value_or(0);
+		atmos->emit (film(), _atmos_reader->get_frame(entry_point + frame), _offset + frame, *_atmos_metadata);
+	}
+
 	_next += ContentTime::from_frames (1, vfr);
 
 	if ((*_reel)->main_picture ()) {
@@ -206,26 +243,33 @@ DCPDecoder::pass ()
 	return false;
 }
 
+
 void
 DCPDecoder::pass_texts (ContentTime next, dcp::Size size)
 {
-	list<shared_ptr<TextDecoder> >::const_iterator decoder = text.begin ();
+	auto decoder = text.begin ();
+	if (decoder == text.end()) {
+		/* It's possible that there is now a main subtitle but no TextDecoders, for example if
+		   the CPL has just changed but the TextContent's texts have not been recreated yet.
+		*/
+		return;
+	}
+
 	if ((*_reel)->main_subtitle()) {
-		DCPOMATIC_ASSERT (decoder != text.end ());
 		pass_texts (
 			next,
 			(*_reel)->main_subtitle()->asset(),
-			_dcp_content->reference_text(TEXT_OPEN_SUBTITLE),
+			_dcp_content->reference_text(TextType::OPEN_SUBTITLE),
 			(*_reel)->main_subtitle()->entry_point().get_value_or(0),
 			*decoder,
 			size
 			);
 		++decoder;
 	}
-	BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> i, (*_reel)->closed_captions()) {
-		DCPOMATIC_ASSERT (decoder != text.end ());
+
+	for (auto i: (*_reel)->closed_captions()) {
 		pass_texts (
-			next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point().get_value_or(0), *decoder, size
+			next, i->asset(), _dcp_content->reference_text(TextType::CLOSED_CAPTION), i->entry_point().get_value_or(0), *decoder, size
 			);
 		++decoder;
 	}
@@ -236,42 +280,45 @@ DCPDecoder::pass_texts (
 	ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size
 	)
 {
-	double const vfr = _dcp_content->active_video_frame_rate (film());
+	auto const vfr = _dcp_content->active_video_frame_rate (film());
 	/* Frame within the (played part of the) reel that is coming up next */
-	int64_t const frame = next.frames_round (vfr);
+	auto const frame = next.frames_round (vfr);
 
 	if (_decode_referenced || !reference) {
-		list<shared_ptr<dcp::Subtitle> > subs = asset->subtitles_during (
+		auto subs = asset->subtitles_during (
 			dcp::Time (entry_point + frame, vfr, vfr),
 			dcp::Time (entry_point + frame + 1, vfr, vfr),
 			true
 			);
 
-		list<dcp::SubtitleString> strings;
+		vector<dcp::SubtitleString> strings;
 
-		BOOST_FOREACH (shared_ptr<dcp::Subtitle> i, subs) {
-			shared_ptr<dcp::SubtitleString> is = dynamic_pointer_cast<dcp::SubtitleString> (i);
+		for (auto i: subs) {
+			auto is = dynamic_pointer_cast<dcp::SubtitleString>(i);
 			if (is) {
 				if (!strings.empty() && (strings.back().in() != is->in() || strings.back().out() != is->out())) {
-					dcp::SubtitleString b = strings.back();
+					auto b = strings.back();
 					decoder->emit_plain (
 						ContentTimePeriod (
 							ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
 							ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
 							),
-						strings
+						strings,
+						_dcp_content->standard()
 						);
 					strings.clear ();
 				}
 
-				strings.push_back (*is);
+				dcp::SubtitleString is_copy = *is;
+				is_copy.set_font(id_for_font_in_reel(is_copy.font().get_value_or(""), _reel - _reels.begin()));
+				strings.push_back(is_copy);
 			}
 
 			/* XXX: 
perhaps these image subs should also be collected together like the string ones are;
 			   this would need to be done both here and in DCPSubtitleDecoder.
 			*/
 
-			shared_ptr<dcp::SubtitleImage> ii = dynamic_pointer_cast<dcp::SubtitleImage> (i);
+			auto ii = dynamic_pointer_cast<dcp::SubtitleImage>(i);
 			if (ii) {
 				emit_subtitle_image (
 					ContentTimePeriod (
@@ -286,19 +333,21 @@ DCPDecoder::pass_texts (
 	}
 
 	if (!strings.empty()) {
-		dcp::SubtitleString b = strings.back();
+		auto b = strings.back();
 		decoder->emit_plain (
 			ContentTimePeriod (
 				ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
 				ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
 				),
-			strings
+			strings,
+			_dcp_content->standard()
 			);
 		strings.clear ();
 	}
 }
 
+
 void
 DCPDecoder::next_reel ()
 {
@@ -307,6 +356,7 @@ DCPDecoder::next_reel ()
 	get_readers ();
 }
 
+
 void
 DCPDecoder::get_readers ()
 {
@@ -314,19 +364,22 @@ DCPDecoder::get_readers ()
 		_mono_reader.reset ();
 		_stereo_reader.reset ();
 		_sound_reader.reset ();
+		_atmos_reader.reset ();
 		return;
 	}
 
 	if ((*_reel)->main_picture()) {
-		shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
-		shared_ptr<dcp::MonoPictureAsset> mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
-		shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
+		auto asset = (*_reel)->main_picture()->asset ();
+		auto mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
+		auto stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
 		DCPOMATIC_ASSERT (mono || stereo);
 		if (mono) {
 			_mono_reader = mono->start_read ();
+			_mono_reader->set_check_hmac (false);
 			_stereo_reader.reset ();
 		} else {
 			_stereo_reader = stereo->start_read ();
+			_stereo_reader->set_check_hmac (false);
 			_mono_reader.reset ();
 		}
 	} else {
@@ -336,11 +389,23 @@ DCPDecoder::get_readers ()
 
 	if ((*_reel)->main_sound()) {
 		_sound_reader = (*_reel)->main_sound()->asset()->start_read ();
+		_sound_reader->set_check_hmac (false);
 	} else {
 		_sound_reader.reset ();
 	}
+
+	if ((*_reel)->atmos()) {
+		auto asset = (*_reel)->atmos()->asset();
+		_atmos_reader = asset->start_read();
+		_atmos_reader->set_check_hmac (false);
+		_atmos_metadata = AtmosMetadata (asset);
+	} else {
+		_atmos_reader.reset ();
+		_atmos_metadata = boost::none;
+	}
 }
 
+
 void
 DCPDecoder::seek (ContentTime t, bool accurate)
 {
@@ -358,7 +423,7 @@ DCPDecoder::seek (ContentTime t, bool accurate)
 
 	/* Pre-roll for subs */
 
-	ContentTime pre = t - ContentTime::from_seconds (pre_roll_seconds);
+	auto pre = t - ContentTime::from_seconds (pre_roll_seconds);
 	if (pre < ContentTime()) {
 		pre = ContentTime ();
 	}
@@ -370,7 +435,7 @@ DCPDecoder::seek (ContentTime t, bool accurate)
 		pre >= ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()))
 		) {
 
-		ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()));
+		auto rd = ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()));
 		pre -= rd;
 		t -= rd;
 		next_reel ();
@@ -378,7 +443,7 @@ DCPDecoder::seek (ContentTime t, bool accurate)
 
 	/* Pass texts in the pre-roll */
 
-	double const vfr = _dcp_content->active_video_frame_rate (film());
+	auto const vfr = _dcp_content->active_video_frame_rate (film());
 	for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
 		pass_texts (pre, (*_reel)->main_picture()->asset()->size());
 		pre += ContentTime::from_frames (1, vfr);
@@ -398,6 +463,7 @@ DCPDecoder::seek (ContentTime t, bool accurate)
 	_next = t;
 }
 
+
 void
 DCPDecoder::set_decode_referenced (bool r)
 {
@@ -411,8 +477,35 @@ DCPDecoder::set_decode_referenced 
(bool r)
 	}
 }
 
+
 void
 DCPDecoder::set_forced_reduction (optional<int> reduction)
 {
 	_forced_reduction = reduction;
 }
+
+
+string
+DCPDecoder::calculate_lazy_digest (shared_ptr<const DCPContent> c) const
+{
+	Digester d;
+	for (auto i: c->paths()) {
+		d.add (i.string());
+	}
+	if (_dcp_content->kdm()) {
+		d.add(_dcp_content->kdm()->id());
+	}
+	d.add (static_cast<bool>(c->cpl()));
+	if (c->cpl()) {
+		d.add (c->cpl().get());
+	}
+	return d.get ();
+}
+
+
+ContentTime
+DCPDecoder::position () const
+{
+	return ContentTime::from_frames(_offset, _dcp_content->active_video_frame_rate(film())) + _next;
+}
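
For reference, the sample-format handling that pass() applies to each reel's sound frame (unchanged by this diff apart from the switch to auto and make_shared) can be illustrated on its own. This is a minimal sketch only; the function name convert_24bit_pcm and the plain std::vector output are illustrative and are not part of DCP-o-matic or libdcp:

#include <climits>
#include <cstdint>
#include <vector>

/* Convert packed little-endian 24-bit PCM (three bytes per sample, channels
 * interleaved within each frame) to floats in roughly [-1, 1], mirroring the
 * inner loop of DCPDecoder::pass().  Illustrative sketch only.
 */
std::vector<std::vector<float>>
convert_24bit_pcm (uint8_t const* from, int channels, int frames)
{
	std::vector<std::vector<float>> out (channels, std::vector<float>(frames));
	for (int i = 0; i < frames; ++i) {
		for (int j = 0; j < channels; ++j) {
			/* Left-align the 24-bit sample in a 32-bit word so its sign bit
			 * becomes the int's sign bit, then scale down to a float.
			 */
			auto const s = static_cast<int32_t>(
				(static_cast<uint32_t>(from[0]) << 8) |
				(static_cast<uint32_t>(from[1]) << 16) |
				(static_cast<uint32_t>(from[2]) << 24)
				);
			out[j][i] = s / static_cast<float>(INT_MAX - 256);
			from += 3;
		}
	}
	return out;
}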