/*
- Copyright (C) 2014-2020 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2014-2022 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
*/
+
#include "atmos_decoder.h"
-#include "dcp_decoder.h"
-#include "dcp_content.h"
#include "audio_content.h"
-#include "video_decoder.h"
#include "audio_decoder.h"
-#include "j2k_image_proxy.h"
-#include "text_decoder.h"
-#include "ffmpeg_image_proxy.h"
-#include "image.h"
#include "config.h"
+#include "constants.h"
+#include "dcp_content.h"
+#include "dcp_decoder.h"
#include "digester.h"
+#include "ffmpeg_image_proxy.h"
#include "frame_interval_checker.h"
-#include <dcp/dcp.h>
+#include "image.h"
+#include "j2k_image_proxy.h"
+#include "text_decoder.h"
+#include "video_decoder.h"
#include <dcp/cpl.h>
-#include <dcp/reel.h>
+#include <dcp/dcp.h>
+#include <dcp/decrypted_kdm.h>
#include <dcp/mono_picture_asset.h>
#include <dcp/mono_picture_asset_reader.h>
-#include <dcp/stereo_picture_asset.h>
-#include <dcp/stereo_picture_asset_reader.h>
+#include <dcp/mono_picture_frame.h>
+#include <dcp/reel.h>
+#include <dcp/reel_atmos_asset.h>
+#include <dcp/reel_closed_caption_asset.h>
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
-#include <dcp/reel_closed_caption_asset.h>
-#include <dcp/mono_picture_frame.h>
-#include <dcp/stereo_picture_frame.h>
-#include <dcp/sound_frame.h>
+#include <dcp/search.h>
#include <dcp/sound_asset_reader.h>
+#include <dcp/sound_frame.h>
+#include <dcp/stereo_picture_asset.h>
+#include <dcp/stereo_picture_asset_reader.h>
+#include <dcp/stereo_picture_frame.h>
#include <dcp/subtitle_image.h>
-#include <dcp/decrypted_kdm.h>
-#include <dcp/reel_atmos_asset.h>
#include <iostream>
#include "i18n.h"
-using std::list;
+
using std::cout;
+using std::dynamic_pointer_cast;
+using std::list;
+using std::make_shared;
using std::map;
+using std::shared_ptr;
using std::string;
using std::vector;
-using std::shared_ptr;
-using std::dynamic_pointer_cast;
using boost::optional;
using namespace dcpomatic;
-DCPDecoder::DCPDecoder (shared_ptr<const Film> film, shared_ptr<const DCPContent> c, bool fast, bool tolerant, shared_ptr<DCPDecoder> old)
- : DCP (c, tolerant)
- , Decoder (film)
- , _decode_referenced (false)
+
+DCPDecoder::DCPDecoder (shared_ptr<const Film> film, shared_ptr<const DCPContent> content, bool fast, bool tolerant, shared_ptr<DCPDecoder> old)
+ : Decoder (film)
+ , _dcp_content (content)
{
- if (c->can_be_played()) {
- if (c->video) {
- video.reset (new VideoDecoder (this, c));
+ if (content->can_be_played()) {
+ if (content->video) {
+ video = make_shared<VideoDecoder>(this, content);
}
- if (c->audio) {
- audio.reset (new AudioDecoder (this, c->audio, fast));
+ if (content->audio) {
+ audio = make_shared<AudioDecoder>(this, content->audio, fast);
}
- for (auto i: c->text) {
- /* XXX: this time here should be the time of the first subtitle, not 0 */
- text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, i, ContentTime())));
+ for (auto i: content->text) {
+ text.push_back (make_shared<TextDecoder>(this, i));
+ /* We should really call maybe_set_position() on this TextDecoder to set the time
+ * of the first subtitle, but it probably doesn't matter since we'll always
+ * have regularly occurring video (and maybe audio) content.
+ */
}
- if (c->atmos) {
- atmos.reset (new AtmosDecoder (this, c));
+ if (content->atmos) {
+ atmos = make_shared<AtmosDecoder>(this, content);
}
}
the same before we re-use _reels.
*/
- _lazy_digest = calculate_lazy_digest (c);
+ _lazy_digest = calculate_lazy_digest (content);
if (old && old->lazy_digest() == _lazy_digest) {
_reels = old->_reels;
} else {
- list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
+ auto cpl_list = dcp::find_and_resolve_cpls(content->directories(), tolerant);
if (cpl_list.empty()) {
throw DCPError (_("No CPLs found in DCP."));
/* No CPL found; probably an old file that doesn't specify it;
just use the first one.
*/
- cpl = cpls().front ();
+ cpl = cpl_list.front();
+ }
+
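+ /* If the content has a KDM, decrypt it and add its keys to the CPL so that any encrypted assets can be read */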
+ if (content->kdm()) {
+ cpl->add (decrypt_kdm_with_helpful_error(content->kdm().get()));
}
_reels = cpl->reels ();
set_decode_referenced (false);
_reel = _reels.begin ();
- _offset = 0;
get_readers ();
+
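+ /* Collect the font IDs used by the subtitle assets in these reels and allocate replacements that are unique across the whole DCP */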
+ _font_id_allocator.add_fonts_from_reels(_reels);
+ _font_id_allocator.allocate();
}
return true;
}
- double const vfr = _dcp_content->active_video_frame_rate (film());
+ auto const vfr = _dcp_content->active_video_frame_rate (film());
/* Frame within the (played part of the) reel that is coming up next */
- int64_t const frame = _next.frames_round (vfr);
+ auto const frame = _next.frames_round (vfr);
- shared_ptr<dcp::PictureAsset> picture_asset = (*_reel)->main_picture()->asset();
+ auto picture_asset = (*_reel)->main_picture()->asset();
DCPOMATIC_ASSERT (picture_asset);
/* We must emit texts first as when we emit the video for this frame
pass_texts (_next, picture_asset->size());
if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
- int64_t const entry_point = (*_reel)->main_picture()->entry_point().get_value_or(0);
+ auto const entry_point = (*_reel)->main_picture()->entry_point().get_value_or(0);
if (_mono_reader) {
video->emit (
film(),
- shared_ptr<ImageProxy> (
- new J2KImageProxy (
- _mono_reader->get_frame (entry_point + frame),
- picture_asset->size(),
- AV_PIX_FMT_XYZ12LE,
- _forced_reduction
- )
+ std::make_shared<J2KImageProxy>(
+ _mono_reader->get_frame (entry_point + frame),
+ picture_asset->size(),
+ AV_PIX_FMT_XYZ12LE,
+ _forced_reduction
),
_offset + frame
);
} else {
video->emit (
film(),
- shared_ptr<ImageProxy> (
- new J2KImageProxy (
- _stereo_reader->get_frame (entry_point + frame),
- picture_asset->size(),
- dcp::EYE_LEFT,
- AV_PIX_FMT_XYZ12LE,
- _forced_reduction
- )
+ std::make_shared<J2KImageProxy>(
+ _stereo_reader->get_frame (entry_point + frame),
+ picture_asset->size(),
+ dcp::Eye::LEFT,
+ AV_PIX_FMT_XYZ12LE,
+ _forced_reduction
),
_offset + frame
);
video->emit (
film(),
- shared_ptr<ImageProxy> (
- new J2KImageProxy (
- _stereo_reader->get_frame (entry_point + frame),
- picture_asset->size(),
- dcp::EYE_RIGHT,
- AV_PIX_FMT_XYZ12LE,
- _forced_reduction
- )
+ std::make_shared<J2KImageProxy>(
+ _stereo_reader->get_frame (entry_point + frame),
+ picture_asset->size(),
+ dcp::Eye::RIGHT,
+ AV_PIX_FMT_XYZ12LE,
+ _forced_reduction
),
_offset + frame
);
}
if (_sound_reader && (_decode_referenced || !_dcp_content->reference_audio())) {
- int64_t const entry_point = (*_reel)->main_sound()->entry_point().get_value_or(0);
- shared_ptr<const dcp::SoundFrame> sf = _sound_reader->get_frame (entry_point + frame);
- uint8_t const * from = sf->data ();
+ auto const entry_point = (*_reel)->main_sound()->entry_point().get_value_or(0);
+ auto sf = _sound_reader->get_frame (entry_point + frame);
+ auto from = sf->data ();
int const channels = _dcp_content->audio->stream()->channels ();
int const frames = sf->size() / (3 * channels);
- shared_ptr<AudioBuffers> data (new AudioBuffers (channels, frames));
- float** data_data = data->data();
+ auto data = make_shared<AudioBuffers>(channels, frames);
+ auto data_data = data->data();
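+ /* Unpack the interleaved 24-bit samples (assembled with from[2] as the most significant byte) and scale each one to a float in roughly [-1, 1] */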
for (int i = 0; i < frames; ++i) {
for (int j = 0; j < channels; ++j) {
data_data[j][i] = static_cast<int> ((from[0] << 8) | (from[1] << 16) | (from[2] << 24)) / static_cast<float> (INT_MAX - 256);
if (_atmos_reader) {
DCPOMATIC_ASSERT (_atmos_metadata);
- int64_t const entry_point = (*_reel)->atmos()->entry_point().get_value_or(0);
+ auto const entry_point = (*_reel)->atmos()->entry_point().get_value_or(0);
atmos->emit (film(), _atmos_reader->get_frame(entry_point + frame), _offset + frame, *_atmos_metadata);
}
return false;
}
+
void
DCPDecoder::pass_texts (ContentTime next, dcp::Size size)
{
- list<shared_ptr<TextDecoder> >::const_iterator decoder = text.begin ();
+ auto decoder = text.begin ();
if (decoder == text.end()) {
/* It's possible that there is now a main subtitle but no TextDecoders, for example if
the CPL has just changed but the TextContent's texts have not been recreated yet.
pass_texts (
next,
(*_reel)->main_subtitle()->asset(),
- _dcp_content->reference_text(TEXT_OPEN_SUBTITLE),
+ _dcp_content->reference_text(TextType::OPEN_SUBTITLE),
(*_reel)->main_subtitle()->entry_point().get_value_or(0),
*decoder,
size
for (auto i: (*_reel)->closed_captions()) {
pass_texts (
- next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point().get_value_or(0), *decoder, size
+ next, i->asset(), _dcp_content->reference_text(TextType::CLOSED_CAPTION), i->entry_point().get_value_or(0), *decoder, size
);
++decoder;
}
ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size
)
{
- double const vfr = _dcp_content->active_video_frame_rate (film());
+ auto const vfr = _dcp_content->active_video_frame_rate (film());
/* Frame within the (played part of the) reel that is coming up next */
- int64_t const frame = next.frames_round (vfr);
+ auto const frame = next.frames_round (vfr);
if (_decode_referenced || !reference) {
- list<shared_ptr<dcp::Subtitle> > subs = asset->subtitles_during (
+ auto subs = asset->subtitles_during (
dcp::Time (entry_point + frame, vfr, vfr),
dcp::Time (entry_point + frame + 1, vfr, vfr),
true
);
- list<dcp::SubtitleString> strings;
+ vector<dcp::SubtitleString> strings;
for (auto i: subs) {
- shared_ptr<dcp::SubtitleString> is = dynamic_pointer_cast<dcp::SubtitleString> (i);
+ auto is = dynamic_pointer_cast<const dcp::SubtitleString>(i);
if (is) {
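+ /* Flush the strings collected so far whenever the in/out times change, so that each emit_plain() call covers a single period */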
if (!strings.empty() && (strings.back().in() != is->in() || strings.back().out() != is->out())) {
- dcp::SubtitleString b = strings.back();
+ auto b = strings.back();
decoder->emit_plain (
ContentTimePeriod (
ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
),
- strings
+ strings,
+ asset->subtitle_standard()
);
strings.clear ();
}
- strings.push_back (*is);
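+ /* Copy the subtitle and replace its font ID with the unique one allocated for this reel/asset */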
+ dcp::SubtitleString is_copy = *is;
+ is_copy.set_font(_font_id_allocator.font_id(_reel - _reels.begin(), asset->id(), is_copy.font().get_value_or("")));
+ strings.push_back(is_copy);
}
/* XXX: perhaps these image subs should also be collected together like the string ones are;
this would need to be done both here and in DCPSubtitleDecoder.
*/
- shared_ptr<dcp::SubtitleImage> ii = dynamic_pointer_cast<dcp::SubtitleImage> (i);
+ auto ii = dynamic_pointer_cast<const dcp::SubtitleImage>(i);
if (ii) {
emit_subtitle_image (
ContentTimePeriod (
}
if (!strings.empty()) {
- dcp::SubtitleString b = strings.back();
+ auto b = strings.back();
decoder->emit_plain (
ContentTimePeriod (
ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
),
- strings
+ strings,
+ asset->subtitle_standard()
);
strings.clear ();
}
}
}
+
void
DCPDecoder::next_reel ()
{
get_readers ();
}
+
void
DCPDecoder::get_readers ()
{
return;
}
- if ((*_reel)->main_picture()) {
- shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
- shared_ptr<dcp::MonoPictureAsset> mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
- shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
+ DCPOMATIC_ASSERT(video);
+ if (!video->ignore() && (*_reel)->main_picture()) {
+ auto asset = (*_reel)->main_picture()->asset ();
+ auto mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
+ auto stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
DCPOMATIC_ASSERT (mono || stereo);
if (mono) {
_mono_reader = mono->start_read ();
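+ /* These readers skip per-frame HMAC (integrity) checks; presumably this is to avoid the extra cost during playback */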
+ _mono_reader->set_check_hmac (false);
_stereo_reader.reset ();
} else {
_stereo_reader = stereo->start_read ();
+ _stereo_reader->set_check_hmac (false);
_mono_reader.reset ();
}
} else {
_stereo_reader.reset ();
}
- if ((*_reel)->main_sound()) {
+ DCPOMATIC_ASSERT(audio);
+ if (!audio->ignore() && (*_reel)->main_sound()) {
_sound_reader = (*_reel)->main_sound()->asset()->start_read ();
+ _sound_reader->set_check_hmac (false);
} else {
_sound_reader.reset ();
}
if ((*_reel)->atmos()) {
- shared_ptr<dcp::AtmosAsset> asset = (*_reel)->atmos()->asset();
+ auto asset = (*_reel)->atmos()->asset();
_atmos_reader = asset->start_read();
+ _atmos_reader->set_check_hmac (false);
_atmos_metadata = AtmosMetadata (asset);
} else {
_atmos_reader.reset ();
}
}
+
void
DCPDecoder::seek (ContentTime t, bool accurate)
{
/* Pre-roll for subs */
- ContentTime pre = t - ContentTime::from_seconds (pre_roll_seconds);
+ auto pre = t - ContentTime::from_seconds (pre_roll_seconds);
if (pre < ContentTime()) {
pre = ContentTime ();
}
pre >= ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()))
) {
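+ /* The seek target lies beyond this reel, so subtract the reel's duration and move on to the next one */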
- ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()));
+ auto rd = ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()));
pre -= rd;
t -= rd;
next_reel ();
/* Pass texts in the pre-roll */
- double const vfr = _dcp_content->active_video_frame_rate (film());
+ auto const vfr = _dcp_content->active_video_frame_rate (film());
for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
pass_texts (pre, (*_reel)->main_picture()->asset()->size());
pre += ContentTime::from_frames (1, vfr);
_next = t;
}
+
void
DCPDecoder::set_decode_referenced (bool r)
{
}
}
+
void
DCPDecoder::set_forced_reduction (optional<int> reduction)
{
_forced_reduction = reduction;
}
+
string
DCPDecoder::calculate_lazy_digest (shared_ptr<const DCPContent> c) const
{
return d.get ();
}
+
ContentTime
DCPDecoder::position () const
{
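+ /* _offset is the total length, in frames, of the reels before the current one; _next is our position within the current reel */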
return ContentTime::from_frames(_offset, _dcp_content->active_video_frame_rate(film())) + _next;
}
-
-vector<FontData>
-DCPDecoder::fonts () const
-{
- vector<FontData> data;
- for (auto i: _reels) {
- if (i->main_subtitle() && i->main_subtitle()->asset()) {
- map<string, dcp::ArrayData> fm = i->main_subtitle()->asset()->font_data();
- for (map<string, dcp::ArrayData>::const_iterator j = fm.begin(); j != fm.end(); ++j) {
- data.push_back (FontData(j->first, j->second));
- }
- }
- }
- return data;
-}
-