/*
- Copyright (C) 2014-2016 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2014-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
#include "video_decoder.h"
#include "audio_decoder.h"
#include "j2k_image_proxy.h"
-#include "subtitle_decoder.h"
+#include "text_decoder.h"
#include "image.h"
#include "config.h"
#include <dcp/dcp.h>
-#include <dcp/decrypted_kdm.h>
#include <dcp/cpl.h>
#include <dcp/reel.h>
#include <dcp/mono_picture_asset.h>
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
+#include <dcp/reel_closed_caption_asset.h>
#include <dcp/mono_picture_frame.h>
#include <dcp/stereo_picture_frame.h>
#include <dcp/sound_frame.h>
#include <dcp/sound_asset_reader.h>
+#include <dcp/subtitle_image.h>
#include <boost/foreach.hpp>
#include <iostream>
+#include "i18n.h"
+
using std::list;
using std::cout;
using boost::shared_ptr;
using boost::dynamic_pointer_cast;
+using boost::optional;
DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
- : _dcp_content (c)
+ : DCP (c)
+ , _decode_referenced (false)
{
- video.reset (new VideoDecoder (this, c, log));
- audio.reset (new AudioDecoder (this, c->audio, fast, log));
-
- subtitle.reset (
- new SubtitleDecoder (
- this,
- c->subtitle,
- bind (&DCPDecoder::image_subtitles_during, this, _1, _2),
- bind (&DCPDecoder::text_subtitles_during, this, _1, _2)
- )
- );
-
- dcp::DCP dcp (c->directory ());
- dcp.read (false, 0, true);
- if (c->kdm ()) {
- dcp.add (dcp::DecryptedKDM (c->kdm().get (), Config::instance()->decryption_chain()->key().get ()));
+ if (c->video) {
+ video.reset (new VideoDecoder (this, c, log));
+ }
+ if (c->audio) {
+ audio.reset (new AudioDecoder (this, c->audio, log, fast));
+ }
+ BOOST_FOREACH (shared_ptr<TextContent> i, c->text) {
+		/* XXX: the time passed here should be that of the first subtitle, not 0 */
+ text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, i, log, ContentTime())));
+ }
+
+ list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
+
+ if (cpl_list.empty()) {
+ throw DCPError (_("No CPLs found in DCP."));
}
- DCPOMATIC_ASSERT (dcp.cpls().size() == 1);
- _reels = dcp.cpls().front()->reels ();
+
+ shared_ptr<dcp::CPL> cpl;
+ BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpl_list) {
+ if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) {
+ cpl = i;
+ }
+ }
+
+ if (!cpl) {
+ /* No CPL found; probably an old file that doesn't specify it;
+ just use the first one.
+ */
+ cpl = cpls().front ();
+ }
+
+ set_decode_referenced (false);
+
+ _reels = cpl->reels ();
_reel = _reels.begin ();
_offset = 0;
get_readers ();
}
+
bool
-DCPDecoder::pass (PassReason reason, bool)
+DCPDecoder::pass ()
{
if (_reel == _reels.end () || !_dcp_content->can_be_played ()) {
return true;
/* Frame within the (played part of the) reel that is coming up next */
int64_t const frame = _next.frames_round (vfr);
- if ((_mono_reader || _stereo_reader) && reason != PASS_REASON_SUBTITLE) {
- shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
+ shared_ptr<dcp::PictureAsset> picture_asset = (*_reel)->main_picture()->asset();
+ DCPOMATIC_ASSERT (picture_asset);
+
+	/* We must emit texts first because, when the video for this frame is
+	   emitted, it will expect the corresponding texts to be there already.
+	*/
+ pass_texts (_next, picture_asset->size());
+
+ if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
if (_mono_reader) {
- video->give (shared_ptr<ImageProxy> (new J2KImageProxy (_mono_reader->get_frame (entry_point + frame), asset->size())), _offset + frame);
+ video->emit (
+ shared_ptr<ImageProxy> (
+ new J2KImageProxy (
+ _mono_reader->get_frame (entry_point + frame),
+ picture_asset->size(),
+ AV_PIX_FMT_XYZ12LE,
+ _forced_reduction
+ )
+ ),
+ _offset + frame
+ );
} else {
- video->give (
- shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT)),
+ video->emit (
+ shared_ptr<ImageProxy> (
+ new J2KImageProxy (
+ _stereo_reader->get_frame (entry_point + frame),
+ picture_asset->size(),
+ dcp::EYE_LEFT,
+ AV_PIX_FMT_XYZ12LE,
+ _forced_reduction
+ )
+ ),
_offset + frame
);
- video->give (
- shared_ptr<ImageProxy> (new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT)),
+ video->emit (
+ shared_ptr<ImageProxy> (
+ new J2KImageProxy (
+ _stereo_reader->get_frame (entry_point + frame),
+ picture_asset->size(),
+ dcp::EYE_RIGHT,
+ AV_PIX_FMT_XYZ12LE,
+ _forced_reduction
+ )
+ ),
_offset + frame
);
}
}
- if (_sound_reader && reason != PASS_REASON_SUBTITLE) {
+ if (_sound_reader && (_decode_referenced || !_dcp_content->reference_audio())) {
int64_t const entry_point = (*_reel)->main_sound()->entry_point ();
shared_ptr<const dcp::SoundFrame> sf = _sound_reader->get_frame (entry_point + frame);
uint8_t const * from = sf->data ();
}
}
- audio->give (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
- }
-
- if ((*_reel)->main_subtitle ()) {
- int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
- list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
- dcp::Time (entry_point + frame, vfr, vfr),
- dcp::Time (entry_point + frame + 1, vfr, vfr),
- true
- );
-
- if (!subs.empty ()) {
- /* XXX: assuming that all `subs' are at the same time; maybe this is ok */
- subtitle->give_text (
- ContentTimePeriod (
- ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().in().as_seconds ()),
- ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().out().as_seconds ())
- ),
- subs
- );
- }
+ audio->emit (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
}
_next += ContentTime::from_frames (1, vfr);
return false;
}
+void
+DCPDecoder::pass_texts (ContentTime next, dcp::Size size)
+{
+ list<shared_ptr<TextDecoder> >::const_iterator decoder = text.begin ();
+ if ((*_reel)->main_subtitle()) {
+ DCPOMATIC_ASSERT (decoder != text.end ());
+ pass_texts (
+ next, (*_reel)->main_subtitle()->asset(), _dcp_content->reference_text(TEXT_OPEN_SUBTITLE), (*_reel)->main_subtitle()->entry_point(), *decoder, size
+ );
+ ++decoder;
+ }
+ BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> i, (*_reel)->closed_captions()) {
+ DCPOMATIC_ASSERT (decoder != text.end ());
+ pass_texts (
+ next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point(), *decoder, size
+ );
+ ++decoder;
+ }
+}
+
+void
+DCPDecoder::pass_texts (ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size)
+{
+ double const vfr = _dcp_content->active_video_frame_rate ();
+ /* Frame within the (played part of the) reel that is coming up next */
+ int64_t const frame = next.frames_round (vfr);
+
+ if (_decode_referenced || !reference) {
+ list<shared_ptr<dcp::Subtitle> > subs = asset->subtitles_during (
+ dcp::Time (entry_point + frame, vfr, vfr),
+ dcp::Time (entry_point + frame + 1, vfr, vfr),
+ true
+ );
+
+ BOOST_FOREACH (shared_ptr<dcp::Subtitle> i, subs) {
+ shared_ptr<dcp::SubtitleString> is = dynamic_pointer_cast<dcp::SubtitleString> (i);
+ if (is) {
+ list<dcp::SubtitleString> s;
+ s.push_back (*is);
+ decoder->emit_plain (
+ ContentTimePeriod (
+ ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
+ ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
+ ),
+ s
+ );
+ }
+
+ shared_ptr<dcp::SubtitleImage> ii = dynamic_pointer_cast<dcp::SubtitleImage> (i);
+ if (ii) {
+ shared_ptr<Image> image(new Image(ii->png_image()));
+				/* Set up rect with width and height as fractions of the video frame size */
+ dcpomatic::Rect<double> rect(0, 0, image->size().width / double(size.width), image->size().height / double(size.height));
+
+ /* add in position */
+
+ switch (ii->h_align()) {
+ case dcp::HALIGN_LEFT:
+ rect.x += ii->h_position();
+ break;
+ case dcp::HALIGN_CENTER:
+ rect.x += 0.5 + ii->h_position() - rect.width / 2;
+ break;
+ case dcp::HALIGN_RIGHT:
+ rect.x += 1 - ii->h_position() - rect.width;
+ break;
+ }
+
+ switch (ii->v_align()) {
+ case dcp::VALIGN_TOP:
+ rect.y += ii->v_position();
+ break;
+ case dcp::VALIGN_CENTER:
+ rect.y += 0.5 + ii->v_position() - rect.height / 2;
+ break;
+ case dcp::VALIGN_BOTTOM:
+ rect.y += 1 - ii->v_position() - rect.height;
+ break;
+ }
+
+ decoder->emit_bitmap (
+ ContentTimePeriod (
+ ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
+ ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
+ ),
+ image, rect
+ );
+ }
+ }
+ }
+}
+
void
DCPDecoder::next_reel ()
{
void
DCPDecoder::get_readers ()
{
- if (_reel == _reels.end()) {
+ if (_reel == _reels.end() || !_dcp_content->can_be_played ()) {
_mono_reader.reset ();
_stereo_reader.reset ();
_sound_reader.reset ();
void
DCPDecoder::seek (ContentTime t, bool accurate)
{
- video->seek (t, accurate);
- audio->seek (t, accurate);
- subtitle->seek (t, accurate);
+ if (!_dcp_content->can_be_played ()) {
+ return;
+ }
+
+ Decoder::seek (t, accurate);
- _offset = 0;
_reel = _reels.begin ();
- while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
- t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
- next_reel ();
- }
+ _offset = 0;
+ get_readers ();
- _next = t;
-}
+ int const pre_roll_seconds = 2;
+ /* Pre-roll for subs */
-list<ContentTimePeriod>
-DCPDecoder::image_subtitles_during (ContentTimePeriod, bool) const
-{
- return list<ContentTimePeriod> ();
-}
+ ContentTime pre = t - ContentTime::from_seconds (pre_roll_seconds);
+ if (pre < ContentTime()) {
+ pre = ContentTime ();
+ }
-list<ContentTimePeriod>
-DCPDecoder::text_subtitles_during (ContentTimePeriod period, bool starting) const
-{
- /* XXX: inefficient */
+ /* Seek to pre-roll position */
+
+ while (_reel != _reels.end() && pre >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
+ ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+ pre -= rd;
+ t -= rd;
+ next_reel ();
+ }
+
+ /* Pass texts in the pre-roll */
- list<ContentTimePeriod> ctp;
double const vfr = _dcp_content->active_video_frame_rate ();
+ for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
+ pass_texts (pre, (*_reel)->main_picture()->asset()->size());
+ pre += ContentTime::from_frames (1, vfr);
+ }
- BOOST_FOREACH (shared_ptr<dcp::Reel> r, _reels) {
- if (!r->main_subtitle ()) {
- continue;
- }
+ /* Seek to correct position */
- int64_t const entry_point = r->main_subtitle()->entry_point ();
+ while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
+ t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+ next_reel ();
+ }
- list<dcp::SubtitleString> subs = r->main_subtitle()->asset()->subtitles_during (
- dcp::Time (period.from.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
- dcp::Time (period.to.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
- starting
- );
+ _next = t;
+}
- BOOST_FOREACH (dcp::SubtitleString const & s, subs) {
- ctp.push_back (
- ContentTimePeriod (
- ContentTime::from_seconds (s.in().as_seconds ()),
- ContentTime::from_seconds (s.out().as_seconds ())
- )
- );
- }
+void
+DCPDecoder::set_decode_referenced (bool r)
+{
+ _decode_referenced = r;
+
+ if (video) {
+ video->set_ignore (_dcp_content->reference_video() && !_decode_referenced);
+ }
+ if (audio) {
+ audio->set_ignore (_dcp_content->reference_audio() && !_decode_referenced);
}
+}
- return ctp;
+void
+DCPDecoder::set_forced_reduction (optional<int> reduction)
+{
+ _forced_reduction = reduction;
}