using std::cout;
using std::list;
using boost::shared_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;
-DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
+DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
: DCP (c)
, _decode_referenced (false)
{
- video.reset (new VideoDecoder (this, c, log));
- audio.reset (new AudioDecoder (this, c->audio, log));
-
- subtitle.reset (new SubtitleDecoder (this, c->subtitle, log));
+ if (c->video) {
+ video.reset (new VideoDecoder (this, c, log));
+ }
+ if (c->audio) {
+ audio.reset (new AudioDecoder (this, c->audio, log, fast));
+ }
+ if (c->subtitle) {
+ /* XXX: this time here should be the time of the first subtitle, not 0 */
+ subtitle.reset (new SubtitleDecoder (this, c->subtitle, log, ContentTime()));
+ }
shared_ptr<dcp::CPL> cpl;
BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpls ()) {
cpl = cpls().front ();
}
+ set_decode_referenced (false);
+
_reels = cpl->reels ();
_reel = _reels.begin ();
get_readers ();
}
+
/* NOTE(review): this region still contains unresolved diff markers (+/-)
   and appears to be missing unchanged context lines from the paste:
   `vfr` and `data` are used below but never defined in the visible text —
   presumably `vfr` comes from _dcp_content->active_video_frame_rate()
   (as in pass_subtitles) and `data` from a dropped sound-reading block.
   Restore both from the full file before building.
*/
bool
DCPDecoder::pass ()
{
/* Frame within the (played part of the) reel that is coming up next */
int64_t const frame = _next.frames_round (vfr);
+ /* We must emit subtitles first as when we emit the video for this frame
it will expect already to have the subs.
*/
+ pass_subtitles (_next);
+
/* Emit video (unless it is referenced by the project and we are not
   decoding referenced material).
*/
if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
if (_mono_reader) {
/* 2D: one J2K frame, decoded lazily by J2KImageProxy */
video->emit (
shared_ptr<ImageProxy> (
- new J2KImageProxy (_mono_reader->get_frame (entry_point + frame), asset->size(), AV_PIX_FMT_XYZ12LE)
+ new J2KImageProxy (
+ _mono_reader->get_frame (entry_point + frame),
+ asset->size(),
+ AV_PIX_FMT_XYZ12LE,
+ _forced_reduction
+ )
),
_offset + frame
);
} else {
/* 3D: emit the left then the right eye for this frame */
video->emit (
shared_ptr<ImageProxy> (
- new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT, AV_PIX_FMT_XYZ12LE)),
+ new J2KImageProxy (
+ _stereo_reader->get_frame (entry_point + frame),
+ asset->size(),
+ dcp::EYE_LEFT,
+ AV_PIX_FMT_XYZ12LE,
+ _forced_reduction
+ )
+ ),
_offset + frame
);
video->emit (
shared_ptr<ImageProxy> (
- new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT, AV_PIX_FMT_XYZ12LE)),
+ new J2KImageProxy (
+ _stereo_reader->get_frame (entry_point + frame),
+ asset->size(),
+ dcp::EYE_RIGHT,
+ AV_PIX_FMT_XYZ12LE,
+ _forced_reduction
+ )
+ ),
_offset + frame
);
}
/* NOTE(review): this audio emit sits inside the video-reader guard, so
   audio would only be emitted when video is present, and `data` is
   undefined in the visible text — the block that reads and converts the
   sound frame was likely dropped from the paste; restore it (with its
   own reader/reference-audio guard) from the original file.
*/
audio->emit (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
}
/* Move on one frame; start the next reel when this one is exhausted */
+ _next += ContentTime::from_frames (1, vfr);
+
+ if ((*_reel)->main_picture ()) {
+ if (_next.frames_round (vfr) >= (*_reel)->main_picture()->duration()) {
+ next_reel ();
+ _next = ContentTime ();
+ }
+ }
+
+ return false;
+}
+
+void
+DCPDecoder::pass_subtitles (ContentTime next)
+{
+ double const vfr = _dcp_content->active_video_frame_rate ();
+ /* Frame within the (played part of the) reel that is coming up next */
+ int64_t const frame = next.frames_round (vfr);
+
if ((*_reel)->main_subtitle() && (_decode_referenced || !_dcp_content->reference_subtitle())) {
int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
true
);
- if (!subs.empty ()) {
- /* XXX: assuming that all `subs' are at the same time; maybe this is ok */
+ BOOST_FOREACH (dcp::SubtitleString i, subs) {
+ list<dcp::SubtitleString> s;
+ s.push_back (i);
subtitle->emit_text (
ContentTimePeriod (
- ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().in().as_seconds ()),
- ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().out().as_seconds ())
+ ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i.in().as_seconds ()),
+ ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i.out().as_seconds ())
),
- subs
+ s
);
}
}
-
- _next += ContentTime::from_frames (1, vfr);
-
- if ((*_reel)->main_picture ()) {
- if (_next.frames_round (vfr) >= (*_reel)->main_picture()->duration()) {
- next_reel ();
- _next = ContentTime ();
- }
- }
-
- return false;
}
void
}
void
-DCPDecoder::seek (ContentTime t, bool)
+DCPDecoder::seek (ContentTime t, bool accurate)
{
+ if (!_dcp_content->can_be_played ()) {
+ return;
+ }
+
+ Decoder::seek (t, accurate);
+
_reel = _reels.begin ();
_offset = 0;
get_readers ();
+ int const pre_roll_seconds = 2;
+
+ /* Pre-roll for subs */
+
+ ContentTime pre = t - ContentTime::from_seconds (pre_roll_seconds);
+ if (pre < ContentTime()) {
+ pre = ContentTime ();
+ }
+
+ /* Seek to pre-roll position */
+
+ while (_reel != _reels.end() && pre >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
+ ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+ pre -= rd;
+ t -= rd;
+ next_reel ();
+ }
+
+ /* Pass subtitles in the pre-roll */
+
+ double const vfr = _dcp_content->active_video_frame_rate ();
+ for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
+ pass_subtitles (pre);
+ pre += ContentTime::from_frames (1, vfr);
+ }
+
+ /* Seek to correct position */
+
while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
next_reel ();
}
void
-DCPDecoder::set_decode_referenced ()
+DCPDecoder::set_decode_referenced (bool r)
+{
+ _decode_referenced = r;
+
+ if (video) {
+ video->set_ignore (_dcp_content->reference_video() && !_decode_referenced);
+ }
+ if (audio) {
+ audio->set_ignore (_dcp_content->reference_audio() && !_decode_referenced);
+ }
+}
+
+void
+DCPDecoder::set_forced_reduction (optional<int> reduction)
{
- _decode_referenced = true;
+ _forced_reduction = reduction;
}