X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fdcp_decoder.cc;h=72db5369c81e9dd162457d5636bb0c3b420b44fa;hb=72b11d5eb036651b6ff68edf3ed270e8fc52960f;hp=25c805d3f2ab74d8a9c77c7d7941d18a8e7aad93;hpb=22b13599407e45d85d1c83e0805aa14965b0ab19;p=dcpomatic.git

diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc
index 25c805d3f..72db5369c 100644
--- a/src/lib/dcp_decoder.cc
+++ b/src/lib/dcp_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2014-2016 Carl Hetherington
+    Copyright (C) 2014-2018 Carl Hetherington
 
     This file is part of DCP-o-matic.
 
@@ -24,7 +24,8 @@
 #include "video_decoder.h"
 #include "audio_decoder.h"
 #include "j2k_image_proxy.h"
-#include "subtitle_decoder.h"
+#include "text_decoder.h"
+#include "ffmpeg_image_proxy.h"
 #include "image.h"
 #include "config.h"
 #include
@@ -37,29 +38,46 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 
+#include "i18n.h"
+
 using std::list;
 using std::cout;
 using boost::shared_ptr;
 using boost::dynamic_pointer_cast;
+using boost::optional;
 
-DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
+DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
 	: DCP (c)
 	, _decode_referenced (false)
 {
-	video.reset (new VideoDecoder (this, c, log));
-	audio.reset (new AudioDecoder (this, c->audio, log));
+	if (c->video) {
+		video.reset (new VideoDecoder (this, c, log));
+	}
+	if (c->audio) {
+		audio.reset (new AudioDecoder (this, c->audio, log, fast));
+	}
+	BOOST_FOREACH (shared_ptr<TextContent> i, c->text) {
+		/* XXX: this time here should be the time of the first subtitle, not 0 */
+		text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, i, log, ContentTime())));
+	}
+
+	list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
 
-	subtitle.reset (new SubtitleDecoder (this, c->subtitle, log));
+	if (cpl_list.empty()) {
+		throw DCPError (_("No CPLs found in DCP."));
+	}
 
 	shared_ptr<dcp::CPL> cpl;
-	BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpls ()) {
+	BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpl_list) {
 		if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) {
 			cpl = i;
 		}
@@ -72,6 +90,8 @@ DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
 		cpl = cpls().front ();
 	}
 
+	set_decode_referenced (false);
+
 	_reels = cpl->reels ();
 
 	_reel = _reels.begin ();
@@ -79,11 +99,12 @@ DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
 	get_readers ();
 }
 
-void
+
+bool
 DCPDecoder::pass ()
 {
 	if (_reel == _reels.end () || !_dcp_content->can_be_played ()) {
-		return;
+		return true;
 	}
 
 	double const vfr = _dcp_content->active_video_frame_rate ();
@@ -91,26 +112,52 @@ DCPDecoder::pass ()
 	/* Frame within the (played part of the) reel that is coming up next */
 	int64_t const frame = _next.frames_round (vfr);
 
+	shared_ptr<dcp::PictureAsset> picture_asset = (*_reel)->main_picture()->asset();
+	DCPOMATIC_ASSERT (picture_asset);
+
+	/* We must emit texts first as when we emit the video for this frame
+	   it will expect already to have the texts.
+	*/
+	pass_texts (_next, picture_asset->size());
+
 	if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
-		shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
 		int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
 		if (_mono_reader) {
 			video->emit (
 				shared_ptr<ImageProxy> (
-					new J2KImageProxy (_mono_reader->get_frame (entry_point + frame), asset->size(), AV_PIX_FMT_XYZ12LE)
+					new J2KImageProxy (
+						_mono_reader->get_frame (entry_point + frame),
+						picture_asset->size(),
+						AV_PIX_FMT_XYZ12LE,
+						_forced_reduction
+						)
 					),
 				_offset + frame
 				);
 		} else {
 			video->emit (
 				shared_ptr<ImageProxy> (
-					new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT, AV_PIX_FMT_XYZ12LE)),
+					new J2KImageProxy (
+						_stereo_reader->get_frame (entry_point + frame),
+						picture_asset->size(),
+						dcp::EYE_LEFT,
+						AV_PIX_FMT_XYZ12LE,
+						_forced_reduction
+						)
+					),
 				_offset + frame
 				);
 
 			video->emit (
 				shared_ptr<ImageProxy> (
-					new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT, AV_PIX_FMT_XYZ12LE)),
+					new J2KImageProxy (
+						_stereo_reader->get_frame (entry_point + frame),
+						picture_asset->size(),
+						dcp::EYE_RIGHT,
+						AV_PIX_FMT_XYZ12LE,
+						_forced_reduction
+						)
+					),
 				_offset + frame
 				);
 		}
@@ -135,26 +182,6 @@ DCPDecoder::pass ()
 		audio->emit (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
 	}
 
-	if ((*_reel)->main_subtitle() && (_decode_referenced || !_dcp_content->reference_subtitle())) {
-		int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
-		list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
-			dcp::Time (entry_point + frame, vfr, vfr),
-			dcp::Time (entry_point + frame + 1, vfr, vfr),
-			true
-			);
-
-		if (!subs.empty ()) {
-			/* XXX: assuming that all `subs' are at the same time; maybe this is ok */
-			subtitle->emit_text (
-				ContentTimePeriod (
-					ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().in().as_seconds ()),
-					ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().out().as_seconds ())
-					),
-				subs
-				);
-		}
-	}
-
 	_next += ContentTime::from_frames (1, vfr);
 
 	if ((*_reel)->main_picture ()) {
@@ -163,6 +190,101 @@ DCPDecoder::pass ()
 			_next = ContentTime ();
 		}
 	}
+
+	return false;
+}
+
+void
+DCPDecoder::pass_texts (ContentTime next, dcp::Size size)
+{
+	list<shared_ptr<TextDecoder> >::const_iterator decoder = text.begin ();
+	if ((*_reel)->main_subtitle()) {
+		DCPOMATIC_ASSERT (decoder != text.end ());
+		pass_texts (
+			next, (*_reel)->main_subtitle()->asset(), _dcp_content->reference_text(TEXT_OPEN_SUBTITLE), (*_reel)->main_subtitle()->entry_point(), *decoder, size
+			);
+		++decoder;
+	}
+	BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> i, (*_reel)->closed_captions()) {
+		DCPOMATIC_ASSERT (decoder != text.end ());
+		pass_texts (
+			next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point(), *decoder, size
+			);
+		++decoder;
+	}
+}
+
+void
+DCPDecoder::pass_texts (ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size)
+{
+	double const vfr = _dcp_content->active_video_frame_rate ();
+	/* Frame within the (played part of the) reel that is coming up next */
+	int64_t const frame = next.frames_round (vfr);
+
+	if (_decode_referenced || !reference) {
+		list<shared_ptr<dcp::Subtitle> > subs = asset->subtitles_during (
+			dcp::Time (entry_point + frame, vfr, vfr),
+			dcp::Time (entry_point + frame + 1, vfr, vfr),
+			true
+			);
+
+		BOOST_FOREACH (shared_ptr<dcp::Subtitle> i, subs) {
+			shared_ptr<dcp::SubtitleString> is = dynamic_pointer_cast<dcp::SubtitleString> (i);
+			if (is) {
+				list<dcp::SubtitleString> s;
+				s.push_back (*is);
+				decoder->emit_plain (
+					ContentTimePeriod (
+						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
+						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
+						),
+					s
+					);
+			}
+
+			shared_ptr<dcp::SubtitleImage> ii = dynamic_pointer_cast<dcp::SubtitleImage> (i);
+			if (ii) {
+				FFmpegImageProxy proxy (ii->png_image());
+				shared_ptr<Image> image = proxy.image().first;
+				/* set up rect with height and width */
+				dcpomatic::Rect<double> rect(0, 0, image->size().width / double(size.width), image->size().height / double(size.height));
+
+				/* add in position */
+
+				switch (ii->h_align()) {
+				case dcp::HALIGN_LEFT:
+					rect.x += ii->h_position();
+					break;
+				case dcp::HALIGN_CENTER:
+					rect.x += 0.5 + ii->h_position() - rect.width / 2;
+					break;
+				case dcp::HALIGN_RIGHT:
+					rect.x += 1 - ii->h_position() - rect.width;
+					break;
+				}
+
+				switch (ii->v_align()) {
+				case dcp::VALIGN_TOP:
+					rect.y += ii->v_position();
+					break;
+				case dcp::VALIGN_CENTER:
+					rect.y += 0.5 + ii->v_position() - rect.height / 2;
+					break;
+				case dcp::VALIGN_BOTTOM:
+					rect.y += 1 - ii->v_position() - rect.height;
+					break;
+				}
+
+				decoder->emit_bitmap (
+					ContentTimePeriod (
+						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
+						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
+						),
+					image, rect
+					);
+			}
+		}
+	}
 }
 
 void
@@ -208,12 +330,46 @@ DCPDecoder::get_readers ()
 }
 
 void
-DCPDecoder::seek (ContentTime t, bool)
+DCPDecoder::seek (ContentTime t, bool accurate)
 {
+	if (!_dcp_content->can_be_played ()) {
+		return;
+	}
+
+	Decoder::seek (t, accurate);
+
 	_reel = _reels.begin ();
 	_offset = 0;
 	get_readers ();
 
+	int const pre_roll_seconds = 2;
+
+	/* Pre-roll for subs */
+
+	ContentTime pre = t - ContentTime::from_seconds (pre_roll_seconds);
+	if (pre < ContentTime()) {
+		pre = ContentTime ();
+	}
+
+	/* Seek to pre-roll position */
+
+	while (_reel != _reels.end() && pre >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
+		ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+		pre -= rd;
+		t -= rd;
+		next_reel ();
+	}
+
+	/* Pass texts in the pre-roll */
+
+	double const vfr = _dcp_content->active_video_frame_rate ();
+	for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
+		pass_texts (pre, (*_reel)->main_picture()->asset()->size());
+		pre += ContentTime::from_frames (1, vfr);
+	}
+
+	/* Seek to correct position */
+
 	while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
 		t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
 		next_reel ();
@@ -223,7 +379,20 @@ DCPDecoder::seek (ContentTime t, bool)
 }
 
 void
-DCPDecoder::set_decode_referenced ()
+DCPDecoder::set_decode_referenced (bool r)
+{
+	_decode_referenced = r;
+
+	if (video) {
+		video->set_ignore (_dcp_content->reference_video() && !_decode_referenced);
+	}
+	if (audio) {
+		audio->set_ignore (_dcp_content->reference_audio() && !_decode_referenced);
+	}
+}
+
+void
+DCPDecoder::set_forced_reduction (optional<int> reduction)
 {
-	_decode_referenced = true;
+	_forced_reduction = reduction;
 }
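
Note on the bitmap-subtitle placement added in pass_texts() above: the PNG is first expressed as a fraction of the picture size (image width/height divided by picture width/height), and the subtitle's h/v position then offsets that rectangle relative to its alignment reference (left/centre/right, top/centre/bottom). The standalone sketch below repeats that arithmetic outside dcpomatic; all names and types in it are hypothetical stand-ins, not dcpomatic or libdcp classes.

// Standalone illustration of the alignment arithmetic used in pass_texts() above.
// Everything here is a hypothetical stand-in; none of these types come from dcpomatic or libdcp.
#include <iostream>

enum class HAlign { LEFT, CENTER, RIGHT };
enum class VAlign { TOP, CENTER, BOTTOM };

// Rectangle in normalised picture coordinates (0..1 on each axis).
struct Rect {
	double x;
	double y;
	double width;
	double height;
};

/* image_w/image_h: subtitle PNG size in pixels.
   screen_w/screen_h: picture size in pixels.
   h_pos/v_pos: offsets from the alignment reference, as fractions of the picture size. */
Rect position_bitmap_subtitle (
	int image_w, int image_h,
	int screen_w, int screen_h,
	HAlign h_align, double h_pos,
	VAlign v_align, double v_pos
	)
{
	/* Size of the PNG as a fraction of the picture */
	Rect rect = { 0, 0, double(image_w) / screen_w, double(image_h) / screen_h };

	/* Horizontal offset from the chosen reference (left edge, centre or right edge) */
	switch (h_align) {
	case HAlign::LEFT:
		rect.x += h_pos;
		break;
	case HAlign::CENTER:
		rect.x += 0.5 + h_pos - rect.width / 2;
		break;
	case HAlign::RIGHT:
		rect.x += 1 - h_pos - rect.width;
		break;
	}

	/* Vertical offset, same idea */
	switch (v_align) {
	case VAlign::TOP:
		rect.y += v_pos;
		break;
	case VAlign::CENTER:
		rect.y += 0.5 + v_pos - rect.height / 2;
		break;
	case VAlign::BOTTOM:
		rect.y += 1 - v_pos - rect.height;
		break;
	}

	return rect;
}

int main ()
{
	/* A 480x135 PNG on a 1998x1080 picture, centred horizontally, 8% up from the bottom */
	Rect r = position_bitmap_subtitle (480, 135, 1998, 1080, HAlign::CENTER, 0, VAlign::BOTTOM, 0.08);
	std::cout << "x=" << r.x << " y=" << r.y << " w=" << r.width << " h=" << r.height << "\n";
	return 0;
}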