X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fdcp_decoder.cc;h=72db5369c81e9dd162457d5636bb0c3b420b44fa;hb=72b11d5eb036651b6ff68edf3ed270e8fc52960f;hp=b9cb66a607c0bdd10c6c53a20596c42d93b3e3d8;hpb=a1ad809a3c76dcc4849f18015ac52b4bc51003aa;p=dcpomatic.git

diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc
index b9cb66a60..72db5369c 100644
--- a/src/lib/dcp_decoder.cc
+++ b/src/lib/dcp_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2014-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2014-2018 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -25,6 +25,7 @@
 #include "audio_decoder.h"
 #include "j2k_image_proxy.h"
 #include "text_decoder.h"
+#include "ffmpeg_image_proxy.h"
 #include "image.h"
 #include "config.h"
 #include
@@ -42,6 +43,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -110,20 +112,22 @@ DCPDecoder::pass ()
 	/* Frame within the (played part of the) reel that is coming up next */
 	int64_t const frame = _next.frames_round (vfr);
 
+	shared_ptr<dcp::PictureAsset> picture_asset = (*_reel)->main_picture()->asset();
+	DCPOMATIC_ASSERT (picture_asset);
+
 	/* We must emit texts first as when we emit the video for this frame
 	   it will expect already to have the texts.
 	*/
-	pass_texts (_next);
+	pass_texts (_next, picture_asset->size());
 
 	if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
-		shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
 		int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
 		if (_mono_reader) {
 			video->emit (
 				shared_ptr<ImageProxy> (
 					new J2KImageProxy (
 						_mono_reader->get_frame (entry_point + frame),
-						asset->size(),
+						picture_asset->size(),
 						AV_PIX_FMT_XYZ12LE,
 						_forced_reduction
 						)
@@ -135,7 +139,7 @@ DCPDecoder::pass ()
 				shared_ptr<ImageProxy> (
 					new J2KImageProxy (
 						_stereo_reader->get_frame (entry_point + frame),
-						asset->size(),
+						picture_asset->size(),
 						dcp::EYE_LEFT,
 						AV_PIX_FMT_XYZ12LE,
 						_forced_reduction
@@ -148,7 +152,7 @@ DCPDecoder::pass ()
 				shared_ptr<ImageProxy> (
 					new J2KImageProxy (
 						_stereo_reader->get_frame (entry_point + frame),
-						asset->size(),
+						picture_asset->size(),
 						dcp::EYE_RIGHT,
 						AV_PIX_FMT_XYZ12LE,
 						_forced_reduction
@@ -191,27 +195,27 @@ DCPDecoder::pass ()
 }
 
 void
-DCPDecoder::pass_texts (ContentTime next)
+DCPDecoder::pass_texts (ContentTime next, dcp::Size size)
 {
 	list<shared_ptr<TextDecoder> >::const_iterator decoder = text.begin ();
 	if ((*_reel)->main_subtitle()) {
 		DCPOMATIC_ASSERT (decoder != text.end ());
 		pass_texts (
-			next, (*_reel)->main_subtitle()->asset(), _dcp_content->reference_text(TEXT_OPEN_SUBTITLE), (*_reel)->main_subtitle()->entry_point(), *decoder
+			next, (*_reel)->main_subtitle()->asset(), _dcp_content->reference_text(TEXT_OPEN_SUBTITLE), (*_reel)->main_subtitle()->entry_point(), *decoder, size
 			);
 		++decoder;
 	}
-	if ((*_reel)->closed_caption()) {
+	BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> i, (*_reel)->closed_captions()) {
 		DCPOMATIC_ASSERT (decoder != text.end ());
 		pass_texts (
-			next, (*_reel)->closed_caption()->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), (*_reel)->closed_caption()->entry_point(), *decoder
+			next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point(), *decoder, size
 			);
 		++decoder;
 	}
 }
 
 void
-DCPDecoder::pass_texts (ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder)
+DCPDecoder::pass_texts (ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size)
 {
 	double const vfr = _dcp_content->active_video_frame_rate ();
 	/* Frame within the (played part of the) reel that is coming up next */
@@ -238,7 +242,47 @@ DCPDecoder::pass_texts (ContentTime next, shared_ptr<dcp::SubtitleAsset> asset,
 					);
 			}
 
-			/* XXX: image subtitles */
+			shared_ptr<dcp::SubtitleImage> ii = dynamic_pointer_cast<dcp::SubtitleImage> (i);
+			if (ii) {
+				FFmpegImageProxy proxy (ii->png_image());
+				shared_ptr<Image> image = proxy.image().first;
+				/* set up rect with height and width */
+				dcpomatic::Rect<double> rect(0, 0, image->size().width / double(size.width), image->size().height / double(size.height));
+
+				/* add in position */
+
+				switch (ii->h_align()) {
+				case dcp::HALIGN_LEFT:
+					rect.x += ii->h_position();
+					break;
+				case dcp::HALIGN_CENTER:
+					rect.x += 0.5 + ii->h_position() - rect.width / 2;
+					break;
+				case dcp::HALIGN_RIGHT:
+					rect.x += 1 - ii->h_position() - rect.width;
+					break;
+				}
+
+				switch (ii->v_align()) {
+				case dcp::VALIGN_TOP:
+					rect.y += ii->v_position();
+					break;
+				case dcp::VALIGN_CENTER:
+					rect.y += 0.5 + ii->v_position() - rect.height / 2;
+					break;
+				case dcp::VALIGN_BOTTOM:
+					rect.y += 1 - ii->v_position() - rect.height;
+					break;
+				}
+
+				decoder->emit_bitmap (
+					ContentTimePeriod (
+						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
+						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
+						),
+					image, rect
+					);
+			}
 		}
 	}
 }
@@ -320,7 +364,7 @@ DCPDecoder::seek (ContentTime t, bool accurate)
 
 		double const vfr = _dcp_content->active_video_frame_rate ();
 		for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
-			pass_texts (pre);
+			pass_texts (pre, (*_reel)->main_picture()->asset()->size());
 			pre += ContentTime::from_frames (1, vfr);
 		}
 
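The arithmetic in the added bitmap-subtitle block converts libdcp's alignment/position pair and the PNG's pixel size into a rectangle expressed as fractions of the frame, with x/y giving the top-left corner. The sketch below is a minimal, stand-alone illustration of that mapping only, not dcpomatic code: Rect, HAlign, VAlign and position_bitmap are simplified stand-ins for dcpomatic::Rect<double>, the dcp::HALIGN_*/dcp::VALIGN_* values and the inline logic in pass_texts, and the frame size used in main() is just an example.

// Stand-alone sketch of the bitmap-subtitle positioning arithmetic shown in
// the diff above.  Rect, HAlign, VAlign and position_bitmap are hypothetical
// stand-ins, not the dcpomatic/libdcp types.

#include <cstdio>

enum class HAlign { LEFT, CENTER, RIGHT };
enum class VAlign { TOP, CENTER, BOTTOM };

struct Rect {
	double x, y, width, height;
};

Rect
position_bitmap (int image_w, int image_h, int frame_w, int frame_h,
                 HAlign h_align, double h_position,
                 VAlign v_align, double v_position)
{
	/* Start with the image size as a fraction of the frame size */
	Rect rect = { 0, 0, double(image_w) / frame_w, double(image_h) / frame_h };

	/* Horizontal position is measured from the left edge, the frame centre or the right edge */
	switch (h_align) {
	case HAlign::LEFT:
		rect.x += h_position;
		break;
	case HAlign::CENTER:
		rect.x += 0.5 + h_position - rect.width / 2;
		break;
	case HAlign::RIGHT:
		rect.x += 1 - h_position - rect.width;
		break;
	}

	/* Vertical position likewise, from the top edge, the centre or the bottom edge */
	switch (v_align) {
	case VAlign::TOP:
		rect.y += v_position;
		break;
	case VAlign::CENTER:
		rect.y += 0.5 + v_position - rect.height / 2;
		break;
	case VAlign::BOTTOM:
		rect.y += 1 - v_position - rect.height;
		break;
	}

	return rect;
}

int
main ()
{
	/* A 480x270 PNG centred horizontally, 10% up from the bottom of a 1998x1080 frame */
	Rect r = position_bitmap (480, 270, 1998, 1080, HAlign::CENTER, 0, VAlign::BOTTOM, 0.1);
	printf ("x=%.3f y=%.3f w=%.3f h=%.3f\n", r.x, r.y, r.width, r.height);
	return 0;
}

As in the diff, CENTER positions are offsets from the frame centre, while RIGHT and BOTTOM positions are measured in from the far edge, which is why the image's fractional width or height is subtracted in those cases to recover the top-left corner.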