A possibly-better approach to seeking.
[dcpomatic.git] src/lib/dcp_decoder.cc
index 3bf7c1464c82b298a45c3913581f687e4f3fba58..38c2a7ccfa2bdcab2bb96ecf850eed4de53ba5e4 100644
 #include <dcp/sound_frame.h>
 #include <dcp/sound_asset_reader.h>
 #include <boost/foreach.hpp>
-#include <boost/make_shared.hpp>
 #include <iostream>
 
 using std::list;
 using std::cout;
 using boost::shared_ptr;
 using boost::dynamic_pointer_cast;
-using boost::make_shared;
 
-DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
-       : _dcp_content (c)
+DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
+       : DCP (c)
+       , _decode_referenced (false)
 {
        video.reset (new VideoDecoder (this, c, log));
-       audio.reset (new AudioDecoder (this, c->audio, fast, log));
+       audio.reset (new AudioDecoder (this, c->audio, log));
 
        subtitle.reset (
                new SubtitleDecoder (
@@ -67,13 +66,21 @@ DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
                        )
                );
 
-       dcp::DCP dcp (c->directory ());
-       dcp.read (false, 0, true);
-       if (c->kdm ()) {
-               dcp.add (dcp::DecryptedKDM (c->kdm().get (), Config::instance()->decryption_chain()->key().get ()));
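+       /* Pick out the CPL which this content has been set up to use, if it names one */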
+       shared_ptr<dcp::CPL> cpl;
+       BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpls ()) {
+               if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) {
+                       cpl = i;
+               }
+       }
+
+       if (!cpl) {
+               /* No CPL found; probably an old file that doesn't specify it;
+                  just use the first one.
+               */
+               cpl = cpls().front ();
        }
-       DCPOMATIC_ASSERT (dcp.cpls().size() == 1);
-       _reels = dcp.cpls().front()->reels ();
+
+       _reels = cpl->reels ();
 
        _reel = _reels.begin ();
        _offset = 0;
@@ -92,35 +99,43 @@ DCPDecoder::pass (PassReason reason, bool)
        /* Frame within the (played part of the) reel that is coming up next */
        int64_t const frame = _next.frames_round (vfr);
 
-       if ((_mono_reader || _stereo_reader) && reason != PASS_REASON_SUBTITLE) {
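+       /* Only emit video here if it is not being referenced directly from this DCP, or if we have been asked to decode referenced content anyway */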
+       if ((_mono_reader || _stereo_reader) && reason != PASS_REASON_SUBTITLE && (_decode_referenced || !_dcp_content->reference_video())) {
                shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
                int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
                if (_mono_reader) {
-                       video->give (make_shared<J2KImageProxy> (_mono_reader->get_frame (entry_point + frame), asset->size()), _offset + frame);
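+                       /* DCP video essence is JPEG2000-encoded 12-bit XYZ, hence AV_PIX_FMT_XYZ12LE */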
+                       video->give (
+                               shared_ptr<ImageProxy> (
+                                       new J2KImageProxy (_mono_reader->get_frame (entry_point + frame), asset->size(), AV_PIX_FMT_XYZ12LE)
+                                       ),
+                               _offset + frame
+                               );
                } else {
                        video->give (
-                               make_shared<J2KImageProxy> (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT),
+                               shared_ptr<ImageProxy> (
+                                       new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT, AV_PIX_FMT_XYZ12LE)),
                                _offset + frame
                                );
 
                        video->give (
-                               make_shared<J2KImageProxy> (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT),
+                               shared_ptr<ImageProxy> (
+                                       new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT, AV_PIX_FMT_XYZ12LE)),
                                _offset + frame
                                );
                }
        }
 
-       if (_sound_reader && reason != PASS_REASON_SUBTITLE) {
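+       /* Similarly, only emit audio if it is not being referenced directly, or if decoding of referenced content was requested */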
+       if (_sound_reader && reason != PASS_REASON_SUBTITLE && (_decode_referenced || !_dcp_content->reference_audio())) {
                int64_t const entry_point = (*_reel)->main_sound()->entry_point ();
                shared_ptr<const dcp::SoundFrame> sf = _sound_reader->get_frame (entry_point + frame);
                uint8_t const * from = sf->data ();
 
                int const channels = _dcp_content->audio->stream()->channels ();
                int const frames = sf->size() / (3 * channels);
-               shared_ptr<AudioBuffers> data = make_shared<AudioBuffers> (channels, frames);
+               shared_ptr<AudioBuffers> data (new AudioBuffers (channels, frames));
+               float** data_data = data->data();
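+               /* Unpack each 24-bit sample into the top bits of a 32-bit int and scale it to a float */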
                for (int i = 0; i < frames; ++i) {
                        for (int j = 0; j < channels; ++j) {
-                               data->data()[j][i] = static_cast<int> ((from[0] << 8) | (from[1] << 16) | (from[2] << 24)) / static_cast<float> (INT_MAX - 256);
+                               data_data[j][i] = static_cast<int> ((from[0] << 8) | (from[1] << 16) | (from[2] << 24)) / static_cast<float> (INT_MAX - 256);
                                from += 3;
                        }
                }
@@ -128,7 +143,7 @@ DCPDecoder::pass (PassReason reason, bool)
                audio->give (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
        }
 
-       if ((*_reel)->main_subtitle ()) {
+       if ((*_reel)->main_subtitle() && (_decode_referenced || !_dcp_content->reference_subtitle())) {
                int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
                list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
                        dcp::Time (entry_point + frame, vfr, vfr),
@@ -148,6 +163,7 @@ DCPDecoder::pass (PassReason reason, bool)
                }
        }
 
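+       /* Record where we have got to before moving _next on to the next frame */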
+       _position = _next;
        _next += ContentTime::from_frames (1, vfr);
 
        if ((*_reel)->main_picture ()) {
@@ -171,7 +187,7 @@ DCPDecoder::next_reel ()
 void
 DCPDecoder::get_readers ()
 {
-       if (_reel == _reels.end()) {
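+       /* Do not open any readers if the DCP cannot be played (for example if it is encrypted and we have no KDM) */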
+       if (_reel == _reels.end() || !_dcp_content->can_be_played ()) {
                _mono_reader.reset ();
                _stereo_reader.reset ();
                _sound_reader.reset ();
@@ -209,8 +225,10 @@ DCPDecoder::seek (ContentTime t, bool accurate)
        audio->seek (t, accurate);
        subtitle->seek (t, accurate);
 
-       _offset = 0;
        _reel = _reels.begin ();
+       _offset = 0;
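+       /* Open readers for the first reel; the loop below then skips forward to the reel containing t */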
+       get_readers ();
+
        while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
                t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
                next_reel ();
@@ -234,28 +252,39 @@ DCPDecoder::text_subtitles_during (ContentTimePeriod period, bool starting) cons
        list<ContentTimePeriod> ctp;
        double const vfr = _dcp_content->active_video_frame_rate ();
 
+       int offset = 0;
+
        BOOST_FOREACH (shared_ptr<dcp::Reel> r, _reels) {
                if (!r->main_subtitle ()) {
+                       offset += r->main_picture()->duration();
                        continue;
                }
 
                int64_t const entry_point = r->main_subtitle()->entry_point ();
 
                list<dcp::SubtitleString> subs = r->main_subtitle()->asset()->subtitles_during (
-                       dcp::Time (period.from.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
-                       dcp::Time (period.to.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
+                       dcp::Time (period.from.seconds(), 1000) - dcp::Time (offset - entry_point, vfr, vfr),
+                       dcp::Time (period.to.seconds(), 1000) - dcp::Time (offset - entry_point, vfr, vfr),
                        starting
                        );
 
                BOOST_FOREACH (dcp::SubtitleString const & s, subs) {
                        ctp.push_back (
                                ContentTimePeriod (
-                                       ContentTime::from_seconds (s.in().as_seconds ()),
-                                       ContentTime::from_seconds (s.out().as_seconds ())
+                                       ContentTime::from_seconds (s.in().as_seconds ()) + ContentTime::from_frames (offset - entry_point, vfr),
+                                       ContentTime::from_seconds (s.out().as_seconds ()) + ContentTime::from_frames (offset - entry_point, vfr)
                                        )
                                );
                }
+
+               offset += r->main_subtitle()->duration();
        }
 
        return ctp;
 }
+
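+/** Tell this decoder to produce output even for video, audio and subtitles which are marked as referenced from the DCP */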
+void
+DCPDecoder::set_decode_referenced ()
+{
+       _decode_referenced = true;
+}