Move video frame rate ('prepared-for') into Content.
diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc
index 1939fc1c9d21ef731467fb2d2d5075361855835c..8008fe515590fa52df974e4e14ac4f59e3eaff56 100644
--- a/src/lib/dcp_decoder.cc
+++ b/src/lib/dcp_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2014 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2014-2016 Carl Hetherington <cth@carlh.net>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
 
 */
 
+#include "dcp_decoder.h"
+#include "dcp_content.h"
+#include "audio_content.h"
+#include "j2k_image_proxy.h"
+#include "image.h"
+#include "config.h"
 #include <dcp/dcp.h>
+#include <dcp/decrypted_kdm.h>
 #include <dcp/cpl.h>
 #include <dcp/reel.h>
-#include <dcp/mono_picture_mxf.h>
-#include <dcp/stereo_picture_mxf.h>
+#include <dcp/mono_picture_asset.h>
+#include <dcp/stereo_picture_asset.h>
 #include <dcp/reel_picture_asset.h>
+#include <dcp/reel_sound_asset.h>
+#include <dcp/reel_subtitle_asset.h>
 #include <dcp/mono_picture_frame.h>
 #include <dcp/stereo_picture_frame.h>
-#include "dcp_decoder.h"
-#include "dcp_content.h"
-#include "image_proxy.h"
-#include "image.h"
+#include <dcp/sound_frame.h>
+#include <boost/foreach.hpp>
+#include <iostream>
 
 using std::list;
 using std::cout;
 using boost::shared_ptr;
 using boost::dynamic_pointer_cast;
 
-DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
-       : VideoDecoder (c)
-       , AudioDecoder (c)
-       , SubtitleDecoder (c)
-       , _log (log)
+DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
+       : VideoDecoder (c, log)
+       , AudioDecoder (c->audio, fast, log)
+       , SubtitleDecoder (c->subtitle)
        , _dcp_content (c)
 {
        dcp::DCP dcp (c->directory ());
-       dcp.read ();
-       assert (dcp.cpls().size() == 1);
+       dcp.read (false, 0, true);
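+       /* If the content has a KDM, decrypt it with the private key of our decryption
+          chain and give it to libdcp so that any encrypted assets can be read.
+       */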
+       if (c->kdm ()) {
+               dcp.add (dcp::DecryptedKDM (c->kdm().get (), Config::instance()->decryption_chain()->key().get ()));
+       }
+       DCPOMATIC_ASSERT (dcp.cpls().size() == 1);
        _reels = dcp.cpls().front()->reels ();
        _reel = _reels.begin ();
 }
 
 bool
-DCPDecoder::pass ()
+DCPDecoder::pass (PassReason reason, bool)
 {
-       if (_reel == _reels.end ()) {
+       if (_reel == _reels.end () || !_dcp_content->can_be_played ()) {
                return true;
        }
 
-       float const vfr = _dcp_content->video_frame_rate ();
-       
-       if ((*_reel)->main_picture ()) {
-               shared_ptr<dcp::PictureMXF> mxf = (*_reel)->main_picture()->mxf ();
-               shared_ptr<dcp::MonoPictureMXF> mono = dynamic_pointer_cast<dcp::MonoPictureMXF> (mxf);
-               shared_ptr<dcp::StereoPictureMXF> stereo = dynamic_pointer_cast<dcp::StereoPictureMXF> (mxf);
-               int64_t const ep = (*_reel)->main_picture()->entry_point ();
+       /* Offset of the start of the current reel from the start of the content in frames */
+       int offset = 0;
+       list<shared_ptr<dcp::Reel> >::const_iterator i = _reels.begin();
+       while (i != _reel) {
+               offset += (*i)->main_picture()->duration ();
+               ++i;
+       }
+
+       double const vfr = _dcp_content->active_video_frame_rate ();
+
+       /* Frame within the (played part of the) reel that is coming up next */
+       int64_t const frame = _next.frames_round (vfr);
+
+       if ((*_reel)->main_picture () && reason != PASS_REASON_SUBTITLE) {
+               shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
+               shared_ptr<dcp::MonoPictureAsset> mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
+               shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
+               int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
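+               /* Emit the picture wrapped in a J2KImageProxy rather than decoding the
+                  JPEG2000 data to RGB here; for 3D assets the left and right eyes are
+                  emitted separately for the same frame index.
+               */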
                if (mono) {
-                       shared_ptr<Image> image (new Image (PIX_FMT_RGB24, mxf->size(), false));
-                       mono->get_frame (ep + _next.frames (vfr))->rgb_frame (image->data()[0]);
-                       shared_ptr<Image> aligned (new Image (image, true));
-                       video (shared_ptr<ImageProxy> (new RawImageProxy (aligned, _log)), _next.frames (vfr));
+                       video (shared_ptr<ImageProxy> (new J2KImageProxy (mono->get_frame (entry_point + frame), asset->size())), offset + frame);
                } else {
+                       video (
+                               shared_ptr<ImageProxy> (new J2KImageProxy (stereo->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT)),
+                               offset + frame
+                               );
+
+                       video (
+                               shared_ptr<ImageProxy> (new J2KImageProxy (stereo->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT)),
+                               offset + frame
+                               );
+               }
+       }
 
-                       shared_ptr<Image> left (new Image (PIX_FMT_RGB24, mxf->size(), false));
-                       stereo->get_frame (ep + _next.frames (vfr))->rgb_frame (dcp::EYE_LEFT, left->data()[0]);
-                       shared_ptr<Image> aligned_left (new Image (left, true));
-                       video (shared_ptr<ImageProxy> (new RawImageProxy (aligned_left, _log)), _next.frames (vfr));
-
-                       shared_ptr<Image> right (new Image (PIX_FMT_RGB24, mxf->size(), false));
-                       stereo->get_frame (ep + _next.frames (vfr))->rgb_frame (dcp::EYE_RIGHT, right->data()[0]);
-                       shared_ptr<Image> aligned_right (new Image (right, true));
-                       video (shared_ptr<ImageProxy> (new RawImageProxy (aligned_right, _log)), _next.frames (vfr));
+       if ((*_reel)->main_sound () && reason != PASS_REASON_SUBTITLE) {
+               int64_t const entry_point = (*_reel)->main_sound()->entry_point ();
+               shared_ptr<const dcp::SoundFrame> sf = (*_reel)->main_sound()->asset()->get_frame (entry_point + frame);
+               uint8_t const * from = sf->data ();
+
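+               /* Each sample is 3 bytes, interleaved by channel, least-significant byte
+                  first.  Place the 24 bits at the top of a signed 32-bit int and scale
+                  to a float in roughly [-1, 1).
+               */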
+               int const channels = _dcp_content->audio->stream()->channels ();
+               int const frames = sf->size() / (3 * channels);
+               shared_ptr<AudioBuffers> data (new AudioBuffers (channels, frames));
+               for (int i = 0; i < frames; ++i) {
+                       for (int j = 0; j < channels; ++j) {
+                               data->data()[j][i] = static_cast<int> ((from[0] << 8) | (from[1] << 16) | (from[2] << 24)) / static_cast<float> (INT_MAX - 256);
+                               from += 3;
+                       }
                }
+
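+               /* Pass the audio on, timed from the start of this reel plus our position within it */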
+               audio (_dcp_content->audio->stream(), data, ContentTime::from_frames (offset, vfr) + _next);
        }
 
-       /* XXX: sound */
-       /* XXX: subtitle */
+       if ((*_reel)->main_subtitle ()) {
+               int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
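+               /* Find any subtitles which start during this frame of the reel; the asset's
+                  times count from the start of the asset, so add the entry point.
+               */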
+               list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
+                       dcp::Time (entry_point + frame, vfr, vfr),
+                       dcp::Time (entry_point + frame + 1, vfr, vfr),
+                       true
+                       );
+
+               if (!subs.empty ()) {
+                       /* XXX: assuming that all `subs' are at the same time; maybe this is ok */
+                       text_subtitle (
+                               ContentTimePeriod (
+                                       ContentTime::from_frames (offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().in().as_seconds ()),
+                                       ContentTime::from_frames (offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().out().as_seconds ())
+                                       ),
+                               subs
+                               );
+               }
+       }
 
        _next += ContentTime::from_frames (1, vfr);
 
        if ((*_reel)->main_picture ()) {
-               if ((*_reel)->main_picture()->duration() >= _next.frames (vfr)) {
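+               /* If we have got to (or past) the end of this reel's picture, move on to
+                  the next reel and start counting from the beginning of it again.
+               */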
+               if (_next.frames_round (vfr) >= (*_reel)->main_picture()->duration()) {
                        ++_reel;
+                       _next = ContentTime ();
                }
        }
-       
+
        return false;
 }
 
@@ -104,8 +157,8 @@ DCPDecoder::seek (ContentTime t, bool accurate)
        SubtitleDecoder::seek (t, accurate);
 
        _reel = _reels.begin ();
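+       /* Work out which reel t is in by stepping forward and subtracting the duration
+          of each whole reel which comes before it.
+       */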
-       while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->video_frame_rate ())) {
-               t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->video_frame_rate ());
+       while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
+               t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
                ++_reel;
        }
 
@@ -114,7 +167,41 @@ DCPDecoder::seek (ContentTime t, bool accurate)
 
 
 list<ContentTimePeriod>
-DCPDecoder::subtitles_during (ContentTimePeriod, bool starting) const
+DCPDecoder::image_subtitles_during (ContentTimePeriod, bool) const
 {
        return list<ContentTimePeriod> ();
 }
+
+list<ContentTimePeriod>
+DCPDecoder::text_subtitles_during (ContentTimePeriod period, bool starting) const
+{
+       /* XXX: inefficient */
+
+       list<ContentTimePeriod> ctp;
+       double const vfr = _dcp_content->active_video_frame_rate ();
+
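+       /* Ask each reel's subtitle asset (if any) for subtitles within the period, shifting
+          the query times by the asset's entry point, and collect the periods as content times.
+       */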
+       BOOST_FOREACH (shared_ptr<dcp::Reel> r, _reels) {
+               if (!r->main_subtitle ()) {
+                       continue;
+               }
+
+               int64_t const entry_point = r->main_subtitle()->entry_point ();
+
+               list<dcp::SubtitleString> subs = r->main_subtitle()->asset()->subtitles_during (
+                       dcp::Time (period.from.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
+                       dcp::Time (period.to.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
+                       starting
+                       );
+
+               BOOST_FOREACH (dcp::SubtitleString const & s, subs) {
+                       ctp.push_back (
+                               ContentTimePeriod (
+                                       ContentTime::from_seconds (s.in().as_seconds ()),
+                                       ContentTime::from_seconds (s.out().as_seconds ())
+                                       )
+                               );
+               }
+       }
+
+       return ctp;
+}