Expand Player to support main and sign language video types.
author: Carl Hetherington <cth@carlh.net>
Wed, 19 Oct 2022 22:27:15 +0000 (00:27 +0200)
committer: Carl Hetherington <cth@carlh.net>
Thu, 22 Dec 2022 23:12:00 +0000 (00:12 +0100)
12 files changed:
src/lib/butler.cc
src/lib/dcp_encoder.cc
src/lib/player.cc
src/lib/player.h
src/lib/player_video.cc
src/lib/player_video.h
src/lib/util.cc
src/lib/util.h
test/client_server_test.cc
test/low_bitrate_test.cc
test/overlap_video_test.cc
test/player_test.cc

index b2fbc6c60b630be0c0478dace3b922c735517878..570cf2dd87b00ef201b199379eb7ed29b9fc1a13 100644 (file)
@@ -410,7 +410,7 @@ Butler::player_change (ChangeType type, int property)
                if (type == ChangeType::DONE) {
                        auto film = _film.lock();
                        if (film) {
-                               _video.reset_metadata(film, _player.video_container_size());
+                               _video.reset_metadata(film, _player.video_container_size(VideoType::MAIN));
                        }
                }
                return;
index 9a840c8ab916b6a594f4e6881f223ad20346f4f5..78a4ce2b3226d23c23789674a27b507c1ce5cbc4 100644 (file)
@@ -121,7 +121,9 @@ DCPEncoder::go ()
 void
 DCPEncoder::video (shared_ptr<PlayerVideo> data, DCPTime time)
 {
-       _j2k_encoder.encode(data, time);
+       if (data->type() == VideoType::MAIN) {
+               _j2k_encoder.encode(data, time);
+       }
 }
 
 void
index db179eabb5b7d3d67b8b63ef6a64be26c39be881..a55b4153cf9d9c587e68f4882ce497bad2b08226 100644 (file)
@@ -28,6 +28,7 @@
 #include "config.h"
 #include "content_audio.h"
 #include "content_video.h"
+#include "constants.h"
 #include "dcp_content.h"
 #include "dcp_decoder.h"
 #include "dcpomatic_log.h"
@@ -132,14 +133,32 @@ Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist
 }
 
 
+dcp::Size
+Player::full_size(VideoType type) const
+{
+       switch (type) {
+       case VideoType::MAIN:
+       {
+               auto film = _film.lock();
+               DCPOMATIC_ASSERT(film);
+               return film->frame_size();
+       }
+       case VideoType::SIGN_LANGUAGE:
+               return { 480, 640 };
+       default:
+               DCPOMATIC_ASSERT(false);
+       }
+}
+
+
 void
 Player::construct ()
 {
-       auto film = _film.lock();
-       DCPOMATIC_ASSERT(film);
-
        connect();
-       set_video_container_size(film->frame_size());
+       set_video_container_size(VideoType::MAIN, full_size(VideoType::MAIN));
+       set_video_container_size(VideoType::SIGN_LANGUAGE, full_size(VideoType::SIGN_LANGUAGE));
+
+       _black_image[VideoType::SIGN_LANGUAGE] = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size(SIGN_LANGUAGE_WIDTH, SIGN_LANGUAGE_HEIGHT), Image::Alignment::PADDED);
 
        film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
 
@@ -168,7 +187,7 @@ Player::Player(Player&& other)
        , _playlist(std::move(other._playlist))
        , _suspended(other._suspended.load())
        , _pieces(std::move(other._pieces))
-       , _video_container_size(other._video_container_size.load())
+       , _video_container_size(other._video_container_size)
        , _black_image(std::move(other._black_image))
        , _ignore_video(other._ignore_video.load())
        , _ignore_audio(other._ignore_audio.load())
@@ -207,7 +226,7 @@ Player::operator=(Player&& other)
        _playlist = std::move(other._playlist);
        _suspended = other._suspended.load();
        _pieces = std::move(other._pieces);
-       _video_container_size = other._video_container_size.load();
+       _video_container_size = other._video_container_size;
        _black_image = std::move(other._black_image);
        _ignore_video = other._ignore_video.load();
        _ignore_audio = other._ignore_audio.load();
@@ -238,9 +257,12 @@ Player::operator=(Player&& other)
 
 
 bool
-have_video (shared_ptr<const Content> content)
+have_video(shared_ptr<const Content> content, VideoType type)
 {
-       return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
+       return static_cast<bool>(content->video)
+               && content->video->use()
+               && content->can_be_played()
+               && content->video->type() == type;
 }
 
 
@@ -388,10 +410,15 @@ Player::setup_pieces ()
                }
        }
 
-       _black = Empty(film, playlist(), bind(&have_video, _1), _playback_length);
+       _have_sign_language = contains_sign_language(playlist_content);
+
+       _black[VideoType::MAIN] = Empty(film, playlist(), bind(&have_video, _1, VideoType::MAIN), _playback_length);
+       if (_have_sign_language) {
+               _black[VideoType::SIGN_LANGUAGE] = Empty(film, playlist(), bind(&have_video, _1, VideoType::SIGN_LANGUAGE), _playback_length);
+       }
        _silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);
 
-       _next_video_time = boost::none;
+       _next_video_time[VideoType::MAIN] = _next_video_time[VideoType::SIGN_LANGUAGE] = boost::none;
        _next_audio_time = boost::none;
 }
 
@@ -408,7 +435,7 @@ Player::playlist_content_change (ChangeType type, int property, bool frequent)
                if (type == ChangeType::DONE) {
                        boost::mutex::scoped_lock lm (_mutex);
                        for (auto const& i: _delay) {
-                               i.first->reset_metadata(film, _video_container_size);
+                               i.first->reset_metadata(film, video_container_size(VideoType::MAIN));
                        }
                }
        } else {
@@ -432,21 +459,22 @@ Player::playlist_content_change (ChangeType type, int property, bool frequent)
 
 
 void
-Player::set_video_container_size (dcp::Size s)
+Player::set_video_container_size(VideoType type, dcp::Size size)
 {
        ChangeSignaller<Player, int> cc(this, PlayerProperty::VIDEO_CONTAINER_SIZE);
 
-       if (s == _video_container_size) {
+       if (size == video_container_size(type)) {
                cc.abort();
                return;
        }
 
-       _video_container_size = s;
+       boost::mutex::scoped_lock lm(_video_container_size_mutex);
+       _video_container_size[type] = size;
 
        {
                boost::mutex::scoped_lock lm(_black_image_mutex);
-               _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
-               _black_image->make_black ();
+               _black_image[type] = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size[type], Image::Alignment::PADDED);
+               _black_image[type]->make_black();
        }
 }
 
@@ -499,16 +527,19 @@ Player::film_change (ChangeType type, Film::Property p)
 
 
 shared_ptr<PlayerVideo>
-Player::black_player_video_frame (Eyes eyes) const
+Player::black_player_video_frame(VideoType type, Eyes eyes) const
 {
        boost::mutex::scoped_lock lm(_black_image_mutex);
 
+       auto const image = _black_image[type];
+
        return std::make_shared<PlayerVideo> (
-               make_shared<const RawImageProxy>(_black_image),
+               make_shared<const RawImageProxy>(image),
                Crop(),
                optional<double>(),
-               _video_container_size,
-               _video_container_size,
+               image->size(),
+               image->size(),
+               type,
                eyes,
                Part::WHOLE,
                PresetColourConversion::all().front().conversion,
@@ -682,7 +713,8 @@ Player::pass ()
 
        if (_playback_length.load() == DCPTime() || !film) {
                /* Special; just give one black frame */
-               use_video(black_player_video_frame(Eyes::BOTH), DCPTime(), one_video_frame());
+               use_video(black_player_video_frame(VideoType::MAIN, Eyes::BOTH), DCPTime(), one_video_frame());
+               use_video(black_player_video_frame(VideoType::SIGN_LANGUAGE, Eyes::BOTH), DCPTime(), one_video_frame());
                return true;
        }
 
@@ -720,14 +752,22 @@ Player::pass ()
                SILENT
        } which = NONE;
 
+       optional<VideoType> black_type;
+
        if (earliest_content) {
                which = CONTENT;
        }
 
-       if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
-               earliest_time = _black.position ();
-               which = BLACK;
-       }
+       auto check_black = [this, &earliest_time, &which, &black_type](VideoType type) {
+               if (!_black[type].done() && !_ignore_video && (!earliest_time || _black[type].position() < *earliest_time)) {
+                       earliest_time = _black[type].position();
+                       which = BLACK;
+                       black_type = type;
+               }
+       };
+
+       check_black(VideoType::MAIN);
+       check_black(VideoType::SIGN_LANGUAGE);
 
        if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
@@ -742,7 +782,7 @@ Player::pass ()
                auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced) {
                        if (dcp->reference_video()) {
-                               _next_video_time = dcp->end(film);
+                               _next_video_time[earliest_content->content->video->type()] = dcp->end(film);
                        }
                        if (dcp->reference_audio()) {
                                /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
@@ -755,9 +795,10 @@ Player::pass ()
                break;
        }
        case BLACK:
-               LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
-               use_video(black_player_video_frame(Eyes::BOTH), _black.position(), _black.period_at_position().to);
-               _black.set_position (_black.position() + one_video_frame());
+               DCPOMATIC_ASSERT(black_type);
+               LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black[*black_type].position()));
+               use_video(black_player_video_frame(*black_type, Eyes::BOTH), _black[*black_type].position(), _black[*black_type].period_at_position().to);
+               _black[*black_type].set_position(_black[*black_type].position() + one_video_frame());
                break;
        case SILENT:
        {
@@ -857,7 +898,10 @@ Player::pass ()
        }
 
        if (done) {
-               emit_video_until(film->length());
+               emit_video_until(VideoType::MAIN, film->length());
+               if (_have_sign_language) {
+                       emit_video_until(VideoType::SIGN_LANGUAGE, film->length());
+               }
 
                if (_shuffler) {
                        _shuffler->flush ();
@@ -882,6 +926,7 @@ Player::open_subtitles_for_frame (DCPTime time) const
 
        list<PositionImage> captions;
        int const vfr = film->video_frame_rate();
+       auto const container = video_container_size(VideoType::MAIN);
 
        for (
                auto j:
@@ -894,15 +939,15 @@ Player::open_subtitles_for_frame (DCPTime time) const
                                continue;
                        }
 
-                       /* i.image will already have been scaled to fit _video_container_size */
-                       dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
+                       /* i.image will already have been scaled to fit video_container_size */
+                       dcp::Size scaled_size(i.rectangle.width * container.width, i.rectangle.height * container.height);
 
                        captions.push_back (
                                PositionImage (
                                        i.image,
                                        Position<int> (
-                                               lrint(_video_container_size.load().width * i.rectangle.x),
-                                               lrint(_video_container_size.load().height * i.rectangle.y)
+                                               lrint(container.width * i.rectangle.x),
+                                               lrint(container.height * i.rectangle.y)
                                                )
                                        )
                                );
@@ -910,7 +955,7 @@ Player::open_subtitles_for_frame (DCPTime time) const
 
                /* String subtitles (rendered to an image) */
                if (!j.string.empty()) {
-                       auto s = render_text(j.string, _video_container_size, time, vfr);
+                       auto s = render_text(j.string, container, time, vfr);
                        copy (s.begin(), s.end(), back_inserter (captions));
                }
        }
@@ -936,16 +981,16 @@ increment_eyes (Eyes e)
 
 
 void
-Player::emit_video_until(DCPTime time)
+Player::emit_video_until(VideoType type, DCPTime time)
 {
-       auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
+       auto frame = [this, type](shared_ptr<PlayerVideo> pv, DCPTime time) {
                /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
                   player before the video that requires them.
                */
                _delay.push_back(make_pair(pv, time));
 
                if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-                       _next_video_time = time + one_video_frame();
+                       _next_video_time[type] = time + one_video_frame();
                }
 
                if (_delay.size() < 3) {
@@ -959,12 +1004,12 @@ Player::emit_video_until(DCPTime time)
 
        auto const age_threshold = one_video_frame() * 2;
 
-       while (_next_video_time.get_value_or({}) < time) {
-               auto left = _last_video[Eyes::LEFT];
-               auto right = _last_video[Eyes::RIGHT];
-               auto both = _last_video[Eyes::BOTH];
+       while (_next_video_time[type].get_value_or({}) < time) {
+               auto left = _last_video[type][Eyes::LEFT];
+               auto right = _last_video[type][Eyes::RIGHT];
+               auto both = _last_video[type][Eyes::BOTH];
 
-               auto const next = _next_video_time.get_value_or({});
+               auto const next = _next_video_time[type].get_value_or({});
 
                if (
                        left.first &&
@@ -979,7 +1024,7 @@ Player::emit_video_until(DCPTime time)
                        frame(both.first, next);
                        LOG_DEBUG_PLAYER("Content %1 selected for DCP %2 (age %3)", to_string(both.second), to_string(next), to_string(both.second - next));
                } else {
-                       frame(black_player_video_frame(Eyes::BOTH), next);
+                       frame(black_player_video_frame(type, Eyes::BOTH), next);
                        LOG_DEBUG_PLAYER("Black selected for DCP %1", to_string(next));
                }
        }
@@ -998,7 +1043,8 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
-       if (!piece->content->video->use()) {
+       auto const content_video = piece->content->video;
+       if (!content_video->use()) {
                return;
        }
 
@@ -1008,6 +1054,7 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
        }
 
        auto const three_d = film->three_d();
+       auto const type = content_video->type();
 
        if (!three_d) {
                if (video.eyes == Eyes::LEFT) {
@@ -1031,23 +1078,24 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
-       if (!_next_video_time) {
-               _next_video_time = time.round(film->video_frame_rate());
+       if (!_next_video_time[type]) {
+               /* XXX: round to 24fps for SL? */
+               _next_video_time[type] = time.round(film->video_frame_rate());
        }
 
-       auto const content_video = piece->content->video;
        use_video(
                std::make_shared<PlayerVideo>(
                        video.image,
                        content_video->actual_crop(),
                        content_video->fade(film, video.time),
                        scale_for_display(
-                               content_video->scaled_size(film->frame_size()),
-                               _video_container_size,
-                               film->frame_size(),
+                               content_video->scaled_size(full_size(type)),
+                               video_container_size(type),
+                               full_size(type),
                                content_video->pixel_quanta()
                                ),
-                       _video_container_size,
+                       video_container_size(type),
+                       type,
                        video.eyes,
                        video.part,
                        content_video->colour_conversion(),
@@ -1064,9 +1112,9 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
 void
 Player::use_video(shared_ptr<PlayerVideo> pv, DCPTime time, DCPTime end)
 {
-       _last_video[pv->eyes()] = { pv, time };
+       _last_video[pv->type()][pv->eyes()] = { pv, time };
        if (pv->eyes() != Eyes::LEFT) {
-               emit_video_until(std::min(time + one_video_frame() / 2, end));
+               emit_video_until(pv->type(), std::min(time + one_video_frame() / 2, end));
        }
 }
 
@@ -1197,8 +1245,9 @@ Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextConten
                auto image = sub.image;
 
                /* We will scale the subtitle up to fit _video_container_size */
-               int const width = sub.rectangle.width * _video_container_size.load().width;
-               int const height = sub.rectangle.height * _video_container_size.load().height;
+               auto const container = video_container_size(VideoType::MAIN);
+               int const width = sub.rectangle.width * container.width;
+               int const height = sub.rectangle.height * container.height;
                if (width == 0 || height == 0) {
                        return;
                }
@@ -1347,19 +1396,19 @@ Player::seek (DCPTime time, bool accurate)
        }
 
        if (accurate) {
-               _next_video_time = time;
+               _next_video_time[VideoType::MAIN] = _next_video_time[VideoType::SIGN_LANGUAGE] = time;
                _next_audio_time = time;
        } else {
-               _next_video_time = boost::none;
+               _next_video_time[VideoType::MAIN] = _next_video_time[VideoType::SIGN_LANGUAGE] = boost::none;
                _next_audio_time = boost::none;
        }
 
-       _black.set_position (time);
+       _black[VideoType::MAIN].set_position(time);
+       _black[VideoType::SIGN_LANGUAGE].set_position(time);
        _silent.set_position (time);
 
-       _last_video[Eyes::LEFT] = {};
-       _last_video[Eyes::RIGHT] = {};
-       _last_video[Eyes::BOTH] = {};
+       _last_video[VideoType::MAIN][Eyes::LEFT] = _last_video[VideoType::MAIN][Eyes::RIGHT] = _last_video[VideoType::MAIN][Eyes::BOTH] = {};
+       _last_video[VideoType::SIGN_LANGUAGE][Eyes::LEFT] = _last_video[VideoType::SIGN_LANGUAGE][Eyes::RIGHT] = _last_video[VideoType::SIGN_LANGUAGE][Eyes::BOTH] = {};
 
        for (auto& state: _stream_states) {
                state.second.last_push_end = boost::none;
@@ -1370,13 +1419,15 @@ Player::seek (DCPTime time, bool accurate)
 void
 Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time)
 {
-       if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-               std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
-       }
+       if (pv->type() == VideoType::MAIN) {
+               if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+                       std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
+               }
 
-       auto subtitles = open_subtitles_for_frame (time);
-       if (subtitles) {
-               pv->set_text (subtitles.get ());
+               auto subtitles = open_subtitles_for_frame (time);
+               if (subtitles) {
+                       pv->set_text (subtitles.get ());
+               }
        }
 
        Video (pv, time);
index 8d95816ca4d249a014338344c9ed7502d1a32831..a03cfb6d4daa4187e869d4cc11ce1c50d7a8ec88 100644 (file)
@@ -38,6 +38,7 @@
 #include "player_text.h"
 #include "position_image.h"
 #include "shuffler.h"
+#include "video_type.h"
 #include <boost/atomic.hpp>
 #include <list>
 
@@ -91,11 +92,16 @@ public:
 
        std::vector<std::shared_ptr<dcpomatic::Font>> get_subtitle_fonts ();
 
-       dcp::Size video_container_size () const {
-               return _video_container_size;
+       dcp::Size video_container_size(VideoType type) const {
+               boost::mutex::scoped_lock lm(_video_container_size_mutex);
+               return _video_container_size[type];
        }
 
-       void set_video_container_size (dcp::Size);
+       bool have_sign_language() const {
+               return _have_sign_language;
+       }
+
+       void set_video_container_size(VideoType type, dcp::Size size);
        void set_ignore_video ();
        void set_ignore_audio ();
        void set_ignore_text ();
@@ -151,9 +157,10 @@ private:
        dcpomatic::DCPTime resampled_audio_to_dcp (std::shared_ptr<const Piece> piece, Frame f) const;
        dcpomatic::ContentTime dcp_to_content_time (std::shared_ptr<const Piece> piece, dcpomatic::DCPTime t) const;
        dcpomatic::DCPTime content_time_to_dcp (std::shared_ptr<const Piece> piece, dcpomatic::ContentTime t) const;
-       std::shared_ptr<PlayerVideo> black_player_video_frame (Eyes eyes) const;
-       void emit_video_until(dcpomatic::DCPTime time);
+       std::shared_ptr<PlayerVideo> black_player_video_frame(VideoType type, Eyes eyes) const;
+       void emit_video_until(VideoType type, dcpomatic::DCPTime time);
        void insert_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
+       dcp::Size full_size(VideoType type) const;
 
        void video (std::weak_ptr<Piece>, ContentVideo);
        void audio (std::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
@@ -187,13 +194,16 @@ private:
        boost::atomic<int> _suspended;
        std::list<std::shared_ptr<Piece>> _pieces;
 
-       /** Size of the image we are rendering to; this may be the DCP frame size, or
-        *  the size of preview in a window.
+       boost::atomic<bool> _have_sign_language;
+
+       mutable boost::mutex _video_container_size_mutex;
+       /** Size of the images we are rendering to; for the MAIN video this
+        *  may be the DCP frame size, or the size of preview in a window.
         */
-       boost::atomic<dcp::Size> _video_container_size;
+       EnumIndexedVector<dcp::Size, VideoType> _video_container_size;
 
        mutable boost::mutex _black_image_mutex;
-       std::shared_ptr<Image> _black_image;
+       EnumIndexedVector<std::shared_ptr<Image>, VideoType> _black_image;
 
        /** true if the player should ignore all video; i.e. never produce any */
        boost::atomic<bool> _ignore_video;
@@ -209,13 +219,13 @@ private:
        boost::atomic<bool> _play_referenced;
 
        /** Time of the next video that we will emit, or the time of the last accurate seek */
-       boost::optional<dcpomatic::DCPTime> _next_video_time;
+       EnumIndexedVector<boost::optional<dcpomatic::DCPTime>, VideoType> _next_video_time;
        /** Time of the next audio that we will emit, or the time of the last accurate seek */
        boost::optional<dcpomatic::DCPTime> _next_audio_time;
 
        boost::atomic<boost::optional<int>> _dcp_decode_reduction;
 
-       EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes> _last_video;
+       EnumIndexedVector<EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes>, VideoType> _last_video;
 
        AudioMerger _audio_merger;
        std::unique_ptr<Shuffler> _shuffler;
@@ -235,7 +245,7 @@ private:
        };
        std::map<AudioStreamPtr, StreamState> _stream_states;
 
-       Empty _black;
+       EnumIndexedVector<Empty, VideoType> _black;
        Empty _silent;
 
        EnumIndexedVector<ActiveText, TextType> _active_texts;
index b020ca1cd8172ba1cd5193c5658d42c621166171..7282b93dacf83fc4a761248c9223f7525d7b13a6 100644 (file)
@@ -54,6 +54,7 @@ PlayerVideo::PlayerVideo (
        boost::optional<double> fade,
        dcp::Size inter_size,
        dcp::Size out_size,
+       VideoType type,
        Eyes eyes,
        Part part,
        optional<ColourConversion> colour_conversion,
@@ -67,6 +68,7 @@ PlayerVideo::PlayerVideo (
        , _fade (fade)
        , _inter_size (inter_size)
        , _out_size (out_size)
+       , _type(type)
        , _eyes (eyes)
        , _part (part)
        , _colour_conversion (colour_conversion)
@@ -87,6 +89,7 @@ PlayerVideo::PlayerVideo (shared_ptr<cxml::Node> node, shared_ptr<Socket> socket
        _inter_size = dcp::Size (node->number_child<int> ("InterWidth"), node->number_child<int> ("InterHeight"));
        _out_size = dcp::Size (node->number_child<int> ("OutWidth"), node->number_child<int> ("OutHeight"));
        _eyes = static_cast<Eyes>(node->number_child<int>("Eyes"));
+       _type = static_cast<VideoType>(node->number_child<int>("Type"));
        _part = static_cast<Part>(node->number_child<int>("Part"));
        _video_range = static_cast<VideoRange>(node->number_child<int>("VideoRange"));
        _error = node->optional_bool_child("Error").get_value_or (false);
@@ -212,6 +215,7 @@ PlayerVideo::add_metadata (xmlpp::Node* node) const
        node->add_child("OutWidth")->add_child_text (raw_convert<string> (_out_size.width));
        node->add_child("OutHeight")->add_child_text (raw_convert<string> (_out_size.height));
        node->add_child("Eyes")->add_child_text (raw_convert<string> (static_cast<int> (_eyes)));
+       node->add_child("Type")->add_child_text(raw_convert<string>(static_cast<int>(_type)));
        node->add_child("Part")->add_child_text (raw_convert<string> (static_cast<int> (_part)));
        node->add_child("VideoRange")->add_child_text(raw_convert<string>(static_cast<int>(_video_range)));
        node->add_child("Error")->add_child_text(_error ? "1" : "0");
@@ -339,6 +343,7 @@ PlayerVideo::shallow_copy () const
                _fade,
                _inter_size,
                _out_size,
+               _type,
                _eyes,
                _part,
                _colour_conversion,
index 10b2078a00af83e73b1505b97aa2801145c9edde..fe35344aa94e8ba2b69eef898da6bd6e7aaadd7f 100644 (file)
@@ -29,6 +29,7 @@
 #include "position.h"
 #include "position_image.h"
 #include "types.h"
+#include "video_type.h"
 extern "C" {
 #include <libavutil/pixfmt.h>
 }
@@ -54,6 +55,7 @@ public:
                boost::optional<double> fade,
                dcp::Size inter_size,
                dcp::Size out_size,
+               VideoType type,
                Eyes eyes,
                Part part,
                boost::optional<ColourConversion> colour_conversion,
@@ -98,6 +100,10 @@ public:
                _eyes = e;
        }
 
+       VideoType type() const {
+               return _type;
+       }
+
        boost::optional<ColourConversion> colour_conversion () const {
                return _colour_conversion;
        }
@@ -134,6 +140,7 @@ private:
        boost::optional<double> _fade;
        dcp::Size _inter_size;
        dcp::Size _out_size;
+       VideoType _type;
        Eyes _eyes;
        Part _part;
        boost::optional<ColourConversion> _colour_conversion;
index 95787293b654f2dd560fbd0379596f94bcaa6224..652b5d83f1ca6f676ad366fb8e992a43cd09ec6f 100644 (file)
@@ -1043,3 +1043,15 @@ contains_assetmap(boost::filesystem::path dir)
        return boost::filesystem::is_regular_file(dir / "ASSETMAP") || boost::filesystem::is_regular_file(dir / "ASSETMAP.xml");
 }
 
+
+bool
+contains_sign_language(ContentList const& content)
+{
+       return std::any_of(
+               content.begin(),
+               content.end(),
+               [](shared_ptr<const Content> c) {
+                       return c->video && c->video->type() == VideoType::SIGN_LANGUAGE;
+               });
+}
+
index 3276999567c28a73ae6097ec45a5ec819ce6fc56..cb643e69fc83315c971950d9b311f3fd8612ec32 100644 (file)
@@ -97,6 +97,7 @@ extern void start_of_thread (std::string name);
 extern void capture_asdcp_logs ();
 extern std::string error_details(boost::system::error_code ec);
 extern bool contains_assetmap(boost::filesystem::path dir);
+extern bool contains_sign_language(ContentList const& content);
 
 
 template <class T>
index 32af60cbe6c0b4efd755bcdff20f14521f7a1a76..d2be1be7c10ea022359167ca1e149155721aee7a 100644 (file)
@@ -101,6 +101,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_rgb)
                optional<double> (),
                dcp::Size (1998, 1080),
                dcp::Size (1998, 1080),
+               VideoType::MAIN,
                Eyes::BOTH,
                Part::WHOLE,
                ColourConversion(),
@@ -184,6 +185,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_yuv)
                optional<double>(),
                dcp::Size(1998, 1080),
                dcp::Size(1998, 1080),
+               VideoType::MAIN,
                Eyes::BOTH,
                Part::WHOLE,
                ColourConversion(),
@@ -254,6 +256,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_j2k)
                optional<double>(),
                dcp::Size(1998, 1080),
                dcp::Size(1998, 1080),
+               VideoType::MAIN,
                Eyes::BOTH,
                Part::WHOLE,
                ColourConversion(),
@@ -279,6 +282,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_j2k)
                optional<double>(),
                dcp::Size(1998, 1080),
                dcp::Size(1998, 1080),
+               VideoType::MAIN,
                Eyes::BOTH,
                Part::WHOLE,
                PresetColourConversion::all().front().conversion,
index 52b8d54be118c33559b9f1722d58141cbd34c116..575b12d27c6702fef6ea4c35808be361c6d61ae3 100644 (file)
@@ -47,6 +47,7 @@ BOOST_AUTO_TEST_CASE (low_bitrate_test)
                boost::optional<double>(),
                dcp::Size(1998, 1080),
                dcp::Size(1998, 1080),
+               VideoType::MAIN,
                Eyes::BOTH,
                Part::WHOLE,
                boost::optional<ColourConversion>(),
index 12a26fe609a792a214bc8aafd95c672091f790d2..fcfa42a0cf08b8c65dca4429c6ec840828f90635 100644 (file)
@@ -65,7 +65,7 @@ BOOST_AUTO_TEST_CASE (overlap_video_test1)
        BOOST_CHECK (pieces.front()->ignore_video);
        BOOST_CHECK (pieces.front()->ignore_video.get() == dcpomatic::DCPTimePeriod(dcpomatic::DCPTime::from_seconds(1), dcpomatic::DCPTime::from_seconds(1) + B->length_after_trim(film)));
 
-       BOOST_CHECK (player->_black.done());
+       BOOST_CHECK(player->_black[VideoType::MAIN].done());
 
        make_and_verify_dcp (film);
 
index 733883b5a5fba4b6e0b58e7127f00822bb542763..bc35f757725df149834aa2201a456e97fa755c66 100644 (file)
@@ -167,11 +167,11 @@ BOOST_AUTO_TEST_CASE (player_subframe_test)
        BOOST_CHECK (film->length() == DCPTime::from_frames(3 * 24 + 1, 24));
 
        Player player(film, Image::Alignment::COMPACT);
-       player.setup_pieces();
-       BOOST_REQUIRE_EQUAL(player._black._periods.size(), 1U);
-       BOOST_CHECK(player._black._periods.front() == DCPTimePeriod(DCPTime::from_frames(3 * 24, 24), DCPTime::from_frames(3 * 24 + 1, 24)));
-       BOOST_REQUIRE_EQUAL(player._silent._periods.size(), 1U);
-       BOOST_CHECK(player._silent._periods.front() == DCPTimePeriod(DCPTime(289920), DCPTime::from_frames(3 * 24 + 1, 24)));
+       player.setup_pieces ();
+       BOOST_REQUIRE_EQUAL(player._black[VideoType::MAIN]._periods.size(), 1U);
+       BOOST_CHECK(player._black[VideoType::MAIN]._periods.front() == DCPTimePeriod(DCPTime::from_frames(3 * 24, 24), DCPTime::from_frames(3 * 24 + 1, 24)));
+       BOOST_REQUIRE_EQUAL (player._silent._periods.size(), 1U);
+       BOOST_CHECK (player._silent._periods.front() == DCPTimePeriod(DCPTime(289920), DCPTime::from_frames(3 * 24 + 1, 24)));
 }