#include "config.h"
#include "content_audio.h"
#include "content_video.h"
+#include "constants.h"
#include "dcp_content.h"
#include "dcp_decoder.h"
#include "dcpomatic_log.h"
}
+/** @return the "full" (un-scaled) frame size for video of the given type:
+ *  the film's DCP frame size for MAIN, or the fixed portrait size used for
+ *  sign-language video.
+ */
+dcp::Size
+Player::full_size(VideoType type) const
+{
+	switch (type) {
+	case VideoType::MAIN:
+	{
+		auto film = _film.lock();
+		DCPOMATIC_ASSERT(film);
+		return film->frame_size();
+	}
+	case VideoType::SIGN_LANGUAGE:
+		/* Use the shared constants (constants.h) rather than magic numbers,
+		   so this stays in sync with the sign-language image size used
+		   elsewhere in this change. */
+		return { SIGN_LANGUAGE_WIDTH, SIGN_LANGUAGE_HEIGHT };
+	default:
+		DCPOMATIC_ASSERT(false);
+	}
+}
+
+
void
Player::construct ()
{
-	auto film = _film.lock();
-	DCPOMATIC_ASSERT(film);
-
	connect();
-	set_video_container_size(film->frame_size());
+	/* set_video_container_size() creates (and make_black()s) the black image
+	   for the given type under _black_image_mutex, so both black frames are
+	   fully set up by these two calls.  Re-making the sign-language image here
+	   afterwards would overwrite it with one that was never blacked. */
+	set_video_container_size(VideoType::MAIN, full_size(VideoType::MAIN));
+	set_video_container_size(VideoType::SIGN_LANGUAGE, full_size(VideoType::SIGN_LANGUAGE));
+
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
, _playlist(std::move(other._playlist))
, _suspended(other._suspended.load())
, _pieces(std::move(other._pieces))
- , _video_container_size(other._video_container_size.load())
+ , _video_container_size(other._video_container_size)
, _black_image(std::move(other._black_image))
, _ignore_video(other._ignore_video.load())
, _ignore_audio(other._ignore_audio.load())
_playlist = std::move(other._playlist);
_suspended = other._suspended.load();
_pieces = std::move(other._pieces);
- _video_container_size = other._video_container_size.load();
+ _video_container_size = other._video_container_size;
_black_image = std::move(other._black_image);
_ignore_video = other._ignore_video.load();
_ignore_audio = other._ignore_audio.load();
+/** @return true if @p content has video of the given @p type which is
+ *  enabled (use()) and whose content can currently be played.
+ */
bool
-have_video (shared_ptr<const Content> content)
+have_video(shared_ptr<const Content> content, VideoType type)
{
-	return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
+	return static_cast<bool>(content->video)
+		&& content->video->use()
+		&& content->can_be_played()
+		&& content->video->type() == type;
}
}
}
- _black = Empty(film, playlist(), bind(&have_video, _1), _playback_length);
+ _have_sign_language = contains_sign_language(playlist_content);
+
+ _black[VideoType::MAIN] = Empty(film, playlist(), bind(&have_video, _1, VideoType::MAIN), _playback_length);
+ if (_have_sign_language) {
+ _black[VideoType::SIGN_LANGUAGE] = Empty(film, playlist(), bind(&have_video, _1, VideoType::SIGN_LANGUAGE), _playback_length);
+ }
_silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);
- _next_video_time = boost::none;
+ _next_video_time[VideoType::MAIN] = _next_video_time[VideoType::SIGN_LANGUAGE] = boost::none;
_next_audio_time = boost::none;
}
if (type == ChangeType::DONE) {
boost::mutex::scoped_lock lm (_mutex);
for (auto const& i: _delay) {
- i.first->reset_metadata(film, _video_container_size);
+ i.first->reset_metadata(film, video_container_size(VideoType::MAIN));
}
}
} else {
void
-Player::set_video_container_size (dcp::Size s)
+Player::set_video_container_size(VideoType type, dcp::Size size)
{
	ChangeSignaller<Player, int> cc(this, PlayerProperty::VIDEO_CONTAINER_SIZE);
-	if (s == _video_container_size) {
+	/* Take the size lock once, before the equality check, so the
+	   check-then-set is atomic (calling video_container_size() here would
+	   lock, release, then re-lock for the write, racing with other setters).
+	   Named size_lock so it does not shadow the _black_image_mutex lock
+	   below. */
+	boost::mutex::scoped_lock size_lock(_video_container_size_mutex);
+	if (size == _video_container_size[type]) {
		cc.abort();
		return;
	}
-	_video_container_size = s;
+	_video_container_size[type] = size;
	{
		boost::mutex::scoped_lock lm(_black_image_mutex);
-		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
-		_black_image->make_black ();
+		/* Re-make this type's black frame at the new size. */
+		_black_image[type] = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size[type], Image::Alignment::PADDED);
+		_black_image[type]->make_black();
	}
}
shared_ptr<PlayerVideo>
-Player::black_player_video_frame (Eyes eyes) const
+Player::black_player_video_frame(VideoType type, Eyes eyes) const
{
boost::mutex::scoped_lock lm(_black_image_mutex);
+ auto const image = _black_image[type];
+
return std::make_shared<PlayerVideo> (
- make_shared<const RawImageProxy>(_black_image),
+ make_shared<const RawImageProxy>(image),
Crop(),
optional<double>(),
- _video_container_size,
- _video_container_size,
+ image->size(),
+ image->size(),
+ type,
eyes,
Part::WHOLE,
PresetColourConversion::all().front().conversion,
if (_playback_length.load() == DCPTime() || !film) {
/* Special; just give one black frame */
- use_video(black_player_video_frame(Eyes::BOTH), DCPTime(), one_video_frame());
+ use_video(black_player_video_frame(VideoType::MAIN, Eyes::BOTH), DCPTime(), one_video_frame());
+ use_video(black_player_video_frame(VideoType::SIGN_LANGUAGE, Eyes::BOTH), DCPTime(), one_video_frame());
return true;
}
SILENT
} which = NONE;
+ optional<VideoType> black_type;
+
if (earliest_content) {
which = CONTENT;
}
- if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
- earliest_time = _black.position ();
- which = BLACK;
- }
+ auto check_black = [this, &earliest_time, &which, &black_type](VideoType type) {
+ if (!_black[type].done() && !_ignore_video && (!earliest_time || _black[type].position() < *earliest_time)) {
+ earliest_time = _black[type].position();
+ which = BLACK;
+ black_type = type;
+ }
+ };
+
+ check_black(VideoType::MAIN);
+ check_black(VideoType::SIGN_LANGUAGE);
if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
earliest_time = _silent.position ();
auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
if (dcp && !_play_referenced) {
if (dcp->reference_video()) {
- _next_video_time = dcp->end(film);
+ _next_video_time[earliest_content->content->video->type()] = dcp->end(film);
}
if (dcp->reference_audio()) {
/* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
break;
}
case BLACK:
- LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
- use_video(black_player_video_frame(Eyes::BOTH), _black.position(), _black.period_at_position().to);
- _black.set_position (_black.position() + one_video_frame());
+ DCPOMATIC_ASSERT(black_type);
+ LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black[*black_type].position()));
+ use_video(black_player_video_frame(*black_type, Eyes::BOTH), _black[*black_type].position(), _black[*black_type].period_at_position().to);
+ _black[*black_type].set_position(_black[*black_type].position() + one_video_frame());
break;
case SILENT:
{
}
if (done) {
- emit_video_until(film->length());
+ emit_video_until(VideoType::MAIN, film->length());
+ if (_have_sign_language) {
+ emit_video_until(VideoType::SIGN_LANGUAGE, film->length());
+ }
if (_shuffler) {
_shuffler->flush ();
list<PositionImage> captions;
int const vfr = film->video_frame_rate();
+ auto const container = video_container_size(VideoType::MAIN);
for (
auto j:
continue;
}
- /* i.image will already have been scaled to fit _video_container_size */
- dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
+ /* i.image will already have been scaled to fit video_container_size */
+ dcp::Size scaled_size(i.rectangle.width * container.width, i.rectangle.height * container.height);
captions.push_back (
PositionImage (
i.image,
Position<int> (
- lrint(_video_container_size.load().width * i.rectangle.x),
- lrint(_video_container_size.load().height * i.rectangle.y)
+ lrint(container.width * i.rectangle.x),
+ lrint(container.height * i.rectangle.y)
)
)
);
/* String subtitles (rendered to an image) */
if (!j.string.empty()) {
- auto s = render_text(j.string, _video_container_size, time, vfr);
+ auto s = render_text(j.string, container, time, vfr);
copy (s.begin(), s.end(), back_inserter (captions));
}
}
void
-Player::emit_video_until(DCPTime time)
+Player::emit_video_until(VideoType type, DCPTime time)
{
- auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
+ auto frame = [this, type](shared_ptr<PlayerVideo> pv, DCPTime time) {
/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
player before the video that requires them.
*/
_delay.push_back(make_pair(pv, time));
if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
- _next_video_time = time + one_video_frame();
+ _next_video_time[type] = time + one_video_frame();
}
if (_delay.size() < 3) {
auto const age_threshold = one_video_frame() * 2;
- while (_next_video_time.get_value_or({}) < time) {
- auto left = _last_video[Eyes::LEFT];
- auto right = _last_video[Eyes::RIGHT];
- auto both = _last_video[Eyes::BOTH];
+ while (_next_video_time[type].get_value_or({}) < time) {
+ auto left = _last_video[type][Eyes::LEFT];
+ auto right = _last_video[type][Eyes::RIGHT];
+ auto both = _last_video[type][Eyes::BOTH];
- auto const next = _next_video_time.get_value_or({});
+ auto const next = _next_video_time[type].get_value_or({});
if (
left.first &&
frame(both.first, next);
LOG_DEBUG_PLAYER("Content %1 selected for DCP %2 (age %3)", to_string(both.second), to_string(next), to_string(both.second - next));
} else {
- frame(black_player_video_frame(Eyes::BOTH), next);
+ frame(black_player_video_frame(type, Eyes::BOTH), next);
LOG_DEBUG_PLAYER("Black selected for DCP %1", to_string(next));
}
}
return;
}
- if (!piece->content->video->use()) {
+ auto const content_video = piece->content->video;
+ if (!content_video->use()) {
return;
}
}
auto const three_d = film->three_d();
+ auto const type = content_video->type();
if (!three_d) {
if (video.eyes == Eyes::LEFT) {
return;
}
- if (!_next_video_time) {
- _next_video_time = time.round(film->video_frame_rate());
+ if (!_next_video_time[type]) {
+ /* XXX: round to 24fps for SL? */
+ _next_video_time[type] = time.round(film->video_frame_rate());
}
- auto const content_video = piece->content->video;
use_video(
std::make_shared<PlayerVideo>(
video.image,
content_video->actual_crop(),
content_video->fade(film, video.time),
scale_for_display(
- content_video->scaled_size(film->frame_size()),
- _video_container_size,
- film->frame_size(),
+ content_video->scaled_size(full_size(type)),
+ video_container_size(type),
+ full_size(type),
content_video->pixel_quanta()
),
- _video_container_size,
+ video_container_size(type),
+ type,
video.eyes,
video.part,
content_video->colour_conversion(),
void
Player::use_video(shared_ptr<PlayerVideo> pv, DCPTime time, DCPTime end)
{
-	_last_video[pv->eyes()] = { pv, time };
+	/* Remember the most recent video for this type/eye pair; emit_video_until()
+	   reads _last_video when deciding what to emit. */
+	_last_video[pv->type()][pv->eyes()] = { pv, time };
	if (pv->eyes() != Eyes::LEFT) {
-		emit_video_until(std::min(time + one_video_frame() / 2, end));
+		/* Left-eye frames are held back until their right-eye partner arrives. */
+		emit_video_until(pv->type(), std::min(time + one_video_frame() / 2, end));
	}
}
auto image = sub.image;
/* We will scale the subtitle up to fit _video_container_size */
- int const width = sub.rectangle.width * _video_container_size.load().width;
- int const height = sub.rectangle.height * _video_container_size.load().height;
+ auto const container = video_container_size(VideoType::MAIN);
+ int const width = sub.rectangle.width * container.width;
+ int const height = sub.rectangle.height * container.height;
if (width == 0 || height == 0) {
return;
}
}
if (accurate) {
- _next_video_time = time;
+ _next_video_time[VideoType::MAIN] = _next_video_time[VideoType::SIGN_LANGUAGE] = time;
_next_audio_time = time;
} else {
- _next_video_time = boost::none;
+ _next_video_time[VideoType::MAIN] = _next_video_time[VideoType::SIGN_LANGUAGE] = boost::none;
_next_audio_time = boost::none;
}
- _black.set_position (time);
+ _black[VideoType::MAIN].set_position(time);
+ _black[VideoType::SIGN_LANGUAGE].set_position(time);
_silent.set_position (time);
- _last_video[Eyes::LEFT] = {};
- _last_video[Eyes::RIGHT] = {};
- _last_video[Eyes::BOTH] = {};
+ _last_video[VideoType::MAIN][Eyes::LEFT] = _last_video[VideoType::MAIN][Eyes::RIGHT] = _last_video[VideoType::MAIN][Eyes::BOTH] = {};
+ _last_video[VideoType::SIGN_LANGUAGE][Eyes::LEFT] = _last_video[VideoType::SIGN_LANGUAGE][Eyes::RIGHT] = _last_video[VideoType::SIGN_LANGUAGE][Eyes::BOTH] = {};
for (auto& state: _stream_states) {
state.second.last_push_end = boost::none;
void
Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time)
{
- if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
- std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
- }
+ if (pv->type() == VideoType::MAIN) {
+ if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+ std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
+ }
- auto subtitles = open_subtitles_for_frame (time);
- if (subtitles) {
- pv->set_text (subtitles.get ());
+ auto subtitles = open_subtitles_for_frame (time);
+ if (subtitles) {
+ pv->set_text (subtitles.get ());
+ }
}
Video (pv, time);
#include "player_text.h"
#include "position_image.h"
#include "shuffler.h"
+#include "video_type.h"
#include <boost/atomic.hpp>
#include <list>
std::vector<std::shared_ptr<dcpomatic::Font>> get_subtitle_fonts ();
-	dcp::Size video_container_size () const {
-		return _video_container_size;
+	/** @return size of the render target for video of the given type,
+	 *  copied out under _video_container_size_mutex so it is safe to call
+	 *  from any thread.
+	 */
+	dcp::Size video_container_size(VideoType type) const {
+		boost::mutex::scoped_lock lm(_video_container_size_mutex);
+		return _video_container_size[type];
	}

-	void set_video_container_size (dcp::Size);
+	/** @return true if the playlist contained sign-language video when it
+	 *  was last set up.
+	 */
+	bool have_sign_language() const {
+		return _have_sign_language;
+	}
+
+	void set_video_container_size(VideoType type, dcp::Size size);
void set_ignore_video ();
void set_ignore_audio ();
void set_ignore_text ();
dcpomatic::DCPTime resampled_audio_to_dcp (std::shared_ptr<const Piece> piece, Frame f) const;
dcpomatic::ContentTime dcp_to_content_time (std::shared_ptr<const Piece> piece, dcpomatic::DCPTime t) const;
dcpomatic::DCPTime content_time_to_dcp (std::shared_ptr<const Piece> piece, dcpomatic::ContentTime t) const;
- std::shared_ptr<PlayerVideo> black_player_video_frame (Eyes eyes) const;
- void emit_video_until(dcpomatic::DCPTime time);
+ std::shared_ptr<PlayerVideo> black_player_video_frame(VideoType type, Eyes eyes) const;
+ void emit_video_until(VideoType type, dcpomatic::DCPTime time);
void insert_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
+ dcp::Size full_size(VideoType type) const;
void video (std::weak_ptr<Piece>, ContentVideo);
void audio (std::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
boost::atomic<int> _suspended;
std::list<std::shared_ptr<Piece>> _pieces;
- /** Size of the image we are rendering to; this may be the DCP frame size, or
- * the size of preview in a window.
+ boost::atomic<bool> _have_sign_language;
+
+ mutable boost::mutex _video_container_size_mutex;
+ /** Size of the images we are rendering to; for the MAIN video this
+ * may be the DCP frame size, or the size of preview in a window.
*/
- boost::atomic<dcp::Size> _video_container_size;
+ EnumIndexedVector<dcp::Size, VideoType> _video_container_size;
mutable boost::mutex _black_image_mutex;
- std::shared_ptr<Image> _black_image;
+ EnumIndexedVector<std::shared_ptr<Image>, VideoType> _black_image;
/** true if the player should ignore all video; i.e. never produce any */
boost::atomic<bool> _ignore_video;
boost::atomic<bool> _play_referenced;
/** Time of the next video that we will emit, or the time of the last accurate seek */
- boost::optional<dcpomatic::DCPTime> _next_video_time;
+ EnumIndexedVector<boost::optional<dcpomatic::DCPTime>, VideoType> _next_video_time;
/** Time of the next audio that we will emit, or the time of the last accurate seek */
boost::optional<dcpomatic::DCPTime> _next_audio_time;
boost::atomic<boost::optional<int>> _dcp_decode_reduction;
- EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes> _last_video;
+ EnumIndexedVector<EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes>, VideoType> _last_video;
AudioMerger _audio_merger;
std::unique_ptr<Shuffler> _shuffler;
};
std::map<AudioStreamPtr, StreamState> _stream_states;
- Empty _black;
+ EnumIndexedVector<Empty, VideoType> _black;
Empty _silent;
EnumIndexedVector<ActiveText, TextType> _active_texts;