#include "i18n.h"
-#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
-
using std::list;
using std::cout;
using std::min;
, _shuffler (0)
{
_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
- _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1));
+ /* The butler must hear about this before anything else, and since we are proxying this signal
+    through to the butler we must connect first.
+ */
+ _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
set_video_container_size (_film->frame_size ());
setup_pieces_unlocked ();
}
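+
+/* A Piece gives us video if it has a decoder with a video output, and similarly for audio;
+   these predicates are used below when working out the periods of black and silence.
+*/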
+bool
+have_video (shared_ptr<Piece> piece)
+{
+ return piece->decoder && piece->decoder->video;
+}
+
+bool
+have_audio (shared_ptr<Piece> piece)
+{
+ return piece->decoder && piece->decoder->audio;
+}
+
void
Player::setup_pieces_unlocked ()
{
continue;
}
- shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
- FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
+ shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
+ FrameRateChange frc (_film, i);
if (!decoder) {
/* Not something that we can decode; e.g. Atmos content */
bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
);
(*j)->Stop.connect (
- bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
+ bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
);
++j;
}
}
- _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
- _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
+ _black = Empty (_film, _pieces, bind(&have_video, _1));
+ _silent = Empty (_film, _pieces, bind(&have_audio, _1));
_last_video_time = DCPTime ();
_last_video_eyes = EYES_BOTH;
} else if (type == CHANGE_TYPE_DONE) {
/* A change in our content has gone through. Re-build our pieces. */
setup_pieces ();
+ } else if (type == CHANGE_TYPE_CANCELLED) {
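+ /* A change in our content was cancelled, so we don't need to rebuild our pieces; just stop being suspended */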
+ boost::mutex::scoped_lock lm (_mutex);
+ _suspended = false;
}
Change (type, property, frequent);
}
}
-list<PositionImage>
-Player::transform_bitmap_texts (list<BitmapText> subs) const
-{
- list<PositionImage> all;
-
- for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
- if (!i->image) {
- continue;
- }
-
- /* We will scale the subtitle up to fit _video_container_size */
- dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
-
- all.push_back (
- PositionImage (
- i->image->scale (
- scaled_size,
- dcp::YUV_TO_RGB_REC601,
- i->image->pixel_format (),
- true,
- _fast
- ),
- Position<int> (
- lrint (_video_container_size.width * i->rectangle.x),
- lrint (_video_container_size.height * i->rectangle.y)
- )
- )
- );
- }
-
- return all;
-}
-
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
DCPTime s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(), s);
+ s = min (piece->content->length_after_trim(_film), s);
s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
DCPTime s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(), s);
+ s = min (piece->content->length_after_trim(_film), s);
/* See notes in dcp_to_content_video */
return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
DCPTime s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(), s);
+ s = min (piece->content->length_after_trim(_film), s);
return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
scoped_ptr<DCPDecoder> decoder;
try {
- decoder.reset (new DCPDecoder (j, _film->log(), false));
+ decoder.reset (new DCPDecoder (_film, j, false));
} catch (...) {
return a;
}
}
if (j->reference_text (TEXT_CLOSED_CAPTION)) {
- shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
- DCPOMATIC_ASSERT (ra);
- ra->set_entry_point (ra->entry_point() + trim_start);
- ra->set_duration (ra->duration() - trim_start - trim_end);
- a.push_back (
- ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
- );
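+ /* A reel can now carry more than one closed-caption asset, so add a referenced asset for each */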
+ BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
+ DCPOMATIC_ASSERT (l);
+ l->set_entry_point (l->entry_point() + trim_start);
+ l->set_duration (l->duration() - trim_start - trim_end);
+ a.push_back (
+ ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
+ );
+ }
}
/* Assume that main picture duration is the length of the reel */
return false;
}
- if (_playlist->length() == DCPTime()) {
+ if (_playlist->length(_film) == DCPTime()) {
/* Special case of an empty Film; just give one black frame */
emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
return true;
}
DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
- if (t > i->content->end()) {
+ if (t > i->content->end(_film)) {
i->done = true;
} else {
) {
/* Bitmap subtitles */
- list<PositionImage> c = transform_bitmap_texts (j.bitmap);
- copy (c.begin(), c.end(), back_inserter (captions));
+ BOOST_FOREACH (BitmapText i, j.bitmap) {
+ if (!i.image) {
+ continue;
+ }
+
+ /* i.image will already have been scaled to fit _video_container_size */
+ dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
+
+ captions.push_back (
+ PositionImage (
+ i.image,
+ Position<int> (
+ lrint (_video_container_size.width * i.rectangle.x),
+ lrint (_video_container_size.height * i.rectangle.y)
+ )
+ )
+ );
+ }
/* String subtitles (rendered to an image) */
if (!j.string.empty ()) {
return;
}
- FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
+ FrameRateChange frc (_film, piece->content);
if (frc.skip && (video.frame % 2) == 1) {
return;
}
/* Fill gaps that we discover now that we have some video which needs to be emitted.
This is where we need to fill to.
*/
- DCPTime fill_to = min (time, piece->content->end());
+ DCPTime fill_to = min (time, piece->content->end(_film));
if (_last_video_time) {
DCPTime fill_from = max (*_last_video_time, piece->content->position());
if (fill_to_eyes == EYES_BOTH) {
fill_to_eyes = EYES_LEFT;
}
- if (fill_to == piece->content->end()) {
+ if (fill_to == piece->content->end(_film)) {
/* Don't fill after the end of the content */
fill_to_eyes = EYES_LEFT;
}
new PlayerVideo (
video.image,
piece->content->video->crop (),
- piece->content->video->fade (video.frame),
+ piece->content->video->fade (_film, video.frame),
piece->content->video->scale().size (
piece->content->video, _video_container_size, _film->frame_size ()
),
DCPTime t = time;
for (int i = 0; i < frc.repeat; ++i) {
- if (t < piece->content->end()) {
+ if (t < piece->content->end(_film)) {
emit_video (_last_video[wp], t);
}
t += one_video_frame ();
/* Compute time in the DCP */
DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
/* And the end of this block in the DCP */
- DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
+ DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate(_film));
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
}
content_audio.audio = cut.first;
time = cut.second;
- } else if (time > piece->content->end()) {
+ } else if (time > piece->content->end(_film)) {
/* Discard it all */
return;
- } else if (end > piece->content->end()) {
- Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
+ } else if (end > piece->content->end(_film)) {
+ Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(_film->audio_frame_rate());
if (remaining_frames == 0) {
return;
}
subtitle.sub.rectangle.height *= text->y_scale ();
PlayerText ps;
- ps.bitmap.push_back (subtitle.sub);
+ shared_ptr<Image> image = subtitle.sub.image;
+ /* We will scale the subtitle up to fit _video_container_size */
+ dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
+ ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
DCPTime from (content_time_to_dcp (piece, subtitle.from()));
- _active_texts[subtitle.type()].add_from (wc, ps, from);
+ _active_texts[text->type()].add_from (wc, ps, from);
}
void
PlayerText ps;
DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
- if (from > piece->content->end()) {
+ if (from > piece->content->end(_film)) {
return;
}
ps.add_fonts (text->fonts ());
}
- _active_texts[subtitle.type()].add_from (wc, ps, from);
+ _active_texts[text->type()].add_from (wc, ps, from);
}
void
-Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
+Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
{
- if (!_active_texts[type].have (wc)) {
+ shared_ptr<const TextContent> text = wc.lock ();
+ if (!text) {
+ return;
+ }
+
+ if (!_active_texts[text->type()].have(wc)) {
return;
}
shared_ptr<Piece> piece = wp.lock ();
- shared_ptr<const TextContent> text = wc.lock ();
- if (!piece || !text) {
+ if (!piece) {
return;
}
DCPTime const dcp_to = content_time_to_dcp (piece, to);
- if (dcp_to > piece->content->end()) {
+ if (dcp_to > piece->content->end(_film)) {
return;
}
- pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
+ pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
- bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
+ bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
if (text->use() && !always && !text->burn()) {
- Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
+ Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
}
}
/* Before; seek to the start of the content */
i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
i->done = false;
- } else if (i->content->position() <= time && time < i->content->end()) {
+ } else if (i->content->position() <= time && time < i->content->end(_film)) {
/* During; seek to position */
i->decoder->seek (dcp_to_content_time (i, time), accurate);
i->done = false;