/*
- Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
#include "dcp_decoder.h"
#include "image_decoder.h"
#include "compose.hpp"
+#include "shuffler.h"
#include <dcp/reel.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
, _fast (false)
, _play_referenced (false)
, _audio_merger (_film->audio_frame_rate())
+ , _shuffler (0)
{
_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
seek (DCPTime (), true);
}
+Player::~Player ()
+{
+ delete _shuffler;
+}
+
void
Player::setup_pieces ()
{
_pieces.clear ();
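+ /* Video from 3D left-only or right-only content is routed through the Shuffler (see the connections below)
+ so that left- and right-eye frames can be put back into the correct order before reaching Player::video.
+ */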
+ delete _shuffler;
+ _shuffler = new Shuffler();
+ _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+
BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
if (!i->paths_valid ()) {
continue;
}
- shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
+ shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
if (!decoder) {
shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
- if (dcp && _play_referenced) {
- dcp->set_decode_referenced ();
+ if (dcp) {
+ if (_play_referenced) {
+ dcp->set_decode_referenced ();
+ }
+ /* Pass on any forced JPEG2000 decode-resolution reduction to DCP content */
+ dcp->set_forced_reduction (_dcp_decode_reduction);
}
shared_ptr<Piece> piece (new Piece (i, decoder, frc));
_pieces.push_back (piece);
if (decoder->video) {
- decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
+ if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
+ decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
+ } else {
+ decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
+ }
}
if (decoder->audio) {
}
}
+ _stream_states.clear ();
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
if (i->content->audio) {
BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
}
}
- _black = Empty (_film, bind(&Content::video, _1));
- _silent = Empty (_film, bind(&Content::audio, _1));
+ _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
+ _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
_last_video_time = DCPTime ();
+ _last_video_eyes = EYES_BOTH;
_last_audio_time = DCPTime ();
_have_valid_pieces = true;
}
property == DCPContentProperty::NEEDS_ASSETS ||
property == DCPContentProperty::NEEDS_KDM ||
property == SubtitleContentProperty::COLOUR ||
- property == SubtitleContentProperty::OUTLINE ||
- property == SubtitleContentProperty::SHADOW ||
+ property == SubtitleContentProperty::EFFECT ||
property == SubtitleContentProperty::EFFECT_COLOUR ||
property == FFmpegContentProperty::SUBTITLE_STREAM ||
+ property == FFmpegContentProperty::FILTERS ||
property == VideoContentProperty::COLOUR_CONVERSION
) {
}
shared_ptr<PlayerVideo>
-Player::black_player_video_frame () const
+Player::black_player_video_frame (Eyes eyes) const
{
return shared_ptr<PlayerVideo> (
new PlayerVideo (
optional<double> (),
_video_container_size,
_video_container_size,
- EYES_BOTH,
+ eyes,
PART_WHOLE,
PresetColourConversion::all().front().conversion
)
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
/* See comment in dcp_to_content_video */
- DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
- return max (DCPTime (), d + piece->content->position ());
+ DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
+ return d + piece->content->position();
}
Frame
_always_burn_subtitles = burn;
}
+/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
scoped_ptr<DCPDecoder> decoder;
try {
- decoder.reset (new DCPDecoder (j, _film->log()));
+ decoder.reset (new DCPDecoder (j, _film->log(), false));
} catch (...) {
return a;
}
setup_pieces ();
}
+ if (_playlist->length() == DCPTime()) {
+ /* Special case of an empty Film; just give one black frame */
+ emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
+ return true;
+ }
+
/* Find the decoder or empty which is farthest behind where we are and make it emit some data */
- shared_ptr<Piece> earliest;
- DCPTime earliest_content;
+ shared_ptr<Piece> earliest_content;
+ optional<DCPTime> earliest_time;
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- if (!i->done) {
- DCPTime const t = content_time_to_dcp (i, i->decoder->position());
+ if (i->done) {
+ continue;
+ }
+
+ DCPTime const t = content_time_to_dcp (i, i->decoder->position());
+ if (t > i->content->end()) {
+ i->done = true;
+ } else {
+
/* Given two choices at the same time, pick the one with a subtitle so we see it before
the video.
*/
- if (!earliest || t < earliest_content || (t == earliest_content && i->decoder->subtitle)) {
- earliest_content = t;
- earliest = i;
+ if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
+ earliest_time = t;
+ earliest_content = i;
}
}
}
bool done = false;
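+ /* Decide whether the next thing to emit is some content, black filler or silence */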
- if (!_black.done() && (!earliest || _black.position() < earliest_content)) {
- /* There is some black that must be emitted */
- emit_video (black_player_video_frame(), _black.position());
+ enum {
+ NONE,
+ CONTENT,
+ BLACK,
+ SILENT
+ } which = NONE;
+
+ if (earliest_content) {
+ which = CONTENT;
+ }
+
+ if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
+ earliest_time = _black.position ();
+ which = BLACK;
+ }
+
+ if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
+ earliest_time = _silent.position ();
+ which = SILENT;
+ }
+
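+ /* Now act on whichever of those we decided was earliest */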
+ switch (which) {
+ case CONTENT:
+ earliest_content->done = earliest_content->decoder->pass ();
+ break;
+ case BLACK:
+ emit_video (black_player_video_frame(EYES_BOTH), _black.position());
_black.set_position (_black.position() + one_video_frame());
- } else if (!_silent.done() && (!earliest || _silent.position() < earliest_content)) {
- /* There is some silence that must be emitted */
+ break;
+ case SILENT:
+ {
DCPTimePeriod period (_silent.period_at_position());
+ if (_last_audio_time) {
+ /* Sometimes the thing that happened last finishes fractionally before
+ this silence. Bodge the start time of the silence to fix it. I'm
+ not sure if this is the right solution --- maybe the last thing should
+ be padded `forward' rather than this thing padding `back'.
+ */
+ period.from = min(period.from, *_last_audio_time);
+ }
if (period.duration() > one_video_frame()) {
period.to = period.from + one_video_frame();
}
fill_audio (period);
_silent.set_position (period.to);
- } else if (_playlist->length() == DCPTime()) {
- /* Special case of an empty Film; just give one black frame */
- emit_video (black_player_video_frame(), DCPTime());
- } else if (earliest) {
- earliest->done = earliest->decoder->pass ();
- } else {
+ break;
+ }
+ case NONE:
done = true;
+ break;
}
/* Emit any audio that is ready */
+ /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
+ of our streams, or the position of _silent.
+ */
DCPTime pull_to = _film->length ();
for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
if (!i->second.piece->done && i->second.last_push_end < pull_to) {
pull_to = i->second.last_push_end;
}
}
+ if (!_silent.done() && _silent.position() < pull_to) {
+ pull_to = _silent.position();
+ }
list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
if (_last_audio_time && i->second < *_last_audio_time) {
- /* There has been an accurate seek and we have received some audio before the seek time;
- discard it.
- */
+ /* This new data comes before the last we emitted (or the last seek); discard it */
pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
if (!cut.first) {
continue;
}
*i = cut;
+ } else if (_last_audio_time && i->second > *_last_audio_time) {
+ /* There's a gap between this data and the last we emitted; fill with silence */
+ fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
}
emit_audio (i->first, i->second);
}
+ if (done) {
+ _shuffler->flush ();
+ }
return done;
}
{
list<PositionImage> subtitles;
- BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
+ int const vfr = _film->video_frame_rate();
+
+ BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
/* Image subtitles */
list<PositionImage> c = transform_image_subtitles (i.image);
/* Text subtitles (rendered to an image) */
if (!i.text.empty ()) {
- list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
+ list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
copy (s.begin(), s.end(), back_inserter (subtitles));
}
}
return;
}
- /* Time and period of the frame we will emit */
+ /* Time of the first frame we will emit */
DCPTime const time = content_video_to_dcp (piece, video.frame);
- DCPTimePeriod const period (time, time + one_video_frame());
-
- /* Fill gaps that we discover now that we have some video which needs to be emitted */
-
- if (_last_video_time) {
- /* XXX: this may not work for 3D */
- DCPTime fill_from = max (*_last_video_time, piece->content->position());
- for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
- LastVideoMap::const_iterator k = _last_video.find (wp);
- if (k != _last_video.end ()) {
- emit_video (k->second, j);
- } else {
- emit_video (black_player_video_frame(), j);
- }
- }
- }
/* Discard if it's outside the content's period or if it's before the last accurate seek */
if (
return;
}
+ /* Fill gaps that we discover now that we have some video which needs to be emitted */
+
+ if (_last_video_time) {
+ DCPTime fill_from = max (*_last_video_time, piece->content->position());
+ LastVideoMap::const_iterator last = _last_video.find (wp);
+ if (_film->three_d()) {
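+ /* Fill with copies of the last frame (or black), alternating eyes, until we reach the time and eye of the incoming video */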
+ DCPTime j = fill_from;
+ Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
+ if (eyes == EYES_BOTH) {
+ eyes = EYES_LEFT;
+ }
+ while (j < time || eyes != video.eyes) {
+ if (last != _last_video.end()) {
+ shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
+ copy->set_eyes (eyes);
+ emit_video (copy, j);
+ } else {
+ emit_video (black_player_video_frame(eyes), j);
+ }
+ if (eyes == EYES_RIGHT) {
+ j += one_video_frame();
+ }
+ eyes = increment_eyes (eyes);
+ }
+ } else {
+ for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
+ if (last != _last_video.end()) {
+ emit_video (last->second, j);
+ } else {
+ emit_video (black_player_video_frame(EYES_BOTH), j);
+ }
+ }
+ }
+ }
+
_last_video[wp].reset (
new PlayerVideo (
video.image,
)
);
- emit_video (_last_video[wp], time);
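+ /* Emit the frame once for each repeat that the frame rate change requires */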
+ DCPTime t = time;
+ for (int i = 0; i < frc.repeat; ++i) {
+ emit_video (_last_video[wp], t);
+ t += one_video_frame ();
+ }
}
void
void
Player::seek (DCPTime time, bool accurate)
{
+ if (!_have_valid_pieces) {
+ setup_pieces ();
+ }
+
+ if (_shuffler) {
+ _shuffler->clear ();
+ }
+
if (_audio_processor) {
_audio_processor->flush ();
}
if (accurate) {
_last_video_time = time;
+ _last_video_eyes = EYES_LEFT;
_last_audio_time = time;
} else {
_last_video_time = optional<DCPTime>();
+ _last_video_eyes = optional<Eyes>();
_last_audio_time = optional<DCPTime>();
}
_last_video_time = time + one_video_frame();
_active_subtitles.clear_before (time);
}
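+ /* Note which eye should come next, so that 3D gap-filling can carry on correctly after this frame */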
+ _last_video_eyes = increment_eyes (pv->eyes());
}
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
+ /* This audio must follow on from the previous */
+ DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
Audio (data, time);
_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
return make_pair(cut, time + discard_time);
}
+
+void
+Player::set_dcp_decode_reduction (optional<int> reduction)
+{
+ if (reduction == _dcp_decode_reduction) {
+ return;
+ }
+
+ _dcp_decode_reduction = reduction;
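+ /* Invalidate the pieces so that they are re-created, passing the new reduction to any DCP decoders */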
+ _have_valid_pieces = false;
+ Changed (false);
+}