/*
- Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
*/
+#include "atmos_decoder.h"
#include "player.h"
#include "film.h"
#include "audio_buffers.h"
#include "content_audio.h"
#include "dcp_content.h"
+#include "dcpomatic_log.h"
#include "job.h"
#include "image.h"
#include "raw_image_proxy.h"
/* Identifiers for Player properties reported via the Change signal. */
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Length of what is being played; may differ from the film's own length when a
   separate playlist is in use (see setup_pieces_unlocked / _playback_length). */
int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* NOTE(review): this hunk drops the mandatory Playlist parameter from the main
   constructor (the Player now falls back to the film's own playlist — see
   Player::playlist()) and changes _suspended from a bool to a counter (0 = not
   suspended), so nested suspend/resume pairs balance correctly.
   The initializer list below looks abridged by elided diff context — the second
   constructor also initialises _always_burn_open_subtitles, _fast and _tolerant;
   TODO confirm against the full file before resolving this hunk. */
-Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
+Player::Player (shared_ptr<const Film> film)
: _film (film)
- , _playlist (playlist)
- , _suspended (false)
+ , _suspended (0)
, _ignore_video (false)
, _ignore_audio (false)
, _ignore_text (false)
, _play_referenced (false)
, _audio_merger (_film->audio_frame_rate())
, _shuffler (0)
+{
+ construct ();
+}
+
+Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
+ : _film (film)
+ , _playlist (playlist_)
+ , _suspended (0)
+ , _ignore_video (false)
+ , _ignore_audio (false)
+ , _ignore_text (false)
+ , _always_burn_open_subtitles (false)
+ , _fast (false)
+ , _tolerant (film->tolerant())
+ , _play_referenced (false)
+ , _audio_merger (_film->audio_frame_rate())
+ , _shuffler (0)
+{
+ construct ();
+}
+
+void
+Player::construct ()
{
_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
/* The butler must hear about this first, so since we are proxying this through to the butler we must
be first.
*/
- _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
- _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
+ _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
+ _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
set_video_container_size (_film->frame_size ());
film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
setup_pieces_unlocked ();
}
+
bool
-have_video (shared_ptr<Piece> piece)
+have_video (shared_ptr<const Content> content)
{
- return piece->decoder && piece->decoder->video;
+ return static_cast<bool>(content->video) && content->video->use();
}
bool
-have_audio (shared_ptr<Piece> piece)
+have_audio (shared_ptr<const Content> content)
{
- return piece->decoder && piece->decoder->audio;
+ return static_cast<bool>(content->audio);
}
/* NOTE(review): this hunk is heavily elided (the jump after decoder_factory and
   the unbalanced braces near the end are missing diff context) — do not resolve
   it without the full file.  Visible changes:
   - _playback_length is computed once here: the explicit playlist's length if
     one was given, otherwise the film's length;
   - content is taken from playlist() instead of _playlist directly;
   - a decoder is now asserted to exist rather than silently skipped (Atmos
     content is now decodable — see the atmos Data connection below);
   - Empty is built from the playlist and bounded by _playback_length instead of
     being built from _pieces. */
void
Player::setup_pieces_unlocked ()
{
+ _playback_length = _playlist ? _playlist->length(_film) : _film->length();
+
list<shared_ptr<Piece> > old_pieces = _pieces;
_pieces.clear ();
_shuffler = new Shuffler();
_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
- BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+ BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
if (!i->paths_valid ()) {
continue;
}
shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
- FrameRateChange frc (_film, i);
+ DCPOMATIC_ASSERT (decoder);
- if (!decoder) {
- /* Not something that we can decode; e.g. Atmos content */
- continue;
- }
+ FrameRateChange frc (_film, i);
if (decoder->video && _ignore_video) {
decoder->video->set_ignore (true);
++j;
}
+
+ if (decoder->atmos) {
+ decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
+ }
}
_stream_states.clear ();
}
}
- _black = Empty (_film, _pieces, bind(&have_video, _1));
- _silent = Empty (_film, _pieces, bind(&have_audio, _1));
+ _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
+ _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
_last_video_time = DCPTime ();
_last_video_eyes = EYES_BOTH;
/* NOTE(review): the return type line and closing brace of this function are
   elided by the diff.  Visible changes: _suspended becomes a counter that is
   incremented on PENDING and decremented on DONE/CANCELLED, so overlapping
   pending changes keep the player suspended until all have resolved.  The
   explicit _mutex locks here are removed — presumably locking now happens
   elsewhere (e.g. inside setup_pieces or because _suspended is accessed
   atomically); TODO confirm against the full file. */
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
if (type == CHANGE_TYPE_PENDING) {
- boost::mutex::scoped_lock lm (_mutex);
/* The player content is probably about to change, so we can't carry on
until that has happened and we've rebuilt our pieces. Stop pass()
and seek() from working until then.
*/
- _suspended = true;
+ ++_suspended;
} else if (type == CHANGE_TYPE_DONE) {
/* A change in our content has gone through. Re-build our pieces. */
setup_pieces ();
- _suspended = false;
+ --_suspended;
} else if (type == CHANGE_TYPE_CANCELLED) {
- boost::mutex::scoped_lock lm (_mutex);
- _suspended = false;
+ --_suspended;
}
Change (type, property, frequent);
/* NOTE(review): two unrelated fragments joined by elided context.
   First: a PlayerVideo construction gains a trailing `false` argument after the
   optional<Frame> — its meaning is not visible here (TODO confirm; it matches
   the `false` added to the PlayerVideo built in Player::video()).
   Second: the referenced-reel-asset loop iterates playlist()->content() instead
   of _playlist->content(), consistent with the optional-playlist change. */
PresetColourConversion::all().front().conversion,
VIDEO_RANGE_FULL,
boost::weak_ptr<Content>(),
- boost::optional<Frame>()
+ boost::optional<Frame>(),
+ false
)
);
}
list<ReferencedReelAsset> a;
- BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+ BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
if (!j) {
continue;
/* NOTE(review): fragments of Player::pass(), with context elided between the
   switch cases and around the audio pull.  Visible changes:
   - debug logging (LOG_DEBUG_PLAYER*) added at each decision point, which
     requires the new dcpomatic_log.h include;
   - the empty-playlist special case and the audio pull limit both use the
     precomputed _playback_length instead of re-querying _playlist/_film, so
     pass() respects an explicit playlist's length;
   - the silence-position error tolerance is widened from 4 DCPTime units to one
     frame at 24fps, silencing spurious log errors for sub-frame misalignment. */
if (_suspended) {
/* We can't pass in this state */
+ LOG_DEBUG_PLAYER_NC ("Player is suspended");
return false;
}
- if (_playlist->length(_film) == DCPTime()) {
- /* Special case of an empty Film; just give one black frame */
+ if (_playback_length == DCPTime()) {
+ /* Special; just give one black frame */
emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
return true;
}
switch (which) {
case CONTENT:
{
+ LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
earliest_content->done = earliest_content->decoder->pass ();
shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
if (dcp && !_play_referenced && dcp->reference_audio()) {
break;
}
case BLACK:
+ LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
emit_video (black_player_video_frame(EYES_BOTH), _black.position());
_black.set_position (_black.position() + one_video_frame());
break;
case SILENT:
{
+ LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
DCPTimePeriod period (_silent.period_at_position());
if (_last_audio_time) {
/* Sometimes the thing that happened last finishes fractionally before
remove a little silence at the end of some content.
*/
int64_t const error = labs(period.from.get() - _last_audio_time->get());
- int64_t const too_much_error = 4;
+ /* Let's not worry about less than a frame at 24fps */
+ int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
if (error >= too_much_error) {
_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
}
/* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
of our streams, or the position of the _silent.
*/
- DCPTime pull_to = _film->length ();
+ DCPTime pull_to = _playback_length;
for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
if (!i->second.piece->done && i->second.last_push_end < pull_to) {
pull_to = i->second.last_push_end;
pull_to = _silent.position();
}
+ LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
if (_last_audio_time && i->second < *_last_audio_time) {
return;
}
/* NOTE(review): fragments of Player::video(), with elided context between the
   fill loop and the PlayerVideo construction.  Visible changes:
   - frames from content whose video is not marked for use are dropped early
     (matches the new have_video() predicate);
   - debug logging added for received frames and 3D gap-filling;
   - the scaled size is now computed via scale_for_display() on
     video->scaled_size(...) rather than the old scale().size(...) API;
   - PlayerVideo gains a trailing `false` argument after video.frame — meaning
     not visible in this chunk, TODO confirm (same flag as in the black-frame
     fragment above). */
+ if (!piece->content->video->use()) {
+ return;
+ }
+
FrameRateChange frc (_film, piece->content);
if (frc.skip && (video.frame % 2) == 1) {
return;
/* Time of the first frame we will emit */
DCPTime const time = content_video_to_dcp (piece, video.frame);
+ LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
/* Discard if it's before the content's period or the last accurate seek. We can't discard
if it's after the content's period here as in that case we still need to fill any gap between
}
while (j < fill_to || eyes != fill_to_eyes) {
if (last != _last_video.end()) {
+ LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
copy->set_eyes (eyes);
emit_video (copy, j);
} else {
+ LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
emit_video (black_player_video_frame(eyes), j);
}
if (eyes == EYES_RIGHT) {
video.image,
piece->content->video->crop (),
piece->content->video->fade (_film, video.frame),
- piece->content->video->scale().size (
- piece->content->video, _video_container_size, _film->frame_size ()
- ),
+ scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
_video_container_size,
video.eyes,
video.part,
piece->content->video->colour_conversion(),
piece->content->video->range(),
piece->content,
- video.frame
+ video.frame,
+ false
)
);
/* NOTE(review): fragments of Player::audio().  Visible changes: debug logging
   for received audio, and the manual channel-allocate-then-copy_from trimming
   of the buffer is replaced by an AudioBuffers constructor that copies
   `remaining_frames` frames starting at offset 0 from the existing buffer —
   same result, one call. */
/* Compute time in the DCP */
DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
+ LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
+
/* And the end of this block in the DCP */
DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
if (remaining_frames == 0) {
return;
}
- shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
- cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
- content_audio.audio = cut;
+ content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
}
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* NOTE(review): fragment of the bitmap-subtitle handler.  The change guards
   against a zero-width or zero-height scaled subtitle (possible when the
   subtitle rectangle is degenerate), returning early instead of asking
   Image::scale() for a zero-sized target. */
PlayerText ps;
shared_ptr<Image> image = subtitle.sub.image;
+
/* We will scale the subtitle up to fit _video_container_size */
- dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
+ int const width = subtitle.sub.rectangle.width * _video_container_size.width;
+ int const height = subtitle.sub.rectangle.height * _video_container_size.height;
+ if (width == 0 || height == 0) {
+ return;
+ }
+
+ dcp::Size scaled_size (width, height);
ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
DCPTime from (content_time_to_dcp (piece, subtitle.from()));
/* NOTE(review): three fragments joined by elided context: the start of
   Player::seek() (gains a debug log line), the tail of the audio-discard helper
   (manual AudioBuffers allocate+copy_from replaced by the copying constructor
   taking source, frame count and read offset), and the fall-through tail of a
   time-conversion function returning an empty optional when the content is not
   found. */
Player::seek (DCPTime time, bool accurate)
{
boost::mutex::scoped_lock lm (_mutex);
+ LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
if (_suspended) {
/* We can't seek in this state */
if (remaining_frames <= 0) {
return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
}
- shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
- cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
+ shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
return make_pair(cut, time + discard_time);
}
/* We couldn't find this content; perhaps things are being changed over */
return optional<DCPTime>();
}
+
+
+shared_ptr<const Playlist>
+Player::playlist () const
+{
+ return _playlist ? _playlist : _film->playlist();
+}
+
+
+void
+Player::atmos (weak_ptr<Piece>, ContentAtmos data)
+{
+ Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
+}
+