/*
- Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
#include "image_decoder.h"
#include "compose.hpp"
#include "shuffler.h"
+#include "timer.h"
#include <dcp/reel.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include "i18n.h"
-#define LOG_GENERAL(...) dcpomatic_log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
-
using std::list;
using std::cout;
using std::min;
using boost::dynamic_pointer_cast;
using boost::optional;
using boost::scoped_ptr;
+using namespace dcpomatic;
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
: _film (film)
, _playlist (playlist)
- , _suspended (false)
+ , _suspended (0)
, _ignore_video (false)
, _ignore_audio (false)
, _ignore_text (false)
, _always_burn_open_subtitles (false)
, _fast (false)
+ , _tolerant (film->tolerant())
, _play_referenced (false)
, _audio_merger (_film->audio_frame_rate())
, _shuffler (0)
void
Player::setup_pieces_unlocked ()
{
+ list<shared_ptr<Piece> > old_pieces = _pieces;
_pieces.clear ();
delete _shuffler;
continue;
}
- shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
- FrameRateChange frc (i->active_video_frame_rate(_film), _film->video_frame_rate());
+ shared_ptr<Decoder> old_decoder;
+ BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
+ if (j->content == i) {
+ old_decoder = j->decoder;
+ break;
+ }
+ }
+
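+ /* Offer the old decoder for this content to decoder_factory so that, where possible, it can be re-used rather than rebuilt from scratch */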
+ shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
+ FrameRateChange frc (_film, i);
if (!decoder) {
/* Not something that we can decode; e.g. Atmos content */
_last_video_time = DCPTime ();
_last_video_eyes = EYES_BOTH;
_last_audio_time = DCPTime ();
- _suspended = false;
+
+ /* Cached value to save recalculating it on every ::pass */
+ _film_length = _film->length ();
}
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
if (type == CHANGE_TYPE_PENDING) {
- boost::mutex::scoped_lock lm (_mutex);
/* The player content is probably about to change, so we can't carry on
until that has happened and we've rebuilt our pieces. Stop pass()
and seek() from working until then.
*/
- _suspended = true;
+ ++_suspended;
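+ /* (_suspended is a count rather than a bool, since more than one change may be pending at once) */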
} else if (type == CHANGE_TYPE_DONE) {
/* A change in our content has gone through. Re-build our pieces. */
setup_pieces ();
+ --_suspended;
} else if (type == CHANGE_TYPE_CANCELLED) {
- boost::mutex::scoped_lock lm (_mutex);
- _suspended = false;
+ --_suspended;
}
Change (type, property, frequent);
eyes,
PART_WHOLE,
PresetColourConversion::all().front().conversion,
+ VIDEO_RANGE_FULL,
boost::weak_ptr<Content>(),
boost::optional<Frame>()
)
setup_pieces_unlocked ();
}
+static void
+maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
+{
+ DCPOMATIC_ASSERT (r);
+ r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
+ r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
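+ /* Only refer to the asset if some of it is left after trimming */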
+ if (r->actual_duration() > 0) {
+ a.push_back (
+ ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
+ );
+ }
+}
+
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
scoped_ptr<DCPDecoder> decoder;
try {
- decoder.reset (new DCPDecoder (j, false));
+ decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
} catch (...) {
return a;
}
- int64_t offset = 0;
+ DCPOMATIC_ASSERT (j->video_frame_rate ());
+ double const cfr = j->video_frame_rate().get();
+ Frame const trim_start = j->trim_start().frames_round (cfr);
+ Frame const trim_end = j->trim_end().frames_round (cfr);
+ int const ffr = _film->video_frame_rate ();
+
+ /* position in the asset from the start */
+ int64_t offset_from_start = 0;
+ /* position in the asset from the end */
+ int64_t offset_from_end = 0;
+ BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
+ /* Assume that main picture duration is the length of the reel */
+ offset_from_end += k->main_picture()->actual_duration();
+ }
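+ /* offset_from_end now holds the total length of the DCP; at the top of each pass of the loop below it is the distance from the start of that reel to the end */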
+
BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
- DCPOMATIC_ASSERT (j->video_frame_rate ());
- double const cfr = j->video_frame_rate().get();
- Frame const trim_start = j->trim_start().frames_round (cfr);
- Frame const trim_end = j->trim_end().frames_round (cfr);
- int const ffr = _film->video_frame_rate ();
+ /* Assume that main picture duration is the length of the reel */
+ int64_t const reel_duration = k->main_picture()->actual_duration();
+
+ /* See doc/design/trim_reels.svg */
+ Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
+ Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
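+ /* For example: with two 24-frame reels and trim_start = 30, the first reel gets reel_trim_start = 24 (it is skipped entirely) and the second gets 6 */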
- DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
+ DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
if (j->reference_video ()) {
- shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
- DCPOMATIC_ASSERT (ra);
- ra->set_entry_point (ra->entry_point() + trim_start);
- ra->set_duration (ra->duration() - trim_start - trim_end);
- a.push_back (
- ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
- );
+ maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
}
if (j->reference_audio ()) {
- shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
- DCPOMATIC_ASSERT (ra);
- ra->set_entry_point (ra->entry_point() + trim_start);
- ra->set_duration (ra->duration() - trim_start - trim_end);
- a.push_back (
- ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
- );
+ maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
}
if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
- shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
- DCPOMATIC_ASSERT (ra);
- ra->set_entry_point (ra->entry_point() + trim_start);
- ra->set_duration (ra->duration() - trim_start - trim_end);
- a.push_back (
- ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
- );
+ maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
}
if (j->reference_text (TEXT_CLOSED_CAPTION)) {
BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
- DCPOMATIC_ASSERT (l);
- l->set_entry_point (l->entry_point() + trim_start);
- l->set_duration (l->duration() - trim_start - trim_end);
- a.push_back (
- ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
- );
+ maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
}
}
- /* Assume that main picture duration is the length of the reel */
- offset += k->main_picture()->duration ();
+ offset_from_start += reel_duration;
+ offset_from_end -= reel_duration;
}
}
Player::pass ()
{
boost::mutex::scoped_lock lm (_mutex);
+ DCPOMATIC_ASSERT (_film_length);
if (_suspended) {
/* We can't pass in this state */
return false;
}
- if (_playlist->length(_film) == DCPTime()) {
+ if (*_film_length == DCPTime()) {
/* Special case of an empty Film; just give one black frame */
emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
return true;
continue;
}
- DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(_film), i->content->trim_start()));
+ DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
if (t > i->content->end(_film)) {
i->done = true;
} else {
which = CONTENT;
}
- if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
+ if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
earliest_time = _black.position ();
which = BLACK;
}
- if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
+ if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
earliest_time = _silent.position ();
which = SILENT;
}
switch (which) {
case CONTENT:
- earliest_content->done = earliest_content->decoder->pass (_film);
+ {
+ earliest_content->done = earliest_content->decoder->pass ();
+ shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
+ if (dcp && !_play_referenced && dcp->reference_audio()) {
+ /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
+ to `hide' the fact that no audio was emitted during the referenced DCP (though
+ we need to behave as though it had been).
+ */
+ _last_audio_time = dcp->end (_film);
+ }
break;
+ }
case BLACK:
emit_video (black_player_video_frame(EYES_BOTH), _black.position());
_black.set_position (_black.position() + one_video_frame());
DCPTimePeriod period (_silent.period_at_position());
if (_last_audio_time) {
/* Sometimes the thing that happened last finishes fractionally before
- this silence. Bodge the start time of the silence to fix it. I'm
- not sure if this is the right solution --- maybe the last thing should
- be padded `forward' rather than this thing padding `back'.
+ or after this silence. Bodge the start time of the silence to fix it.
+ I think this is nothing to worry about since we will just add or
+ remove a little silence at the end of some content.
*/
- period.from = min(period.from, *_last_audio_time);
+ int64_t const error = labs(period.from.get() - _last_audio_time->get());
+ /* Let's not worry about less than a frame at 24fps */
+ int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
+ if (error >= too_much_error) {
+ _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
+ }
+ DCPOMATIC_ASSERT (error < too_much_error);
+ period.from = *_last_audio_time;
}
if (period.duration() > one_video_frame()) {
period.to = period.from + one_video_frame();
/* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
of our streams, or the position of the _silent.
*/
- DCPTime pull_to = _film->length ();
+ DCPTime pull_to = *_film_length;
for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
if (!i->second.piece->done && i->second.last_push_end < pull_to) {
pull_to = i->second.last_push_end;
return;
}
- FrameRateChange frc(piece->content->active_video_frame_rate(_film), _film->video_frame_rate());
+ FrameRateChange frc (_film, piece->content);
if (frc.skip && (video.frame % 2) == 1) {
return;
}
if (_last_video_time) {
DCPTime fill_from = max (*_last_video_time, piece->content->position());
- LastVideoMap::const_iterator last = _last_video.find (wp);
- if (_film->three_d()) {
- Eyes fill_to_eyes = video.eyes;
- if (fill_to_eyes == EYES_BOTH) {
- fill_to_eyes = EYES_LEFT;
- }
- if (fill_to == piece->content->end(_film)) {
- /* Don't fill after the end of the content */
- fill_to_eyes = EYES_LEFT;
- }
- DCPTime j = fill_from;
- Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
- if (eyes == EYES_BOTH) {
- eyes = EYES_LEFT;
- }
- while (j < fill_to || eyes != fill_to_eyes) {
- if (last != _last_video.end()) {
- shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
- copy->set_eyes (eyes);
- emit_video (copy, j);
- } else {
- emit_video (black_player_video_frame(eyes), j);
+
+ /* Fill if we have more than half a frame to do */
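+ /* (anything less than that is assumed to be rounding error rather than a real gap) */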
+ if ((fill_to - fill_from) > one_video_frame() / 2) {
+ LastVideoMap::const_iterator last = _last_video.find (wp);
+ if (_film->three_d()) {
+ Eyes fill_to_eyes = video.eyes;
+ if (fill_to_eyes == EYES_BOTH) {
+ fill_to_eyes = EYES_LEFT;
}
- if (eyes == EYES_RIGHT) {
- j += one_video_frame();
+ if (fill_to == piece->content->end(_film)) {
+ /* Don't fill after the end of the content */
+ fill_to_eyes = EYES_LEFT;
}
- eyes = increment_eyes (eyes);
- }
- } else {
- for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
- if (last != _last_video.end()) {
- emit_video (last->second, j);
- } else {
- emit_video (black_player_video_frame(EYES_BOTH), j);
+ DCPTime j = fill_from;
+ Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
+ if (eyes == EYES_BOTH) {
+ eyes = EYES_LEFT;
+ }
+ while (j < fill_to || eyes != fill_to_eyes) {
+ if (last != _last_video.end()) {
+ shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
+ copy->set_eyes (eyes);
+ emit_video (copy, j);
+ } else {
+ emit_video (black_player_video_frame(eyes), j);
+ }
+ if (eyes == EYES_RIGHT) {
+ j += one_video_frame();
+ }
+ eyes = increment_eyes (eyes);
+ }
+ } else {
+ for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
+ if (last != _last_video.end()) {
+ emit_video (last->second, j);
+ } else {
+ emit_video (black_player_video_frame(EYES_BOTH), j);
+ }
}
}
}
video.eyes,
video.part,
piece->content->video->colour_conversion(),
+ piece->content->video->range(),
piece->content,
video.frame
)
shared_ptr<AudioContent> content = piece->content->audio;
DCPOMATIC_ASSERT (content);
+ int const rfr = content->resampled_frame_rate (_film);
+
/* Compute time in the DCP */
DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
/* And the end of this block in the DCP */
- DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate(_film));
+ DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
/* Discard it all */
return;
} else if (end > piece->content->end(_film)) {
- Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(_film->audio_frame_rate());
+ Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
if (remaining_frames == 0) {
return;
}
- shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
- cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
- content_audio.audio = cut;
+ content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
}
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
PlayerText ps;
shared_ptr<Image> image = subtitle.sub.image;
+
/* We will scale the subtitle up to fit _video_container_size */
- dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
+ int const width = subtitle.sub.rectangle.width * _video_container_size.width;
+ int const height = subtitle.sub.rectangle.height * _video_container_size.height;
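+ /* Ignore empty subtitles: we cannot scale an image to zero size */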
+ if (width == 0 || height == 0) {
+ return;
+ }
+
+ dcp::Size scaled_size (width, height);
ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
DCPTime from (content_time_to_dcp (piece, subtitle.from()));
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
if (time < i->content->position()) {
- /* Before; seek to the start of the content */
- i->decoder->seek (_film, dcp_to_content_time (i, i->content->position()), accurate);
+ /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
+ we must seek this (following) content accurately, otherwise when we come to the end of the current
+ content we may not start right at the beginning of the next, causing a gap (if the next content has
+ been trimmed to a point between keyframes, for example).
+ */
+ i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
i->done = false;
} else if (i->content->position() <= time && time < i->content->end(_film)) {
/* During; seek to position */
- i->decoder->seek (_film, dcp_to_content_time (i, time), accurate);
+ i->decoder->seek (dcp_to_content_time (i, time), accurate);
i->done = false;
} else {
/* After; this piece is done */
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
/* Log if the assert below is about to fail */
- if (_last_audio_time && time != *_last_audio_time) {
+ if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
}
- /* This audio must follow on from the previous */
- DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
- Audio (data, time);
+ /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
+ DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
+ Audio (data, time, _film->audio_frame_rate());
_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
if (remaining_frames <= 0) {
return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
}
- shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
- cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
+ shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
return make_pair(cut, time + discard_time);
}