#include "atmos_decoder.h"
-#include "player.h"
-#include "film.h"
#include "audio_buffers.h"
+#include "audio_content.h"
+#include "audio_decoder.h"
+#include "audio_processor.h"
+#include "compose.hpp"
+#include "config.h"
#include "content_audio.h"
+#include "content_video.h"
#include "dcp_content.h"
+#include "dcp_decoder.h"
#include "dcpomatic_log.h"
-#include "job.h"
+#include "decoder.h"
+#include "decoder_factory.h"
+#include "ffmpeg_content.h"
+#include "film.h"
+#include "frame_rate_change.h"
#include "image.h"
-#include "raw_image_proxy.h"
-#include "ratio.h"
+#include "image_decoder.h"
+#include "job.h"
#include "log.h"
-#include "render_text.h"
-#include "config.h"
-#include "content_video.h"
+#include "maths_util.h"
+#include "piece.h"
+#include "player.h"
#include "player_video.h"
-#include "frame_rate_change.h"
-#include "audio_processor.h"
#include "playlist.h"
-#include "referenced_reel_asset.h"
-#include "decoder_factory.h"
-#include "decoder.h"
-#include "video_decoder.h"
-#include "audio_decoder.h"
+#include "ratio.h"
+#include "raw_image_proxy.h"
+#include "render_text.h"
+#include "shuffler.h"
#include "text_content.h"
#include "text_decoder.h"
-#include "ffmpeg_content.h"
-#include "audio_content.h"
-#include "dcp_decoder.h"
-#include "image_decoder.h"
-#include "compose.hpp"
-#include "shuffler.h"
#include "timer.h"
+#include "video_decoder.h"
#include <dcp/reel.h>
+#include <dcp/reel_closed_caption_asset.h>
+#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
-#include <dcp/reel_picture_asset.h>
-#include <dcp/reel_closed_caption_asset.h>
-#include <stdint.h>
#include <algorithm>
#include <iostream>
+#include <stdint.h>
#include "i18n.h"
using std::list;
using std::make_pair;
using std::make_shared;
using std::max;
using std::min;
using std::shared_ptr;
using std::vector;
using std::weak_ptr;
-using std::make_shared;
using boost::optional;
using boost::scoped_ptr;
#if BOOST_VERSION >= 106100
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
+int const PlayerProperty::IGNORE_VIDEO = 706;
+int const PlayerProperty::IGNORE_AUDIO = 707;
+int const PlayerProperty::IGNORE_TEXT = 708;
+int const PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES = 709;
+int const PlayerProperty::PLAY_REFERENCED = 710;
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
: _film (film)
, _suspended (0)
+ , _ignore_video(false)
+ , _ignore_audio(false)
+ , _ignore_text(false)
+ , _always_burn_open_subtitles(false)
+ , _fast(false)
, _tolerant (film->tolerant())
- , _audio_merger (_film->audio_frame_rate())
+ , _play_referenced(false)
+ , _audio_merger(film->audio_frame_rate())
, _subtitle_alignment (subtitle_alignment)
{
construct ();
: _film (film)
, _playlist (playlist_)
, _suspended (0)
+ , _ignore_video(false)
+ , _ignore_audio(false)
+ , _ignore_text(false)
+ , _always_burn_open_subtitles(false)
+ , _fast(false)
, _tolerant (film->tolerant())
- , _audio_merger (_film->audio_frame_rate())
+ , _play_referenced(false)
+ , _audio_merger(film->audio_frame_rate())
{
construct ();
}
void
Player::construct ()
{
- _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
+ connect();
+ set_video_container_size(film->frame_size());
+
+ film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
+
+ setup_pieces ();
+ seek (DCPTime (), true);
+}
+
+
+void
+Player::connect()
+{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
+ _film_changed_connection = film->Change.connect(bind(&Player::film_change, this, _1, _2));
/* The butler must hear about this first, so since we are proxying this through to the butler we must
be first.
*/
_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
- set_video_container_size (_film->frame_size ());
+}
- film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
- setup_pieces ();
- seek (DCPTime (), true);
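+
+/* The move constructor and assignment copy the atomic members via load() and call connect()
+   so that the signal connections refer to the new object */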
+Player::Player(Player&& other)
+ : _film(other._film)
+ , _playlist(std::move(other._playlist))
+ , _suspended(other._suspended.load())
+ , _pieces(std::move(other._pieces))
+ , _video_container_size(other._video_container_size.load())
+ , _black_image(std::move(other._black_image))
+ , _ignore_video(other._ignore_video.load())
+ , _ignore_audio(other._ignore_audio.load())
+ , _ignore_text(other._ignore_text.load())
+ , _always_burn_open_subtitles(other._always_burn_open_subtitles.load())
+ , _fast(other._fast.load())
+ , _tolerant(other._tolerant)
+ , _play_referenced(other._play_referenced.load())
+	, _next_video_time(other._next_video_time)
+	, _next_video_eyes(other._next_video_eyes)
+ , _next_audio_time(other._next_audio_time)
+ , _dcp_decode_reduction(other._dcp_decode_reduction.load())
+ , _last_video(std::move(other._last_video))
+ , _audio_merger(std::move(other._audio_merger))
+ , _shuffler(std::move(other._shuffler))
+ , _delay(std::move(other._delay))
+ , _stream_states(std::move(other._stream_states))
+ , _black(std::move(other._black))
+ , _silent(std::move(other._silent))
+ , _active_texts(std::move(other._active_texts))
+ , _audio_processor(std::move(other._audio_processor))
+ , _playback_length(other._playback_length.load())
+ , _subtitle_alignment(other._subtitle_alignment)
+{
+ connect();
}
-void
-Player::setup_pieces ()
+Player&
+Player::operator=(Player&& other)
{
- boost::mutex::scoped_lock lm (_mutex);
- setup_pieces_unlocked ();
+ if (this == &other) {
+ return *this;
+ }
+
+ _film = std::move(other._film);
+ _playlist = std::move(other._playlist);
+ _suspended = other._suspended.load();
+ _pieces = std::move(other._pieces);
+ _video_container_size = other._video_container_size.load();
+ _black_image = std::move(other._black_image);
+ _ignore_video = other._ignore_video.load();
+ _ignore_audio = other._ignore_audio.load();
+ _ignore_text = other._ignore_text.load();
+ _always_burn_open_subtitles = other._always_burn_open_subtitles.load();
+ _fast = other._fast.load();
+ _tolerant = other._tolerant;
+ _play_referenced = other._play_referenced.load();
+	_next_video_time = other._next_video_time;
+	_next_video_eyes = other._next_video_eyes;
+ _next_audio_time = other._next_audio_time;
+ _dcp_decode_reduction = other._dcp_decode_reduction.load();
+ _last_video = std::move(other._last_video);
+ _audio_merger = std::move(other._audio_merger);
+ _shuffler = std::move(other._shuffler);
+ _delay = std::move(other._delay);
+ _stream_states = std::move(other._stream_states);
+ _black = std::move(other._black);
+ _silent = std::move(other._silent);
+ _active_texts = std::move(other._active_texts);
+ _audio_processor = std::move(other._audio_processor);
+ _playback_length = other._playback_length.load();
+ _subtitle_alignment = other._subtitle_alignment;
+
+ connect();
+
+ return *this;
}
bool
have_video (shared_ptr<const Content> content)
{
- return static_cast<bool>(content->video) && content->video->use();
+ return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
}
bool
have_audio (shared_ptr<const Content> content)
{
- return static_cast<bool>(content->audio);
+ return static_cast<bool>(content->audio) && content->can_be_played();
}
void
-Player::setup_pieces_unlocked ()
+Player::setup_pieces ()
{
- _playback_length = _playlist ? _playlist->length(_film) : _film->length();
+ boost::mutex::scoped_lock lm (_mutex);
auto old_pieces = _pieces;
_pieces.clear ();
- _shuffler.reset (new Shuffler());
- _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
+ _playback_length = _playlist ? _playlist->length(film) : film->length();
+
+ auto playlist_content = playlist()->content();
+ bool const have_threed = std::any_of(
+ playlist_content.begin(),
+ playlist_content.end(),
+ [](shared_ptr<const Content> c) {
+ return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
+ });
+
+
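+	/* A Shuffler is only needed when some content supplies 3D left/right frames, which may arrive out of order */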
+ if (have_threed) {
+ _shuffler.reset(new Shuffler());
+ _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+ }
- for (auto i: playlist()->content()) {
+ for (auto content: playlist()->content()) {
- if (!i->paths_valid ()) {
+ if (!content->paths_valid()) {
continue;
}
- if (_ignore_video && _ignore_audio && i->text.empty()) {
+ if (_ignore_video && _ignore_audio && content->text.empty()) {
/* We're only interested in text and this content has none */
continue;
}
shared_ptr<Decoder> old_decoder;
for (auto j: old_pieces) {
- if (j->content == i) {
+ if (j->content == content) {
old_decoder = j->decoder;
break;
}
}
- auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
+ auto decoder = decoder_factory(film, content, _fast, _tolerant, old_decoder);
DCPOMATIC_ASSERT (decoder);
- FrameRateChange frc (_film, i);
+ FrameRateChange frc(film, content);
if (decoder->video && _ignore_video) {
decoder->video->set_ignore (true);
}
}
- auto piece = make_shared<Piece>(i, decoder, frc);
+ auto piece = make_shared<Piece>(content, decoder, frc);
_pieces.push_back (piece);
if (decoder->video) {
- if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
+ if (have_threed) {
/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
} else {
for (auto i: _pieces) {
if (i->content->audio) {
for (auto j: i->content->audio->streams()) {
- _stream_states[j] = StreamState (i, i->content->position ());
+ _stream_states[j] = StreamState(i);
}
}
}
+ auto ignore_overlap = [](shared_ptr<VideoContent> v) {
+ return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
+ };
+
for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
- if (auto video = (*i)->content->video) {
- if (video->use() && video->frame_type() != VideoFrameType::THREE_D_LEFT && video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
- /* Look for content later in the content list with in-use video that overlaps this */
- auto period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
- auto j = i;
- ++j;
- for (; j != _pieces.end(); ++j) {
- if ((*j)->content->video && (*j)->content->video->use()) {
- (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
- }
+ if (ignore_overlap((*i)->content->video)) {
+ /* Look for content later in the content list with in-use video that overlaps this */
+ auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(film));
+ for (auto j = std::next(i); j != _pieces.end(); ++j) {
+ if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
+ (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(film)).overlap(period);
}
}
}
}
- _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
- _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
+ _black = Empty(film, playlist(), bind(&have_video, _1), _playback_length);
+ _silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);
- _last_video_time = boost::optional<dcpomatic::DCPTime>();
- _last_video_eyes = Eyes::BOTH;
- _last_audio_time = boost::optional<dcpomatic::DCPTime>();
+ _next_video_time = boost::none;
+ _next_video_eyes = Eyes::BOTH;
+ _next_audio_time = boost::none;
}
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
if (property == VideoContentProperty::CROP) {
if (type == ChangeType::DONE) {
- auto const vcs = video_container_size();
boost::mutex::scoped_lock lm (_mutex);
for (auto const& i: _delay) {
- i.first->reset_metadata (_film, vcs);
+ i.first->reset_metadata(film, _video_container_size);
}
}
} else {
void
Player::set_video_container_size (dcp::Size s)
{
- Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::VIDEO_CONTAINER_SIZE);
- {
- boost::mutex::scoped_lock lm (_mutex);
-
- if (s == _video_container_size) {
- lm.unlock ();
- Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
- return;
- }
+ if (s == _video_container_size) {
+ cc.abort();
+ return;
+ }
- _video_container_size = s;
+ _video_container_size = s;
+ {
+ boost::mutex::scoped_lock lm(_black_image_mutex);
_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
_black_image->make_black ();
}
-
- Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
last time we were run.
*/
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
if (p == Film::Property::CONTAINER) {
Change (type, PlayerProperty::FILM_CONTAINER, false);
} else if (p == Film::Property::VIDEO_FRAME_RATE) {
}
Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
} else if (p == Film::Property::AUDIO_PROCESSOR) {
- if (type == ChangeType::DONE && _film->audio_processor ()) {
+ if (type == ChangeType::DONE && film->audio_processor ()) {
boost::mutex::scoped_lock lm (_mutex);
- _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
+ _audio_processor = film->audio_processor()->clone(film->audio_frame_rate());
}
} else if (p == Film::Property::AUDIO_CHANNELS) {
if (type == ChangeType::DONE) {
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
+ boost::mutex::scoped_lock lm(_black_image_mutex);
+
return std::make_shared<PlayerVideo> (
std::make_shared<const RawImageProxy>(_black_image),
Crop(),
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
auto s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(_film), s);
+ s = min (piece->content->length_after_trim(film), s);
s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
auto s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(_film), s);
+ s = min (piece->content->length_after_trim(film), s);
/* See notes in dcp_to_content_video */
- return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
+ return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(film->audio_frame_rate());
}
DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
/* See comment in dcp_to_content_video */
- return DCPTime::from_frames (f, _film->audio_frame_rate())
+ return DCPTime::from_frames(f, film->audio_frame_rate())
- DCPTime (piece->content->trim_start(), piece->frc)
+ piece->content->position();
}
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
auto s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(_film), s);
+ s = min (piece->content->length_after_trim(film), s);
return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
}
-vector<FontData>
+vector<shared_ptr<Font>>
Player::get_subtitle_fonts ()
{
boost::mutex::scoped_lock lm (_mutex);
- vector<FontData> fonts;
- for (auto i: _pieces) {
- /* XXX: things may go wrong if there are duplicate font IDs
- with different font files.
- */
- auto f = i->decoder->fonts ();
- copy (f.begin(), f.end(), back_inserter(fonts));
+ vector<shared_ptr<Font>> fonts;
+ for (auto piece: _pieces) {
+ for (auto text: piece->content->text) {
+ auto text_fonts = text->fonts();
+ copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
+ }
}
return fonts;
void
Player::set_ignore_video ()
{
- boost::mutex::scoped_lock lm (_mutex);
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_VIDEO);
_ignore_video = true;
- setup_pieces_unlocked ();
+ setup_pieces();
}
void
Player::set_ignore_audio ()
{
- boost::mutex::scoped_lock lm (_mutex);
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_AUDIO);
_ignore_audio = true;
- setup_pieces_unlocked ();
+ setup_pieces();
}
void
Player::set_ignore_text ()
{
- boost::mutex::scoped_lock lm (_mutex);
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_TEXT);
_ignore_text = true;
- setup_pieces_unlocked ();
+ setup_pieces();
}
void
Player::set_always_burn_open_subtitles ()
{
- boost::mutex::scoped_lock lm (_mutex);
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES);
_always_burn_open_subtitles = true;
}
void
Player::set_fast ()
{
- boost::mutex::scoped_lock lm (_mutex);
_fast = true;
- setup_pieces_unlocked ();
+ setup_pieces();
}
void
Player::set_play_referenced ()
{
- boost::mutex::scoped_lock lm (_mutex);
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::PLAY_REFERENCED);
_play_referenced = true;
- setup_pieces_unlocked ();
-}
-
-
-static void
-maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
-{
- DCPOMATIC_ASSERT (r);
- r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
- r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
- if (r->actual_duration() > 0) {
- a.push_back (
- ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
- );
- }
-}
-
-
-list<ReferencedReelAsset>
-Player::get_reel_assets ()
-{
- /* Does not require a lock on _mutex as it's only called from DCPEncoder */
-
- list<ReferencedReelAsset> reel_assets;
-
- for (auto i: playlist()->content()) {
- auto j = dynamic_pointer_cast<DCPContent> (i);
- if (!j) {
- continue;
- }
-
- scoped_ptr<DCPDecoder> decoder;
- try {
- decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
- } catch (...) {
- return reel_assets;
- }
-
- DCPOMATIC_ASSERT (j->video_frame_rate ());
- double const cfr = j->video_frame_rate().get();
- Frame const trim_start = j->trim_start().frames_round (cfr);
- Frame const trim_end = j->trim_end().frames_round (cfr);
- int const ffr = _film->video_frame_rate ();
-
- /* position in the asset from the start */
- int64_t offset_from_start = 0;
- /* position in the asset from the end */
- int64_t offset_from_end = 0;
- for (auto k: decoder->reels()) {
- /* Assume that main picture duration is the length of the reel */
- offset_from_end += k->main_picture()->actual_duration();
- }
-
- for (auto k: decoder->reels()) {
-
- /* Assume that main picture duration is the length of the reel */
- int64_t const reel_duration = k->main_picture()->actual_duration();
-
- /* See doc/design/trim_reels.svg */
- Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
- Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
-
- auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
- if (j->reference_video ()) {
- maybe_add_asset (reel_assets, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
- }
-
- if (j->reference_audio ()) {
- maybe_add_asset (reel_assets, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
- }
-
- if (j->reference_text (TextType::OPEN_SUBTITLE)) {
- maybe_add_asset (reel_assets, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
- }
-
- if (j->reference_text (TextType::CLOSED_CAPTION)) {
- for (auto l: k->closed_captions()) {
- maybe_add_asset (reel_assets, l, reel_trim_start, reel_trim_end, from, ffr);
- }
- }
-
- offset_from_start += reel_duration;
- offset_from_end -= reel_duration;
- }
- }
-
- return reel_assets;
+ setup_pieces();
}
return false;
}
- if (_playback_length == DCPTime()) {
+ auto film = _film.lock();
+
+ if (_playback_length.load() == DCPTime() || !film) {
/* Special; just give one black frame */
emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
return true;
}
auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
- if (t > i->content->end(_film)) {
+ if (t > i->content->end(film)) {
i->done = true;
} else {
earliest_content->done = earliest_content->decoder->pass ();
auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
if (dcp && !_play_referenced && dcp->reference_audio()) {
- /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
+ /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
to `hide' the fact that no audio was emitted during the referenced DCP (though
we need to behave as though it was).
*/
- _last_audio_time = dcp->end (_film);
+ _next_audio_time = dcp->end(film);
}
break;
}
{
LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
DCPTimePeriod period (_silent.period_at_position());
- if (_last_audio_time) {
+ if (_next_audio_time) {
/* Sometimes the thing that happened last finishes fractionally before
or after this silence. Bodge the start time of the silence to fix it.
I think this is nothing to worry about since we will just add or
remove a little silence at the end of some content.
*/
- int64_t const error = labs(period.from.get() - _last_audio_time->get());
+ int64_t const error = labs(period.from.get() - _next_audio_time->get());
/* Let's not worry about less than a frame at 24fps */
int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
if (error >= too_much_error) {
- _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
+ film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
}
DCPOMATIC_ASSERT (error < too_much_error);
- period.from = *_last_audio_time;
+ period.from = *_next_audio_time;
}
if (period.duration() > one_video_frame()) {
period.to = period.from + one_video_frame();
using state_pair = std::pair<AudioStreamPtr, StreamState>;
+ /* Find streams that have pushed */
+ std::vector<state_pair> have_pushed;
+ std::copy_if(_stream_states.begin(), _stream_states.end(), std::back_inserter(have_pushed), [](state_pair const& a) { return static_cast<bool>(a.second.last_push_end); });
+
/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
auto latest_last_push_end = std::max_element(
- _stream_states.begin(),
- _stream_states.end(),
- [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
+ have_pushed.begin(),
+ have_pushed.end(),
+ [](state_pair const& a, state_pair const& b) { return a.second.last_push_end.get() < b.second.last_push_end.get(); }
);
- if (latest_last_push_end != _stream_states.end()) {
- LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
+ if (latest_last_push_end != have_pushed.end()) {
+ LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end.get()));
}
/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
std::map<AudioStreamPtr, StreamState> alive_stream_states;
for (auto const& i: _stream_states) {
- if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
+ if (!i.second.last_push_end || (latest_last_push_end->second.last_push_end.get() - i.second.last_push_end.get()) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
alive_stream_states.insert(i);
} else {
LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
}
}
- auto pull_to = _playback_length;
+ auto pull_to = _playback_length.load();
for (auto const& i: alive_stream_states) {
- if (!i.second.piece->done && i.second.last_push_end < pull_to) {
- pull_to = i.second.last_push_end;
+ auto position = i.second.last_push_end.get_value_or(i.second.piece->content->position());
+ if (!i.second.piece->done && position < pull_to) {
+ pull_to = position;
}
}
if (!_silent.done() && _silent.position() < pull_to) {
LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
auto audio = _audio_merger.pull (pull_to);
for (auto i = audio.begin(); i != audio.end(); ++i) {
- if (_last_audio_time && i->second < *_last_audio_time) {
+ if (_next_audio_time && i->second < *_next_audio_time) {
/* This new data comes before the last we emitted (or the last seek); discard it */
- auto cut = discard_audio (i->first, i->second, *_last_audio_time);
+ auto cut = discard_audio (i->first, i->second, *_next_audio_time);
if (!cut.first) {
continue;
}
*i = cut;
- } else if (_last_audio_time && i->second > *_last_audio_time) {
+ } else if (_next_audio_time && i->second > *_next_audio_time) {
/* There's a gap between this data and the last we emitted; fill with silence */
- fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
+ fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
}
emit_audio (i->first, i->second);
}
if (done) {
- _shuffler->flush ();
+ if (_shuffler) {
+ _shuffler->flush ();
+ }
for (auto const& i: _delay) {
do_emit_video(i.first, i.second);
}
+
+ /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
+ * However, if we have L and R video files, and one is shorter than the other,
+ * the fill code in ::video mostly takes care of filling in the gaps.
+	   But since it fills at the point when it knows there is more video coming
+ * at time t (so it should fill any gap up to t) it can't do anything right at the
+ * end. This is particularly bad news if the last frame emitted is a LEFT
+ * eye, as the MXF writer will complain about the 3D sequence being wrong.
+	   Here's a hack to work around that particular case.
+ */
+ if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
+ do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
+ }
}
return done;
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
+ auto film = _film.lock();
+ if (!film) {
+ return {};
+ }
+
list<PositionImage> captions;
- int const vfr = _film->video_frame_rate();
+ int const vfr = film->video_frame_rate();
for (
auto j:
- _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
+ _active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
) {
/* Bitmap subtitles */
}
/* i.image will already have been scaled to fit _video_container_size */
- dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
+ dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
captions.push_back (
PositionImage (
i.image,
Position<int> (
- lrint(_video_container_size.width * i.rectangle.x),
- lrint(_video_container_size.height * i.rectangle.y)
+ lrint(_video_container_size.load().width * i.rectangle.x),
+ lrint(_video_container_size.load().height * i.rectangle.y)
)
)
);
/* String subtitles (rendered to an image) */
if (!j.string.empty()) {
- auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
+ auto s = render_text(j.string, _video_container_size, time, vfr);
copy (s.begin(), s.end(), back_inserter (captions));
}
}
}
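+
+/* Return the eye which should follow `e': RIGHT comes after LEFT, and LEFT after anything else */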
+static
+Eyes
+increment_eyes (Eyes e)
+{
+ if (e == Eyes::LEFT) {
+ return Eyes::RIGHT;
+ }
+
+ return Eyes::LEFT;
+}
+
+
void
-Player::video (weak_ptr<Piece> wp, ContentVideo video)
+Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
if (_suspended) {
return;
}
- auto piece = wp.lock ();
+ auto piece = weak_piece.lock ();
if (!piece) {
return;
}
return;
}
- FrameRateChange frc (_film, piece->content);
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
+ FrameRateChange frc(film, piece->content);
if (frc.skip && (video.frame % 2) == 1) {
return;
}
if it's after the content's period here as in that case we still need to fill any gap between
`now' and the end of the content's period.
*/
- if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
+ if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
return;
}
/* Fill gaps that we discover now that we have some video which needs to be emitted.
This is where we need to fill to.
*/
- DCPTime fill_to = min (time, piece->content->end(_film));
+ DCPTime fill_to = min(time, piece->content->end(film));
- if (_last_video_time) {
- DCPTime fill_from = max (*_last_video_time, piece->content->position());
+ if (_next_video_time) {
+ DCPTime fill_from = max (*_next_video_time, piece->content->position());
/* Fill if we have more than half a frame to do */
if ((fill_to - fill_from) > one_video_frame() / 2) {
- auto last = _last_video.find (wp);
- if (_film->three_d()) {
+ auto last = _last_video.find (weak_piece);
+ if (film->three_d()) {
auto fill_to_eyes = video.eyes;
if (fill_to_eyes == Eyes::BOTH) {
fill_to_eyes = Eyes::LEFT;
}
- if (fill_to == piece->content->end(_film)) {
+ if (fill_to == piece->content->end(film)) {
/* Don't fill after the end of the content */
fill_to_eyes = Eyes::LEFT;
}
auto j = fill_from;
- auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
+ auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
if (eyes == Eyes::BOTH) {
eyes = Eyes::LEFT;
}
auto const content_video = piece->content->video;
- _last_video[wp] = std::make_shared<PlayerVideo>(
+ _last_video[weak_piece] = std::make_shared<PlayerVideo>(
video.image,
content_video->actual_crop(),
- content_video->fade (_film, video.frame),
+ content_video->fade(film, video.frame),
scale_for_display(
- content_video->scaled_size(_film->frame_size()),
+ content_video->scaled_size(film->frame_size()),
_video_container_size,
- _film->frame_size(),
+ film->frame_size(),
content_video->pixel_quanta()
),
_video_container_size,
DCPTime t = time;
for (int i = 0; i < frc.repeat; ++i) {
- if (t < piece->content->end(_film)) {
- emit_video (_last_video[wp], t);
+ if (t < piece->content->end(film)) {
+ emit_video (_last_video[weak_piece], t);
}
t += one_video_frame ();
}
void
-Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
+Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
if (_suspended) {
return;
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
- auto piece = wp.lock ();
+ auto piece = weak_piece.lock ();
if (!piece) {
return;
}
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
auto content = piece->content->audio;
DCPOMATIC_ASSERT (content);
- int const rfr = content->resampled_frame_rate (_film);
+ int const rfr = content->resampled_frame_rate(film);
/* Compute time in the DCP */
auto time = resampled_audio_to_dcp (piece, content_audio.frame);
- LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
/* And the end of this block in the DCP */
auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
+ LOG_DEBUG_PLAYER("Received audio frame %1 covering %2 to %3 (%4)", content_audio.frame, to_string(time), to_string(end), piece->content->path(0).filename());
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
}
content_audio.audio = cut.first;
time = cut.second;
- } else if (time > piece->content->end(_film)) {
+ } else if (time > piece->content->end(film)) {
/* Discard it all */
return;
- } else if (end > piece->content->end(_film)) {
- Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
+ } else if (end > piece->content->end(film)) {
+ Frame const remaining_frames = DCPTime(piece->content->end(film) - time).frames_round(rfr);
if (remaining_frames == 0) {
return;
}
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
- /* Gain */
-
- if (content->gain() != 0) {
- auto gain = make_shared<AudioBuffers>(content_audio.audio);
- gain->apply_gain (content->gain());
- content_audio.audio = gain;
+ /* Gain and fade */
+
+ auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
+ if (content->gain() != 0 || !fade_coeffs.empty()) {
+ auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
+ if (!fade_coeffs.empty()) {
+ /* Apply both fade and gain */
+ DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
+ auto const channels = gain_buffers->channels();
+ auto const frames = fade_coeffs.size();
+ auto data = gain_buffers->data();
+ auto const gain = db_to_linear (content->gain());
+ for (auto channel = 0; channel < channels; ++channel) {
+ for (auto frame = 0U; frame < frames; ++frame) {
+ data[channel][frame] *= gain * fade_coeffs[frame];
+ }
+ }
+ } else {
+ /* Just apply gain */
+ gain_buffers->apply_gain (content->gain());
+ }
+ content_audio.audio = gain_buffers;
}
/* Remap */
- content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
+ content_audio.audio = remap(content_audio.audio, film->audio_channels(), stream->mapping());
/* Process */
if (_audio_processor) {
- content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
+ content_audio.audio = _audio_processor->run(content_audio.audio, film->audio_channels());
}
/* Push */
_audio_merger.push (content_audio.audio, time);
DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
- _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
+ _stream_states[stream].last_push_end = time + DCPTime::from_frames(content_audio.audio->frames(), film->audio_frame_rate());
}
void
-Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
+Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
{
if (_suspended) {
return;
}
- auto piece = wp.lock ();
- auto text = wc.lock ();
- if (!piece || !text) {
+ auto piece = weak_piece.lock ();
+ auto content = weak_content.lock ();
+ if (!piece || !content) {
return;
}
- /* Apply content's subtitle offsets */
- subtitle.sub.rectangle.x += text->x_offset ();
- subtitle.sub.rectangle.y += text->y_offset ();
+ PlayerText ps;
+ for (auto& sub: subtitle.subs)
+ {
+ /* Apply content's subtitle offsets */
+ sub.rectangle.x += content->x_offset ();
+ sub.rectangle.y += content->y_offset ();
- /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
- subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
- subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
+ /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
+ sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
+ sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
- /* Apply content's subtitle scale */
- subtitle.sub.rectangle.width *= text->x_scale ();
- subtitle.sub.rectangle.height *= text->y_scale ();
+ /* Apply content's subtitle scale */
+ sub.rectangle.width *= content->x_scale ();
+ sub.rectangle.height *= content->y_scale ();
- PlayerText ps;
- auto image = subtitle.sub.image;
+ auto image = sub.image;
- /* We will scale the subtitle up to fit _video_container_size */
- int const width = subtitle.sub.rectangle.width * _video_container_size.width;
- int const height = subtitle.sub.rectangle.height * _video_container_size.height;
- if (width == 0 || height == 0) {
- return;
- }
+ /* We will scale the subtitle up to fit _video_container_size */
+ int const width = sub.rectangle.width * _video_container_size.load().width;
+ int const height = sub.rectangle.height * _video_container_size.load().height;
+ if (width == 0 || height == 0) {
+ return;
+ }
- dcp::Size scaled_size (width, height);
- ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), subtitle.sub.rectangle));
- DCPTime from (content_time_to_dcp (piece, subtitle.from()));
+ dcp::Size scaled_size (width, height);
+ ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
+ }
- _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
+ DCPTime from(content_time_to_dcp(piece, subtitle.from()));
+ _active_texts[content->type()].add_from(weak_content, ps, from);
}
void
-Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
+Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
{
if (_suspended) {
return;
}
- auto piece = wp.lock ();
- auto text = wc.lock ();
- if (!piece || !text) {
+ auto piece = weak_piece.lock ();
+ auto content = weak_content.lock ();
+ auto film = _film.lock();
+ if (!piece || !content || !film) {
return;
}
PlayerText ps;
DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
- if (from > piece->content->end(_film)) {
+ if (from > piece->content->end(film)) {
return;
}
for (auto s: subtitle.subs) {
- s.set_h_position (s.h_position() + text->x_offset ());
- s.set_v_position (s.v_position() + text->y_offset ());
- float const xs = text->x_scale();
- float const ys = text->y_scale();
+ s.set_h_position (s.h_position() + content->x_offset());
+ s.set_v_position (s.v_position() + content->y_offset());
+ float const xs = content->x_scale();
+ float const ys = content->y_scale();
float size = s.size();
/* Adjust size to express the common part of the scaling;
}
s.set_in (dcp::Time(from.seconds(), 1000));
- ps.string.push_back (StringText (s, text->outline_width()));
- ps.add_fonts (text->fonts ());
+ ps.string.push_back (s);
}
- _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
+ _active_texts[content->type()].add_from(weak_content, ps, from);
}
void
-Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
+Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
{
if (_suspended) {
return;
}
- auto text = wc.lock ();
- if (!text) {
+ auto content = weak_content.lock ();
+ if (!content) {
return;
}
- if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
+ if (!_active_texts[content->type()].have(weak_content)) {
return;
}
- shared_ptr<Piece> piece = wp.lock ();
- if (!piece) {
+ auto piece = weak_piece.lock ();
+ auto film = _film.lock();
+ if (!piece || !film) {
return;
}
DCPTime const dcp_to = content_time_to_dcp (piece, to);
- if (dcp_to > piece->content->end(_film)) {
+ if (dcp_to > piece->content->end(film)) {
return;
}
- auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
+ auto from = _active_texts[content->type()].add_to(weak_content, dcp_to);
- bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
- if (text->use() && !always && !text->burn()) {
- Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
+ bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
+ if (content->use() && !always && !content->burn()) {
+ Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
}
}
return;
}
+ auto film = _film.lock();
+ if (!film) {
+ return;
+ }
+
if (_shuffler) {
_shuffler->clear ();
}
}
_audio_merger.clear ();
- for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
- _active_texts[i].clear ();
- }
+ std::for_each(_active_texts.begin(), _active_texts.end(), [](ActiveText& a) { a.clear(); });
for (auto i: _pieces) {
if (time < i->content->position()) {
*/
i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
i->done = false;
- } else if (i->content->position() <= time && time < i->content->end(_film)) {
+ } else if (i->content->position() <= time && time < i->content->end(film)) {
/* During; seek to position */
i->decoder->seek (dcp_to_content_time (i, time), accurate);
i->done = false;
}
if (accurate) {
- _last_video_time = time;
- _last_video_eyes = Eyes::LEFT;
- _last_audio_time = time;
+ _next_video_time = time;
+ _next_video_eyes = Eyes::LEFT;
+ _next_audio_time = time;
} else {
- _last_video_time = optional<DCPTime>();
- _last_video_eyes = optional<Eyes>();
- _last_audio_time = optional<DCPTime>();
+ _next_video_time = boost::none;
+ _next_video_eyes = boost::none;
+ _next_audio_time = boost::none;
}
_black.set_position (time);
_silent.set_position (time);
_last_video.clear ();
+
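+	/* Forget the last audio push times, since they are no longer valid after the seek */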
+ for (auto& state: _stream_states) {
+ state.second.last_push_end = boost::none;
+ }
}
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
- if (!_film->three_d()) {
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
+ if (!film->three_d()) {
if (pv->eyes() == Eyes::LEFT) {
/* Use left-eye images for both eyes... */
pv->set_eyes (Eyes::BOTH);
}
}
- /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
+ /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
player before the video that requires them.
*/
_delay.push_back (make_pair (pv, time));
if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
- _last_video_time = time + one_video_frame();
+ _next_video_time = time + one_video_frame();
}
- _last_video_eyes = increment_eyes (pv->eyes());
+ _next_video_eyes = increment_eyes (pv->eyes());
if (_delay.size() < 3) {
return;
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
- for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
- _active_texts[i].clear_before (time);
- }
+ std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
}
auto subtitles = open_subtitles_for_frame (time);
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
/* Log if the assert below is about to fail */
- if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
- _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
+ if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
+ film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
}
/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
- DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
- Audio (data, time, _film->audio_frame_rate());
- _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
+ DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
+ Audio(data, time, film->audio_frame_rate());
+ _next_audio_time = time + DCPTime::from_frames(data->frames(), film->audio_frame_rate());
}
void
Player::fill_audio (DCPTimePeriod period)
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
if (period.from == period.to) {
return;
}
DCPTime t = period.from;
while (t < period.to) {
DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
- Frame const samples = block.frames_round(_film->audio_frame_rate());
+ Frame const samples = block.frames_round(film->audio_frame_rate());
if (samples) {
- auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
+ auto silence = make_shared<AudioBuffers>(film->audio_channels(), samples);
silence->make_silent ();
emit_audio (silence, t);
}
DCPTime
Player::one_video_frame () const
{
- return DCPTime::from_frames (1, _film->video_frame_rate ());
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
+ return DCPTime::from_frames(1, film->video_frame_rate ());
}
pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
{
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
auto const discard_time = discard_to - time;
- auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
+ auto const discard_frames = discard_time.frames_round(film->audio_frame_rate());
auto remaining_frames = audio->frames() - discard_frames;
if (remaining_frames <= 0) {
return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
- Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
+ ChangeSignaller<Player, int> cc(this, PlayerProperty::DCP_DECODE_REDUCTION);
- {
- boost::mutex::scoped_lock lm (_mutex);
-
- if (reduction == _dcp_decode_reduction) {
- lm.unlock ();
- Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
- return;
- }
-
- _dcp_decode_reduction = reduction;
- setup_pieces_unlocked ();
+ if (reduction == _dcp_decode_reduction.load()) {
+ cc.abort();
+ return;
}
- Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
+ _dcp_decode_reduction = reduction;
+ setup_pieces();
}
optional<DCPTime>
-Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
+Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
{
boost::mutex::scoped_lock lm (_mutex);
}
+optional<ContentTime>
+Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
+{
+ boost::mutex::scoped_lock lm (_mutex);
+
+ for (auto i: _pieces) {
+ if (i->content == content) {
+ return dcp_to_content_time (i, t);
+ }
+ }
+
+ /* We couldn't find this content; perhaps things are being changed over */
+ return {};
+}
+
+
shared_ptr<const Playlist>
Player::playlist () const
{
- return _playlist ? _playlist : _film->playlist();
+ auto film = _film.lock();
+ if (!film) {
+ return {};
+ }
+
+ return _playlist ? _playlist : film->playlist();
}
void
-Player::atmos (weak_ptr<Piece>, ContentAtmos data)
+Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
{
if (_suspended) {
return;
}
- Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
+ auto film = _film.lock();
+ DCPOMATIC_ASSERT(film);
+
+ auto piece = weak_piece.lock ();
+ DCPOMATIC_ASSERT (piece);
+
+ auto const vfr = film->video_frame_rate();
+
+ DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
+ if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(film))) {
+ return;
+ }
+
+ Atmos (data.data, dcp_time, data.metadata);
+}
+
+
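+/* Emit the Change signal for a property; used by ChangeSignaller to announce PENDING, DONE and CANCELLED changes */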
+void
+Player::signal_change(ChangeType type, int property)
+{
+ Change(type, property, false);
}