/*
- Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
- This program is free software; you can redistribute it and/or modify
+ This file is part of DCP-o-matic.
+
+ DCP-o-matic is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
- This program is distributed in the hope that it will be useful,
+ DCP-o-matic is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "player.h"
-#include "film.h"
-#include "ffmpeg_decoder.h"
+
+#include "atmos_decoder.h"
#include "audio_buffers.h"
+#include "audio_content.h"
+#include "audio_decoder.h"
+#include "audio_processor.h"
+#include "compose.hpp"
+#include "config.h"
+#include "content_audio.h"
+#include "content_video.h"
+#include "dcp_content.h"
+#include "dcp_decoder.h"
+#include "dcpomatic_log.h"
+#include "decoder.h"
+#include "decoder_factory.h"
#include "ffmpeg_content.h"
+#include "film.h"
+#include "frame_rate_change.h"
+#include "image.h"
#include "image_decoder.h"
-#include "image_content.h"
-#include "sndfile_decoder.h"
-#include "sndfile_content.h"
-#include "subtitle_content.h"
-#include "text_subtitle_decoder.h"
-#include "text_subtitle_content.h"
-#include "dcp_content.h"
#include "job.h"
-#include "image.h"
-#include "raw_image_proxy.h"
-#include "ratio.h"
#include "log.h"
-#include "render_subtitles.h"
-#include "config.h"
-#include "content_video.h"
+#include "maths_util.h"
+#include "piece.h"
+#include "player.h"
#include "player_video.h"
-#include "frame_rate_change.h"
-#include "dcp_content.h"
-#include "dcp_decoder.h"
-#include "dcp_subtitle_content.h"
-#include "dcp_subtitle_decoder.h"
-#include "audio_processor.h"
#include "playlist.h"
+#include "ratio.h"
+#include "raw_image_proxy.h"
#include "referenced_reel_asset.h"
+#include "render_text.h"
+#include "shuffler.h"
+#include "text_content.h"
+#include "text_decoder.h"
+#include "timer.h"
+#include "video_decoder.h"
#include <dcp/reel.h>
+#include <dcp/reel_closed_caption_asset.h>
+#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
-#include <dcp/reel_picture_asset.h>
-#include <boost/foreach.hpp>
-#include <stdint.h>
#include <algorithm>
#include <iostream>
+#include <stdint.h>
#include "i18n.h"
-#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
-using std::list;
+using std::copy;
using std::cout;
-using std::min;
+using std::dynamic_pointer_cast;
+using std::list;
+using std::make_pair;
+using std::make_shared;
using std::max;
using std::min;
-using std::vector;
using std::pair;
-using std::map;
-using std::make_pair;
-using std::copy;
-using boost::shared_ptr;
-using boost::weak_ptr;
-using boost::dynamic_pointer_cast;
+using std::shared_ptr;
+using std::vector;
+using std::weak_ptr;
using boost::optional;
using boost::scoped_ptr;
+#if BOOST_VERSION >= 106100
+using namespace boost::placeholders;
+#endif
+using namespace dcpomatic;
+
-Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
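+/* Identifiers for the player properties announced via the Change signal; numbered
+   from 700, presumably to keep them distinct from Film and Content property IDs.
+*/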
+int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
+int const PlayerProperty::PLAYLIST = 701;
+int const PlayerProperty::FILM_CONTAINER = 702;
+int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
+int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
+int const PlayerProperty::PLAYBACK_LENGTH = 705;
+
+
+Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
+ : _film (film)
+ , _suspended (0)
+ , _tolerant (film->tolerant())
+ , _audio_merger (_film->audio_frame_rate())
+ , _subtitle_alignment (subtitle_alignment)
+{
+ construct ();
+}
+
+
+Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
: _film (film)
- , _playlist (playlist)
- , _have_valid_pieces (false)
- , _ignore_video (false)
- , _ignore_audio (false)
- , _always_burn_subtitles (false)
- , _fast (false)
- , _play_referenced (false)
-{
- _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
- _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
- _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
+ , _playlist (playlist_)
+ , _suspended (0)
+ , _tolerant (film->tolerant())
+ , _audio_merger (_film->audio_frame_rate())
+{
+ construct ();
+}
+
+
+void
+Player::construct ()
+{
+ _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
+	/* The butler must hear about this first, so since we are proxying this signal
+	   through to the butler we must be connected first.
+	*/
+ _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
+ _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
set_video_container_size (_film->frame_size ());
- film_changed (Film::AUDIO_PROCESSOR);
+ film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
+
+ setup_pieces ();
+ seek (DCPTime (), true);
}
+
void
Player::setup_pieces ()
{
- list<shared_ptr<Piece> > old_pieces = _pieces;
+ boost::mutex::scoped_lock lm (_mutex);
+ setup_pieces_unlocked ();
+}
+
+
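+/* Predicate passed to Empty to find periods in which some piece provides video;
+   see the construction of _black in setup_pieces_unlocked().
+*/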
+bool
+have_video (shared_ptr<const Content> content)
+{
+ return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
+}
+
+
+bool
+have_audio (shared_ptr<const Content> content)
+{
+ return static_cast<bool>(content->audio);
+}
+
+
+void
+Player::setup_pieces_unlocked ()
+{
+ _playback_length = _playlist ? _playlist->length(_film) : _film->length();
+
+ auto old_pieces = _pieces;
_pieces.clear ();
- BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+ auto playlist_content = playlist()->content();
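+	/* 3D content whose left and right eyes come as separate pieces can deliver its
+	   video out of order, so note whether any such content is present (see the
+	   Shuffler below).
+	*/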
+ bool const have_threed = std::any_of(
+ playlist_content.begin(),
+ playlist_content.end(),
+ [](shared_ptr<const Content> c) {
+ return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
+ });
+
+ if (have_threed) {
+ _shuffler.reset(new Shuffler());
+ _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+ }
+
+ for (auto i: playlist()->content()) {
if (!i->paths_valid ()) {
continue;
}
- shared_ptr<Decoder> decoder;
- optional<FrameRateChange> frc;
-
- /* FFmpeg */
- shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (i);
- if (fc) {
- decoder.reset (new FFmpegDecoder (fc, _film->log(), _fast));
- frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
+ if (_ignore_video && _ignore_audio && i->text.empty()) {
+ /* We're only interested in text and this content has none */
+ continue;
}
- shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
- if (dc) {
- decoder.reset (new DCPDecoder (dc, _fast));
- frc = FrameRateChange (dc->video_frame_rate(), _film->video_frame_rate());
+ shared_ptr<Decoder> old_decoder;
+ for (auto j: old_pieces) {
+ if (j->content == i) {
+ old_decoder = j->decoder;
+ break;
+ }
}
- /* ImageContent */
- shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (i);
- if (ic) {
- /* See if we can re-use an old ImageDecoder */
- for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
- shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
- if (imd && imd->content() == ic) {
- decoder = imd;
- }
- }
+ auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
+ DCPOMATIC_ASSERT (decoder);
- if (!decoder) {
- decoder.reset (new ImageDecoder (ic));
- }
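+		/* frc describes the skip/repeat needed to map this content's frame rate to the film's */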
+ FrameRateChange frc (_film, i);
- frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
+ if (decoder->video && _ignore_video) {
+ decoder->video->set_ignore (true);
}
- /* SndfileContent */
- shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (i);
- if (sc) {
- decoder.reset (new SndfileDecoder (sc, _fast));
+ if (decoder->audio && _ignore_audio) {
+ decoder->audio->set_ignore (true);
+ }
- /* Work out a FrameRateChange for the best overlap video for this content */
- DCPTime best_overlap_t;
- shared_ptr<VideoContent> best_overlap;
- BOOST_FOREACH (shared_ptr<Content> j, _playlist->content ()) {
- shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (j);
- if (!vc) {
- continue;
- }
+ if (_ignore_text) {
+			for (auto text: decoder->text) {
+				text->set_ignore (true);
+			}
+ }
- DCPTime const overlap = min (vc->end(), i->end()) - max (vc->position(), i->position());
- if (overlap > best_overlap_t) {
- best_overlap = vc;
- best_overlap_t = overlap;
- }
+ auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
+ if (dcp) {
+ dcp->set_decode_referenced (_play_referenced);
+ if (_play_referenced) {
+ dcp->set_forced_reduction (_dcp_decode_reduction);
}
+ }
+
+ auto piece = make_shared<Piece>(i, decoder, frc);
+ _pieces.push_back (piece);
- if (best_overlap) {
- frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
+ if (decoder->video) {
+ if (have_threed) {
+ /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
+ decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
} else {
- /* No video overlap; e.g. if the DCP is just audio */
- frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
+ decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
}
}
- /* It's questionable whether subtitle content should have a video frame rate; perhaps
- it should be assumed that any subtitle content has been prepared at the same rate
- as simultaneous video content (like we do with audio).
- */
-
- /* TextSubtitleContent */
- shared_ptr<const TextSubtitleContent> rc = dynamic_pointer_cast<const TextSubtitleContent> (i);
- if (rc) {
- decoder.reset (new TextSubtitleDecoder (rc));
- frc = FrameRateChange (rc->subtitle_video_frame_rate(), _film->video_frame_rate());
+ if (decoder->audio) {
+ decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
}
- /* DCPSubtitleContent */
- shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (i);
- if (dsc) {
- decoder.reset (new DCPSubtitleDecoder (dsc));
- frc = FrameRateChange (dsc->subtitle_video_frame_rate(), _film->video_frame_rate());
+ auto j = decoder->text.begin();
+
+ while (j != decoder->text.end()) {
+ (*j)->BitmapStart.connect (
+ bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
+ );
+ (*j)->PlainStart.connect (
+ bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
+ );
+ (*j)->Stop.connect (
+ bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
+ );
+
+ ++j;
}
- shared_ptr<VideoDecoder> vd = dynamic_pointer_cast<VideoDecoder> (decoder);
- if (vd && _ignore_video) {
- vd->set_ignore_video ();
+ if (decoder->atmos) {
+ decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
}
+ }
- shared_ptr<AudioDecoder> ad = dynamic_pointer_cast<AudioDecoder> (decoder);
- if (ad && _ignore_audio) {
- ad->set_ignore_audio ();
+ _stream_states.clear ();
+ for (auto i: _pieces) {
+ if (i->content->audio) {
+ for (auto j: i->content->audio->streams()) {
+ _stream_states[j] = StreamState (i, i->content->position ());
+ }
}
+ }
- _pieces.push_back (shared_ptr<Piece> (new Piece (i, decoder, frc.get ())));
+ auto ignore_overlap = [](shared_ptr<VideoContent> v) {
+ return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
+ };
+
+ for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
+ if (ignore_overlap((*i)->content->video)) {
+ /* Look for content later in the content list with in-use video that overlaps this */
+ auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
+ for (auto j = std::next(i); j != _pieces.end(); ++j) {
+ if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
+ (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
+ }
+ }
+ }
}
- _have_valid_pieces = true;
+ _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
+ _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
+
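+	/* Forget where we had got to; the next seek() will set these up again */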
+ _next_video_time = boost::none;
+ _next_video_eyes = Eyes::BOTH;
+ _next_audio_time = boost::none;
}
+
void
-Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
+Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
- shared_ptr<Content> c = w.lock ();
- if (!c) {
- return;
+ if (property == VideoContentProperty::CROP) {
+ if (type == ChangeType::DONE) {
+ auto const vcs = video_container_size();
+ boost::mutex::scoped_lock lm (_mutex);
+ for (auto const& i: _delay) {
+ i.first->reset_metadata (_film, vcs);
+ }
+ }
+ } else {
+ if (type == ChangeType::PENDING) {
+ /* The player content is probably about to change, so we can't carry on
+ until that has happened and we've rebuilt our pieces. Stop pass()
+ and seek() from working until then.
+ */
+ ++_suspended;
+ } else if (type == ChangeType::DONE) {
+ /* A change in our content has gone through. Re-build our pieces. */
+ setup_pieces ();
+ --_suspended;
+ } else if (type == ChangeType::CANCELLED) {
+ --_suspended;
+ }
}
- if (
- property == ContentProperty::POSITION ||
- property == ContentProperty::LENGTH ||
- property == ContentProperty::TRIM_START ||
- property == ContentProperty::TRIM_END ||
- property == ContentProperty::PATH ||
- property == VideoContentProperty::VIDEO_FRAME_TYPE ||
- property == DCPContentProperty::CAN_BE_PLAYED ||
- property == TextSubtitleContentProperty::TEXT_SUBTITLE_COLOUR ||
- property == TextSubtitleContentProperty::TEXT_SUBTITLE_OUTLINE ||
- property == TextSubtitleContentProperty::TEXT_SUBTITLE_OUTLINE_COLOUR ||
- property == FFmpegContentProperty::SUBTITLE_STREAM
- ) {
-
- _have_valid_pieces = false;
- Changed (frequent);
-
- } else if (
- property == SubtitleContentProperty::USE_SUBTITLES ||
- property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
- property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
- property == SubtitleContentProperty::SUBTITLE_X_SCALE ||
- property == SubtitleContentProperty::SUBTITLE_Y_SCALE ||
- property == SubtitleContentProperty::FONTS ||
- property == VideoContentProperty::VIDEO_CROP ||
- property == VideoContentProperty::VIDEO_SCALE ||
- property == VideoContentProperty::VIDEO_FRAME_RATE ||
- property == VideoContentProperty::VIDEO_FADE_IN ||
- property == VideoContentProperty::VIDEO_FADE_OUT ||
- property == VideoContentProperty::COLOUR_CONVERSION
- ) {
-
- Changed (frequent);
- }
+ Change (type, property, frequent);
}
+
void
Player::set_video_container_size (dcp::Size s)
{
- _video_container_size = s;
+ Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+
+ if (s == _video_container_size) {
+ lm.unlock ();
+ Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+ return;
+ }
+
+ _video_container_size = s;
+
+ _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
+ _black_image->make_black ();
+ }
- _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
- _black_image->make_black ();
+ Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
+
void
-Player::playlist_changed ()
+Player::playlist_change (ChangeType type)
{
- _have_valid_pieces = false;
- Changed (false);
+ if (type == ChangeType::DONE) {
+ setup_pieces ();
+ }
+ Change (type, PlayerProperty::PLAYLIST, false);
}
+
void
-Player::film_changed (Film::Property p)
+Player::film_change (ChangeType type, Film::Property p)
{
/* Here we should notice Film properties that affect our output, and
alert listeners that our output now would be different to how it was
last time we were run.
*/
- if (p == Film::CONTAINER) {
- Changed (false);
- } else if (p == Film::VIDEO_FRAME_RATE) {
+ if (p == Film::Property::CONTAINER) {
+ Change (type, PlayerProperty::FILM_CONTAINER, false);
+ } else if (p == Film::Property::VIDEO_FRAME_RATE) {
/* Pieces contain a FrameRateChange which contains the DCP frame rate,
so we need new pieces here.
*/
- _have_valid_pieces = false;
- Changed (false);
- } else if (p == Film::AUDIO_PROCESSOR) {
- if (_film->audio_processor ()) {
+ if (type == ChangeType::DONE) {
+ setup_pieces ();
+ }
+ Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
+ } else if (p == Film::Property::AUDIO_PROCESSOR) {
+ if (type == ChangeType::DONE && _film->audio_processor ()) {
+ boost::mutex::scoped_lock lm (_mutex);
_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
}
- }
-}
-
-list<PositionImage>
-Player::transform_image_subtitles (list<ImageSubtitle> subs) const
-{
- list<PositionImage> all;
-
- for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
- if (!i->image) {
- continue;
+ } else if (p == Film::Property::AUDIO_CHANNELS) {
+ if (type == ChangeType::DONE) {
+ boost::mutex::scoped_lock lm (_mutex);
+ _audio_merger.clear ();
}
-
- /* We will scale the subtitle up to fit _video_container_size */
- dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
-
- /* Then we need a corrective translation, consisting of two parts:
- *
- * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
- * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
- *
- * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
- * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
- * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
- *
- * Combining these two translations gives these expressions.
- */
-
- all.push_back (
- PositionImage (
- i->image->scale (
- scaled_size,
- dcp::YUV_TO_RGB_REC601,
- i->image->pixel_format (),
- true,
- _fast
- ),
- Position<int> (
- lrint (_video_container_size.width * i->rectangle.x),
- lrint (_video_container_size.height * i->rectangle.y)
- )
- )
- );
}
-
- return all;
}
-shared_ptr<PlayerVideo>
-Player::black_player_video_frame (DCPTime time) const
-{
- return shared_ptr<PlayerVideo> (
- new PlayerVideo (
- shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
- time,
- Crop (),
- optional<double> (),
- _video_container_size,
- _video_container_size,
- EYES_BOTH,
- PART_WHOLE,
- PresetColourConversion::all().front().conversion
- )
- );
-}
-/** @return All PlayerVideos at the given time. There may be none if the content
- * at `time' is a DCP which we are passing through (i.e. referring to by reference)
- * or 2 if we have 3D.
- */
-list<shared_ptr<PlayerVideo> >
-Player::get_video (DCPTime time, bool accurate)
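+/** @return A black frame of the container size for the given eyes, used to fill periods with no video */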
+shared_ptr<PlayerVideo>
+Player::black_player_video_frame (Eyes eyes) const
{
- if (!_have_valid_pieces) {
- setup_pieces ();
- }
-
- /* Find subtitles for possible burn-in */
-
- PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true, accurate);
-
- list<PositionImage> sub_images;
-
- /* Image subtitles */
- list<PositionImage> c = transform_image_subtitles (ps.image);
- copy (c.begin(), c.end(), back_inserter (sub_images));
-
- /* Text subtitles (rendered to an image) */
- if (!ps.text.empty ()) {
- list<PositionImage> s = render_subtitles (ps.text, ps.fonts, _video_container_size);
- copy (s.begin (), s.end (), back_inserter (sub_images));
- }
-
- optional<PositionImage> subtitles;
- if (!sub_images.empty ()) {
- subtitles = merge (sub_images);
- }
-
- /* Find pieces containing video which is happening now */
-
- list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
- time,
- time + DCPTime::from_frames (1, _film->video_frame_rate ())
- );
-
- list<shared_ptr<PlayerVideo> > pvf;
-
- if (ov.empty ()) {
- /* No video content at this time */
- pvf.push_back (black_player_video_frame (time));
- } else {
- /* Some video content at this time */
- shared_ptr<Piece> last = *(ov.rbegin ());
- VideoFrameType const last_type = dynamic_pointer_cast<VideoContent> (last->content)->video_frame_type ();
-
- /* Get video from appropriate piece(s) */
- BOOST_FOREACH (shared_ptr<Piece> piece, ov) {
-
- shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
- DCPOMATIC_ASSERT (decoder);
- shared_ptr<VideoContent> video_content = dynamic_pointer_cast<VideoContent> (piece->content);
- DCPOMATIC_ASSERT (video_content);
-
- shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (video_content);
- if (dcp_content && dcp_content->reference_video () && !_play_referenced) {
- continue;
- }
-
- bool const use =
- /* always use the last video */
- piece == last ||
- /* with a corresponding L/R eye if appropriate */
- (last_type == VIDEO_FRAME_TYPE_3D_LEFT && video_content->video_frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) ||
- (last_type == VIDEO_FRAME_TYPE_3D_RIGHT && video_content->video_frame_type() == VIDEO_FRAME_TYPE_3D_LEFT);
-
- if (use) {
- /* We want to use this piece */
- list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
- if (content_video.empty ()) {
- pvf.push_back (black_player_video_frame (time));
- } else {
- dcp::Size image_size = video_content->scale().size (video_content, _video_container_size, _film->frame_size ());
-
- for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
- pvf.push_back (
- shared_ptr<PlayerVideo> (
- new PlayerVideo (
- i->image,
- content_video_to_dcp (piece, i->frame),
- video_content->crop (),
- video_content->fade (i->frame),
- image_size,
- _video_container_size,
- i->eyes,
- i->part,
- video_content->colour_conversion ()
- )
- )
- );
- }
- }
- } else {
- /* Discard unused video */
- decoder->get_video (dcp_to_content_video (piece, time), accurate);
- }
- }
- }
-
- if (subtitles) {
- BOOST_FOREACH (shared_ptr<PlayerVideo> p, pvf) {
- p->set_subtitle (subtitles.get ());
- }
- }
-
- return pvf;
+ return std::make_shared<PlayerVideo> (
+ std::make_shared<const RawImageProxy>(_black_image),
+ Crop(),
+ optional<double>(),
+ _video_container_size,
+ _video_container_size,
+ eyes,
+ Part::WHOLE,
+ PresetColourConversion::all().front().conversion,
+ VideoRange::FULL,
+ std::weak_ptr<Content>(),
+ boost::optional<Frame>(),
+ false
+ );
}
-/** @return Audio data or 0 if the only audio data here is referenced DCP data */
-shared_ptr<AudioBuffers>
-Player::get_audio (DCPTime time, DCPTime length, bool accurate)
-{
- if (!_have_valid_pieces) {
- setup_pieces ();
- }
-
- Frame const length_frames = length.frames_round (_film->audio_frame_rate ());
-
- shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
- audio->make_silent ();
-
- list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
- if (ov.empty ()) {
- return audio;
- }
-
- bool all_referenced = true;
- BOOST_FOREACH (shared_ptr<Piece> i, ov) {
- shared_ptr<AudioContent> audio_content = dynamic_pointer_cast<AudioContent> (i->content);
- shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (i->content);
- if (audio_content && (!dcp_content || !dcp_content->reference_audio ())) {
- /* There is audio content which is not from a DCP or not set to be referenced */
- all_referenced = false;
- }
- }
-
- if (all_referenced && !_play_referenced) {
- return shared_ptr<AudioBuffers> ();
- }
-
- BOOST_FOREACH (shared_ptr<Piece> i, ov) {
-
- shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (i->content);
- DCPOMATIC_ASSERT (content);
- shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> (i->decoder);
- DCPOMATIC_ASSERT (decoder);
-
- /* The time that we should request from the content */
- DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
- Frame request_frames = length_frames;
- DCPTime offset;
- if (request < DCPTime ()) {
- /* We went off the start of the content, so we will need to offset
- the stuff we get back.
- */
- offset = -request;
- request_frames += request.frames_round (_film->audio_frame_rate ());
- if (request_frames < 0) {
- request_frames = 0;
- }
- request = DCPTime ();
- }
-
- Frame const content_frame = dcp_to_resampled_audio (i, request);
-
- BOOST_FOREACH (AudioStreamPtr j, content->audio_streams ()) {
-
- if (j->channels() == 0) {
- /* Some content (e.g. DCPs) can have streams with no channels */
- continue;
- }
-
- /* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
- ContentAudio all = decoder->get_audio (j, content_frame, request_frames, accurate);
-
- /* Gain */
- if (content->audio_gain() != 0) {
- shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
- gain->apply_gain (content->audio_gain ());
- all.audio = gain;
- }
-
- /* Remap channels */
- shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
- dcp_mapped->make_silent ();
- AudioMapping map = j->mapping ();
- for (int i = 0; i < map.input_channels(); ++i) {
- for (int j = 0; j < _film->audio_channels(); ++j) {
- if (map.get (i, j) > 0) {
- dcp_mapped->accumulate_channel (
- all.audio.get(),
- i,
- j,
- map.get (i, j)
- );
- }
- }
- }
-
- if (_audio_processor) {
- dcp_mapped = _audio_processor->run (dcp_mapped, _film->audio_channels ());
- }
-
- all.audio = dcp_mapped;
-
- audio->accumulate_frames (
- all.audio.get(),
- content_frame - all.frame,
- offset.frames_round (_film->audio_frame_rate()),
- min (Frame (all.audio->frames()), request_frames)
- );
- }
- }
-
- return audio;
-}
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
- shared_ptr<const VideoContent> vc = dynamic_pointer_cast<const VideoContent> (piece->content);
- DCPTime s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(), s);
+ auto s = t - piece->content->position ();
+ s = min (piece->content->length_after_trim(_film), s);
s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   and then to frames at the content's rate; however, the precision of the Time type is not
	   always enough to do that accurately, so instead we convert at the DCP rate and account
	   for any skip/repeat with the frc factor.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
+
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
- shared_ptr<const VideoContent> vc = dynamic_pointer_cast<const VideoContent> (piece->content);
/* See comment in dcp_to_content_video */
- DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
- return max (DCPTime (), d + piece->content->position ());
+ auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
+ return d + piece->content->position();
}
+
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
- DCPTime s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(), s);
+ auto s = t - piece->content->position ();
+ s = min (piece->content->length_after_trim(_film), s);
/* See notes in dcp_to_content_video */
- return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
+ return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
+}
+
+
+DCPTime
+Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
+{
+ /* See comment in dcp_to_content_video */
+ return DCPTime::from_frames (f, _film->audio_frame_rate())
+ - DCPTime (piece->content->trim_start(), piece->frc)
+ + piece->content->position();
}
+
ContentTime
-Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
+Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
- DCPTime s = t - piece->content->position ();
- s = min (piece->content->length_after_trim(), s);
+ auto s = t - piece->content->position ();
+ s = min (piece->content->length_after_trim(_film), s);
return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
+
DCPTime
-Player::content_subtitle_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
+Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
- return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
+ return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
-/** @param burnt true to return only subtitles to be burnt, false to return only
- * subtitles that should not be burnt. This parameter will be ignored if
- * _always_burn_subtitles is true; in this case, all subtitles will be returned.
- */
-PlayerSubtitles
-Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt, bool accurate)
-{
- list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (time, time + length);
- PlayerSubtitles ps (time, length);
+vector<FontData>
+Player::get_subtitle_fonts ()
+{
+ boost::mutex::scoped_lock lm (_mutex);
- for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
- shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*j)->content);
- if (!subtitle_content->use_subtitles () || (!_always_burn_subtitles && (burnt != subtitle_content->burn_subtitles ()))) {
- continue;
- }
+ vector<FontData> fonts;
+ for (auto i: _pieces) {
+ /* XXX: things may go wrong if there are duplicate font IDs
+ with different font files.
+ */
+ auto f = i->decoder->fonts ();
+ copy (f.begin(), f.end(), back_inserter(fonts));
+ }
- shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (subtitle_content);
- if (dcp_content && dcp_content->reference_subtitle () && !_play_referenced) {
- continue;
- }
+ return fonts;
+}
- shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*j)->decoder);
- ContentTime const from = dcp_to_content_subtitle (*j, time);
- /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
- ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());
- list<ContentImageSubtitle> image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting, accurate);
- for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {
+/** Set this player never to produce any video data */
+void
+Player::set_ignore_video ()
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ _ignore_video = true;
+ setup_pieces_unlocked ();
+}
- /* Apply content's subtitle offsets */
- i->sub.rectangle.x += subtitle_content->subtitle_x_offset ();
- i->sub.rectangle.y += subtitle_content->subtitle_y_offset ();
- /* Apply content's subtitle scale */
- i->sub.rectangle.width *= subtitle_content->subtitle_x_scale ();
- i->sub.rectangle.height *= subtitle_content->subtitle_y_scale ();
+void
+Player::set_ignore_audio ()
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ _ignore_audio = true;
+ setup_pieces_unlocked ();
+}
- /* Apply a corrective translation to keep the subtitle centred after that scale */
- i->sub.rectangle.x -= i->sub.rectangle.width * (subtitle_content->subtitle_x_scale() - 1);
- i->sub.rectangle.y -= i->sub.rectangle.height * (subtitle_content->subtitle_y_scale() - 1);
- ps.image.push_back (i->sub);
- }
+void
+Player::set_ignore_text ()
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ _ignore_text = true;
+ setup_pieces_unlocked ();
+}
- list<ContentTextSubtitle> text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting, accurate);
- BOOST_FOREACH (ContentTextSubtitle& ts, text) {
- BOOST_FOREACH (dcp::SubtitleString s, ts.subs) {
- s.set_h_position (s.h_position() + subtitle_content->subtitle_x_offset ());
- s.set_v_position (s.v_position() + subtitle_content->subtitle_y_offset ());
- float const xs = subtitle_content->subtitle_x_scale();
- float const ys = subtitle_content->subtitle_y_scale();
- float size = s.size();
- /* Adjust size to express the common part of the scaling;
- e.g. if xs = ys = 0.5 we scale size by 2.
- */
- if (xs > 1e-5 && ys > 1e-5) {
- size *= 1 / min (1 / xs, 1 / ys);
- }
- s.set_size (size);
+/** Set the player to always burn open texts into the image regardless of the content settings */
+void
+Player::set_always_burn_open_subtitles ()
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ _always_burn_open_subtitles = true;
+}
+
+
+/** Sets up the player to be faster, possibly at the expense of quality */
+void
+Player::set_fast ()
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ _fast = true;
+ setup_pieces_unlocked ();
+}
+
+
+void
+Player::set_play_referenced ()
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ _play_referenced = true;
+ setup_pieces_unlocked ();
+}
+
+
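+/* Trim a reel asset by the given start/end frame counts and, if any duration remains,
+   add it to `a' with a DCP period starting at `from' (ffr being the film's video frame rate).
+*/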
+static void
+maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
+{
+ DCPOMATIC_ASSERT (r);
+ r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
+ r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
+ if (r->actual_duration() > 0) {
+ a.push_back (
+ ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
+ );
+ }
+}
+
+
+list<ReferencedReelAsset>
+Player::get_reel_assets ()
+{
+ /* Does not require a lock on _mutex as it's only called from DCPEncoder */
+
+ list<ReferencedReelAsset> reel_assets;
+
+ for (auto content: playlist()->content()) {
+ auto dcp = dynamic_pointer_cast<DCPContent>(content);
+ if (!dcp) {
+ continue;
+ }
+
+ if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
+ continue;
+ }
+
+ scoped_ptr<DCPDecoder> decoder;
+ try {
+ decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
+ } catch (...) {
+ return reel_assets;
+ }
+
+ auto const frame_rate = _film->video_frame_rate();
+ DCPOMATIC_ASSERT (dcp->video_frame_rate());
+ /* We should only be referencing if the DCP rate is the same as the film rate */
+ DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);
+
+ Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
+ Frame const trim_end = dcp->trim_end().frames_round(frame_rate);
+
+ /* position in the asset from the start */
+ int64_t offset_from_start = 0;
+		/* position in the asset from the end */
+ int64_t offset_from_end = 0;
+ for (auto reel: decoder->reels()) {
+ /* Assume that main picture duration is the length of the reel */
+ offset_from_end += reel->main_picture()->actual_duration();
+ }
+
+ for (auto reel: decoder->reels()) {
+
+ /* Assume that main picture duration is the length of the reel */
+ int64_t const reel_duration = reel->main_picture()->actual_duration();
+
+ /* See doc/design/trim_reels.svg */
+ Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
+ Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
- /* Then express aspect ratio changes */
- if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
- s.set_aspect_adjust (xs / ys);
+ auto const from = max(DCPTime(), content->position() + DCPTime::from_frames(offset_from_start, frame_rate) - DCPTime::from_frames(trim_start, frame_rate));
+ if (dcp->reference_video()) {
+ maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
+ }
+
+ if (dcp->reference_audio()) {
+ maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
+ }
+
+ if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
+ maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
+ }
+
+ if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
+ for (auto caption: reel->closed_captions()) {
+ maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
}
- s.set_in (dcp::Time(content_subtitle_to_dcp (*j, ts.period().from).seconds(), 1000));
- s.set_out (dcp::Time(content_subtitle_to_dcp (*j, ts.period().to).seconds(), 1000));
- ps.text.push_back (s);
- ps.add_fonts (subtitle_content->fonts ());
}
+
+ offset_from_start += reel_duration;
+ offset_from_end -= reel_duration;
}
}
- return ps;
+ return reel_assets;
}
-list<shared_ptr<Font> >
-Player::get_subtitle_fonts ()
+
+bool
+Player::pass ()
{
- if (!_have_valid_pieces) {
- setup_pieces ();
+ boost::mutex::scoped_lock lm (_mutex);
+
+ if (_suspended) {
+ /* We can't pass in this state */
+ LOG_DEBUG_PLAYER_NC ("Player is suspended");
+ return false;
+ }
+
+ if (_playback_length == DCPTime()) {
+ /* Special; just give one black frame */
+ emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
+ return true;
}
- list<shared_ptr<Font> > fonts;
- BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
- shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (p->content);
- if (sc) {
- /* XXX: things may go wrong if there are duplicate font IDs
- with different font files.
+ /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
+
+ shared_ptr<Piece> earliest_content;
+ optional<DCPTime> earliest_time;
+
+ for (auto i: _pieces) {
+ if (i->done) {
+ continue;
+ }
+
+ auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
+ if (t > i->content->end(_film)) {
+ i->done = true;
+ } else {
+
+ /* Given two choices at the same time, pick the one with texts so we see it before
+ the video.
*/
- list<shared_ptr<Font> > f = sc->fonts ();
- copy (f.begin(), f.end(), back_inserter (fonts));
+ if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
+ earliest_time = t;
+ earliest_content = i;
+ }
}
}
- return fonts;
+ bool done = false;
+
+ enum {
+ NONE,
+ CONTENT,
+ BLACK,
+ SILENT
+ } which = NONE;
+
+ if (earliest_content) {
+ which = CONTENT;
+ }
+
+ if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
+ earliest_time = _black.position ();
+ which = BLACK;
+ }
+
+ if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
+ earliest_time = _silent.position ();
+ which = SILENT;
+ }
+
+ switch (which) {
+ case CONTENT:
+ {
+ LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
+ earliest_content->done = earliest_content->decoder->pass ();
+ auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
+ if (dcp && !_play_referenced && dcp->reference_audio()) {
+ /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
+ to `hide' the fact that no audio was emitted during the referenced DCP (though
+ we need to behave as though it was).
+ */
+ _next_audio_time = dcp->end (_film);
+ }
+ break;
+ }
+ case BLACK:
+ LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
+ emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
+ _black.set_position (_black.position() + one_video_frame());
+ break;
+ case SILENT:
+ {
+ LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
+ DCPTimePeriod period (_silent.period_at_position());
+ if (_next_audio_time) {
+ /* Sometimes the thing that happened last finishes fractionally before
+ or after this silence. Bodge the start time of the silence to fix it.
+ I think this is nothing to worry about since we will just add or
+ remove a little silence at the end of some content.
+ */
+ int64_t const error = labs(period.from.get() - _next_audio_time->get());
+ /* Let's not worry about less than a frame at 24fps */
+ int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
+ if (error >= too_much_error) {
+ _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
+ }
+ DCPOMATIC_ASSERT (error < too_much_error);
+ period.from = *_next_audio_time;
+ }
+ if (period.duration() > one_video_frame()) {
+ period.to = period.from + one_video_frame();
+ }
+ fill_audio (period);
+ _silent.set_position (period.to);
+ break;
+ }
+ case NONE:
+ done = true;
+ break;
+ }
+
+ /* Emit any audio that is ready */
+
+ /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
+	   of our streams, or the position of the _silent. First, though, we choose only streams that are less than
+ ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
+ behind it has finished). This is so that we don't withhold audio indefinitely awaiting data from a stream
+ that will never come, causing bugs like #2101.
+ */
+ constexpr int ignore_streams_behind = 5;
+
+ using state_pair = std::pair<AudioStreamPtr, StreamState>;
+
+ /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
+ auto latest_last_push_end = std::max_element(
+ _stream_states.begin(),
+ _stream_states.end(),
+ [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
+ );
+
+ if (latest_last_push_end != _stream_states.end()) {
+ LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
+ }
+
+ /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
+ std::map<AudioStreamPtr, StreamState> alive_stream_states;
+ for (auto const& i: _stream_states) {
+ if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
+ alive_stream_states.insert(i);
+ } else {
+ LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
+ }
+ }
+
+ auto pull_to = _playback_length;
+ for (auto const& i: alive_stream_states) {
+ if (!i.second.piece->done && i.second.last_push_end < pull_to) {
+ pull_to = i.second.last_push_end;
+ }
+ }
+ if (!_silent.done() && _silent.position() < pull_to) {
+ pull_to = _silent.position();
+ }
+
+ LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
+ auto audio = _audio_merger.pull (pull_to);
+ for (auto i = audio.begin(); i != audio.end(); ++i) {
+ if (_next_audio_time && i->second < *_next_audio_time) {
+ /* This new data comes before the last we emitted (or the last seek); discard it */
+ auto cut = discard_audio (i->first, i->second, *_next_audio_time);
+ if (!cut.first) {
+ continue;
+ }
+ *i = cut;
+ } else if (_next_audio_time && i->second > *_next_audio_time) {
+ /* There's a gap between this data and the last we emitted; fill with silence */
+ fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
+ }
+
+ emit_audio (i->first, i->second);
+ }
+
+ if (done) {
+ if (_shuffler) {
+ _shuffler->flush ();
+ }
+ for (auto const& i: _delay) {
+ do_emit_video(i.first, i.second);
+ }
+
+ /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
+ * However, if we have L and R video files, and one is shorter than the other,
+ * the fill code in ::video mostly takes care of filling in the gaps.
+		 * But since it fills at the point when it knows there is more video coming
+		 * at time t (so it should fill any gap up to t) it can't do anything right at the
+		 * end. This is particularly bad news if the last frame emitted is a LEFT
+		 * eye, as the MXF writer will complain about the 3D sequence being wrong.
+		 * Here's a hack to work around that particular case.
+ */
+ if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
+ do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
+ }
+ }
+
+ return done;
}
-/** Set this player never to produce any video data */
+
+/** @return Open subtitles for the frame at the given time, converted to images */
+optional<PositionImage>
+Player::open_subtitles_for_frame (DCPTime time) const
+{
+ list<PositionImage> captions;
+ int const vfr = _film->video_frame_rate();
+
+ for (
+ auto j:
+ _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
+ ) {
+
+ /* Bitmap subtitles */
+ for (auto i: j.bitmap) {
+ if (!i.image) {
+ continue;
+ }
+
+ /* i.image will already have been scaled to fit _video_container_size */
+ dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
+
+ captions.push_back (
+ PositionImage (
+ i.image,
+ Position<int> (
+ lrint(_video_container_size.width * i.rectangle.x),
+ lrint(_video_container_size.height * i.rectangle.y)
+ )
+ )
+ );
+ }
+
+ /* String subtitles (rendered to an image) */
+ if (!j.string.empty()) {
+ auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
+ copy (s.begin(), s.end(), back_inserter (captions));
+ }
+ }
+
+ if (captions.empty()) {
+ return {};
+ }
+
+ return merge (captions, _subtitle_alignment);
+}
+
+
void
-Player::set_ignore_video ()
+Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
- _ignore_video = true;
+ if (_suspended) {
+ return;
+ }
+
+ auto piece = weak_piece.lock ();
+ if (!piece) {
+ return;
+ }
+
+ if (!piece->content->video->use()) {
+ return;
+ }
+
+ FrameRateChange frc (_film, piece->content);
+ if (frc.skip && (video.frame % 2) == 1) {
+ return;
+ }
+
+ /* Time of the first frame we will emit */
+ DCPTime const time = content_video_to_dcp (piece, video.frame);
+ LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+
+ /* Discard if it's before the content's period or the last accurate seek. We can't discard
+ if it's after the content's period here as in that case we still need to fill any gap between
+ `now' and the end of the content's period.
+ */
+ if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
+ return;
+ }
+
+ if (piece->ignore_video && piece->ignore_video->contains(time)) {
+ return;
+ }
+
+ /* Fill gaps that we discover now that we have some video which needs to be emitted.
+ This is where we need to fill to.
+ */
+ DCPTime fill_to = min (time, piece->content->end(_film));
+
+ if (_next_video_time) {
+ DCPTime fill_from = max (*_next_video_time, piece->content->position());
+
+ /* Fill if we have more than half a frame to do */
+ if ((fill_to - fill_from) > one_video_frame() / 2) {
+ auto last = _last_video.find (weak_piece);
+ if (_film->three_d()) {
+ auto fill_to_eyes = video.eyes;
+ if (fill_to_eyes == Eyes::BOTH) {
+ fill_to_eyes = Eyes::LEFT;
+ }
+ if (fill_to == piece->content->end(_film)) {
+ /* Don't fill after the end of the content */
+ fill_to_eyes = Eyes::LEFT;
+ }
+ auto j = fill_from;
+ auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
+ if (eyes == Eyes::BOTH) {
+ eyes = Eyes::LEFT;
+ }
+ while (j < fill_to || eyes != fill_to_eyes) {
+ if (last != _last_video.end()) {
+ LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
+ auto copy = last->second->shallow_copy();
+ copy->set_eyes (eyes);
+ emit_video (copy, j);
+ } else {
+ LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
+ emit_video (black_player_video_frame(eyes), j);
+ }
+ if (eyes == Eyes::RIGHT) {
+ j += one_video_frame();
+ }
+ eyes = increment_eyes (eyes);
+ }
+ } else {
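+				/* 2D: fill the gap by repeating the last frame we emitted, or black */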
+ for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
+ if (last != _last_video.end()) {
+ emit_video (last->second, j);
+ } else {
+ emit_video (black_player_video_frame(Eyes::BOTH), j);
+ }
+ }
+ }
+ }
+ }
+
+ auto const content_video = piece->content->video;
+
+ _last_video[weak_piece] = std::make_shared<PlayerVideo>(
+ video.image,
+ content_video->actual_crop(),
+ content_video->fade (_film, video.frame),
+ scale_for_display(
+ content_video->scaled_size(_film->frame_size()),
+ _video_container_size,
+ _film->frame_size(),
+ content_video->pixel_quanta()
+ ),
+ _video_container_size,
+ video.eyes,
+ video.part,
+ content_video->colour_conversion(),
+ content_video->range(),
+ piece->content,
+ video.frame,
+ false
+ );
+
+ DCPTime t = time;
+ for (int i = 0; i < frc.repeat; ++i) {
+ if (t < piece->content->end(_film)) {
+ emit_video (_last_video[weak_piece], t);
+ }
+ t += one_video_frame ();
+ }
}
-/** Set this player never to produce any audio data */
+
void
-Player::set_ignore_audio ()
+Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
- _ignore_audio = true;
+ if (_suspended) {
+ return;
+ }
+
+ DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
+
+ auto piece = weak_piece.lock ();
+ if (!piece) {
+ return;
+ }
+
+ auto content = piece->content->audio;
+ DCPOMATIC_ASSERT (content);
+
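+	/* The rate this stream's audio will have once it has been resampled to suit the film */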
+ int const rfr = content->resampled_frame_rate (_film);
+
+ /* Compute time in the DCP */
+ auto time = resampled_audio_to_dcp (piece, content_audio.frame);
+ LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
+
+ /* And the end of this block in the DCP */
+ auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
+
+ /* Remove anything that comes before the start or after the end of the content */
+ if (time < piece->content->position()) {
+ auto cut = discard_audio (content_audio.audio, time, piece->content->position());
+ if (!cut.first) {
+ /* This audio is entirely discarded */
+ return;
+ }
+ content_audio.audio = cut.first;
+ time = cut.second;
+ } else if (time > piece->content->end(_film)) {
+ /* Discard it all */
+ return;
+ } else if (end > piece->content->end(_film)) {
+ Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
+ if (remaining_frames == 0) {
+ return;
+ }
+ content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
+ }
+
+ DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
+
+ /* Gain and fade */
+
+ auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
+ if (content->gain() != 0 || !fade_coeffs.empty()) {
+ auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
+ if (!fade_coeffs.empty()) {
+ /* Apply both fade and gain */
+ DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
+ auto const channels = gain_buffers->channels();
+ auto const frames = fade_coeffs.size();
+ auto data = gain_buffers->data();
+ auto const gain = db_to_linear (content->gain());
+ for (auto channel = 0; channel < channels; ++channel) {
+ for (auto frame = 0U; frame < frames; ++frame) {
+ data[channel][frame] *= gain * fade_coeffs[frame];
+ }
+ }
+ } else {
+ /* Just apply gain */
+ gain_buffers->apply_gain (content->gain());
+ }
+ content_audio.audio = gain_buffers;
+ }
+
+ /* Remap */
+
+ content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
+
+ /* Process */
+
+ if (_audio_processor) {
+ content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
+ }
+
+ /* Push */
+
+ _audio_merger.push (content_audio.audio, time);
+ DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
+ _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
-/** Set whether or not this player should always burn text subtitles into the image,
- * regardless of the content settings.
- * @param burn true to always burn subtitles, false to obey content settings.
- */
+
void
-Player::set_always_burn_subtitles (bool burn)
+Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
{
- _always_burn_subtitles = burn;
+ if (_suspended) {
+ return;
+ }
+
+ auto piece = weak_piece.lock ();
+ auto content = weak_content.lock ();
+ if (!piece || !content) {
+ return;
+ }
+
+ PlayerText ps;
+	for (auto& sub: subtitle.subs) {
+ /* Apply content's subtitle offsets */
+ sub.rectangle.x += content->x_offset ();
+ sub.rectangle.y += content->y_offset ();
+
+ /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
+ sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
+ sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
+
+ /* Apply content's subtitle scale */
+ sub.rectangle.width *= content->x_scale ();
+ sub.rectangle.height *= content->y_scale ();
+
+ auto image = sub.image;
+
+ /* We will scale the subtitle up to fit _video_container_size */
+ int const width = sub.rectangle.width * _video_container_size.width;
+ int const height = sub.rectangle.height * _video_container_size.height;
+ if (width == 0 || height == 0) {
+ return;
+ }
+
+ dcp::Size scaled_size (width, height);
+ ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
+ }
+
+ DCPTime from(content_time_to_dcp(piece, subtitle.from()));
+ _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}
+
void
-Player::set_fast ()
+Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
{
- _fast = true;
- _have_valid_pieces = false;
+ if (_suspended) {
+ return;
+ }
+
+ auto piece = weak_piece.lock ();
+ auto content = weak_content.lock ();
+ if (!piece || !content) {
+ return;
+ }
+
+ PlayerText ps;
+ DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
+
+ if (from > piece->content->end(_film)) {
+ return;
+ }
+
+ for (auto s: subtitle.subs) {
+ s.set_h_position (s.h_position() + content->x_offset());
+ s.set_v_position (s.v_position() + content->y_offset());
+ float const xs = content->x_scale();
+ float const ys = content->y_scale();
+ float size = s.size();
+
+ /* Adjust size to express the common part of the scaling;
+ e.g. if xs = ys = 0.5 we scale size by 2.
+ */
+ if (xs > 1e-5 && ys > 1e-5) {
+ size *= 1 / min (1 / xs, 1 / ys);
+ }
+ s.set_size (size);
+
+ /* Then express aspect ratio changes */
+ if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
+ s.set_aspect_adjust (xs / ys);
+ }
+
+ s.set_in (dcp::Time(from.seconds(), 1000));
+ ps.string.push_back (StringText (s, content->outline_width()));
+ ps.add_fonts (content->fonts ());
+ }
+
+ _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}
+
void
-Player::set_play_referenced ()
+Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
{
- _play_referenced = true;
- _have_valid_pieces = false;
+ if (_suspended) {
+ return;
+ }
+
+ auto content = weak_content.lock ();
+ if (!content) {
+ return;
+ }
+
+ if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
+ return;
+ }
+
+ auto piece = weak_piece.lock ();
+ if (!piece) {
+ return;
+ }
+
+ DCPTime const dcp_to = content_time_to_dcp (piece, to);
+
+ if (dcp_to > piece->content->end(_film)) {
+ return;
+ }
+
+ auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
+
+ bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
+ if (content->use() && !always && !content->burn()) {
+ Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
+ }
}
-list<ReferencedReelAsset>
-Player::get_reel_assets ()
+
+void
+Player::seek (DCPTime time, bool accurate)
{
- list<ReferencedReelAsset> a;
+ boost::mutex::scoped_lock lm (_mutex);
+ LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
- BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
- shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
- if (!j) {
- continue;
+ if (_suspended) {
+ /* We can't seek in this state */
+ return;
+ }
+
+ if (_shuffler) {
+ _shuffler->clear ();
+ }
+
+ _delay.clear ();
+
+ if (_audio_processor) {
+ _audio_processor->flush ();
+ }
+
+ _audio_merger.clear ();
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
+ _active_texts[i].clear ();
+ }
+
+ for (auto i: _pieces) {
+ if (time < i->content->position()) {
+ /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
+ we must seek this (following) content accurately, otherwise when we come to the end of the current
+ content we may not start right at the beginning of the next, causing a gap (for example, if the
+ next content has been trimmed to a point between keyframes).
+ */
+ i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
+ i->done = false;
+ } else if (i->content->position() <= time && time < i->content->end(_film)) {
+ /* During; seek to position */
+ i->decoder->seek (dcp_to_content_time (i, time), accurate);
+ i->done = false;
+ } else {
+ /* After; this piece is done */
+ i->done = true;
}
+ }
- scoped_ptr<DCPDecoder> decoder;
- try {
- decoder.reset (new DCPDecoder (j, false));
- } catch (...) {
- return a;
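+ /* After an accurate seek we know exactly what should be emitted next; after an
+ inaccurate one we cannot tell until data arrives from the decoders.
+ */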
+ if (accurate) {
+ _next_video_time = time;
+ _next_video_eyes = Eyes::LEFT;
+ _next_audio_time = time;
+ } else {
+ _next_video_time = boost::none;
+ _next_video_eyes = boost::none;
+ _next_audio_time = boost::none;
+ }
+
+ _black.set_position (time);
+ _silent.set_position (time);
+
+ _last_video.clear ();
+}
+
+
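+ /** Queue a video frame for emission. Frames are held in a short delay queue so that
+ * any subtitles which belong to them have time to arrive first.
+ */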
+void
+Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
+{
+ if (!_film->three_d()) {
+ if (pv->eyes() == Eyes::LEFT) {
+ /* Use left-eye images for both eyes... */
+ pv->set_eyes (Eyes::BOTH);
+ } else if (pv->eyes() == Eyes::RIGHT) {
+ /* ...and discard the right */
+ return;
}
+ }
- int64_t offset = 0;
- BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
- DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
- if (j->reference_video ()) {
- a.push_back (
- ReferencedReelAsset (
- k->main_picture (),
- DCPTimePeriod (from, from + DCPTime::from_frames (k->main_picture()->duration(), _film->video_frame_rate()))
- )
- );
- }
+ /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
+ player before the video that requires them.
+ */
+ _delay.push_back (make_pair (pv, time));
- if (j->reference_audio ()) {
- a.push_back (
- ReferencedReelAsset (
- k->main_sound (),
- DCPTimePeriod (from, from + DCPTime::from_frames (k->main_sound()->duration(), _film->video_frame_rate()))
- )
- );
- }
+ if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+ _next_video_time = time + one_video_frame();
+ }
+ _next_video_eyes = increment_eyes (pv->eyes());
- if (j->reference_subtitle ()) {
- DCPOMATIC_ASSERT (k->main_subtitle ());
- a.push_back (
- ReferencedReelAsset (
- k->main_subtitle (),
- DCPTimePeriod (from, from + DCPTime::from_frames (k->main_subtitle()->duration(), _film->video_frame_rate()))
- )
- );
- }
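+ /* Hold a few frames back so that late-arriving subtitles can still be attached */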
+ if (_delay.size() < 3) {
+ return;
+ }
- /* Assume that main picture duration is the length of the reel */
- offset += k->main_picture()->duration ();
+ auto to_do = _delay.front();
+ _delay.pop_front();
+ do_emit_video (to_do.first, to_do.second);
+}
+
+
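+ /** Actually emit a video frame: expire active texts which have finished, attach any
+ * open subtitles for this frame, then emit the Video signal.
+ */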
+void
+Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
+{
+ if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+ for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
+ _active_texts[i].clear_before (time);
}
}
- return a;
+ auto subtitles = open_subtitles_for_frame (time);
+ if (subtitles) {
+ pv->set_text (subtitles.get ());
+ }
+
+ Video (pv, time);
}
+
+
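+ /** Emit some audio via the Audio signal, checking that it follows on exactly from
+ * what was emitted before.
+ */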
+void
+Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
+{
+ /* Log if the assert below is about to fail */
+ if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
+ _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
+ }
+
+ /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
+ DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
+ Audio (data, time, _film->audio_frame_rate());
+ _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
+}
+
+
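+ /** Emit silence to fill @p period, in blocks of no more than half a second */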
+void
+Player::fill_audio (DCPTimePeriod period)
+{
+ if (period.from == period.to) {
+ return;
+ }
+
+ DCPOMATIC_ASSERT (period.from < period.to);
+
+ DCPTime t = period.from;
+ while (t < period.to) {
+ DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
+ Frame const samples = block.frames_round(_film->audio_frame_rate());
+ if (samples) {
+ auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
+ silence->make_silent ();
+ emit_audio (silence, t);
+ }
+ t += block;
+ }
+}
+
+
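+ /** @return the duration of one video frame at the film's video frame rate */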
+DCPTime
+Player::one_video_frame () const
+{
+ return DCPTime::from_frames (1, _film->video_frame_rate ());
+}
+
+
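+ /** Discard the part of @p audio (which starts at @p time) which falls before @p discard_to.
+ * @return the remaining audio and the time at which it starts, or a null pointer if
+ * nothing remains.
+ */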
+pair<shared_ptr<AudioBuffers>, DCPTime>
+Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
+{
+ auto const discard_time = discard_to - time;
+ auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
+ auto remaining_frames = audio->frames() - discard_frames;
+ if (remaining_frames <= 0) {
+ return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
+ }
+ auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
+ return make_pair(cut, time + discard_time);
+}
+
+
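+ /** Set the resolution reduction used when decoding DCP sources, emitting the
+ * appropriate PENDING/DONE (or CANCELLED, if nothing changed) Change signals.
+ */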
+void
+Player::set_dcp_decode_reduction (optional<int> reduction)
+{
+ Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
+
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+
+ if (reduction == _dcp_decode_reduction) {
+ lm.unlock ();
+ Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
+ return;
+ }
+
+ _dcp_decode_reduction = reduction;
+ setup_pieces_unlocked ();
+ }
+
+ Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
+}
+
+
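+ /** @return the DCP time corresponding to @p t within @p content, or an empty optional
+ * if the content is not in the current list of pieces.
+ */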
+optional<DCPTime>
+Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
+{
+ boost::mutex::scoped_lock lm (_mutex);
+
+ for (auto i: _pieces) {
+ if (i->content == content) {
+ return content_time_to_dcp (i, t);
+ }
+ }
+
+ /* We couldn't find this content; perhaps things are being changed over */
+ return {};
+}
+
+
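+ /** @return the time within @p content corresponding to DCP time @p t, or an empty
+ * optional if the content is not in the current list of pieces.
+ */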
+optional<ContentTime>
+Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
+{
+ boost::mutex::scoped_lock lm (_mutex);
+
+ for (auto i: _pieces) {
+ if (i->content == content) {
+ return dcp_to_content_time (i, t);
+ }
+ }
+
+ /* We couldn't find this content; perhaps things are being changed over */
+ return {};
+}
+
+
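+ /** @return our own playlist, if one was specified, otherwise the film's */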
+shared_ptr<const Playlist>
+Player::playlist () const
+{
+ return _playlist ? _playlist : _film->playlist();
+}
+
+
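+ /** Handler for Atmos data arriving from a decoder; re-emits it via the Atmos signal
+ * if it falls within the content's extent on the DCP timeline.
+ */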
+void
+Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
+{
+ if (_suspended) {
+ return;
+ }
+
+ auto piece = weak_piece.lock ();
+ DCPOMATIC_ASSERT (piece);
+
+ auto const vfr = _film->video_frame_rate();
+
+ DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
+ if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
+ return;
+ }
+
+ Atmos (data.data, dcp_time, data.metadata);
+}
+