Various attempts to tidy up and fix the locking of Player (#2323).
};
std::list<Buffer> _buffers;
- int _frame_rate;
+ int const _frame_rate;
};
while (!_player->pass ()) {}
- for (auto i: _player->get_reel_assets()) {
+ for (auto i: get_referenced_reel_assets(_film, _film->playlist())) {
_writer->write (i);
}
auto frame = audio_frame (stream);
auto data = deinterleave_audio (frame);
+ auto const time_base = stream->stream(_format_context)->time_base;
+
ContentTime ct;
if (frame->pts == AV_NOPTS_VALUE) {
/* In some streams we see not every frame coming through with a timestamp; for those
} else {
ct = ContentTime::from_seconds (
frame->best_effort_timestamp *
- av_q2d (stream->stream(_format_context)->time_base))
+ av_q2d(time_base))
+ _pts_offset;
+ LOG_DEBUG_PLAYER(
+		"Process audio with timestamp %1 (BET %2, timebase %3/%4, PTS offset %5)",
+ to_string(ct),
+ frame->best_effort_timestamp,
+ time_base.num,
+ time_base.den,
+ to_string(_pts_offset)
+ );
}
_next_time[stream] = ct + ContentTime::from_frames(data->frames(), stream->frame_rate());
data->frames(),
stream->id(),
frame->best_effort_timestamp,
- av_q2d(stream->stream(_format_context)->time_base),
+ av_q2d(time_base),
to_string(_pts_offset)
);
}
auto context = _codec_context[stream->index(_format_context)];
auto frame = audio_frame (stream);
+ LOG_DEBUG_PLAYER("Send audio packet on stream %1", stream->index(_format_context));
int r = avcodec_send_packet (context, packet);
if (r < 0) {
LOG_WARNING("avcodec_send_packet returned %1 for an audio packet", r);
r = avcodec_receive_frame (context, frame);
if (r == AVERROR(EAGAIN)) {
/* More input is required */
+		LOG_DEBUG_PLAYER_NC("EAGAIN after trying to receive audio frame");
return;
}
#include "film.h"
#include "j2k_encoder.h"
#include "log.h"
-#include "player.h"
#include "player_video.h"
#include "util.h"
#include "writer.h"
#include "playlist.h"
#include "ratio.h"
#include "raw_image_proxy.h"
-#include "referenced_reel_asset.h"
#include "render_text.h"
#include "shuffler.h"
#include "text_content.h"
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
: _film (film)
, _suspended (0)
+ , _ignore_video(false)
+ , _ignore_audio(false)
+ , _ignore_text(false)
+ , _always_burn_open_subtitles(false)
+ , _fast(false)
, _tolerant (film->tolerant())
+ , _play_referenced(false)
, _audio_merger (_film->audio_frame_rate())
, _subtitle_alignment (subtitle_alignment)
{
: _film (film)
, _playlist (playlist_)
, _suspended (0)
+ , _ignore_video(false)
+ , _ignore_audio(false)
+ , _ignore_text(false)
+ , _always_burn_open_subtitles(false)
+ , _fast(false)
, _tolerant (film->tolerant())
+ , _play_referenced(false)
, _audio_merger (_film->audio_frame_rate())
{
construct ();
}
-void
-Player::setup_pieces ()
-{
- boost::mutex::scoped_lock lm (_mutex);
- setup_pieces_unlocked ();
-}
-
-
bool
have_video (shared_ptr<const Content> content)
{
void
-Player::setup_pieces_unlocked ()
+Player::setup_pieces ()
{
+ boost::mutex::scoped_lock lm (_mutex);
+
_playback_length = _playlist ? _playlist->length(_film) : _film->length();
auto old_pieces = _pieces;
{
if (property == VideoContentProperty::CROP) {
if (type == ChangeType::DONE) {
- auto const vcs = video_container_size();
boost::mutex::scoped_lock lm (_mutex);
for (auto const& i: _delay) {
- i.first->reset_metadata (_film, vcs);
+ i.first->reset_metadata(_film, _video_container_size);
}
}
} else {
{
Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
- {
- boost::mutex::scoped_lock lm (_mutex);
-
- if (s == _video_container_size) {
- lm.unlock ();
- Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
- return;
- }
+ if (s == _video_container_size) {
+ Change(ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+ return;
+ }
- _video_container_size = s;
+ _video_container_size = s;
- _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
- _black_image->make_black ();
- }
+ auto black = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
+ black->make_black ();
+ std::atomic_store(&_black_image, black);
Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
+ auto black = std::atomic_load(&_black_image);
+
return std::make_shared<PlayerVideo> (
- std::make_shared<const RawImageProxy>(_black_image),
+ std::make_shared<const RawImageProxy>(black),
Crop(),
optional<double>(),
_video_container_size,
void
Player::set_ignore_video ()
{
- boost::mutex::scoped_lock lm (_mutex);
_ignore_video = true;
- setup_pieces_unlocked ();
+ setup_pieces();
}
void
Player::set_ignore_audio ()
{
- boost::mutex::scoped_lock lm (_mutex);
_ignore_audio = true;
- setup_pieces_unlocked ();
+ setup_pieces();
}
void
Player::set_ignore_text ()
{
- boost::mutex::scoped_lock lm (_mutex);
_ignore_text = true;
- setup_pieces_unlocked ();
+ setup_pieces();
}
void
Player::set_always_burn_open_subtitles ()
{
- boost::mutex::scoped_lock lm (_mutex);
_always_burn_open_subtitles = true;
}
void
Player::set_fast ()
{
- boost::mutex::scoped_lock lm (_mutex);
_fast = true;
- setup_pieces_unlocked ();
+ setup_pieces();
}
void
Player::set_play_referenced ()
{
- boost::mutex::scoped_lock lm (_mutex);
_play_referenced = true;
- setup_pieces_unlocked ();
-}
-
-
-static void
-maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
-{
- DCPOMATIC_ASSERT (r);
- r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
- r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
- if (r->actual_duration() > 0) {
- a.push_back (
- ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
- );
- }
-}
-
-
-list<ReferencedReelAsset>
-Player::get_reel_assets ()
-{
- /* Does not require a lock on _mutex as it's only called from DCPEncoder */
-
- list<ReferencedReelAsset> reel_assets;
-
- for (auto content: playlist()->content()) {
- auto dcp = dynamic_pointer_cast<DCPContent>(content);
- if (!dcp) {
- continue;
- }
-
- if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
- continue;
- }
-
- scoped_ptr<DCPDecoder> decoder;
- try {
- decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
- } catch (...) {
- return reel_assets;
- }
-
- auto const frame_rate = _film->video_frame_rate();
- DCPOMATIC_ASSERT (dcp->video_frame_rate());
- /* We should only be referencing if the DCP rate is the same as the film rate */
- DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);
-
- Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
- Frame const trim_end = dcp->trim_end().frames_round(frame_rate);
-
- /* position in the asset from the start */
- int64_t offset_from_start = 0;
- /* position i the asset from the end */
- int64_t offset_from_end = 0;
- for (auto reel: decoder->reels()) {
- /* Assume that main picture duration is the length of the reel */
- offset_from_end += reel->main_picture()->actual_duration();
- }
-
- for (auto reel: decoder->reels()) {
-
- /* Assume that main picture duration is the length of the reel */
- int64_t const reel_duration = reel->main_picture()->actual_duration();
-
- /* See doc/design/trim_reels.svg */
- Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
- Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
-
- auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
- if (dcp->reference_video()) {
- maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
- }
-
- if (dcp->reference_audio()) {
- maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
- }
-
- if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
- maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
- }
-
- if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
- for (auto caption: reel->closed_captions()) {
- maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
- }
- }
-
- offset_from_start += reel_duration;
- offset_from_end -= reel_duration;
- }
- }
-
- return reel_assets;
+ setup_pieces();
}
return false;
}
- if (_playback_length == DCPTime()) {
+ if (_playback_length.load() == DCPTime()) {
/* Special; just give one black frame */
emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
return true;
);
if (latest_last_push_end != _stream_states.end()) {
- LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
+ LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
}
/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
}
}
- auto pull_to = _playback_length;
+ auto pull_to = _playback_length.load();
for (auto const& i: alive_stream_states) {
if (!i.second.piece->done && i.second.last_push_end < pull_to) {
pull_to = i.second.last_push_end;
}
/* i.image will already have been scaled to fit _video_container_size */
- dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
+ dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
captions.push_back (
PositionImage (
i.image,
Position<int> (
- lrint(_video_container_size.width * i.rectangle.x),
- lrint(_video_container_size.height * i.rectangle.y)
+ lrint(_video_container_size.load().width * i.rectangle.x),
+ lrint(_video_container_size.load().height * i.rectangle.y)
)
)
);
auto image = sub.image;
/* We will scale the subtitle up to fit _video_container_size */
- int const width = sub.rectangle.width * _video_container_size.width;
- int const height = sub.rectangle.height * _video_container_size.height;
+ int const width = sub.rectangle.width * _video_container_size.load().width;
+ int const height = sub.rectangle.height * _video_container_size.load().height;
if (width == 0 || height == 0) {
return;
}
{
Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
- {
- boost::mutex::scoped_lock lm (_mutex);
-
- if (reduction == _dcp_decode_reduction) {
- lm.unlock ();
- Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
- return;
- }
-
- _dcp_decode_reduction = reduction;
- setup_pieces_unlocked ();
+ if (reduction == _dcp_decode_reduction.load()) {
+ Change(ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
+ return;
}
+ _dcp_decode_reduction = reduction;
+ setup_pieces();
+
Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
optional<DCPTime>
-Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
+Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
{
boost::mutex::scoped_lock lm (_mutex);
optional<ContentTime>
-Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
+Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
{
boost::mutex::scoped_lock lm (_mutex);
void seek (dcpomatic::DCPTime time, bool accurate);
std::vector<std::shared_ptr<dcpomatic::Font>> get_subtitle_fonts ();
- std::list<ReferencedReelAsset> get_reel_assets ();
+
dcp::Size video_container_size () const {
- boost::mutex::scoped_lock lm (_mutex);
return _video_container_size;
}
void set_play_referenced ();
void set_dcp_decode_reduction (boost::optional<int> reduction);
- boost::optional<dcpomatic::DCPTime> content_time_to_dcp (std::shared_ptr<const Content> content, dcpomatic::ContentTime t);
- boost::optional<dcpomatic::ContentTime> dcp_to_content_time (std::shared_ptr<const Content> content, dcpomatic::DCPTime t);
+ boost::optional<dcpomatic::DCPTime> content_time_to_dcp (std::shared_ptr<const Content> content, dcpomatic::ContentTime t) const;
+ boost::optional<dcpomatic::ContentTime> dcp_to_content_time (std::shared_ptr<const Content> content, dcpomatic::DCPTime t) const;
boost::signals2::signal<void (ChangeType, int, bool)> Change;
void construct ();
void setup_pieces ();
- void setup_pieces_unlocked ();
void film_change (ChangeType, Film::Property);
void playlist_change (ChangeType);
void playlist_content_change (ChangeType, int, bool);
*/
mutable boost::mutex _mutex;
- std::shared_ptr<const Film> _film;
+ std::shared_ptr<const Film> const _film;
/** Playlist, or 0 if we are using the one from the _film */
- std::shared_ptr<const Playlist> _playlist;
+ std::shared_ptr<const Playlist> const _playlist;
/** > 0 if we are suspended (i.e. pass() and seek() do nothing) */
boost::atomic<int> _suspended;
/** Size of the image we are rendering to; this may be the DCP frame size, or
* the size of preview in a window.
*/
- dcp::Size _video_container_size;
+ boost::atomic<dcp::Size> _video_container_size;
+ /** Should be accessed using the std::atomic... methods */
std::shared_ptr<Image> _black_image;
/** true if the player should ignore all video; i.e. never produce any */
- bool _ignore_video = false;
- bool _ignore_audio = false;
+ boost::atomic<bool> _ignore_video;
+ boost::atomic<bool> _ignore_audio;
/** true if the player should ignore all text; i.e. never produce any */
- bool _ignore_text = false;
- bool _always_burn_open_subtitles = false;
+ boost::atomic<bool> _ignore_text;
+ boost::atomic<bool> _always_burn_open_subtitles;
/** true if we should try to be fast rather than high quality */
- bool _fast = false;
+ boost::atomic<bool> _fast;
/** true if we should keep going in the face of `survivable' errors */
- bool _tolerant = false;
+ bool const _tolerant;
/** true if we should `play' (i.e output) referenced DCP data (e.g. for preview) */
- bool _play_referenced = false;
+ boost::atomic<bool> _play_referenced;
/** Time of the next video that we will emit, or the time of the last accurate seek */
boost::optional<dcpomatic::DCPTime> _next_video_time;
/** Time of the next audio that we will emit, or the time of the last accurate seek */
boost::optional<dcpomatic::DCPTime> _next_audio_time;
- boost::optional<int> _dcp_decode_reduction;
+ boost::atomic<boost::optional<int>> _dcp_decode_reduction;
typedef std::map<std::weak_ptr<Piece>, std::shared_ptr<PlayerVideo>, std::owner_less<std::weak_ptr<Piece>>> LastVideoMap;
LastVideoMap _last_video;
ActiveText _active_texts[static_cast<int>(TextType::COUNT)];
std::shared_ptr<AudioProcessor> _audio_processor;
- dcpomatic::DCPTime _playback_length;
+ boost::atomic<dcpomatic::DCPTime> _playback_length;
/** Alignment for subtitle images that we create */
- Image::Alignment _subtitle_alignment = Image::Alignment::PADDED;
+ Image::Alignment const _subtitle_alignment = Image::Alignment::PADDED;
boost::signals2::scoped_connection _film_changed_connection;
boost::signals2::scoped_connection _playlist_change_connection;
--- /dev/null
+/*
+ Copyright (C) 2015 Carl Hetherington <cth@carlh.net>
+
+ This file is part of DCP-o-matic.
+
+ DCP-o-matic is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ DCP-o-matic is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+
+#include "dcp_content.h"
+#include "dcp_decoder.h"
+#include "dcpomatic_assert.h"
+#include "film.h"
+#include "playlist.h"
+#include "referenced_reel_asset.h"
+#include <dcp/reel.h>
+#include <dcp/reel_asset.h>
+#include <dcp/reel_closed_caption_asset.h>
+#include <dcp/reel_picture_asset.h>
+#include <dcp/reel_sound_asset.h>
+#include <dcp/reel_subtitle_asset.h>
+#include <cmath>
+
+
+using std::list;
+using std::max;
+using std::min;
+using std::shared_ptr;
+using std::dynamic_pointer_cast;
+using boost::scoped_ptr;
+using namespace dcpomatic;
+
+
+/** Trim a reel asset and, if anything remains of it after trimming, add it to a list.
+ *  Note that @p r is modified in place: its entry point and duration are adjusted.
+ *  @param a List to add the asset to.
+ *  @param r Asset to trim and add; must not be null (asserted).
+ *  @param reel_trim_start Number of frames to trim from the start of the asset.
+ *  @param reel_trim_end Number of frames to trim from the end of the asset.
+ *  @param from Time at which the trimmed asset starts in the DCP.
+ *  @param ffr Film video frame rate, used to convert the asset duration to a DCPTime.
+ */
+static void
+maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
+{
+	DCPOMATIC_ASSERT (r);
+	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
+	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
+	/* Only keep the asset if trimming left it with a positive duration */
+	if (r->actual_duration() > 0) {
+		a.push_back (
+			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
+		);
+	}
+}
+
+
+
+/** Find the reel assets of all DCP content in a playlist which is marked to be
+ *  referred to (rather than re-encoded), trimmed to the parts that will actually
+ *  be used.
+ *  @param film Film that the playlist belongs to; supplies the video frame rate.
+ *  @param playlist Playlist to search for referenced DCP content.
+ *  @return Details of all the DCP assets in the playlist that are marked to refer to.
+ */
+list<ReferencedReelAsset>
+get_referenced_reel_assets(shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
+{
+	list<ReferencedReelAsset> reel_assets;
+
+	for (auto content: playlist->content()) {
+		/* Only DCP content can be referenced */
+		auto dcp = dynamic_pointer_cast<DCPContent>(content);
+		if (!dcp) {
+			continue;
+		}
+
+		/* Skip DCPs that are not set to refer to any of their assets */
+		if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
+			continue;
+		}
+
+		scoped_ptr<DCPDecoder> decoder;
+		try {
+			decoder.reset(new DCPDecoder(film, dcp, false, false, shared_ptr<DCPDecoder>()));
+		} catch (...) {
+			/* NOTE(review): a failure to open one DCP silently returns whatever
+			   has been collected so far — confirm partial results are intended. */
+			return reel_assets;
+		}
+
+		auto const frame_rate = film->video_frame_rate();
+		DCPOMATIC_ASSERT (dcp->video_frame_rate());
+		/* We should only be referencing if the DCP rate is the same as the film rate */
+		DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);
+
+		Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
+		Frame const trim_end = dcp->trim_end().frames_round(frame_rate);
+
+		/* position in the asset from the start */
+		int64_t offset_from_start = 0;
+		/* position in the asset from the end */
+		int64_t offset_from_end = 0;
+		for (auto reel: decoder->reels()) {
+			/* Assume that main picture duration is the length of the reel */
+			offset_from_end += reel->main_picture()->actual_duration();
+		}
+
+		for (auto reel: decoder->reels()) {
+
+			/* Assume that main picture duration is the length of the reel */
+			int64_t const reel_duration = reel->main_picture()->actual_duration();
+
+			/* See doc/design/trim_reels.svg */
+			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
+			Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
+
+			auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
+			if (dcp->reference_video()) {
+				maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
+			}
+
+			if (dcp->reference_audio()) {
+				maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
+			}
+
+			if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
+				maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
+			}
+
+			if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
+				for (auto caption: reel->closed_captions()) {
+					maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
+				}
+			}
+
+			offset_from_start += reel_duration;
+			offset_from_end -= reel_duration;
+		}
+	}
+
+	return reel_assets;
+}
+
*/
+
#ifndef DCPOMATIC_REFERENCED_REEL_ASSET_H
#define DCPOMATIC_REFERENCED_REEL_ASSET_H
+
+#include "dcpomatic_time.h"
#include <dcp/reel_asset.h>
+
+class Film;
+class Playlist;
+
+
class ReferencedReelAsset
{
public:
dcpomatic::DCPTimePeriod period;
};
+
+std::list<ReferencedReelAsset> get_referenced_reel_assets(std::shared_ptr<const Film> film, std::shared_ptr<const Playlist> playlist);
+
+
#endif
ratio.cc
raw_image_proxy.cc
reel_writer.cc
+ referenced_reel_asset.cc
release_notes.cc
render_text.cc
resampler.cc
#include "lib/dcpomatic_time.h"
-#include "lib/player.h"
#include "lib/text_ring_buffers.h"
#include <dcp/warnings.h>
LIBDCP_DISABLE_WARNINGS
#include "lib/screen.h"
#include "lib/trusted_device.h"
#include "test.h"
+#include <boost/algorithm/string/predicate.hpp>
#include <boost/filesystem.hpp>
#include <boost/test/unit_test.hpp>
#include <iostream>
auto error = run(args, output);
BOOST_CHECK(!error);
- BOOST_REQUIRE_EQUAL(output.size(), 2);
+ BOOST_REQUIRE_EQUAL(output.size(), 2U);
BOOST_CHECK(boost::algorithm::starts_with(output[0], "Making KDMs valid from"));
BOOST_CHECK_EQUAL(output[1], "Wrote 2 KDM files to build/test");
auto error = run(args, output);
BOOST_CHECK(!error);
- BOOST_REQUIRE_EQUAL(output.size(), 2);
+ BOOST_REQUIRE_EQUAL(output.size(), 2U);
BOOST_CHECK(boost::algorithm::starts_with(output[0], "Making KDMs valid from"));
BOOST_CHECK_EQUAL(output[1], "Wrote 1 KDM files to build/test");
make_and_verify_dcp (vf, {dcp::VerificationNote::Code::EXTERNAL_ASSET});
/* Check that the selected reel assets are right */
- auto player = make_shared<Player>(vf, Image::Alignment::COMPACT);
- auto a = player->get_reel_assets();
+ auto a = get_referenced_reel_assets(vf, vf->playlist());
BOOST_REQUIRE_EQUAL (a.size(), 4U);
auto i = a.begin();
BOOST_CHECK (i->period == DCPTimePeriod(DCPTime(0), DCPTime(960000)));