Fix error when padding is needed in 3D encodes (#2476).
[dcpomatic.git] / src / lib / player.cc
index 74e7480f518166a96666d62b8c40f7a7e21714c6..abcefcab5c57fcae7afbc5db48589cd12088d343 100644 (file)
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
 
 */
 
+
 #include "atmos_decoder.h"
-#include "player.h"
-#include "film.h"
 #include "audio_buffers.h"
+#include "audio_content.h"
+#include "audio_decoder.h"
+#include "audio_processor.h"
+#include "compose.hpp"
+#include "config.h"
 #include "content_audio.h"
+#include "content_video.h"
 #include "dcp_content.h"
+#include "dcp_decoder.h"
 #include "dcpomatic_log.h"
-#include "job.h"
+#include "decoder.h"
+#include "decoder_factory.h"
+#include "ffmpeg_content.h"
+#include "film.h"
+#include "frame_rate_change.h"
 #include "image.h"
-#include "raw_image_proxy.h"
-#include "ratio.h"
+#include "image_decoder.h"
+#include "job.h"
 #include "log.h"
-#include "render_text.h"
-#include "config.h"
-#include "content_video.h"
+#include "maths_util.h"
+#include "piece.h"
+#include "player.h"
 #include "player_video.h"
-#include "frame_rate_change.h"
-#include "audio_processor.h"
 #include "playlist.h"
-#include "referenced_reel_asset.h"
-#include "decoder_factory.h"
-#include "decoder.h"
-#include "video_decoder.h"
-#include "audio_decoder.h"
+#include "ratio.h"
+#include "raw_image_proxy.h"
+#include "render_text.h"
+#include "shuffler.h"
 #include "text_content.h"
 #include "text_decoder.h"
-#include "ffmpeg_content.h"
-#include "audio_content.h"
-#include "dcp_decoder.h"
-#include "image_decoder.h"
-#include "compose.hpp"
-#include "shuffler.h"
 #include "timer.h"
+#include "video_decoder.h"
 #include <dcp/reel.h>
+#include <dcp/reel_closed_caption_asset.h>
+#include <dcp/reel_picture_asset.h>
 #include <dcp/reel_sound_asset.h>
 #include <dcp/reel_subtitle_asset.h>
-#include <dcp/reel_picture_asset.h>
-#include <dcp/reel_closed_caption_asset.h>
-#include <stdint.h>
 #include <algorithm>
 #include <iostream>
+#include <stdint.h>
 
 #include "i18n.h"
 
+
 using std::copy;
 using std::cout;
 using std::dynamic_pointer_cast;
 using std::list;
 using std::make_pair;
 using std::make_shared;
-using std::map;
 using std::max;
 using std::min;
@@ -83,109 +86,225 @@ using namespace boost::placeholders;
 #endif
 using namespace dcpomatic;
 
+
 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
 int const PlayerProperty::PLAYLIST = 701;
 int const PlayerProperty::FILM_CONTAINER = 702;
 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
 int const PlayerProperty::PLAYBACK_LENGTH = 705;
+int const PlayerProperty::IGNORE_VIDEO = 706;
+int const PlayerProperty::IGNORE_AUDIO = 707;
+int const PlayerProperty::IGNORE_TEXT = 708;
+int const PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES = 709;
+int const PlayerProperty::PLAY_REFERENCED = 710;
 
-Player::Player (shared_ptr<const Film> film)
+
+Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
        : _film (film)
        , _suspended (0)
+       , _ignore_video(false)
+       , _ignore_audio(false)
+       , _ignore_text(false)
+       , _always_burn_open_subtitles(false)
+       , _fast(false)
        , _tolerant (film->tolerant())
-       , _audio_merger (_film->audio_frame_rate())
+       , _play_referenced(false)
+       , _audio_merger(film->audio_frame_rate())
+       , _subtitle_alignment (subtitle_alignment)
 {
        construct ();
 }
 
+
 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
        : _film (film)
        , _playlist (playlist_)
        , _suspended (0)
+       , _ignore_video(false)
+       , _ignore_audio(false)
+       , _ignore_text(false)
+       , _always_burn_open_subtitles(false)
+       , _fast(false)
        , _tolerant (film->tolerant())
-       , _audio_merger (_film->audio_frame_rate())
+       , _play_referenced(false)
+       , _audio_merger(film->audio_frame_rate())
 {
        construct ();
 }
 
+
 void
 Player::construct ()
 {
-       _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
+       connect();
+       set_video_container_size(film->frame_size());
+
+       film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
+
+       setup_pieces ();
+       seek (DCPTime (), true);
+}
+
+
+void
+Player::connect()
+{
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
+       _film_changed_connection = film->Change.connect(bind(&Player::film_change, this, _1, _2));
        /* The butler must hear about this first, so, as we are proxying it through to the butler, we
           must connect at the front of the signal.
        */
        _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
        _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
-       set_video_container_size (_film->frame_size ());
-
-       film_change (ChangeType::DONE, Film::AUDIO_PROCESSOR);
-
-       setup_pieces ();
-       seek (DCPTime (), true);
 }
 
-Player::~Player ()
+
+Player::Player(Player&& other)
+       : _film(other._film)
+       , _playlist(std::move(other._playlist))
+       , _suspended(other._suspended.load())
+       , _pieces(std::move(other._pieces))
+       , _video_container_size(other._video_container_size.load())
+       , _black_image(std::move(other._black_image))
+       , _ignore_video(other._ignore_video.load())
+       , _ignore_audio(other._ignore_audio.load())
+       , _ignore_text(other._ignore_text.load())
+       , _always_burn_open_subtitles(other._always_burn_open_subtitles.load())
+       , _fast(other._fast.load())
+       , _tolerant(other._tolerant)
+       , _play_referenced(other._play_referenced.load())
+       , _next_video_time(other._next_video_time)
+       , _next_video_eyes(other._next_video_eyes)
+       , _next_audio_time(other._next_audio_time)
+       , _dcp_decode_reduction(other._dcp_decode_reduction.load())
+       , _last_video(std::move(other._last_video))
+       , _audio_merger(std::move(other._audio_merger))
+       , _shuffler(std::move(other._shuffler))
+       , _delay(std::move(other._delay))
+       , _stream_states(std::move(other._stream_states))
+       , _black(std::move(other._black))
+       , _silent(std::move(other._silent))
+       , _active_texts(std::move(other._active_texts))
+       , _audio_processor(std::move(other._audio_processor))
+       , _playback_length(other._playback_length.load())
+       , _subtitle_alignment(other._subtitle_alignment)
 {
-       delete _shuffler;
+       connect();
 }
 
-void
-Player::setup_pieces ()
+
+Player&
+Player::operator=(Player&& other)
 {
-       boost::mutex::scoped_lock lm (_mutex);
-       setup_pieces_unlocked ();
+       if (this == &other) {
+               return *this;
+       }
+
+       _film = std::move(other._film);
+       _playlist = std::move(other._playlist);
+       _suspended = other._suspended.load();
+       _pieces = std::move(other._pieces);
+       _video_container_size = other._video_container_size.load();
+       _black_image = std::move(other._black_image);
+       _ignore_video = other._ignore_video.load();
+       _ignore_audio = other._ignore_audio.load();
+       _ignore_text = other._ignore_text.load();
+       _always_burn_open_subtitles = other._always_burn_open_subtitles.load();
+       _fast = other._fast.load();
+       _tolerant = other._tolerant;
+       _play_referenced = other._play_referenced.load();
+       _next_video_time = other._next_video_time;
+       _next_video_eyes = other._next_video_eyes;
+       _next_audio_time = other._next_audio_time;
+       _dcp_decode_reduction = other._dcp_decode_reduction.load();
+       _last_video = std::move(other._last_video);
+       _audio_merger = std::move(other._audio_merger);
+       _shuffler = std::move(other._shuffler);
+       _delay = std::move(other._delay);
+       _stream_states = std::move(other._stream_states);
+       _black = std::move(other._black);
+       _silent = std::move(other._silent);
+       _active_texts = std::move(other._active_texts);
+       _audio_processor = std::move(other._audio_processor);
+       _playback_length = other._playback_length.load();
+       _subtitle_alignment = other._subtitle_alignment;
+
+       connect();
+
+       return *this;
 }
 
 
 bool
 have_video (shared_ptr<const Content> content)
 {
-       return static_cast<bool>(content->video) && content->video->use();
+       return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
 }
 
+
 bool
 have_audio (shared_ptr<const Content> content)
 {
-       return static_cast<bool>(content->audio);
+       return static_cast<bool>(content->audio) && content->can_be_played();
 }
 
+
 void
-Player::setup_pieces_unlocked ()
+Player::setup_pieces ()
 {
-       _playback_length = _playlist ? _playlist->length(_film) : _film->length();
+       boost::mutex::scoped_lock lm (_mutex);
 
        auto old_pieces = _pieces;
        _pieces.clear ();
 
-       delete _shuffler;
-       _shuffler = new Shuffler();
-       _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+       auto film = _film.lock();
+       if (!film) {
+               return;
+       }
+
+       _playback_length = _playlist ? _playlist->length(film) : film->length();
 
-       for (auto i: playlist()->content()) {
+       auto playlist_content = playlist()->content();
+       bool const have_threed = std::any_of(
+               playlist_content.begin(),
+               playlist_content.end(),
+               [](shared_ptr<const Content> c) {
+                       return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
+               });
 
-               if (!i->paths_valid ()) {
+
+       if (have_threed) {
+               _shuffler.reset(new Shuffler());
+               _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+       }
+
+       for (auto content: playlist()->content()) {
+
+               if (!content->paths_valid()) {
                        continue;
                }
 
-               if (_ignore_video && _ignore_audio && i->text.empty()) {
+               if (_ignore_video && _ignore_audio && content->text.empty()) {
                        /* We're only interested in text and this content has none */
                        continue;
                }
 
                shared_ptr<Decoder> old_decoder;
                for (auto j: old_pieces) {
-                       if (j->content == i) {
+                       if (j->content == content) {
                                old_decoder = j->decoder;
                                break;
                        }
                }
 
-               auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
+               auto decoder = decoder_factory(film, content, _fast, _tolerant, old_decoder);
                DCPOMATIC_ASSERT (decoder);
 
-               FrameRateChange frc (_film, i);
+               FrameRateChange frc(film, content);
 
                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
@@ -209,13 +328,13 @@ Player::setup_pieces_unlocked ()
                        }
                }
 
-               auto piece = make_shared<Piece>(i, decoder, frc);
+               auto piece = make_shared<Piece>(content, decoder, frc);
                _pieces.push_back (piece);
 
                if (decoder->video) {
-                       if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
+                       if (have_threed) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
-                               decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
+                               decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
                        } else {
                                decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
@@ -250,28 +369,65 @@ Player::setup_pieces_unlocked ()
        for (auto i: _pieces) {
                if (i->content->audio) {
                        for (auto j: i->content->audio->streams()) {
-                               _stream_states[j] = StreamState (i, i->content->position ());
+                               _stream_states[j] = StreamState(i);
                        }
                }
        }
 
-       _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
-       _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
+       auto ignore_overlap = [](shared_ptr<VideoContent> v) {
+               return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
+       };
+
+       for (auto piece = _pieces.begin(); piece != _pieces.end(); ++piece) {
+               if (ignore_overlap((*piece)->content->video)) {
+                       /* Look for content later in the content list with in-use video that overlaps this */
+                       auto const period = (*piece)->content->period(film);
+                       for (auto later_piece = std::next(piece); later_piece != _pieces.end(); ++later_piece) {
+                               if (ignore_overlap((*later_piece)->content->video)) {
+                                       if (auto overlap = (*later_piece)->content->period(film).overlap(period)) {
+                                               (*piece)->ignore_video.push_back(*overlap);
+                                       }
+                               }
+                       }
+               }
+       }
 
-       _last_video_time = DCPTime ();
-       _last_video_eyes = Eyes::BOTH;
-       _last_audio_time = DCPTime ();
+       for (auto piece = _pieces.begin(); piece != _pieces.end(); ++piece) {
+               if ((*piece)->content->atmos) {
+                       /* Look for content later in the content list with ATMOS that overlaps this */
+                       auto const period = (*piece)->content->period(film);
+                       for (auto later_piece = std::next(piece); later_piece != _pieces.end(); ++later_piece) {
+                               if ((*later_piece)->content->atmos) {
+                                       if (auto overlap = (*later_piece)->content->period(film).overlap(period)) {
+                                               (*piece)->ignore_atmos.push_back(*overlap);
+                                       }
+                               }
+                       }
+               }
+       }
+
+       _black = Empty(film, playlist(), bind(&have_video, _1), _playback_length);
+       _silent = Empty(film, playlist(), bind(&have_audio, _1), _playback_length);
+
+       _next_video_time = boost::none;
+       _next_video_eyes = Eyes::BOTH;
+       _next_audio_time = boost::none;
 }
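The two loops above record, against each piece, the periods during which some later piece in the content list also supplies video (or Atmos), so that the earlier piece's frames can be dropped rather than emitted twice. A toy sketch of the overlap test this relies on, assuming half-open periods (DCPTimePeriod provides an equivalent overlap()):

	#include <boost/optional.hpp>
	#include <algorithm>
	#include <cstdint>

	struct Period
	{
		int64_t from;
		int64_t to;

		/* Overlap of two half-open periods, or none if they are disjoint */
		boost::optional<Period> overlap(Period const& other) const
		{
			auto const a = std::max(from, other.from);
			auto const b = std::min(to, other.to);
			if (a >= b) {
				return boost::none;
			}
			return Period{a, b};
		}
	};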
 
+
 void
 Player::playlist_content_change (ChangeType type, int property, bool frequent)
 {
+       auto film = _film.lock();
+       if (!film) {
+               return;
+       }
+
        if (property == VideoContentProperty::CROP) {
                if (type == ChangeType::DONE) {
-                       dcp::Size const vcs = video_container_size();
                        boost::mutex::scoped_lock lm (_mutex);
-                       for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
-                               i->first->reset_metadata (_film, vcs);
+                       for (auto const& i: _delay) {
+                               i.first->reset_metadata(film, _video_container_size);
                        }
                }
        } else {
@@ -293,29 +449,27 @@ Player::playlist_content_change (ChangeType type, int property, bool frequent)
        Change (type, property, frequent);
 }
 
+
 void
 Player::set_video_container_size (dcp::Size s)
 {
-       Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+       ChangeSignaller<Player, int> cc(this, PlayerProperty::VIDEO_CONTAINER_SIZE);
 
-       {
-               boost::mutex::scoped_lock lm (_mutex);
-
-               if (s == _video_container_size) {
-                       lm.unlock ();
-                       Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
-                       return;
-               }
+       if (s == _video_container_size) {
+               cc.abort();
+               return;
+       }
 
-               _video_container_size = s;
+       _video_container_size = s;
 
-               _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
+       {
+               boost::mutex::scoped_lock lm(_black_image_mutex);
+               _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
                _black_image->make_black ();
        }
-
-       Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
 }
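set_video_container_size() now relies on ChangeSignaller rather than emitting PENDING/DONE/CANCELLED by hand as the old code did. A hedged, standalone sketch of the RAII idiom (the real ChangeSignaller may differ in detail): PENDING is signalled on construction, and on destruction either DONE, or CANCELLED if abort() was called — which is exactly the early-return path above.

	enum class ChangeTypeSketch { PENDING, DONE, CANCELLED };

	/* RAII change signaller: PENDING on construction; DONE on destruction,
	   or CANCELLED if abort() was called first.  Owner is assumed to provide
	   Change(type, property, frequent). */
	template <class Owner>
	class ChangeSignallerSketch
	{
	public:
		ChangeSignallerSketch(Owner* owner, int property)
			: _owner(owner)
			, _property(property)
		{
			_owner->Change(ChangeTypeSketch::PENDING, _property, false);
		}

		~ChangeSignallerSketch()
		{
			_owner->Change(_aborted ? ChangeTypeSketch::CANCELLED : ChangeTypeSketch::DONE, _property, false);
		}

		ChangeSignallerSketch(ChangeSignallerSketch const&) = delete;
		ChangeSignallerSketch& operator=(ChangeSignallerSketch const&) = delete;

		void abort()
		{
			_aborted = true;
		}

	private:
		Owner* _owner;
		int _property;
		bool _aborted = false;
	};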
 
+
 void
 Player::playlist_change (ChangeType type)
 {
@@ -325,6 +479,7 @@ Player::playlist_change (ChangeType type)
        Change (type, PlayerProperty::PLAYLIST, false);
 }
 
+
 void
 Player::film_change (ChangeType type, Film::Property p)
 {
@@ -333,9 +488,14 @@ Player::film_change (ChangeType type, Film::Property p)
           last time we were run.
        */
 
-       if (p == Film::CONTAINER) {
+       auto film = _film.lock();
+       if (!film) {
+               return;
+       }
+
+       if (p == Film::Property::CONTAINER) {
                Change (type, PlayerProperty::FILM_CONTAINER, false);
-       } else if (p == Film::VIDEO_FRAME_RATE) {
+       } else if (p == Film::Property::VIDEO_FRAME_RATE) {
                /* Pieces contain a FrameRateChange which contains the DCP frame rate,
                   so we need new pieces here.
                */
@@ -343,12 +503,12 @@ Player::film_change (ChangeType type, Film::Property p)
                        setup_pieces ();
                }
                Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
-       } else if (p == Film::AUDIO_PROCESSOR) {
-               if (type == ChangeType::DONE && _film->audio_processor ()) {
+       } else if (p == Film::Property::AUDIO_PROCESSOR) {
+               if (type == ChangeType::DONE && film->audio_processor ()) {
                        boost::mutex::scoped_lock lm (_mutex);
-                       _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
+                       _audio_processor = film->audio_processor()->clone(film->audio_frame_rate());
                }
-       } else if (p == Film::AUDIO_CHANNELS) {
+       } else if (p == Film::Property::AUDIO_CHANNELS) {
                if (type == ChangeType::DONE) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_merger.clear ();
@@ -356,9 +516,12 @@ Player::film_change (ChangeType type, Film::Property p)
        }
 }
 
+
 shared_ptr<PlayerVideo>
 Player::black_player_video_frame (Eyes eyes) const
 {
+       boost::mutex::scoped_lock lm(_black_image_mutex);
+
        return std::make_shared<PlayerVideo> (
                std::make_shared<const RawImageProxy>(_black_image),
                Crop(),
@@ -375,11 +538,15 @@ Player::black_player_video_frame (Eyes eyes) const
        );
 }
 
+
 Frame
 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
 {
-       DCPTime s = t - piece->content->position ();
-       s = min (piece->content->length_after_trim(_film), s);
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
+       auto s = t - piece->content->position ();
+       s = min (piece->content->length_after_trim(film), s);
        s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
 
        /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
@@ -392,6 +559,7 @@ Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
        return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
 }
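A worked example of the conversion above, assuming FrameRateChange::factor() returns 0.5 when every other content frame is skipped (as when 48 fps content goes into a 24 fps DCP):

	/* 48 fps content in a 24 fps DCP, one second in:
	     s.frames_floor(24)  ->  24 DCP frames
	     24 / 0.5            ->  content frame 48, i.e. one second of 48 fps video */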
 
+
 DCPTime
 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
 {
@@ -400,188 +568,123 @@ Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
        return d + piece->content->position();
 }
 
+
 Frame
 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
 {
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
        auto s = t - piece->content->position ();
-       s = min (piece->content->length_after_trim(_film), s);
+       s = min (piece->content->length_after_trim(film), s);
        /* See notes in dcp_to_content_video */
-       return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
+       return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(film->audio_frame_rate());
 }
 
+
 DCPTime
 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
 {
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
        /* See comment in dcp_to_content_video */
-       return DCPTime::from_frames (f, _film->audio_frame_rate())
+       return DCPTime::from_frames(f, film->audio_frame_rate())
                - DCPTime (piece->content->trim_start(), piece->frc)
                + piece->content->position();
 }
 
+
 ContentTime
 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
 {
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
        auto s = t - piece->content->position ();
-       s = min (piece->content->length_after_trim(_film), s);
+       s = min (piece->content->length_after_trim(film), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
 }
 
+
 DCPTime
 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
 {
        return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
 }
 
-vector<FontData>
+
+vector<shared_ptr<Font>>
 Player::get_subtitle_fonts ()
 {
        boost::mutex::scoped_lock lm (_mutex);
 
-       vector<FontData> fonts;
-       for (auto i: _pieces) {
-               /* XXX: things may go wrong if there are duplicate font IDs
-                  with different font files.
-               */
-               auto f = i->decoder->fonts ();
-               copy (f.begin(), f.end(), back_inserter(fonts));
+       vector<shared_ptr<Font>> fonts;
+       for (auto piece: _pieces) {
+               for (auto text: piece->content->text) {
+                       auto text_fonts = text->fonts();
+                       copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
+               }
        }
 
        return fonts;
 }
 
+
 /** Set this player never to produce any video data */
 void
 Player::set_ignore_video ()
 {
-       boost::mutex::scoped_lock lm (_mutex);
+       ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_VIDEO);
        _ignore_video = true;
-       setup_pieces_unlocked ();
+       setup_pieces();
 }
 
+
 void
 Player::set_ignore_audio ()
 {
-       boost::mutex::scoped_lock lm (_mutex);
+       ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_AUDIO);
        _ignore_audio = true;
-       setup_pieces_unlocked ();
+       setup_pieces();
 }
 
+
 void
 Player::set_ignore_text ()
 {
-       boost::mutex::scoped_lock lm (_mutex);
+       ChangeSignaller<Player, int> cc(this, PlayerProperty::IGNORE_TEXT);
        _ignore_text = true;
-       setup_pieces_unlocked ();
+       setup_pieces();
 }
 
+
 /** Set the player to always burn open texts into the image regardless of the content settings */
 void
 Player::set_always_burn_open_subtitles ()
 {
-       boost::mutex::scoped_lock lm (_mutex);
+       ChangeSignaller<Player, int> cc(this, PlayerProperty::ALWAYS_BURN_OPEN_SUBTITLES);
        _always_burn_open_subtitles = true;
 }
 
+
 /** Sets up the player to be faster, possibly at the expense of quality */
 void
 Player::set_fast ()
 {
-       boost::mutex::scoped_lock lm (_mutex);
        _fast = true;
-       setup_pieces_unlocked ();
+       setup_pieces();
 }
 
+
 void
 Player::set_play_referenced ()
 {
-       boost::mutex::scoped_lock lm (_mutex);
+       ChangeSignaller<Player, int> cc(this, PlayerProperty::PLAY_REFERENCED);
        _play_referenced = true;
-       setup_pieces_unlocked ();
-}
-
-static void
-maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
-{
-       DCPOMATIC_ASSERT (r);
-       r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
-       r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
-       if (r->actual_duration() > 0) {
-               a.push_back (
-                       ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
-                       );
-       }
+       setup_pieces();
 }
 
-list<ReferencedReelAsset>
-Player::get_reel_assets ()
-{
-       /* Does not require a lock on _mutex as it's only called from DCPEncoder */
-
-       list<ReferencedReelAsset> a;
-
-       for (auto i: playlist()->content()) {
-               shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
-               if (!j) {
-                       continue;
-               }
-
-               scoped_ptr<DCPDecoder> decoder;
-               try {
-                       decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
-               } catch (...) {
-                       return a;
-               }
-
-               DCPOMATIC_ASSERT (j->video_frame_rate ());
-               double const cfr = j->video_frame_rate().get();
-               Frame const trim_start = j->trim_start().frames_round (cfr);
-               Frame const trim_end = j->trim_end().frames_round (cfr);
-               int const ffr = _film->video_frame_rate ();
-
-               /* position in the asset from the start */
-               int64_t offset_from_start = 0;
-               /* position in the asset from the end */
-               int64_t offset_from_end = 0;
-               for (auto k: decoder->reels()) {
-                       /* Assume that main picture duration is the length of the reel */
-                       offset_from_end += k->main_picture()->actual_duration();
-               }
-
-               for (auto k: decoder->reels()) {
-
-                       /* Assume that main picture duration is the length of the reel */
-                       int64_t const reel_duration = k->main_picture()->actual_duration();
-
-                       /* See doc/design/trim_reels.svg */
-                       Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
-                       Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
-
-                       DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
-                       if (j->reference_video ()) {
-                               maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
-                       }
-
-                       if (j->reference_audio ()) {
-                               maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
-                       }
-
-                       if (j->reference_text (TextType::OPEN_SUBTITLE)) {
-                               maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
-                       }
-
-                       if (j->reference_text (TextType::CLOSED_CAPTION)) {
-                               for (auto l: k->closed_captions()) {
-                                       maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
-                               }
-                       }
-
-                       offset_from_start += reel_duration;
-                       offset_from_end -= reel_duration;
-               }
-       }
-
-       return a;
-}
 
 bool
 Player::pass ()
@@ -594,7 +697,9 @@ Player::pass ()
                return false;
        }
 
-       if (_playback_length == DCPTime()) {
+       auto film = _film.lock();
+
+       if (_playback_length.load() == DCPTime() || !film) {
                /* Special; just give one black frame */
                emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
                return true;
@@ -610,8 +715,8 @@ Player::pass ()
                        continue;
                }
 
-               DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
-               if (t > i->content->end(_film)) {
+               auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
+               if (t > i->content->end(film)) {
                        i->done = true;
                } else {
 
@@ -653,39 +758,44 @@ Player::pass ()
        {
                LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
-               shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
+               auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
-                       /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
+                       /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
                           to `hide' the fact that no audio was emitted during the referenced DCP (though
                           we need to behave as though it was).
                        */
-                       _last_audio_time = dcp->end (_film);
+                       _next_audio_time = dcp->end(film);
                }
                break;
        }
        case BLACK:
                LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
-               emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
+               if (film->three_d()) {
+                       emit_video(black_player_video_frame(Eyes::LEFT), _black.position());
+                       emit_video(black_player_video_frame(Eyes::RIGHT), _black.position());
+               } else {
+                       emit_video(black_player_video_frame(Eyes::BOTH), _black.position());
+               }
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
                LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
                DCPTimePeriod period (_silent.period_at_position());
-               if (_last_audio_time) {
+               if (_next_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
                           or after this silence.  Bodge the start time of the silence to fix it.
                           I think this is nothing to worry about since we will just add or
                           remove a little silence at the end of some content.
                        */
-                       int64_t const error = labs(period.from.get() - _last_audio_time->get());
+                       int64_t const error = labs(period.from.get() - _next_audio_time->get());
                        /* Let's not worry about less than a frame at 24fps */
                        int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
                        if (error >= too_much_error) {
-                               _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
+                               film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
                        }
                        DCPOMATIC_ASSERT (error < too_much_error);
-                       period.from = *_last_audio_time;
+                       period.from = *_next_audio_time;
                }
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
@@ -702,12 +812,45 @@ Player::pass ()
        /* Emit any audio that is ready */
 
        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
-          of our streams, or the position of the _silent.
+          of our streams, or the position of the _silent.  First, though, we consider only streams that are less
+          than ignore_streams_behind seconds behind the furthest ahead (we assume that a stream which has fallen
+          that far behind has finished).  This is so that we don't withhold audio indefinitely while awaiting data
+          from a stream that will never deliver it, causing bugs like #2101.
        */
-       DCPTime pull_to = _playback_length;
-       for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
-               if (!i->second.piece->done && i->second.last_push_end < pull_to) {
-                       pull_to = i->second.last_push_end;
+       constexpr int ignore_streams_behind = 5;
+
+       using state_pair = std::pair<AudioStreamPtr, StreamState>;
+
+       /* Find streams that have pushed */
+       std::vector<state_pair> have_pushed;
+       std::copy_if(_stream_states.begin(), _stream_states.end(), std::back_inserter(have_pushed), [](state_pair const& a) { return static_cast<bool>(a.second.last_push_end); });
+
+       /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
+       auto latest_last_push_end = std::max_element(
+               have_pushed.begin(),
+               have_pushed.end(),
+               [](state_pair const& a, state_pair const& b) { return a.second.last_push_end.get() < b.second.last_push_end.get(); }
+               );
+
+       if (latest_last_push_end != have_pushed.end()) {
+               LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end.get()));
+       }
+
+       /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
+       std::map<AudioStreamPtr, StreamState> alive_stream_states;
+       for (auto const& i: _stream_states) {
+               if (!i.second.last_push_end || (latest_last_push_end->second.last_push_end.get() - i.second.last_push_end.get()) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
+                       alive_stream_states.insert(i);
+               } else {
+                       LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
+               }
+       }
+
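A toy illustration of the rule above, using plain integers in place of DCPTime (all names and values hypothetical): streams that have never pushed are kept, and a stream is only dropped once it falls the equivalent of ignore_streams_behind behind the leader.

	#include <boost/optional.hpp>
	#include <algorithm>
	#include <cstdint>
	#include <map>
	#include <string>

	int main()
	{
		std::map<std::string, boost::optional<int64_t>> last_push_end = {
			{"dialogue", 4790}, {"music", 4800}, {"stalled", 10}, {"unstarted", boost::none}
		};

		int64_t const behind_limit = 480;  /* stand-in for 5 seconds of DCPTime */

		/* Find the leading stream's position */
		int64_t leader = 0;
		for (auto const& s: last_push_end) {
			if (s.second) {
				leader = std::max(leader, *s.second);
			}
		}

		/* Keeps dialogue, music and unstarted; drops stalled */
		std::map<std::string, boost::optional<int64_t>> alive;
		for (auto const& s: last_push_end) {
			if (!s.second || (leader - *s.second) < behind_limit) {
				alive.insert(s);
			}
		}
	}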
+       auto pull_to = _playback_length.load();
+       for (auto const& i: alive_stream_states) {
+               auto position = i.second.last_push_end.get_value_or(i.second.piece->content->position());
+               if (!i.second.piece->done && position < pull_to) {
+                       pull_to = position;
                }
        }
        if (!_silent.done() && _silent.position() < pull_to) {
@@ -715,43 +858,64 @@ Player::pass ()
        }
 
        LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
-       list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
-       for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
-               if (_last_audio_time && i->second < *_last_audio_time) {
+       auto audio = _audio_merger.pull (pull_to);
+       for (auto i = audio.begin(); i != audio.end(); ++i) {
+               if (_next_audio_time && i->second < *_next_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
-                       pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
+                       auto cut = discard_audio (i->first, i->second, *_next_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
-               } else if (_last_audio_time && i->second > *_last_audio_time) {
+               } else if (_next_audio_time && i->second > *_next_audio_time) {
                        /* There's a gap between this data and the last we emitted; fill with silence */
-                       fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
+                       fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
                }
 
                emit_audio (i->first, i->second);
        }
 
        if (done) {
-               _shuffler->flush ();
-               for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
-                       do_emit_video(i->first, i->second);
+               if (_shuffler) {
+                       _shuffler->flush ();
+               }
+               for (auto const& i: _delay) {
+                       do_emit_video(i.first, i.second);
+               }
+
+               /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
+                * However, if we have L and R video files, and one is shorter than the other,
+                * the fill code in ::video mostly takes care of filling in the gaps.
+                * Since that code fills at the point when it knows there is more video coming
+                * at time t (so it should fill any gap up to t), it can't do anything right at the
+                * end.  This is particularly bad news if the last frame emitted is a LEFT
+                * eye, as the MXF writer will complain about the 3D sequence being wrong.
+                * Here's a hack to work around that particular case.
+                */
+               if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
+                       do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
                }
        }
 
        return done;
 }
 
+
 /** @return Open subtitles for the frame at the given time, converted to images */
 optional<PositionImage>
 Player::open_subtitles_for_frame (DCPTime time) const
 {
+       auto film = _film.lock();
+       if (!film) {
+               return {};
+       }
+
        list<PositionImage> captions;
-       int const vfr = _film->video_frame_rate();
+       int const vfr = film->video_frame_rate();
 
        for (
                auto j:
-               _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
+               _active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {
 
                /* Bitmap subtitles */
@@ -761,37 +925,54 @@ Player::open_subtitles_for_frame (DCPTime time) const
                        }
 
                        /* i.image will already have been scaled to fit _video_container_size */
-                       dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
+                       dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
 
                        captions.push_back (
                                PositionImage (
                                        i.image,
                                        Position<int> (
-                                               lrint (_video_container_size.width * i.rectangle.x),
-                                               lrint (_video_container_size.height * i.rectangle.y)
+                                               lrint(_video_container_size.load().width * i.rectangle.x),
+                                               lrint(_video_container_size.load().height * i.rectangle.y)
                                                )
                                        )
                                );
                }
 
                /* String subtitles (rendered to an image) */
-               if (!j.string.empty ()) {
-                       list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
+               if (!j.string.empty()) {
+                       auto s = render_text(j.string, _video_container_size, time, vfr);
                        copy (s.begin(), s.end(), back_inserter (captions));
                }
        }
 
-       if (captions.empty ()) {
-               return optional<PositionImage> ();
+       if (captions.empty()) {
+               return {};
+       }
+
+       return merge (captions, _subtitle_alignment);
+}
+
+
+static
+Eyes
+increment_eyes (Eyes e)
+{
+       if (e == Eyes::LEFT) {
+               return Eyes::RIGHT;
        }
 
-       return merge (captions);
+       return Eyes::LEFT;
 }
 
+
 void
-Player::video (weak_ptr<Piece> wp, ContentVideo video)
+Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
 {
-       shared_ptr<Piece> piece = wp.lock ();
+       if (_suspended) {
+               return;
+       }
+
+       auto piece = weak_piece.lock ();
        if (!piece) {
                return;
        }
@@ -800,11 +981,38 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                return;
        }
 
-       FrameRateChange frc (_film, piece->content);
+       auto film = _film.lock();
+       if (!film) {
+               return;
+       }
+
+       FrameRateChange frc(film, piece->content);
        if (frc.skip && (video.frame % 2) == 1) {
                return;
        }
 
+       vector<Eyes> eyes_to_emit;
+
+       if (!film->three_d()) {
+               if (video.eyes == Eyes::RIGHT) {
+                       /* 2D film, 3D content: discard right */
+                       return;
+               } else if (video.eyes == Eyes::LEFT) {
+                       /* 2D film, 3D content: emit left as "both" */
+                       video.eyes = Eyes::BOTH;
+                       eyes_to_emit = { Eyes::BOTH };
+               }
+       } else {
+               if (video.eyes == Eyes::BOTH) {
+                       /* 3D film, 2D content: emit "both" for left and right */
+                       eyes_to_emit = { Eyes::LEFT, Eyes::RIGHT };
+               }
+       }
+
+       if (eyes_to_emit.empty()) {
+               eyes_to_emit = { video.eyes };
+       }
+
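Summarising the eye-mapping that the branches above implement:

	/* film 2D, content LEFT   ->  emit BOTH (the left eye serves as the 2D image)
	   film 2D, content RIGHT  ->  discard
	   film 2D, content BOTH   ->  emit BOTH
	   film 3D, content BOTH   ->  emit LEFT then RIGHT (same image for both eyes)
	   film 3D, content LEFT   ->  emit LEFT
	   film 3D, content RIGHT  ->  emit RIGHT */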
        /* Time of the first frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);
        LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
@@ -813,39 +1021,48 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
           if it's after the content's period here as in that case we still need to fill any gap between
           `now' and the end of the content's period.
        */
-       if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
+       if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
+               return;
+       }
+
+       auto ignore_video = std::find_if(
+               piece->ignore_video.begin(),
+               piece->ignore_video.end(),
+               [time](DCPTimePeriod period) { return period.contains(time); }
+               );
+       if (ignore_video != piece->ignore_video.end()) {
                return;
        }
 
        /* Fill gaps that we discover now that we have some video which needs to be emitted.
           This is where we need to fill to.
        */
-       DCPTime fill_to = min (time, piece->content->end(_film));
+       DCPTime fill_to = min(time, piece->content->end(film));
 
-       if (_last_video_time) {
-               DCPTime fill_from = max (*_last_video_time, piece->content->position());
+       if (_next_video_time) {
+               DCPTime fill_from = max (*_next_video_time, piece->content->position());
 
                /* Fill if we have more than half a frame to do */
                if ((fill_to - fill_from) > one_video_frame() / 2) {
-                       LastVideoMap::const_iterator last = _last_video.find (wp);
-                       if (_film->three_d()) {
-                               Eyes fill_to_eyes = video.eyes;
+                       auto last = _last_video.find (weak_piece);
+                       if (film->three_d()) {
+                               auto fill_to_eyes = eyes_to_emit[0];
                                if (fill_to_eyes == Eyes::BOTH) {
                                        fill_to_eyes = Eyes::LEFT;
                                }
-                               if (fill_to == piece->content->end(_film)) {
+                               if (fill_to == piece->content->end(film)) {
                                        /* Don't fill after the end of the content */
                                        fill_to_eyes = Eyes::LEFT;
                                }
-                               DCPTime j = fill_from;
-                               Eyes eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
+                               auto j = fill_from;
+                               auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
                                if (eyes == Eyes::BOTH) {
                                        eyes = Eyes::LEFT;
                                }
                                while (j < fill_to || eyes != fill_to_eyes) {
                                        if (last != _last_video.end()) {
                                                LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
-                                               shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
+                                               auto copy = last->second->shallow_copy();
                                                copy->set_eyes (eyes);
                                                emit_video (copy, j);
                                        } else {
@@ -869,160 +1086,207 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                }
        }
 
-       _last_video[wp].reset (
-               new PlayerVideo (
+       auto const content_video = piece->content->video;
+
+       for (auto eyes: eyes_to_emit) {
+               _last_video[weak_piece] = std::make_shared<PlayerVideo>(
                        video.image,
-                       piece->content->video->crop (),
-                       piece->content->video->fade (_film, video.frame),
-                       scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
+                       content_video->actual_crop(),
+                       content_video->fade(film, video.frame),
+                       scale_for_display(
+                               content_video->scaled_size(film->frame_size()),
+                               _video_container_size,
+                               film->frame_size(),
+                               content_video->pixel_quanta()
+                               ),
                        _video_container_size,
-                       video.eyes,
+                       eyes,
                        video.part,
-                       piece->content->video->colour_conversion(),
-                       piece->content->video->range(),
+                       content_video->colour_conversion(),
+                       content_video->range(),
                        piece->content,
                        video.frame,
                        false
-                       )
-               );
+                       );
 
-       DCPTime t = time;
-       for (int i = 0; i < frc.repeat; ++i) {
-               if (t < piece->content->end(_film)) {
-                       emit_video (_last_video[wp], t);
+               DCPTime t = time;
+               for (int i = 0; i < frc.repeat; ++i) {
+                       if (t < piece->content->end(film)) {
+                               emit_video (_last_video[weak_piece], t);
+                       }
+                       t += one_video_frame ();
                }
-               t += one_video_frame ();
        }
 }
 
+
 void
-Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
+Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
 {
+       if (_suspended) {
+               return;
+       }
+
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
 
-       shared_ptr<Piece> piece = wp.lock ();
+       auto piece = weak_piece.lock ();
        if (!piece) {
                return;
        }
 
-       shared_ptr<AudioContent> content = piece->content->audio;
+       auto film = _film.lock();
+       if (!film) {
+               return;
+       }
+
+       auto content = piece->content->audio;
        DCPOMATIC_ASSERT (content);
 
-       int const rfr = content->resampled_frame_rate (_film);
+       int const rfr = content->resampled_frame_rate(film);
 
        /* Compute time in the DCP */
-       DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
-       LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
+       auto time = resampled_audio_to_dcp (piece, content_audio.frame);
 
        /* And the end of this block in the DCP */
-       DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
+       auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
+       LOG_DEBUG_PLAYER("Received audio frame %1 covering %2 to %3 (%4)", content_audio.frame, to_string(time), to_string(end), piece->content->path(0).filename());
 
        /* Remove anything that comes before the start or after the end of the content */
        if (time < piece->content->position()) {
-               pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
+               auto cut = discard_audio (content_audio.audio, time, piece->content->position());
                if (!cut.first) {
                        /* This audio is entirely discarded */
                        return;
                }
                content_audio.audio = cut.first;
                time = cut.second;
-       } else if (time > piece->content->end(_film)) {
+       } else if (time > piece->content->end(film)) {
                /* Discard it all */
                return;
-       } else if (end > piece->content->end(_film)) {
-               Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
+       } else if (end > piece->content->end(film)) {
+               Frame const remaining_frames = DCPTime(piece->content->end(film) - time).frames_round(rfr);
                if (remaining_frames == 0) {
                        return;
                }
-               content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
+               content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
        }
 
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
 
-       /* Gain */
-
-       if (content->gain() != 0) {
-               shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
-               gain->apply_gain (content->gain ());
-               content_audio.audio = gain;
+       /* Gain and fade */
+
+       auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
+       if (content->gain() != 0 || !fade_coeffs.empty()) {
+               auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
+               if (!fade_coeffs.empty()) {
+                       /* Apply both fade and gain */
+                       DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
+                       auto const channels = gain_buffers->channels();
+                       auto const frames = fade_coeffs.size();
+                       auto data = gain_buffers->data();
+                       auto const gain = db_to_linear (content->gain());
+                       for (auto channel = 0; channel < channels; ++channel) {
+                               for (auto frame = 0U; frame < frames; ++frame) {
+                                       data[channel][frame] *= gain * fade_coeffs[frame];
+                               }
+                       }
+               } else {
+                       /* Just apply gain */
+                       gain_buffers->apply_gain (content->gain());
+               }
+               content_audio.audio = gain_buffers;
        }
 
        /* Remap */
 
-       content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
+       content_audio.audio = remap(content_audio.audio, film->audio_channels(), stream->mapping());
 
        /* Process */
 
        if (_audio_processor) {
-               content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
+               content_audio.audio = _audio_processor->run(content_audio.audio, film->audio_channels());
        }
 
        /* Push */
 
        _audio_merger.push (content_audio.audio, time);
        DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
-       _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
+       _stream_states[stream].last_push_end = time + DCPTime::from_frames(content_audio.audio->frames(), film->audio_frame_rate());
 }
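The fade-and-gain loop above folds the content's gain (in dB) into a single linear multiplier per sample via db_to_linear() from maths_util.h. A one-line sketch of the standard amplitude conversion it presumably implements:

	#include <cmath>

	/* Amplitude gain: +20 dB multiplies the signal by 10 */
	float
	db_to_linear_sketch(float db)
	{
		return std::pow(10.0f, db / 20.0f);
	}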
 
+
 void
-Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
+Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
 {
-       shared_ptr<Piece> piece = wp.lock ();
-       shared_ptr<const TextContent> text = wc.lock ();
-       if (!piece || !text) {
+       if (_suspended) {
                return;
        }
 
-       /* Apply content's subtitle offsets */
-       subtitle.sub.rectangle.x += text->x_offset ();
-       subtitle.sub.rectangle.y += text->y_offset ();
+       auto piece = weak_piece.lock ();
+       auto content = weak_content.lock ();
+       if (!piece || !content) {
+               return;
+       }
+
+       PlayerText ps;
+       for (auto& sub: subtitle.subs) {
+               /* Apply content's subtitle offsets */
+               sub.rectangle.x += content->x_offset ();
+               sub.rectangle.y += content->y_offset ();
+
+               /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
+               sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
+               sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
 
-       /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
-       subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
-       subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
+               /* Apply content's subtitle scale */
+               sub.rectangle.width *= content->x_scale ();
+               sub.rectangle.height *= content->y_scale ();
 
-       /* Apply content's subtitle scale */
-       subtitle.sub.rectangle.width *= text->x_scale ();
-       subtitle.sub.rectangle.height *= text->y_scale ();
+               auto image = sub.image;
 
-       PlayerText ps;
-       shared_ptr<Image> image = subtitle.sub.image;
+               /* We will scale the subtitle up to fit _video_container_size */
+               int const width = sub.rectangle.width * _video_container_size.load().width;
+               int const height = sub.rectangle.height * _video_container_size.load().height;
+               if (width == 0 || height == 0) {
+                       /* Skip a zero-sized subtitle without discarding the rest of the group */
+                       continue;
+               }
 
-       /* We will scale the subtitle up to fit _video_container_size */
-       int const width = subtitle.sub.rectangle.width * _video_container_size.width;
-       int const height = subtitle.sub.rectangle.height * _video_container_size.height;
-       if (width == 0 || height == 0) {
-               return;
+               dcp::Size scaled_size (width, height);
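+               /* Scale the bitmap up to scaled_size, requesting padded (aligned) rows on the result */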
+               ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
        }
 
-       dcp::Size scaled_size (width, height);
-       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
-       DCPTime from (content_time_to_dcp (piece, subtitle.from()));
-
-       _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
+       DCPTime from(content_time_to_dcp(piece, subtitle.from()));
+       _active_texts[content->type()].add_from(weak_content, ps, from);
 }
 
+
 void
-Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
+Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
 {
-       shared_ptr<Piece> piece = wp.lock ();
-       shared_ptr<const TextContent> text = wc.lock ();
-       if (!piece || !text) {
+       if (_suspended) {
+               return;
+       }
+
+       auto piece = weak_piece.lock ();
+       auto content = weak_content.lock ();
+       auto film = _film.lock();
+       if (!piece || !content || !film) {
                return;
        }
 
        PlayerText ps;
        DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
 
-       if (from > piece->content->end(_film)) {
+       if (from > piece->content->end(film)) {
                return;
        }
 
        for (auto s: subtitle.subs) {
-               s.set_h_position (s.h_position() + text->x_offset ());
-               s.set_v_position (s.v_position() + text->y_offset ());
-               float const xs = text->x_scale();
-               float const ys = text->y_scale();
+               s.set_h_position (s.h_position() + content->x_offset());
+               s.set_v_position (s.v_position() + content->y_offset());
+               float const xs = content->x_scale();
+               float const ys = content->y_scale();
                float size = s.size();
 
                /* Adjust size to express the common part of the scaling;
@@ -1039,44 +1303,50 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Co
                }
 
                s.set_in (dcp::Time(from.seconds(), 1000));
-               ps.string.push_back (StringText (s, text->outline_width()));
-               ps.add_fonts (text->fonts ());
+               ps.string.push_back (s);
        }
 
-       _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
+       _active_texts[content->type()].add_from(weak_content, ps, from);
 }
 
+
 void
-Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
+Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
 {
-       shared_ptr<const TextContent> text = wc.lock ();
-       if (!text) {
+       if (_suspended) {
                return;
        }
 
-       if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
+       auto content = weak_content.lock ();
+       if (!content) {
                return;
        }
 
-       shared_ptr<Piece> piece = wp.lock ();
-       if (!piece) {
+       if (!_active_texts[content->type()].have(weak_content)) {
+               return;
+       }
+
+       auto piece = weak_piece.lock ();
+       auto film = _film.lock();
+       if (!piece || !film) {
                return;
        }
 
        DCPTime const dcp_to = content_time_to_dcp (piece, to);
 
-       if (dcp_to > piece->content->end(_film)) {
+       if (dcp_to > piece->content->end(film)) {
                return;
        }
 
-       pair<PlayerText, DCPTime> from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
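+       /* Close the active text at dcp_to; add_to() gives back the text and the time it started */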
+       auto from = _active_texts[content->type()].add_to(weak_content, dcp_to);
 
-       bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
-       if (text->use() && !always && !text->burn()) {
-               Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
+       bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
+       if (content->use() && !always && !content->burn()) {
+               Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
        }
 }
 
+
 void
 Player::seek (DCPTime time, bool accurate)
 {
@@ -1088,6 +1358,11 @@ Player::seek (DCPTime time, bool accurate)
                return;
        }
 
+       auto film = _film.lock();
+       if (!film) {
+               return;
+       }
+
        if (_shuffler) {
                _shuffler->clear ();
        }
@@ -1099,9 +1374,7 @@ Player::seek (DCPTime time, bool accurate)
        }
 
        _audio_merger.clear ();
-       for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
-               _active_texts[i].clear ();
-       }
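+       /* Clear out all active texts, whatever their type */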
+       std::for_each(_active_texts.begin(), _active_texts.end(), [](ActiveText& a) { a.clear(); });
 
        for (auto i: _pieces) {
                if (time < i->content->position()) {
@@ -1112,7 +1385,7 @@ Player::seek (DCPTime time, bool accurate)
                        */
                        i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
                        i->done = false;
-               } else if (i->content->position() <= time && time < i->content->end(_film)) {
+               } else if (i->content->position() <= time && time < i->content->end(film)) {
                        /* During; seek to position */
                        i->decoder->seek (dcp_to_content_time (i, time), accurate);
                        i->done = false;
@@ -1123,50 +1396,67 @@ Player::seek (DCPTime time, bool accurate)
        }
 
        if (accurate) {
-               _last_video_time = time;
-               _last_video_eyes = Eyes::LEFT;
-               _last_audio_time = time;
+               _next_video_time = time;
+               _next_video_eyes = Eyes::LEFT;
+               _next_audio_time = time;
        } else {
-               _last_video_time = optional<DCPTime>();
-               _last_video_eyes = optional<Eyes>();
-               _last_audio_time = optional<DCPTime>();
+               _next_video_time = boost::none;
+               _next_video_eyes = boost::none;
+               _next_audio_time = boost::none;
        }
 
        _black.set_position (time);
        _silent.set_position (time);
 
        _last_video.clear ();
+
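+       /* Forget where each stream last pushed audio to; after a seek the old positions no longer apply */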
+       for (auto& state: _stream_states) {
+               state.second.last_push_end = boost::none;
+       }
 }
 
+
 void
 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 {
-       /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
+       if (!film->three_d()) {
+               if (pv->eyes() == Eyes::LEFT) {
+                       /* Use left-eye images for both eyes... */
+                       pv->set_eyes (Eyes::BOTH);
+               } else if (pv->eyes() == Eyes::RIGHT) {
+                       /* ...and discard the right */
+                       return;
+               }
+       }
+
+       /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
           player before the video that requires them.
        */
        _delay.push_back (make_pair (pv, time));
 
        if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-               _last_video_time = time + one_video_frame();
+               _next_video_time = time + one_video_frame();
        }
-       _last_video_eyes = increment_eyes (pv->eyes());
+       _next_video_eyes = increment_eyes (pv->eyes());
 
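+       /* Hold frames back until we have three in the queue */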
        if (_delay.size() < 3) {
                return;
        }
 
-       pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
+       auto to_do = _delay.front();
        _delay.pop_front();
        do_emit_video (to_do.first, to_do.second);
 }
 
+
 void
 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 {
        if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-               for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
-                       _active_texts[i].clear_before (time);
-               }
+               std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
        }
 
        auto subtitles = open_subtitles_for_frame (time);
@@ -1177,23 +1467,31 @@ Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
        Video (pv, time);
 }
 
+
 void
 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
 {
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
        /* Log if the assert below is about to fail */
-       if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
-               _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
+       if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
+               film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
        }
 
        /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
-       DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
-       Audio (data, time, _film->audio_frame_rate());
-       _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
+       DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
+       Audio(data, time, film->audio_frame_rate());
+       _next_audio_time = time + DCPTime::from_frames(data->frames(), film->audio_frame_rate());
 }
 
+
 void
 Player::fill_audio (DCPTimePeriod period)
 {
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
        if (period.from == period.to) {
                return;
        }
@@ -1203,9 +1501,9 @@ Player::fill_audio (DCPTimePeriod period)
        DCPTime t = period.from;
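+       /* Fill the period with silence in blocks of at most half a second */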
        while (t < period.to) {
                DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
-               Frame const samples = block.frames_round(_film->audio_frame_rate());
+               Frame const samples = block.frames_round(film->audio_frame_rate());
                if (samples) {
-                       shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
+                       auto silence = make_shared<AudioBuffers>(film->audio_channels(), samples);
                        silence->make_silent ();
                        emit_audio (silence, t);
                }
@@ -1213,48 +1511,51 @@ Player::fill_audio (DCPTimePeriod period)
        }
 }
 
+
 DCPTime
 Player::one_video_frame () const
 {
-       return DCPTime::from_frames (1, _film->video_frame_rate ());
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
+       return DCPTime::from_frames(1, film->video_frame_rate ());
 }
 
+
 pair<shared_ptr<AudioBuffers>, DCPTime>
 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
 {
-       DCPTime const discard_time = discard_to - time;
-       Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
-       Frame remaining_frames = audio->frames() - discard_frames;
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
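+       /* Drop the frames before discard_to and return what remains, with its new start time */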
+       auto const discard_time = discard_to - time;
+       auto const discard_frames = discard_time.frames_round(film->audio_frame_rate());
+       auto remaining_frames = audio->frames() - discard_frames;
        if (remaining_frames <= 0) {
                return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
        }
-       shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
+       auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
        return make_pair(cut, time + discard_time);
 }
 
+
 void
 Player::set_dcp_decode_reduction (optional<int> reduction)
 {
-       Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
+       ChangeSignaller<Player, int> cc(this, PlayerProperty::DCP_DECODE_REDUCTION);
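+       /* cc emits PENDING now and DONE on destruction; abort() turns that DONE into CANCELLED */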
 
-       {
-               boost::mutex::scoped_lock lm (_mutex);
-
-               if (reduction == _dcp_decode_reduction) {
-                       lm.unlock ();
-                       Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
-                       return;
-               }
-
-               _dcp_decode_reduction = reduction;
-               setup_pieces_unlocked ();
+       if (reduction == _dcp_decode_reduction.load()) {
+               cc.abort();
+               return;
        }
 
-       Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
+       _dcp_decode_reduction = reduction;
+       setup_pieces();
 }
 
+
 optional<DCPTime>
-Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
+Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
 {
        boost::mutex::scoped_lock lm (_mutex);
 
@@ -1269,16 +1570,70 @@ Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
 }
 
 
+optional<ContentTime>
+Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
+{
+       boost::mutex::scoped_lock lm (_mutex);
+
+       for (auto i: _pieces) {
+               if (i->content == content) {
+                       return dcp_to_content_time (i, t);
+               }
+       }
+
+       /* We couldn't find this content; perhaps things are being changed over */
+       return {};
+}
+
+
 shared_ptr<const Playlist>
 Player::playlist () const
 {
-       return _playlist ? _playlist : _film->playlist();
+       auto film = _film.lock();
+       if (!film) {
+               return {};
+       }
+
+       return _playlist ? _playlist : film->playlist();
+}
+
+
+void
+Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
+{
+       if (_suspended) {
+               return;
+       }
+
+       auto film = _film.lock();
+       DCPOMATIC_ASSERT(film);
+
+       auto piece = weak_piece.lock ();
+       DCPOMATIC_ASSERT (piece);
+
+       auto const vfr = film->video_frame_rate();
+
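+       /* Find the DCP time of this Atmos frame, allowing for the content's start trim */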
+       DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
+       if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(film))) {
+               return;
+       }
+
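+       /* Drop Atmos data that falls within any period we have been asked to ignore */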
+       auto ignore_atmos = std::find_if(
+               piece->ignore_atmos.begin(),
+               piece->ignore_atmos.end(),
+               [dcp_time](DCPTimePeriod period) { return period.contains(dcp_time); }
+               );
+       if (ignore_atmos != piece->ignore_atmos.end()) {
+               return;
+       }
+
+       Atmos (data.data, dcp_time, data.metadata);
 }
 
 
 void
-Player::atmos (weak_ptr<Piece>, ContentAtmos data)
+Player::signal_change(ChangeType type, int property)
 {
-       Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
+       Change(type, property, false);
 }