WIP: more
[dcpomatic.git] / src / lib / player.cc
index 07447d531db7036ed61ef1ef414d18e9cc1f7685..7e915296ed1dfc06dc16b0535557671d85cebe53 100644 (file)
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
 
 */
 
-#include "player.h"
-#include "film.h"
+
+#include "atmos_decoder.h"
 #include "audio_buffers.h"
-#include "content_audio.h"
+#include "audio_content.h"
+#include "audio_decoder.h"
+#include "audio_processor.h"
+#include "compose.hpp"
+#include "config.h"
 #include "dcp_content.h"
-#include "job.h"
+#include "dcp_decoder.h"
+#include "dcpomatic_log.h"
+#include "decoder.h"
+#include "decoder_factory.h"
+#include "ffmpeg_content.h"
+#include "film.h"
+#include "frame_rate_change.h"
 #include "image.h"
-#include "raw_image_proxy.h"
-#include "ratio.h"
+#include "image_decoder.h"
+#include "job.h"
 #include "log.h"
-#include "render_text.h"
-#include "config.h"
-#include "content_video.h"
+#include "piece_video.h"
+#include "player.h"
 #include "player_video.h"
-#include "frame_rate_change.h"
-#include "audio_processor.h"
 #include "playlist.h"
+#include "ratio.h"
+#include "raw_image_proxy.h"
 #include "referenced_reel_asset.h"
-#include "decoder_factory.h"
-#include "decoder.h"
-#include "video_decoder.h"
-#include "audio_decoder.h"
+#include "render_text.h"
+#include "shuffler.h"
 #include "text_content.h"
 #include "text_decoder.h"
-#include "ffmpeg_content.h"
-#include "audio_content.h"
-#include "dcp_decoder.h"
-#include "image_decoder.h"
-#include "compose.hpp"
-#include "shuffler.h"
 #include "timer.h"
+#include "video_decoder.h"
 #include <dcp/reel.h>
 #include <dcp/reel_sound_asset.h>
 #include <dcp/reel_subtitle_asset.h>
 #include <dcp/reel_picture_asset.h>
 #include <dcp/reel_closed_caption_asset.h>
-#include <boost/foreach.hpp>
 #include <stdint.h>
 #include <algorithm>
 #include <iostream>
 
 #include "i18n.h"
 
-using std::list;
+
+using std::copy;
 using std::cout;
-using std::min;
+using std::dynamic_pointer_cast;
+using std::list;
+using std::make_pair;
+using std::make_shared;
+using std::map;
 using std::max;
 using std::min;
-using std::vector;
 using std::pair;
-using std::map;
-using std::make_pair;
-using std::copy;
-using boost::shared_ptr;
-using boost::weak_ptr;
-using boost::dynamic_pointer_cast;
+using std::shared_ptr;
+using std::vector;
+using std::weak_ptr;
+using std::unique_ptr;
 using boost::optional;
-using boost::scoped_ptr;
+#if BOOST_VERSION >= 106100
+using namespace boost::placeholders;
+#endif
 using namespace dcpomatic;
 
+
 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
 int const PlayerProperty::PLAYLIST = 701;
 int const PlayerProperty::FILM_CONTAINER = 702;
@@ -85,39 +93,49 @@ int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
 int const PlayerProperty::PLAYBACK_LENGTH = 705;
 
-Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist, DCPTime playback_length)
+
+/** About 0.01dB */
+#define AUDIO_GAIN_EPSILON 0.001
+
+
+Player::Player (shared_ptr<const Film> film)
        : _film (film)
-       , _playlist (playlist)
        , _suspended (0)
-       , _ignore_video (false)
-       , _ignore_audio (false)
-       , _ignore_text (false)
-       , _always_burn_open_subtitles (false)
-       , _fast (false)
        , _tolerant (film->tolerant())
-       , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
-       , _shuffler (0)
-       , _playback_length (playback_length)
+{
+       construct ();
+}
+
+
+Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
+       : _film (film)
+       , _playlist (playlist_)
+       , _suspended (0)
+       , _tolerant (film->tolerant())
+       , _audio_merger (_film->audio_frame_rate())
+{
+       construct ();
+}
+
+
+void
+Player::construct ()
 {
        _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
        /* The butler must hear about this first, so since we are proxying this through to the butler we must
           be first.
        */
-       _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
-       _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
+       _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
+       _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
        set_video_container_size (_film->frame_size ());
 
-       film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
+       film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
 
        setup_pieces ();
        seek (DCPTime (), true);
 }
 
-Player::~Player ()
-{
-       delete _shuffler;
-}
 
 void
 Player::setup_pieces ()
@@ -127,38 +145,92 @@ Player::setup_pieces ()
 }
 
 
-void
-Player::set_playback_length (DCPTime len)
-{
-       Change (CHANGE_TYPE_PENDING, PlayerProperty::PLAYBACK_LENGTH, false);
-       _playback_length = len;
-       Change (CHANGE_TYPE_DONE, PlayerProperty::PLAYBACK_LENGTH, false);
-       setup_pieces ();
-}
-
 bool
 have_video (shared_ptr<const Content> content)
 {
-       return static_cast<bool>(content->video);
+       return static_cast<bool>(content->video) && content->video->use();
 }
 
+
 bool
 have_audio (shared_ptr<const Content> content)
 {
        return static_cast<bool>(content->audio);
 }
 
+
+vector<vector<shared_ptr<Content>>>
+collect (shared_ptr<const Film> film, ContentList content)
+{
+       vector<shared_ptr<Content>> ungrouped;
+       vector<vector<shared_ptr<Content>>> grouped;
+
+       auto same_settings = [](shared_ptr<const Film> film, shared_ptr<const AudioContent> a, shared_ptr<const AudioContent> b) {
+
+               auto a_streams = a->streams();
+               auto b_streams = b->streams();
+
+               if (a_streams.size() != b_streams.size()) {
+                       return false;
+               }
+
+               for (size_t i = 0; i < a_streams.size(); ++i) {
+                       auto a_stream = a_streams[i];
+                       auto b_stream = b_streams[i];
+                       if (
+                               !a_stream->mapping().equals(b_stream->mapping(), AUDIO_GAIN_EPSILON) ||
+                               a_stream->frame_rate() != b_stream->frame_rate() ||
+                               a_stream->channels() != b_stream->channels()) {
+                               return false;
+                       }
+               }
+
+               return (
+                       fabs(a->gain() - b->gain()) < AUDIO_GAIN_EPSILON &&
+                       a->delay() == b->delay() &&
+                       a->language() == b->language() &&
+                       a->resampled_frame_rate(film) == b->resampled_frame_rate(film) &&
+                       a->channel_names() == b->channel_names()
+                      );
+       };
+
+       for (auto i: content) {
+               if (i->video || !i->audio || !i->text.empty()) {
+                       ungrouped.push_back (i);
+               } else {
+                       bool done = false;
+                       for (auto& g: grouped) {
+                               if (same_settings(film, g.front()->audio, i->audio) && i->position() == g.back()->end(film)) {
+                                       g.push_back (i);
+                                       done = true;
+                               }
+                       }
+                       if (!done) {
+                               grouped.push_back ({i});
+                       }
+               }
+       }
+
+       for (auto i: ungrouped) {
+               grouped.push_back({i});
+       }
+
+       return grouped;
+}
+
+
 void
 Player::setup_pieces_unlocked ()
 {
-       list<shared_ptr<Piece> > old_pieces = _pieces;
+       _playback_length = _playlist ? _playlist->length(_film) : _film->length();
+
+       auto old_pieces = _pieces;
        _pieces.clear ();
 
-       delete _shuffler;
-       _shuffler = new Shuffler();
+       _shuffler.reset (new Shuffler());
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
 
-       BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+       for (auto i: playlist()->content()) {
 
                if (!i->paths_valid ()) {
                        continue;
@@ -170,20 +242,18 @@ Player::setup_pieces_unlocked ()
                }
 
                shared_ptr<Decoder> old_decoder;
-               BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
-                       if (j->content == i) {
-                               old_decoder = j->decoder;
+               for (auto j: old_pieces) {
+                       auto decoder = j->decoder_for(i);
+                       if (decoder) {
+                               old_decoder = decoder;
                                break;
                        }
                }
 
-               shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
-               FrameRateChange frc (_film, i);
+               auto decoder = decoder_factory (_film, i, _tolerant, old_decoder);
+               DCPOMATIC_ASSERT (decoder);
 
-               if (!decoder) {
-                       /* Not something that we can decode; e.g. Atmos content */
-                       continue;
-               }
+               FrameRateChange frc (_film, i);
 
                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
@@ -194,12 +264,12 @@ Player::setup_pieces_unlocked ()
                }
 
                if (_ignore_text) {
-                       BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
+                       for (auto i: decoder->text) {
                                i->set_ignore (true);
                        }
                }
 
-               shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
+               auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp) {
                        dcp->set_decode_referenced (_play_referenced);
                        if (_play_referenced) {
@@ -207,87 +277,111 @@ Player::setup_pieces_unlocked ()
                        }
                }
 
-               shared_ptr<Piece> piece (new Piece (i, decoder, frc));
+               vector<Piece::Pair> content = {
+                       Piece::Pair(i, decoder)
+               };
+
+               auto piece = make_shared<Piece>(_film, content, frc, _fast);
                _pieces.push_back (piece);
 
-               if (decoder->video) {
-                       if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
+               if (i->video) {
+                       if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
-                               decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
+                               piece->Video.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
                        } else {
-                               decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
+                               piece->Video.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
                }
 
-               if (decoder->audio) {
-                       decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
+               if (i->audio) {
+                       piece->Audio.connect (bind(&Player::audio, this, weak_ptr<Piece>(piece), _1));
                }
 
-               list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
-
-               while (j != decoder->text.end()) {
-                       (*j)->BitmapStart.connect (
-                               bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
-                               );
-                       (*j)->PlainStart.connect (
-                               bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
-                               );
-                       (*j)->Stop.connect (
-                               bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
-                               );
-
-                       ++j;
-               }
+               piece->BitmapTextStart.connect (bind(&Player::bitmap_text_start, this, piece, _1));
+               piece->StringTextStart.connect (bind(&Player::string_text_start, this, piece, _1));
+               piece->TextStop.connect (bind(&Player::subtitle_stop, this, piece, _1));
+               piece->Atmos.connect (bind(&Player::atmos, this, piece, _1));
        }
 
-       _stream_states.clear ();
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-               if (i->content->audio) {
-                       BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
-                               _stream_states[j] = StreamState (i, i->content->position ());
+       for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
+               if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
+                       /* Look for content later in the content list with in-use video that overlaps this */
+                       for (auto j = std::next(i); j != _pieces.end(); ++j) {
+                               if ((*j)->use_video()) {
+                                       (*i)->set_ignore_video ((*j)->period().overlap((*i)->period()));
+                               }
                        }
                }
        }
 
-       _black = Empty (_film, _playlist, bind(&have_video, _1), _playback_length);
-       _silent = Empty (_film, _playlist, bind(&have_audio, _1), _playback_length);
+       _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
+       _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
+
+       _last_video_time = boost::optional<dcpomatic::DCPTime>();
+       _last_video_eyes = Eyes::BOTH;
+       _last_audio_time = boost::optional<dcpomatic::DCPTime>();
+}
+
+
+optional<DCPTime>
+Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
+{
+       boost::mutex::scoped_lock lm (_mutex);
+
+       for (auto i: _pieces) {
+               auto dcp = i->content_time_to_dcp(content, t);
+               if (dcp) {
+                       return *dcp;
+               }
+       }
 
-       _last_video_time = DCPTime ();
-       _last_video_eyes = EYES_BOTH;
-       _last_audio_time = DCPTime ();
+       /* We couldn't find this content; perhaps things are being changed over */
+       return {};
 }
 
+
 void
 Player::playlist_content_change (ChangeType type, int property, bool frequent)
 {
-       if (type == CHANGE_TYPE_PENDING) {
-               /* The player content is probably about to change, so we can't carry on
-                  until that has happened and we've rebuilt our pieces.  Stop pass()
-                  and seek() from working until then.
-               */
-               ++_suspended;
-       } else if (type == CHANGE_TYPE_DONE) {
-               /* A change in our content has gone through.  Re-build our pieces. */
-               setup_pieces ();
-               --_suspended;
-       } else if (type == CHANGE_TYPE_CANCELLED) {
-               --_suspended;
+       if (property == VideoContentProperty::CROP) {
+               if (type == ChangeType::DONE) {
+                       auto const vcs = video_container_size();
+                       boost::mutex::scoped_lock lm (_mutex);
+                       for (auto const& i: _delay) {
+                               i.first->reset_metadata (_film, vcs);
+                       }
+               }
+       } else {
+               if (type == ChangeType::PENDING) {
+                       /* The player content is probably about to change, so we can't carry on
+                          until that has happened and we've rebuilt our pieces.  Stop pass()
+                          and seek() from working until then.
+                       */
+                       ++_suspended;
+               } else if (type == ChangeType::DONE) {
+                       /* A change in our content has gone through.  Re-build our pieces. */
+                       setup_pieces ();
+                       --_suspended;
+               } else if (type == ChangeType::CANCELLED) {
+                       --_suspended;
+               }
        }
 
        Change (type, property, frequent);
 }
 
+
 void
 Player::set_video_container_size (dcp::Size s)
 {
-       Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+       Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
 
        {
                boost::mutex::scoped_lock lm (_mutex);
 
                if (s == _video_container_size) {
                        lm.unlock ();
-                       Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+                       Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
                        return;
                }
 
@@ -297,18 +391,20 @@ Player::set_video_container_size (dcp::Size s)
                _black_image->make_black ();
        }
 
-       Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+       Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
 }
 
+
 void
 Player::playlist_change (ChangeType type)
 {
-       if (type == CHANGE_TYPE_DONE) {
+       if (type == ChangeType::DONE) {
                setup_pieces ();
        }
        Change (type, PlayerProperty::PLAYLIST, false);
 }
 
+
 void
 Player::film_change (ChangeType type, Film::Property p)
 {
@@ -317,126 +413,68 @@ Player::film_change (ChangeType type, Film::Property p)
           last time we were run.
        */
 
-       if (p == Film::CONTAINER) {
+       if (p == Film::Property::CONTAINER) {
                Change (type, PlayerProperty::FILM_CONTAINER, false);
-       } else if (p == Film::VIDEO_FRAME_RATE) {
+       } else if (p == Film::Property::VIDEO_FRAME_RATE) {
                /* Pieces contain a FrameRateChange which contains the DCP frame rate,
                   so we need new pieces here.
                */
-               if (type == CHANGE_TYPE_DONE) {
+               if (type == ChangeType::DONE) {
                        setup_pieces ();
                }
                Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
-       } else if (p == Film::AUDIO_PROCESSOR) {
-               if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
+       } else if (p == Film::Property::AUDIO_PROCESSOR) {
+               if (type == ChangeType::DONE && _film->audio_processor ()) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
                }
-       } else if (p == Film::AUDIO_CHANNELS) {
-               if (type == CHANGE_TYPE_DONE) {
+       } else if (p == Film::Property::AUDIO_CHANNELS) {
+               if (type == ChangeType::DONE) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_merger.clear ();
                }
        }
 }
 
+
 shared_ptr<PlayerVideo>
 Player::black_player_video_frame (Eyes eyes) const
 {
-       return shared_ptr<PlayerVideo> (
-               new PlayerVideo (
-                       shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
-                       Crop (),
-                       optional<double> (),
-                       _video_container_size,
-                       _video_container_size,
-                       eyes,
-                       PART_WHOLE,
-                       PresetColourConversion::all().front().conversion,
-                       VIDEO_RANGE_FULL,
-                       boost::weak_ptr<Content>(),
-                       boost::optional<Frame>(),
-                       false
-               )
+       return std::make_shared<PlayerVideo> (
+               std::make_shared<const RawImageProxy>(_black_image),
+               Crop(),
+               optional<double>(),
+               _video_container_size,
+               _video_container_size,
+               eyes,
+               Part::WHOLE,
+               PresetColourConversion::all().front().conversion,
+               VideoRange::FULL,
+               std::weak_ptr<Content>(),
+               boost::optional<Frame>(),
+               false
        );
 }
 
-Frame
-Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
-{
-       DCPTime s = t - piece->content->position ();
-       s = min (piece->content->length_after_trim(_film), s);
-       s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
-
-       /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
-          then convert that ContentTime to frames at the content's rate.  However this fails for
-          situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
-          enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
-
-          Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
-       */
-       return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
-}
-
-DCPTime
-Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
-{
-       /* See comment in dcp_to_content_video */
-       DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
-       return d + piece->content->position();
-}
-
-Frame
-Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
-{
-       DCPTime s = t - piece->content->position ();
-       s = min (piece->content->length_after_trim(_film), s);
-       /* See notes in dcp_to_content_video */
-       return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
-}
-
-DCPTime
-Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
-{
-       /* See comment in dcp_to_content_video */
-       return DCPTime::from_frames (f, _film->audio_frame_rate())
-               - DCPTime (piece->content->trim_start(), piece->frc)
-               + piece->content->position();
-}
-
-ContentTime
-Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
-{
-       DCPTime s = t - piece->content->position ();
-       s = min (piece->content->length_after_trim(_film), s);
-       return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
-}
-
-DCPTime
-Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
-{
-       return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
-}
 
-list<shared_ptr<Font> >
+vector<FontData>
 Player::get_subtitle_fonts ()
 {
        boost::mutex::scoped_lock lm (_mutex);
 
-       list<shared_ptr<Font> > fonts;
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-               BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
-                       /* XXX: things may go wrong if there are duplicate font IDs
-                          with different font files.
-                       */
-                       list<shared_ptr<Font> > f = j->fonts ();
-                       copy (f.begin(), f.end(), back_inserter (fonts));
-               }
+       vector<FontData> fonts;
+       for (auto i: _pieces) {
+               /* XXX: things may go wrong if there are duplicate font IDs
+                  with different font files.
+               */
+               auto f = i->fonts ();
+               copy (f.begin(), f.end(), back_inserter(fonts));
        }
 
        return fonts;
 }
 
+
 /** Set this player never to produce any video data */
 void
 Player::set_ignore_video ()
@@ -446,6 +484,7 @@ Player::set_ignore_video ()
        setup_pieces_unlocked ();
 }
 
+
 void
 Player::set_ignore_audio ()
 {
@@ -454,6 +493,7 @@ Player::set_ignore_audio ()
        setup_pieces_unlocked ();
 }
 
+
 void
 Player::set_ignore_text ()
 {
@@ -462,6 +502,7 @@ Player::set_ignore_text ()
        setup_pieces_unlocked ();
 }
 
+
 /** Set the player to always burn open texts into the image regardless of the content settings */
 void
 Player::set_always_burn_open_subtitles ()
@@ -470,6 +511,7 @@ Player::set_always_burn_open_subtitles ()
        _always_burn_open_subtitles = true;
 }
 
+
 /** Sets up the player to be faster, possibly at the expense of quality */
 void
 Player::set_fast ()
@@ -479,6 +521,7 @@ Player::set_fast ()
        setup_pieces_unlocked ();
 }
 
+
 void
 Player::set_play_referenced ()
 {
@@ -487,6 +530,7 @@ Player::set_play_referenced ()
        setup_pieces_unlocked ();
 }
 
+
 static void
 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
 {
@@ -500,6 +544,7 @@ maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Fra
        }
 }
 
+
 list<ReferencedReelAsset>
 Player::get_reel_assets ()
 {
@@ -507,15 +552,15 @@ Player::get_reel_assets ()
 
        list<ReferencedReelAsset> a;
 
-       BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
-               shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
+       for (auto i: playlist()->content()) {
+               auto j = dynamic_pointer_cast<DCPContent> (i);
                if (!j) {
                        continue;
                }
 
-               scoped_ptr<DCPDecoder> decoder;
+               unique_ptr<DCPDecoder> decoder;
                try {
-                       decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
+                       decoder.reset (new DCPDecoder(_film, j, false, shared_ptr<DCPDecoder>()));
                } catch (...) {
                        return a;
                }
@@ -530,12 +575,12 @@ Player::get_reel_assets ()
                int64_t offset_from_start = 0;
                /* position in the asset from the end */
                int64_t offset_from_end = 0;
-               BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
+               for (auto k: decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += k->main_picture()->actual_duration();
                }
 
-               BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
+               for (auto k: decoder->reels()) {
 
                        /* Assume that main picture duration is the length of the reel */
                        int64_t const reel_duration = k->main_picture()->actual_duration();
@@ -544,7 +589,7 @@ Player::get_reel_assets ()
                        Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
                        Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
 
-                       DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
+                       auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
                        if (j->reference_video ()) {
                                maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
                        }
@@ -553,12 +598,12 @@ Player::get_reel_assets ()
                                maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
                        }
 
-                       if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
+                       if (j->reference_text (TextType::OPEN_SUBTITLE)) {
                                maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
                        }
 
-                       if (j->reference_text (TEXT_CLOSED_CAPTION)) {
-                               BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
+                       if (j->reference_text (TextType::CLOSED_CAPTION)) {
+                               for (auto l: k->closed_captions()) {
                                        maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
                                }
                        }
@@ -571,6 +616,7 @@ Player::get_reel_assets ()
        return a;
 }
 
+
 bool
 Player::pass ()
 {
@@ -578,12 +624,13 @@ Player::pass ()
 
        if (_suspended) {
                /* We can't pass in this state */
+               LOG_DEBUG_PLAYER_NC ("Player is suspended");
                return false;
        }
 
        if (_playback_length == DCPTime()) {
                /* Special; just give one black frame */
-               emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
+               emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
                return true;
        }
 
@@ -592,23 +639,11 @@ Player::pass ()
        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-               if (i->done) {
-                       continue;
-               }
-
-               DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
-               if (t > i->content->end(_film)) {
-                       i->done = true;
-               } else {
-
-                       /* Given two choices at the same time, pick the one with texts so we see it before
-                          the video.
-                       */
-                       if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
-                               earliest_time = t;
-                               earliest_content = i;
-                       }
+       for (auto i: _pieces) {
+               auto time = i->decoder_before(earliest_time);
+               if (time) {
+                       earliest_time = *time;
+                       earliest_content = i;
                }
        }
 
@@ -638,23 +673,24 @@ Player::pass ()
        switch (which) {
        case CONTENT:
        {
-               earliest_content->done = earliest_content->decoder->pass ();
-               shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
-               if (dcp && !_play_referenced && dcp->reference_audio()) {
+               earliest_content->pass();
+               if (!_play_referenced && earliest_content->reference_dcp_audio()) {
                        /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
                           to `hide' the fact that no audio was emitted during the referenced DCP (though
                           we need to behave as though it was).
                        */
-                       _last_audio_time = dcp->end (_film);
+                       _last_audio_time = earliest_content->end ();
                }
                break;
        }
        case BLACK:
-               emit_video (black_player_video_frame(EYES_BOTH), _black.position());
+               LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
+               emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
+               LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
                DCPTimePeriod period (_silent.period_at_position());
                if (_last_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
@@ -688,21 +724,20 @@ Player::pass ()
        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of the _silent.
        */
-       DCPTime pull_to = _playback_length;
-       for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
-               if (!i->second.piece->done && i->second.last_push_end < pull_to) {
-                       pull_to = i->second.last_push_end;
-               }
+       auto pull_to = _playback_length;
+       for (auto i: _pieces) {
+               i->update_pull_to (pull_to);
        }
        if (!_silent.done() && _silent.position() < pull_to) {
                pull_to = _silent.position();
        }
 
-       list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
-       for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
+       LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
+       auto audio = _audio_merger.pull (pull_to);
+       for (auto i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
-                       pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
+                       auto cut = discard_audio (i->first, i->second, *_last_audio_time);
                        if (!cut.first) {
                                continue;
                        }
@@ -717,14 +752,15 @@ Player::pass ()
 
        if (done) {
                _shuffler->flush ();
-               for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
-                       do_emit_video(i->first, i->second);
+               for (auto const& i: _delay) {
+                       do_emit_video(i.first, i.second);
                }
        }
 
        return done;
 }
 
+
 /** @return Open subtitles for the frame at the given time, converted to images */
 optional<PositionImage>
 Player::open_subtitles_for_frame (DCPTime time) const
@@ -732,13 +768,13 @@ Player::open_subtitles_for_frame (DCPTime time) const
        list<PositionImage> captions;
        int const vfr = _film->video_frame_rate();
 
-       BOOST_FOREACH (
-               PlayerText j,
-               _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
+       for (
+               auto j:
+               _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {
 
                /* Bitmap subtitles */
-               BOOST_FOREACH (BitmapText i, j.bitmap) {
+               for (auto i: j.bitmap) {
                        if (!i.image) {
                                continue;
                        }
@@ -750,85 +786,82 @@ Player::open_subtitles_for_frame (DCPTime time) const
                                PositionImage (
                                        i.image,
                                        Position<int> (
-                                               lrint (_video_container_size.width * i.rectangle.x),
-                                               lrint (_video_container_size.height * i.rectangle.y)
+                                               lrint(_video_container_size.width * i.rectangle.x),
+                                               lrint(_video_container_size.height * i.rectangle.y)
                                                )
                                        )
                                );
                }
 
                /* String subtitles (rendered to an image) */
-               if (!j.string.empty ()) {
-                       list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
+               if (!j.string.empty()) {
+                       auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
                        copy (s.begin(), s.end(), back_inserter (captions));
                }
        }
 
-       if (captions.empty ()) {
-               return optional<PositionImage> ();
+       if (captions.empty()) {
+               return {};
        }
 
        return merge (captions);
 }
 
+
 void
-Player::video (weak_ptr<Piece> wp, ContentVideo video)
+Player::video (weak_ptr<Piece> wp, PieceVideo video)
 {
-       shared_ptr<Piece> piece = wp.lock ();
+       auto piece = wp.lock ();
        if (!piece) {
                return;
        }
 
-       FrameRateChange frc (_film, piece->content);
-       if (frc.skip && (video.frame % 2) == 1) {
-               return;
-       }
-
-       /* Time of the first frame we will emit */
-       DCPTime const time = content_video_to_dcp (piece, video.frame);
+       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(video.time));
 
        /* Discard if it's before the content's period or the last accurate seek.  We can't discard
           if it's after the content's period here as in that case we still need to fill any gap between
           `now' and the end of the content's period.
        */
-       if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
+       if (video.time < piece->position() || (_last_video_time && video.time < *_last_video_time)) {
                return;
        }
 
        /* Fill gaps that we discover now that we have some video which needs to be emitted.
           This is where we need to fill to.
        */
-       DCPTime fill_to = min (time, piece->content->end(_film));
+       DCPTime fill_to = min (video.time, piece->end());
 
        if (_last_video_time) {
-               DCPTime fill_from = max (*_last_video_time, piece->content->position());
+               DCPTime fill_from = max (*_last_video_time, piece->position());
 
                /* Fill if we have more than half a frame to do */
                if ((fill_to - fill_from) > one_video_frame() / 2) {
-                       LastVideoMap::const_iterator last = _last_video.find (wp);
+                       auto last = _last_video.find (wp);
                        if (_film->three_d()) {
-                               Eyes fill_to_eyes = video.eyes;
-                               if (fill_to_eyes == EYES_BOTH) {
-                                       fill_to_eyes = EYES_LEFT;
+                               auto fill_to_eyes = video.eyes;
+                               if (fill_to_eyes == Eyes::BOTH) {
+                                       fill_to_eyes = Eyes::LEFT;
                                }
-                               if (fill_to == piece->content->end(_film)) {
+                               if (fill_to == piece->end()) {
                                        /* Don't fill after the end of the content */
-                                       fill_to_eyes = EYES_LEFT;
+                                       fill_to_eyes = Eyes::LEFT;
                                }
-                               DCPTime j = fill_from;
-                               Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
-                               if (eyes == EYES_BOTH) {
-                                       eyes = EYES_LEFT;
+                               auto j = fill_from;
+                               auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
+                               if (eyes == Eyes::BOTH) {
+                                       eyes = Eyes::LEFT;
                                }
                                while (j < fill_to || eyes != fill_to_eyes) {
                                        if (last != _last_video.end()) {
-                                               shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
+                                               LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
+                                               auto copy = last->second->shallow_copy();
                                                copy->set_eyes (eyes);
                                                emit_video (copy, j);
                                        } else {
+                                               LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
                                                emit_video (black_player_video_frame(eyes), j);
                                        }
-                                       if (eyes == EYES_RIGHT) {
+                                       if (eyes == Eyes::RIGHT) {
                                                j += one_video_frame();
                                        }
                                        eyes = increment_eyes (eyes);
@@ -838,114 +871,100 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                                        if (last != _last_video.end()) {
                                                emit_video (last->second, j);
                                        } else {
-                                               emit_video (black_player_video_frame(EYES_BOTH), j);
+                                               emit_video (black_player_video_frame(Eyes::BOTH), j);
                                        }
                                }
                        }
                }
        }
 
-       _last_video[wp].reset (
-               new PlayerVideo (
-                       video.image,
-                       piece->content->video->crop (),
-                       piece->content->video->fade (_film, video.frame),
-                       piece->content->video->scale().size (
-                               piece->content->video, _video_container_size, _film->frame_size ()
-                               ),
-                       _video_container_size,
-                       video.eyes,
-                       video.part,
-                       piece->content->video->colour_conversion(),
-                       piece->content->video->range(),
-                       piece->content,
-                       video.frame,
-                       false
-                       )
-               );
-
-       DCPTime t = time;
+       _last_video[wp] = piece->player_video (video, _video_container_size);
+
+       DCPTime t = video.time;
+       auto const frc = piece->frame_rate_change();
        for (int i = 0; i < frc.repeat; ++i) {
-               if (t < piece->content->end(_film)) {
+               if (t < piece->end()) {
                        emit_video (_last_video[wp], t);
                }
                t += one_video_frame ();
        }
 }
 
+
 void
-Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
+Player::audio (weak_ptr<Piece> wp, PieceAudio audio)
 {
-       DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
+       DCPOMATIC_ASSERT (audio.audio->frames() > 0);
 
-       shared_ptr<Piece> piece = wp.lock ();
+       auto piece = wp.lock ();
        if (!piece) {
                return;
        }
 
-       shared_ptr<AudioContent> content = piece->content->audio;
-       DCPOMATIC_ASSERT (content);
+       LOG_DEBUG_PLAYER("Received audio at %1", to_string(audio.time));
 
-       int const rfr = content->resampled_frame_rate (_film);
+       /* The end of this block in the DCP */
+       int const rfr = piece->resampled_audio_frame_rate ();
+       auto end = audio.time + DCPTime::from_frames(audio.audio->frames(), rfr);
 
-       /* Compute time in the DCP */
-       DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
-       /* And the end of this block in the DCP */
-       DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
+       /* XXX: is this still necessary? don't the checks in Piece take care of this now?
+        * Maybe replace with some assertions & run tests.
+        */
 
        /* Remove anything that comes before the start or after the end of the content */
-       if (time < piece->content->position()) {
-               pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
+       if (audio.time < piece->position()) {
+               auto cut = discard_audio (audio.audio, audio.time, piece->position());
                if (!cut.first) {
                        /* This audio is entirely discarded */
                        return;
                }
-               content_audio.audio = cut.first;
-               time = cut.second;
-       } else if (time > piece->content->end(_film)) {
+               audio.audio = cut.first;
+               audio.time = cut.second;
+       } else if (audio.time > piece->end()) {
                /* Discard it all */
                return;
-       } else if (end > piece->content->end(_film)) {
-               Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
+       } else if (end > piece->end()) {
+               Frame const remaining_frames = DCPTime(piece->end() - audio.time).frames_round(rfr);
                if (remaining_frames == 0) {
                        return;
                }
-               content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
+               audio.audio = make_shared<AudioBuffers>(audio.audio, remaining_frames, 0);
        }
 
-       DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
+       DCPOMATIC_ASSERT (audio.audio->frames() > 0);
 
        /* Gain */
 
-       if (content->gain() != 0) {
-               shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
-               gain->apply_gain (content->gain ());
-               content_audio.audio = gain;
+       if (piece->audio_gain() != 0) {
+               auto gain = make_shared<AudioBuffers>(audio.audio);
+               gain->apply_gain (piece->audio_gain());
+               audio.audio = gain;
        }
 
        /* Remap */
 
-       content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
+       audio.audio = remap (audio.audio, _film->audio_channels(), audio.mapping);
 
        /* Process */
 
        if (_audio_processor) {
-               content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
+               audio.audio = _audio_processor->run (audio.audio, _film->audio_channels());
        }
 
        /* Push */
 
-       _audio_merger.push (content_audio.audio, time);
-       DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
-       _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
+       _audio_merger.push (audio.audio, audio.time);
+       piece->set_last_push_end (audio.stream, audio.time + DCPTime::from_frames(audio.audio->frames(), _film->audio_frame_rate()));
 }
 
+
 void
-Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
+Player::bitmap_text_start (weak_ptr<Piece> wp, PieceBitmapTextStart subtitle)
 {
-       shared_ptr<Piece> piece = wp.lock ();
-       shared_ptr<const TextContent> text = wc.lock ();
-       if (!piece || !text) {
+       auto piece = wp.lock ();
+       auto content = subtitle.content().lock();
+       auto text = subtitle.text().lock();
+       if (!piece || !content || !text) {
                return;
        }
 
@@ -962,7 +981,7 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
        subtitle.sub.rectangle.height *= text->y_scale ();
 
        PlayerText ps;
-       shared_ptr<Image> image = subtitle.sub.image;
+       auto image = subtitle.sub.image;
 
        /* We will scale the subtitle up to fit _video_container_size */
        int const width = subtitle.sub.rectangle.width * _video_container_size.width;
@@ -972,31 +991,35 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
        }
 
        dcp::Size scaled_size (width, height);
-       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
-       DCPTime from (content_time_to_dcp (piece, subtitle.from()));
+       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
+       auto from = piece->content_time_to_dcp(content, subtitle.time());
+       DCPOMATIC_ASSERT (from);
 
-       _active_texts[text->type()].add_from (wc, ps, from);
+       _active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
 }
 
+
 void
-Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
+Player::string_text_start (weak_ptr<Piece> wp, PieceStringTextStart subtitle)
 {
-       shared_ptr<Piece> piece = wp.lock ();
-       shared_ptr<const TextContent> text = wc.lock ();
-       if (!piece || !text) {
+       auto piece = wp.lock ();
+       auto content = subtitle.content().lock();
+       auto text = subtitle.text().lock();
+       if (!piece || !content || !text) {
                return;
        }
 
        PlayerText ps;
-       DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
+       auto const from = piece->content_time_to_dcp(content, subtitle.time());
+       DCPOMATIC_ASSERT (from);
 
-       if (from > piece->content->end(_film)) {
+       if (from > piece->end()) {
                return;
        }
 
-       BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
-               s.set_h_position (s.h_position() + text->x_offset ());
-               s.set_v_position (s.v_position() + text->y_offset ());
+       for (auto s: subtitle.subs) {
+               s.set_h_position (s.h_position() + text->x_offset());
+               s.set_v_position (s.v_position() + text->y_offset());
                float const xs = text->x_scale();
                float const ys = text->y_scale();
                float size = s.size();
@@ -1014,23 +1037,25 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Co
                        s.set_aspect_adjust (xs / ys);
                }
 
-               s.set_in (dcp::Time(from.seconds(), 1000));
+               s.set_in (dcp::Time(from->seconds(), 1000));
                ps.string.push_back (StringText (s, text->outline_width()));
                ps.add_fonts (text->fonts ());
        }
 
-       _active_texts[text->type()].add_from (wc, ps, from);
+       _active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
 }
 
+
 void
-Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
+Player::subtitle_stop (weak_ptr<Piece> wp, PieceTextStop stop)
 {
-       shared_ptr<const TextContent> text = wc.lock ();
+       auto content = stop.content().lock();
+       auto text = stop.text().lock();
        if (!text) {
                return;
        }
 
-       if (!_active_texts[text->type()].have(wc)) {
+       if (!_active_texts[static_cast<int>(text->type())].have(stop.text())) {
                return;
        }
 
@@ -1039,24 +1064,27 @@ Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Conte
                return;
        }
 
-       DCPTime const dcp_to = content_time_to_dcp (piece, to);
+       auto const dcp_to = piece->content_time_to_dcp(content, stop.time());
+       DCPOMATIC_ASSERT (dcp_to);
 
-       if (dcp_to > piece->content->end(_film)) {
+       if (*dcp_to > piece->end()) {
                return;
        }
 
-       pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
+       auto from = _active_texts[static_cast<int>(text->type())].add_to(stop.text(), *dcp_to);
 
-       bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
+       bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
        if (text->use() && !always && !text->burn()) {
-               Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
+               Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
        }
 }
 
+
 void
 Player::seek (DCPTime time, bool accurate)
 {
        boost::mutex::scoped_lock lm (_mutex);
+       LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
 
        if (_suspended) {
                /* We can't seek in this state */
@@ -1074,32 +1102,17 @@ Player::seek (DCPTime time, bool accurate)
        }
 
        _audio_merger.clear ();
-       for (int i = 0; i < TEXT_COUNT; ++i) {
+       for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
                _active_texts[i].clear ();
        }
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-               if (time < i->content->position()) {
-                       /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
-                          we must seek this (following) content accurately, otherwise when we come to the end of the current
-                          content we may not start right at the beginning of the next, causing a gap (if the next content has
-                          been trimmed to a point between keyframes, or something).
-                       */
-                       i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
-                       i->done = false;
-               } else if (i->content->position() <= time && time < i->content->end(_film)) {
-                       /* During; seek to position */
-                       i->decoder->seek (dcp_to_content_time (i, time), accurate);
-                       i->done = false;
-               } else {
-                       /* After; this piece is done */
-                       i->done = true;
-               }
+       for (auto i: _pieces) {
+               i->seek (time, accurate);
        }
 
        if (accurate) {
                _last_video_time = time;
-               _last_video_eyes = EYES_LEFT;
+               _last_video_eyes = Eyes::LEFT;
                _last_audio_time = time;
        } else {
                _last_video_time = optional<DCPTime>();
@@ -1113,15 +1126,26 @@ Player::seek (DCPTime time, bool accurate)
        _last_video.clear ();
 }
 
+
 void
 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 {
+       if (!_film->three_d()) {
+               if (pv->eyes() == Eyes::LEFT) {
+                       /* Use left-eye images for both eyes... */
+                       pv->set_eyes (Eyes::BOTH);
+               } else if (pv->eyes() == Eyes::RIGHT) {
+                       /* ...and discard the right */
+                       return;
+               }
+       }
+
        /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
           player before the video that requires them.
        */
        _delay.push_back (make_pair (pv, time));
 
-       if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
+       if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
                _last_video_time = time + one_video_frame();
        }
        _last_video_eyes = increment_eyes (pv->eyes());
@@ -1130,21 +1154,22 @@ Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
                return;
        }
 
-       pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
+       auto to_do = _delay.front();
        _delay.pop_front();
        do_emit_video (to_do.first, to_do.second);
 }
 
+
 void
 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 {
-       if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
-               for (int i = 0; i < TEXT_COUNT; ++i) {
+       if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+               for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
                        _active_texts[i].clear_before (time);
                }
        }
 
-       optional<PositionImage> subtitles = open_subtitles_for_frame (time);
+       auto subtitles = open_subtitles_for_frame (time);
        if (subtitles) {
                pv->set_text (subtitles.get ());
        }
@@ -1152,6 +1177,7 @@ Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
        Video (pv, time);
 }
 
+
 void
 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
 {
@@ -1166,6 +1192,7 @@ Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
        _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
 }
 
+
 void
 Player::fill_audio (DCPTimePeriod period)
 {
@@ -1180,7 +1207,7 @@ Player::fill_audio (DCPTimePeriod period)
                DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
                Frame const samples = block.frames_round(_film->audio_frame_rate());
                if (samples) {
-                       shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
+                       auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
                        silence->make_silent ();
                        emit_audio (silence, t);
                }
@@ -1188,36 +1215,39 @@ Player::fill_audio (DCPTimePeriod period)
        }
 }
 
+
 DCPTime
 Player::one_video_frame () const
 {
        return DCPTime::from_frames (1, _film->video_frame_rate ());
 }
 
+
 pair<shared_ptr<AudioBuffers>, DCPTime>
 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
 {
-       DCPTime const discard_time = discard_to - time;
-       Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
-       Frame remaining_frames = audio->frames() - discard_frames;
+       auto const discard_time = discard_to - time;
+       auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
+       auto remaining_frames = audio->frames() - discard_frames;
        if (remaining_frames <= 0) {
                return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
        }
-       shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
+       auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
        return make_pair(cut, time + discard_time);
 }
 
+
 void
 Player::set_dcp_decode_reduction (optional<int> reduction)
 {
-       Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
+       Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
 
        {
                boost::mutex::scoped_lock lm (_mutex);
 
                if (reduction == _dcp_decode_reduction) {
                        lm.unlock ();
-                       Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
+                       Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
                        return;
                }
 
@@ -1225,20 +1255,20 @@ Player::set_dcp_decode_reduction (optional<int> reduction)
                setup_pieces_unlocked ();
        }
 
-       Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
+       Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
 }
 
-optional<DCPTime>
-Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
+
+shared_ptr<const Playlist>
+Player::playlist () const
 {
-       boost::mutex::scoped_lock lm (_mutex);
+       return _playlist ? _playlist : _film->playlist();
+}
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-               if (i->content == content) {
-                       return content_time_to_dcp (i, t);
-               }
-       }
 
-       /* We couldn't find this content; perhaps things are being changed over */
-       return optional<DCPTime>();
+void
+Player::atmos (weak_ptr<Piece>, PieceAtmos data)
+{
+       Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
 }
+