fixup! WIP: allow Piece to take multiple content/decoder.
[dcpomatic.git] / src / lib / player.cc
index a5538d9a6103120ec69b2543f6ac4d4eff004ff0..8b785f0fe7d7c040dba0a6598dbe53c83a0e7cd9 100644 (file)
 
 
 #include "atmos_decoder.h"
-#include "player.h"
-#include "film.h"
 #include "audio_buffers.h"
-#include "content_audio.h"
+#include "audio_content.h"
+#include "audio_decoder.h"
+#include "audio_processor.h"
+#include "compose.hpp"
+#include "config.h"
 #include "dcp_content.h"
+#include "dcp_decoder.h"
 #include "dcpomatic_log.h"
-#include "job.h"
+#include "decoder.h"
+#include "decoder_factory.h"
+#include "ffmpeg_content.h"
+#include "film.h"
+#include "frame_rate_change.h"
 #include "image.h"
-#include "raw_image_proxy.h"
-#include "ratio.h"
+#include "image_decoder.h"
+#include "job.h"
 #include "log.h"
-#include "render_text.h"
-#include "config.h"
-#include "content_video.h"
+#include "piece_video.h"
+#include "player.h"
 #include "player_video.h"
-#include "frame_rate_change.h"
-#include "audio_processor.h"
 #include "playlist.h"
+#include "ratio.h"
+#include "raw_image_proxy.h"
 #include "referenced_reel_asset.h"
-#include "decoder_factory.h"
-#include "decoder.h"
-#include "video_decoder.h"
-#include "audio_decoder.h"
+#include "render_text.h"
+#include "shuffler.h"
 #include "text_content.h"
 #include "text_decoder.h"
-#include "ffmpeg_content.h"
-#include "audio_content.h"
-#include "dcp_decoder.h"
-#include "image_decoder.h"
-#include "compose.hpp"
-#include "shuffler.h"
 #include "timer.h"
+#include "video_decoder.h"
 #include <dcp/reel.h>
 #include <dcp/reel_sound_asset.h>
 #include <dcp/reel_subtitle_asset.h>
@@ -70,6 +69,7 @@ using std::dynamic_pointer_cast;
 using std::list;
 using std::make_pair;
 using std::make_shared;
 using std::map;
 using std::max;
 using std::min;
@@ -78,9 +78,8 @@ using std::pair;
 using std::shared_ptr;
 using std::vector;
 using std::weak_ptr;
-using std::make_shared;
+using std::unique_ptr;
 using boost::optional;
-using boost::scoped_ptr;
 #if BOOST_VERSION >= 106100
 using namespace boost::placeholders;
 #endif
@@ -95,6 +94,10 @@ int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
 int const PlayerProperty::PLAYBACK_LENGTH = 705;
 
 
+/** About 0.01dB */
+#define AUDIO_GAIN_EPSILON 0.001
+
+
 Player::Player (shared_ptr<const Film> film)
        : _film (film)
        , _suspended (0)
@@ -134,12 +137,6 @@ Player::construct ()
 }
 
 
-Player::~Player ()
-{
-       delete _shuffler;
-}
-
-
 void
 Player::setup_pieces ()
 {
@@ -162,6 +159,66 @@ have_audio (shared_ptr<const Content> content)
 }
 
 
+static
+vector<vector<shared_ptr<Content>>>
+collect (shared_ptr<const Film> film, ContentList content)
+{
+       vector<shared_ptr<Content>> ungrouped;
+       vector<vector<shared_ptr<Content>>> grouped;
+
+       auto same_settings = [](shared_ptr<const Film> film, shared_ptr<const AudioContent> a, shared_ptr<const AudioContent> b) {
+
+               auto a_streams = a->streams();
+               auto b_streams = b->streams();
+
+               if (a_streams.size() != b_streams.size()) {
+                       return false;
+               }
+
+               for (size_t i = 0; i < a_streams.size(); ++i) {
+                       auto a_stream = a_streams[i];
+                       auto b_stream = b_streams[i];
+                       if (
+                               !a_stream->mapping().equals(b_stream->mapping(), AUDIO_GAIN_EPSILON) ||
+                               a_stream->frame_rate() != b_stream->frame_rate() ||
+                               a_stream->channels() != b_stream->channels()) {
+                               return false;
+                       }
+               }
+
+               return (
+                       fabs(a->gain() - b->gain()) < AUDIO_GAIN_EPSILON &&
+                       a->delay() == b->delay() &&
+                       a->language() == b->language() &&
+                       a->resampled_frame_rate(film) == b->resampled_frame_rate(film) &&
+                       a->channel_names() == b->channel_names()
+                      );
+       };
+
+       for (auto i: content) {
+               if (i->video || !i->audio || !i->text.empty()) {
+                       ungrouped.push_back (i);
+               } else {
+                       bool done = false;
+                       for (auto& g: grouped) {
+                               if (same_settings(film, g.front()->audio, i->audio) && i->position() == g.back()->end(film)) {
+                                       g.push_back (i);
+                                       done = true;
+                                       break;
+                               }
+                       }
+                       if (!done) {
+                               grouped.push_back ({i});
+                       }
+               }
+       }
+
+       for (auto i: ungrouped) {
+               grouped.push_back({i});
+       }
+
+       return grouped;
+}
+
+
 void
 Player::setup_pieces_unlocked ()
 {
@@ -170,8 +227,7 @@ Player::setup_pieces_unlocked ()
        auto old_pieces = _pieces;
        _pieces.clear ();
 
-       delete _shuffler;
-       _shuffler = new Shuffler();
+       _shuffler.reset (new Shuffler());
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
 
        for (auto i: playlist()->content()) {
@@ -187,13 +243,14 @@ Player::setup_pieces_unlocked ()
 
                shared_ptr<Decoder> old_decoder;
                for (auto j: old_pieces) {
-                       if (j->content == i) {
-                               old_decoder = j->decoder;
+                       auto decoder = j->decoder_for(i);
+                       if (decoder) {
+                               old_decoder = decoder;
                                break;
                        }
                }
 
-               auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
+               auto decoder = decoder_factory (_film, i, _tolerant, old_decoder);
                DCPOMATIC_ASSERT (decoder);
 
                FrameRateChange frc (_film, i);
@@ -220,63 +277,38 @@ Player::setup_pieces_unlocked ()
                        }
                }
 
-               auto piece = make_shared<Piece>(i, decoder, frc);
+               vector<Piece::Pair> content = {
+                       Piece::Pair(i, decoder)
+               };
+
+               auto piece = make_shared<Piece>(_film, content, frc, _fast);
                _pieces.push_back (piece);
 
-               if (decoder->video) {
+               if (i->video) {
                        if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
-                               decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
+                               piece->Video.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
                        } else {
-                               decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
+                               piece->Video.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
                }
 
-               if (decoder->audio) {
-                       decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
-               }
-
-               auto j = decoder->text.begin();
-
-               while (j != decoder->text.end()) {
-                       (*j)->BitmapStart.connect (
-                               bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
-                               );
-                       (*j)->PlainStart.connect (
-                               bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
-                               );
-                       (*j)->Stop.connect (
-                               bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
-                               );
-
-                       ++j;
+               if (i->audio) {
+                       piece->Audio.connect (bind(&Player::audio, this, weak_ptr<Piece>(piece), _1));
                }
 
-               if (decoder->atmos) {
-                       decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
-               }
-       }
-
-       _stream_states.clear ();
-       for (auto i: _pieces) {
-               if (i->content->audio) {
-                       for (auto j: i->content->audio->streams()) {
-                               _stream_states[j] = StreamState (i, i->content->position ());
-                       }
-               }
+               piece->BitmapTextStart.connect (bind(&Player::bitmap_text_start, this, piece, _1));
+               piece->StringTextStart.connect (bind(&Player::string_text_start, this, piece, _1));
+               piece->TextStop.connect (bind(&Player::subtitle_stop, this, piece, _1));
+               piece->Atmos.connect (bind(&Player::atmos, this, piece, _1));
        }
 
        for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
-               if (auto video = (*i)->content->video) {
-                       if (video->use() && video->frame_type() != VideoFrameType::THREE_D_LEFT && video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
-                               /* Look for content later in the content list with in-use video that overlaps this */
-                               auto period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
-                               auto j = i;
-                               ++j;
-                               for (; j != _pieces.end(); ++j) {
-                                       if ((*j)->content->video && (*j)->content->video->use()) {
-                                               (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
-                                       }
+               if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
+                       /* Look for content later in the content list with in-use video that overlaps this */
+                       for (auto j = std::next(i); j != _pieces.end(); ++j) {
+                               if ((*j)->use_video()) {
+                                       (*i)->set_ignore_video ((*j)->period().overlap((*i)->period()));
                                }
                        }
                }
@@ -291,6 +323,23 @@ Player::setup_pieces_unlocked ()
 }
 
 
+optional<DCPTime>
+Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
+{
+       boost::mutex::scoped_lock lm (_mutex);
+
+       for (auto i: _pieces) {
+               auto dcp = i->content_time_to_dcp(content, t);
+               if (dcp) {
+                       return *dcp;
+               }
+       }
+
+       /* We couldn't find this content; perhaps things are being changed over */
+       return {};
+}
+
+
 void
 Player::playlist_content_change (ChangeType type, int property, bool frequent)
 {
@@ -408,69 +457,6 @@ Player::black_player_video_frame (Eyes eyes) const
 }
 
 
-Frame
-Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
-{
-       auto s = t - piece->content->position ();
-       s = min (piece->content->length_after_trim(_film), s);
-       s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
-
-       /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
-          then convert that ContentTime to frames at the content's rate.  However this fails for
-          situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
-          enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
-
-          Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
-       */
-       return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
-}
-
-
-DCPTime
-Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
-{
-       /* See comment in dcp_to_content_video */
-       auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
-       return d + piece->content->position();
-}
-
-
-Frame
-Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
-{
-       auto s = t - piece->content->position ();
-       s = min (piece->content->length_after_trim(_film), s);
-       /* See notes in dcp_to_content_video */
-       return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
-}
-
-
-DCPTime
-Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
-{
-       /* See comment in dcp_to_content_video */
-       return DCPTime::from_frames (f, _film->audio_frame_rate())
-               - DCPTime (piece->content->trim_start(), piece->frc)
-               + piece->content->position();
-}
-
-
-ContentTime
-Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
-{
-       auto s = t - piece->content->position ();
-       s = min (piece->content->length_after_trim(_film), s);
-       return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
-}
-
-
-DCPTime
-Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
-{
-       return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
-}
-
-
 vector<FontData>
 Player::get_subtitle_fonts ()
 {
@@ -481,7 +467,7 @@ Player::get_subtitle_fonts ()
                /* XXX: things may go wrong if there are duplicate font IDs
                   with different font files.
                */
-               auto f = i->decoder->fonts ();
+               auto f = i->fonts ();
                copy (f.begin(), f.end(), back_inserter(fonts));
        }
 
@@ -572,9 +558,9 @@ Player::get_reel_assets ()
                        continue;
                }
 
-               scoped_ptr<DCPDecoder> decoder;
+               unique_ptr<DCPDecoder> decoder;
                try {
-                       decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
+                       decoder.reset (new DCPDecoder(_film, j, false, shared_ptr<DCPDecoder>()));
                } catch (...) {
                        return a;
                }
@@ -654,22 +640,10 @@ Player::pass ()
        optional<DCPTime> earliest_time;
 
        for (auto i: _pieces) {
-               if (i->done) {
-                       continue;
-               }
-
-               auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
-               if (t > i->content->end(_film)) {
-                       i->done = true;
-               } else {
-
-                       /* Given two choices at the same time, pick the one with texts so we see it before
-                          the video.
-                       */
-                       if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
-                               earliest_time = t;
-                               earliest_content = i;
-                       }
+               auto time = i->decoder_before(earliest_time);
+               if (time) {
+                       earliest_time = *time;
+                       earliest_content = i;
                }
        }
 
@@ -699,15 +673,13 @@ Player::pass ()
        switch (which) {
        case CONTENT:
        {
-               LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
-               earliest_content->done = earliest_content->decoder->pass ();
-               auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
-               if (dcp && !_play_referenced && dcp->reference_audio()) {
+               earliest_content->pass();
+               if (!_play_referenced && earliest_content->reference_dcp_audio()) {
                        /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
                           to `hide' the fact that no audio was emitted during the referenced DCP (though
                           we need to behave as though it was).
                        */
-                       _last_audio_time = dcp->end (_film);
+                       _last_audio_time = earliest_content->end ();
                }
                break;
        }
@@ -753,10 +725,8 @@ Player::pass ()
           of our streams, or the position of the _silent.
        */
        auto pull_to = _playback_length;
-       for (auto const& i: _stream_states) {
-               if (!i.second.piece->done && i.second.last_push_end < pull_to) {
-                       pull_to = i.second.last_push_end;
-               }
+       for (auto i: _pieces) {
+               i->update_pull_to (pull_to);
        }
        if (!_silent.done() && _silent.position() < pull_to) {
                pull_to = _silent.position();
@@ -839,45 +809,30 @@ Player::open_subtitles_for_frame (DCPTime time) const
 
 
 void
-Player::video (weak_ptr<Piece> wp, ContentVideo video)
+Player::video (weak_ptr<Piece> wp, PieceVideo video)
 {
        auto piece = wp.lock ();
        if (!piece) {
                return;
        }
 
-       if (!piece->content->video->use()) {
-               return;
-       }
-
-       FrameRateChange frc (_film, piece->content);
-       if (frc.skip && (video.frame % 2) == 1) {
-               return;
-       }
-
-       /* Time of the first frame we will emit */
-       DCPTime const time = content_video_to_dcp (piece, video.frame);
-       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(video.time));
 
        /* Discard if it's before the content's period or the last accurate seek.  We can't discard
           if it's after the content's period here as in that case we still need to fill any gap between
           `now' and the end of the content's period.
        */
-       if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
-               return;
-       }
-
-       if (piece->ignore_video && piece->ignore_video->contains(time)) {
+       if (video.time < piece->position() || (_last_video_time && video.time < *_last_video_time)) {
                return;
        }
 
        /* Fill gaps that we discover now that we have some video which needs to be emitted.
           This is where we need to fill to.
        */
-       DCPTime fill_to = min (time, piece->content->end(_film));
+       DCPTime fill_to = min (video.time, piece->end());
 
        if (_last_video_time) {
-               DCPTime fill_from = max (*_last_video_time, piece->content->position());
+               DCPTime fill_from = max (*_last_video_time, piece->position());
 
                /* Fill if we have more than half a frame to do */
                if ((fill_to - fill_from) > one_video_frame() / 2) {
@@ -887,7 +842,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                                if (fill_to_eyes == Eyes::BOTH) {
                                        fill_to_eyes = Eyes::LEFT;
                                }
-                               if (fill_to == piece->content->end(_film)) {
+                               if (fill_to == piece->end()) {
                                        /* Don't fill after the end of the content */
                                        fill_to_eyes = Eyes::LEFT;
                                }
@@ -923,24 +878,12 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                }
        }
 
-       _last_video[wp] = std::make_shared<PlayerVideo>(
-               video.image,
-               piece->content->video->crop (),
-               piece->content->video->fade (_film, video.frame),
-               scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
-               _video_container_size,
-               video.eyes,
-               video.part,
-               piece->content->video->colour_conversion(),
-               piece->content->video->range(),
-               piece->content,
-               video.frame,
-               false
-               );
+       _last_video[wp] = piece->player_video (video, _video_container_size);
 
-       DCPTime t = time;
+       DCPTime t = video.time;
+       auto const frc = piece->frame_rate_change();
        for (int i = 0; i < frc.repeat; ++i) {
-               if (t < piece->content->end(_film)) {
+               if (t < piece->end()) {
                        emit_video (_last_video[wp], t);
                }
                t += one_video_frame ();
@@ -949,81 +892,76 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 
 
 void
-Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
+Player::audio (weak_ptr<Piece> wp, PieceAudio audio)
 {
-       DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
+       DCPOMATIC_ASSERT (audio.audio->frames() > 0);
 
        auto piece = wp.lock ();
        if (!piece) {
                return;
        }
 
-       auto content = piece->content->audio;
-       DCPOMATIC_ASSERT (content);
-
-       int const rfr = content->resampled_frame_rate (_film);
-
-       /* Compute time in the DCP */
-       auto time = resampled_audio_to_dcp (piece, content_audio.frame);
-       LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
+       LOG_DEBUG_PLAYER("Received audio at %1", to_string(audio.time));
 
-       /* And the end of this block in the DCP */
-       auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
+       /* The end of this block in the DCP */
+       int const rfr = piece->resampled_audio_frame_rate ();
+       auto end = audio.time + DCPTime::from_frames(audio.audio->frames(), rfr);
 
        /* Remove anything that comes before the start or after the end of the content */
-       if (time < piece->content->position()) {
-               auto cut = discard_audio (content_audio.audio, time, piece->content->position());
+       if (audio.time < piece->position()) {
+               auto cut = discard_audio (audio.audio, audio.time, piece->position());
                if (!cut.first) {
                        /* This audio is entirely discarded */
                        return;
                }
-               content_audio.audio = cut.first;
-               time = cut.second;
-       } else if (time > piece->content->end(_film)) {
+               audio.audio = cut.first;
+               audio.time = cut.second;
+       } else if (audio.time > piece->end()) {
                /* Discard it all */
                return;
-       } else if (end > piece->content->end(_film)) {
-               Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
+       } else if (end > piece->end()) {
+               Frame const remaining_frames = DCPTime(piece->end() - audio.time).frames_round(rfr);
                if (remaining_frames == 0) {
                        return;
                }
-               content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
+               audio.audio = make_shared<AudioBuffers>(audio.audio, remaining_frames, 0);
        }
 
-       DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
+       DCPOMATIC_ASSERT (audio.audio->frames() > 0);
 
        /* Gain */
 
-       if (content->gain() != 0) {
-               auto gain = make_shared<AudioBuffers>(content_audio.audio);
-               gain->apply_gain (content->gain());
-               content_audio.audio = gain;
+       if (piece->audio_gain() != 0) {
+               auto gain = make_shared<AudioBuffers>(audio.audio);
+               gain->apply_gain (piece->audio_gain());
+               audio.audio = gain;
        }
 
        /* Remap */
 
-       content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
+       audio.audio = remap (audio.audio, _film->audio_channels(), audio.mapping);
 
        /* Process */
 
        if (_audio_processor) {
-               content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
+               audio.audio = _audio_processor->run (audio.audio, _film->audio_channels());
        }
 
        /* Push */
 
-       _audio_merger.push (content_audio.audio, time);
-       DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
-       _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
+       _audio_merger.push (audio.audio, audio.time);
+       piece->set_last_push_end (audio.stream, audio.time + DCPTime::from_frames(audio.audio->frames(), _film->audio_frame_rate()));
 }
 
 
 void
-Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
+Player::bitmap_text_start (weak_ptr<Piece> wp, PieceBitmapTextStart subtitle)
 {
        auto piece = wp.lock ();
-       auto text = wc.lock ();
-       if (!piece || !text) {
+       auto content = subtitle.content().lock();
+       auto text = subtitle.text().lock();
+       if (!piece || !content || !text) {
                return;
        }
 
@@ -1051,31 +989,34 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
 
        dcp::Size scaled_size (width, height);
        ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
-       DCPTime from (content_time_to_dcp (piece, subtitle.from()));
+       auto from = piece->content_time_to_dcp(content, subtitle.time());
+       DCPOMATIC_ASSERT (from);
 
-       _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
+       _active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
 }
 
 
 void
-Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
+Player::string_text_start (weak_ptr<Piece> wp, PieceStringTextStart subtitle)
 {
        auto piece = wp.lock ();
-       auto text = wc.lock ();
-       if (!piece || !text) {
+       auto content = subtitle.content().lock();
+       auto text = subtitle.text().lock();
+       if (!piece || !content || !text) {
                return;
        }
 
        PlayerText ps;
-       DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
+       auto const from = piece->content_time_to_dcp(content, subtitle.time());
+       DCPOMATIC_ASSERT (from);
 
-       if (from > piece->content->end(_film)) {
+       if (*from > piece->end()) {
                return;
        }
 
        for (auto s: subtitle.subs) {
-               s.set_h_position (s.h_position() + text->x_offset ());
-               s.set_v_position (s.v_position() + text->y_offset ());
+               s.set_h_position (s.h_position() + text->x_offset());
+               s.set_v_position (s.v_position() + text->y_offset());
                float const xs = text->x_scale();
                float const ys = text->y_scale();
                float size = s.size();
@@ -1093,24 +1034,25 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Co
                        s.set_aspect_adjust (xs / ys);
                }
 
-               s.set_in (dcp::Time(from.seconds(), 1000));
+               s.set_in (dcp::Time(from->seconds(), 1000));
                ps.string.push_back (StringText (s, text->outline_width()));
                ps.add_fonts (text->fonts ());
        }
 
-       _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
+       _active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
 }
 
 
 void
-Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
+Player::subtitle_stop (weak_ptr<Piece> wp, PieceTextStop stop)
 {
-       auto text = wc.lock ();
+       auto content = stop.content().lock();
+       auto text = stop.text().lock();
-       if (!text) {
+       if (!content || !text) {
                return;
        }
 
-       if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
+       if (!_active_texts[static_cast<int>(text->type())].have(stop.text())) {
                return;
        }
 
@@ -1119,17 +1061,18 @@ Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Conte
                return;
        }
 
-       DCPTime const dcp_to = content_time_to_dcp (piece, to);
+       auto const dcp_to = piece->content_time_to_dcp(content, stop.time());
+       DCPOMATIC_ASSERT (dcp_to);
 
-       if (dcp_to > piece->content->end(_film)) {
+       if (*dcp_to > piece->end()) {
                return;
        }
 
-       auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
+       auto from = _active_texts[static_cast<int>(text->type())].add_to(stop.text(), *dcp_to);
 
        bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
        if (text->use() && !always && !text->burn()) {
-               Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
+               Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
        }
 }
 
@@ -1161,22 +1104,7 @@ Player::seek (DCPTime time, bool accurate)
        }
 
        for (auto i: _pieces) {
-               if (time < i->content->position()) {
-                       /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
-                          we must seek this (following) content accurately, otherwise when we come to the end of the current
-                          content we may not start right at the beginning of the next, causing a gap (if the next content has
-                          been trimmed to a point between keyframes, or something).
-                       */
-                       i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
-                       i->done = false;
-               } else if (i->content->position() <= time && time < i->content->end(_film)) {
-                       /* During; seek to position */
-                       i->decoder->seek (dcp_to_content_time (i, time), accurate);
-                       i->done = false;
-               } else {
-                       /* After; this piece is done */
-                       i->done = true;
-               }
+               i->seek (time, accurate);
        }
 
        if (accurate) {
@@ -1199,6 +1127,16 @@ Player::seek (DCPTime time, bool accurate)
 void
 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 {
+       if (!_film->three_d()) {
+               if (pv->eyes() == Eyes::LEFT) {
+                       /* Use left-eye images for both eyes... */
+                       pv->set_eyes (Eyes::BOTH);
+               } else if (pv->eyes() == Eyes::RIGHT) {
+                       /* ...and discard the right */
+                       return;
+               }
+       }
+
        /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
           player before the video that requires them.
        */
@@ -1318,22 +1256,6 @@ Player::set_dcp_decode_reduction (optional<int> reduction)
 }
 
 
-optional<DCPTime>
-Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
-{
-       boost::mutex::scoped_lock lm (_mutex);
-
-       for (auto i: _pieces) {
-               if (i->content == content) {
-                       return content_time_to_dcp (i, t);
-               }
-       }
-
-       /* We couldn't find this content; perhaps things are being changed over */
-       return {};
-}
-
-
 shared_ptr<const Playlist>
 Player::playlist () const
 {
@@ -1342,7 +1264,7 @@ Player::playlist () const
 
 
 void
-Player::atmos (weak_ptr<Piece>, ContentAtmos data)
+Player::atmos (weak_ptr<Piece>, PieceAtmos data)
 {
        Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
 }