Change how video timing is done.
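
Video is now timed by the ContentTime of each frame rather than by frame
index: ContentVideo carries a time, PlayerVideo takes an optional
ContentTime instead of a Frame, and Player::video() converts the time
with content_time_to_dcp().  Emission goes through use_video() and the
new emit_video_until(), which repeats the last frame stored for each eye
(or emits black) to fill gaps, replacing the old gap-filling code and
the _next_video_eyes state.  setup_pieces() now takes the lock itself,
get_reel_assets() is removed from Player, and _black_image is protected
by its own mutex.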
[dcpomatic.git] / src / lib / player.cc
index 6361d7b2acc1f3d0d6fb523d15d72107f9449a9f..ae79a457f428f6330121c77833f38c1a4f512626 100644
@@ -47,7 +47,6 @@
 #include "playlist.h"
 #include "ratio.h"
 #include "raw_image_proxy.h"
-#include "referenced_reel_asset.h"
 #include "render_text.h"
 #include "shuffler.h"
 #include "text_content.h"
@@ -72,10 +71,8 @@ using std::dynamic_pointer_cast;
 using std::list;
 using std::make_pair;
 using std::make_shared;
-using std::make_shared;
 using std::max;
 using std::min;
-using std::min;
 using std::pair;
 using std::shared_ptr;
 using std::vector;
@@ -105,6 +102,7 @@ Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment
        , _always_burn_open_subtitles(false)
        , _fast(false)
        , _tolerant (film->tolerant())
+       , _play_referenced(false)
        , _audio_merger (_film->audio_frame_rate())
        , _subtitle_alignment (subtitle_alignment)
 {
@@ -122,6 +120,7 @@ Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist
        , _always_burn_open_subtitles(false)
        , _fast(false)
        , _tolerant (film->tolerant())
+       , _play_referenced(false)
        , _audio_merger (_film->audio_frame_rate())
 {
        construct ();
@@ -146,14 +145,6 @@ Player::construct ()
 }
 
 
-void
-Player::setup_pieces ()
-{
-       boost::mutex::scoped_lock lm (_mutex);
-       setup_pieces_unlocked ();
-}
-
-
 bool
 have_video (shared_ptr<const Content> content)
 {
@@ -169,8 +160,10 @@ have_audio (shared_ptr<const Content> content)
 
 
 void
-Player::setup_pieces_unlocked ()
+Player::setup_pieces ()
 {
+       boost::mutex::scoped_lock lm (_mutex);
+
        _playback_length = _playlist ? _playlist->length(_film) : _film->length();
 
        auto old_pieces = _pieces;
@@ -302,7 +295,6 @@ Player::setup_pieces_unlocked ()
        _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
 
        _next_video_time = boost::none;
-       _next_video_eyes = Eyes::BOTH;
        _next_audio_time = boost::none;
 }
 
@@ -312,10 +304,9 @@ Player::playlist_content_change (ChangeType type, int property, bool frequent)
 {
        if (property == VideoContentProperty::CROP) {
                if (type == ChangeType::DONE) {
-                       auto const vcs = video_container_size();
                        boost::mutex::scoped_lock lm (_mutex);
                        for (auto const& i: _delay) {
-                               i.first->reset_metadata (_film, vcs);
+                               i.first->reset_metadata(_film, _video_container_size);
                        }
                }
        } else {
@@ -343,17 +334,15 @@ Player::set_video_container_size (dcp::Size s)
 {
        Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
 
-       {
-               boost::mutex::scoped_lock lm (_mutex);
-
-               if (s == _video_container_size) {
-                       lm.unlock ();
-                       Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
-                       return;
-               }
+       if (s == _video_container_size) {
+               Change(ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+               return;
+       }
 
-               _video_container_size = s;
+       _video_container_size = s;
 
+       {
+               boost::mutex::scoped_lock lm(_black_image_mutex);
                _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
                _black_image->make_black ();
        }
@@ -407,8 +396,10 @@ Player::film_change (ChangeType type, Film::Property p)
 shared_ptr<PlayerVideo>
 Player::black_player_video_frame (Eyes eyes) const
 {
+       boost::mutex::scoped_lock lm(_black_image_mutex);
+
        return std::make_shared<PlayerVideo> (
-               std::make_shared<const RawImageProxy>(_black_image),
+               make_shared<const RawImageProxy>(_black_image),
                Crop(),
                optional<double>(),
                _video_container_size,
@@ -418,7 +409,7 @@ Player::black_player_video_frame (Eyes eyes) const
                PresetColourConversion::all().front().conversion,
                VideoRange::FULL,
                std::weak_ptr<Content>(),
-               boost::optional<Frame>(),
+               boost::optional<dcpomatic::ContentTime>(),
                false
        );
 }
@@ -549,101 +540,8 @@ Player::set_fast ()
 void
 Player::set_play_referenced ()
 {
-       boost::mutex::scoped_lock lm (_mutex);
        _play_referenced = true;
-       setup_pieces_unlocked ();
-}
-
-
-static void
-maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
-{
-       DCPOMATIC_ASSERT (r);
-       r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
-       r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
-       if (r->actual_duration() > 0) {
-               a.push_back (
-                       ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
-                       );
-       }
-}
-
-
-list<ReferencedReelAsset>
-Player::get_reel_assets ()
-{
-       /* Does not require a lock on _mutex as it's only called from DCPEncoder */
-
-       list<ReferencedReelAsset> reel_assets;
-
-       for (auto content: playlist()->content()) {
-               auto dcp = dynamic_pointer_cast<DCPContent>(content);
-               if (!dcp) {
-                       continue;
-               }
-
-               if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
-                       continue;
-               }
-
-               scoped_ptr<DCPDecoder> decoder;
-               try {
-                       decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
-               } catch (...) {
-                       return reel_assets;
-               }
-
-               auto const frame_rate = _film->video_frame_rate();
-               DCPOMATIC_ASSERT (dcp->video_frame_rate());
-               /* We should only be referencing if the DCP rate is the same as the film rate */
-               DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);
-
-               Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
-               Frame const trim_end = dcp->trim_end().frames_round(frame_rate);
-
-               /* position in the asset from the start */
-               int64_t offset_from_start = 0;
-               /* position in the asset from the end */
-               int64_t offset_from_end = 0;
-               for (auto reel: decoder->reels()) {
-                       /* Assume that main picture duration is the length of the reel */
-                       offset_from_end += reel->main_picture()->actual_duration();
-               }
-
-               for (auto reel: decoder->reels()) {
-
-                       /* Assume that main picture duration is the length of the reel */
-                       int64_t const reel_duration = reel->main_picture()->actual_duration();
-
-                       /* See doc/design/trim_reels.svg */
-                       Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
-                       Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
-
-                       auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate));
-                       if (dcp->reference_video()) {
-                               maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
-                       }
-
-                       if (dcp->reference_audio()) {
-                               maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
-                       }
-
-                       if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
-                               maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
-                       }
-
-                       if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
-                               for (auto caption: reel->closed_captions()) {
-                                       maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
-                               }
-                       }
-
-                       offset_from_start += reel_duration;
-                       offset_from_end -= reel_duration;
-               }
-       }
-
-       return reel_assets;
+       setup_pieces();
 }
 
 
@@ -658,9 +556,9 @@ Player::pass ()
                return false;
        }
 
-       if (_playback_length == DCPTime()) {
+       if (_playback_length.load() == DCPTime()) {
                /* Special; just give one black frame */
-               emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
+               use_video(black_player_video_frame(Eyes::BOTH), DCPTime(), one_video_frame());
                return true;
        }
 
@@ -718,18 +616,23 @@ Player::pass ()
                LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
                auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
-               if (dcp && !_play_referenced && dcp->reference_audio()) {
-                       /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
-                          to `hide' the fact that no audio was emitted during the referenced DCP (though
-                          we need to behave as though it was).
-                       */
-                       _next_audio_time = dcp->end (_film);
+               if (dcp && !_play_referenced) {
+                       if (dcp->reference_video()) {
+                               _next_video_time = dcp->end(_film);
+                       }
+                       if (dcp->reference_audio()) {
+                               /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
+                                  to `hide' the fact that no audio was emitted during the referenced DCP (though
+                                  we need to behave as though it was).
+                               */
+                               _next_audio_time = dcp->end(_film);
+                       }
                }
                break;
        }
        case BLACK:
                LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
-               emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
+               use_video(black_player_video_frame(Eyes::BOTH), _black.position(), _black.period_at_position().to);
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
@@ -796,7 +699,7 @@ Player::pass ()
                }
        }
 
-       auto pull_to = _playback_length;
+       auto pull_to = _playback_length.load();
        for (auto const& i: alive_stream_states) {
                if (!i.second.piece->done && i.second.last_push_end < pull_to) {
                        pull_to = i.second.last_push_end;
@@ -825,24 +728,14 @@
        }
 
        if (done) {
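+               /* All content is used up, so pad the video out to the full length of the film */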
+               emit_video_until(_film->length());
+
                if (_shuffler) {
                        _shuffler->flush ();
                }
                for (auto const& i: _delay) {
-                       do_emit_video(i.first, i.second);
-               }
-
-               /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
-                * However, if we have L and R video files, and one is shorter than the other,
-                * the fill code in ::video mostly takes care of filling in the gaps.
-                * However, since it fills at the point when it knows there is more video coming
-                * at time t (so it should fill any gap up to t) it can't do anything right at the
-                * end.  This is particularly bad news if the last frame emitted is a LEFT
-                * eye, as the MXF writer will complain about the 3D sequence being wrong.
-                * Here's a hack to workaround that particular case.
-                */
-               if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
-                       do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
+                       emit_video(i.first, i.second);
                }
        }
 
@@ -859,7 +751,7 @@ Player::open_subtitles_for_frame (DCPTime time) const
 
        for (
                auto j:
-               _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
+               _active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {
 
                /* Bitmap subtitles */
@@ -869,14 +761,14 @@ Player::open_subtitles_for_frame (DCPTime time) const
                        }
 
                        /* i.image will already have been scaled to fit _video_container_size */
-                       dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
+                       dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);
 
                        captions.push_back (
                                PositionImage (
                                        i.image,
                                        Position<int> (
-                                               lrint(_video_container_size.width * i.rectangle.x),
-                                               lrint(_video_container_size.height * i.rectangle.y)
+                                               lrint(_video_container_size.load().width * i.rectangle.x),
+                                               lrint(_video_container_size.load().height * i.rectangle.y)
                                                )
                                        )
                                );
@@ -897,6 +789,64 @@ Player::open_subtitles_for_frame (DCPTime time) const
 }
 
 
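+/** Emit video frames (repeating the last frame stored for each eye, or
+ *  emitting black) until the given time.
+ */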
+void
+Player::emit_video_until(DCPTime time)
+{
+       auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
+               /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
+                  player before the video that requires them.
+               */
+               _delay.push_back(make_pair(pv, time));
+
+               if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+                       _next_video_time = time + one_video_frame();
+               }
+
+               if (_delay.size() < 3) {
+                       return;
+               }
+
+               auto to_do = _delay.front();
+               _delay.pop_front();
+               emit_video(to_do.first, to_do.second);
+       };
+
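+       /* Only repeat a stored frame if it is less than two frames ahead of the point being filled */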
+       auto const age_threshold = one_video_frame() * 2;
+
+       while (_next_video_time.get_value_or({}) < time) {
+               auto left = _last_video[Eyes::LEFT];
+               auto right = _last_video[Eyes::RIGHT];
+               auto both = _last_video[Eyes::BOTH];
+
+               auto const next = _next_video_time.get_value_or({});
+
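+               /* Use a left/right pair if both eyes are present and at least as new as any
+                  both-eyes frame; otherwise fall back to a both-eyes frame, then to black.
+               */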
+               if (
+                       left.first &&
+                       right.first &&
+                       (!both.first || (left.second >= both.second && right.second >= both.second)) &&
+                       (left.second - next) < age_threshold &&
+                       (right.second - next) < age_threshold
+                  ) {
+                       frame(left.first, next);
+                       frame(right.first, next);
+               } else if (both.first && (both.second - next) < age_threshold) {
+                       frame(both.first, next);
+                       LOG_DEBUG_PLAYER("Content %1 selected for DCP %2 (age %3)", to_string(both.second), to_string(next), to_string(both.second - next));
+               } else {
+                       frame(black_player_video_frame(Eyes::BOTH), next);
+                       LOG_DEBUG_PLAYER("Black selected for DCP %1", to_string(next));
+               }
+       }
+}
+
+
 void
 Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
 {
@@ -913,20 +856,25 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
-       FrameRateChange frc (_film, piece->content);
-       if (frc.skip && (video.frame % 2) == 1) {
-               return;
+       auto const three_d = _film->three_d();
+
+       if (!three_d) {
+               if (video.eyes == Eyes::LEFT) {
+                       /* Use left-eye images for both eyes... */
+                       video.eyes = Eyes::BOTH;
+               } else if (video.eyes == Eyes::RIGHT) {
+                       /* ...and discard the right */
+                       return;
+               }
        }
 
-       /* Time of the first frame we will emit */
-       DCPTime const time = content_video_to_dcp (piece, video.frame);
-       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+       FrameRateChange frc (_film, piece->content);
 
-       /* Discard if it's before the content's period or the last accurate seek.  We can't discard
-          if it's after the content's period here as in that case we still need to fill any gap between
-          `now' and the end of the content's period.
-       */
-       if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
+       /* Time of the frame we just received within the DCP */
+       auto const time = content_time_to_dcp(piece, video.time);
+       LOG_DEBUG_PLAYER("Received video frame %1 %2 eyes %3", to_string(video.time), to_string(time), static_cast<int>(video.eyes));
+
+       if (time < piece->content->position()) {
                return;
        }
 
@@ -934,86 +882,46 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
-       /* Fill gaps that we discover now that we have some video which needs to be emitted.
-          This is where we need to fill to.
-       */
-       DCPTime fill_to = min (time, piece->content->end(_film));
-
-       if (_next_video_time) {
-               DCPTime fill_from = max (*_next_video_time, piece->content->position());
-
-               /* Fill if we have more than half a frame to do */
-               if ((fill_to - fill_from) > one_video_frame() / 2) {
-                       auto last = _last_video.find (weak_piece);
-                       if (_film->three_d()) {
-                               auto fill_to_eyes = video.eyes;
-                               if (fill_to_eyes == Eyes::BOTH) {
-                                       fill_to_eyes = Eyes::LEFT;
-                               }
-                               if (fill_to == piece->content->end(_film)) {
-                                       /* Don't fill after the end of the content */
-                                       fill_to_eyes = Eyes::LEFT;
-                               }
-                               auto j = fill_from;
-                               auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
-                               if (eyes == Eyes::BOTH) {
-                                       eyes = Eyes::LEFT;
-                               }
-                               while (j < fill_to || eyes != fill_to_eyes) {
-                                       if (last != _last_video.end()) {
-                                               LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
-                                               auto copy = last->second->shallow_copy();
-                                               copy->set_eyes (eyes);
-                                               emit_video (copy, j);
-                                       } else {
-                                               LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
-                                               emit_video (black_player_video_frame(eyes), j);
-                                       }
-                                       if (eyes == Eyes::RIGHT) {
-                                               j += one_video_frame();
-                                       }
-                                       eyes = increment_eyes (eyes);
-                               }
-                       } else {
-                               for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
-                                       if (last != _last_video.end()) {
-                                               emit_video (last->second, j);
-                                       } else {
-                                               emit_video (black_player_video_frame(Eyes::BOTH), j);
-                                       }
-                               }
-                       }
-               }
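+       /* No video has been emitted since setup or an inaccurate seek, so start our timing from this frame */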
+       if (!_next_video_time) {
+               _next_video_time = time.round(_film->video_frame_rate());
        }
 
        auto const content_video = piece->content->video;
-
-       _last_video[weak_piece] = std::make_shared<PlayerVideo>(
-               video.image,
-               content_video->actual_crop(),
-               content_video->fade (_film, video.frame),
-               scale_for_display(
-                       content_video->scaled_size(_film->frame_size()),
+       use_video(
+               std::make_shared<PlayerVideo>(
+                       video.image,
+                       content_video->actual_crop(),
+                       content_video->fade(_film, video.time),
+                       scale_for_display(
+                               content_video->scaled_size(_film->frame_size()),
+                               _video_container_size,
+                               _film->frame_size(),
+                               content_video->pixel_quanta()
+                               ),
                        _video_container_size,
-                       _film->frame_size(),
-                       content_video->pixel_quanta()
+                       video.eyes,
+                       video.part,
+                       content_video->colour_conversion(),
+                       content_video->range(),
+                       piece->content,
+                       video.time,
+                       false
                        ),
-               _video_container_size,
-               video.eyes,
-               video.part,
-               content_video->colour_conversion(),
-               content_video->range(),
-               piece->content,
-               video.frame,
-               false
-               );
+               time,
+               piece->content->end(_film)
+               );
+}
 
-       DCPTime t = time;
-       for (int i = 0; i < frc.repeat; ++i) {
-               if (t < piece->content->end(_film)) {
-                       emit_video (_last_video[weak_piece], t);
-               }
-               t += one_video_frame ();
+
+void
+Player::use_video(shared_ptr<PlayerVideo> pv, DCPTime time, DCPTime end)
+{
+       _last_video[pv->eyes()] = { pv, time };
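+       /* Don't emit on a LEFT-eye frame: wait for its RIGHT pair (or a BOTH frame), then
+          emit everything up to half a frame past this time, clamped to the end of the content */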
+       if (pv->eyes() != Eyes::LEFT) {
+               emit_video_until(std::min(time + one_video_frame() / 2, end));
        }
 }
 
@@ -1139,8 +1044,8 @@ Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextConten
                auto image = sub.image;
 
                /* We will scale the subtitle up to fit _video_container_size */
-               int const width = sub.rectangle.width * _video_container_size.width;
-               int const height = sub.rectangle.height * _video_container_size.height;
+               int const width = sub.rectangle.width * _video_container_size.load().width;
+               int const height = sub.rectangle.height * _video_container_size.load().height;
                if (width == 0 || height == 0) {
                        return;
                }
@@ -1150,7 +1055,7 @@ Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextConten
        }
 
        DCPTime from(content_time_to_dcp(piece, subtitle.from()));
-       _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
+       _active_texts[content->type()].add_from(weak_content, ps, from);
 }
 
 
@@ -1198,7 +1103,7 @@ Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent
                ps.string.push_back (s);
        }
 
-       _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
+       _active_texts[content->type()].add_from(weak_content, ps, from);
 }
 
 
@@ -1214,7 +1119,7 @@ Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> w
                return;
        }
 
-       if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
+       if (!_active_texts[content->type()].have(weak_content)) {
                return;
        }
 
@@ -1229,7 +1134,7 @@ Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> w
                return;
        }
 
-       auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
+       auto from = _active_texts[content->type()].add_to(weak_content, dcp_to);
 
        bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
        if (content->use() && !always && !content->burn()) {
@@ -1260,9 +1165,7 @@ Player::seek (DCPTime time, bool accurate)
        }
 
        _audio_merger.clear ();
-       for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
-               _active_texts[i].clear ();
-       }
+       std::for_each(_active_texts.begin(), _active_texts.end(), [](ActiveText& a) { a.clear(); });
 
        for (auto i: _pieces) {
                if (time < i->content->position()) {
@@ -1285,61 +1188,25 @@ Player::seek (DCPTime time, bool accurate)
 
        if (accurate) {
                _next_video_time = time;
-               _next_video_eyes = Eyes::LEFT;
                _next_audio_time = time;
        } else {
                _next_video_time = boost::none;
-               _next_video_eyes = boost::none;
                _next_audio_time = boost::none;
        }
 
        _black.set_position (time);
        _silent.set_position (time);
 
-       _last_video.clear ();
-}
-
-
-void
-Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
-{
-       if (!_film->three_d()) {
-               if (pv->eyes() == Eyes::LEFT) {
-                       /* Use left-eye images for both eyes... */
-                       pv->set_eyes (Eyes::BOTH);
-               } else if (pv->eyes() == Eyes::RIGHT) {
-                       /* ...and discard the right */
-                       return;
-               }
-       }
-
-       /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
-          player before the video that requires them.
-       */
-       _delay.push_back (make_pair (pv, time));
-
-       if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-               _next_video_time = time + one_video_frame();
-       }
-       _next_video_eyes = increment_eyes (pv->eyes());
-
-       if (_delay.size() < 3) {
-               return;
-       }
-
-       auto to_do = _delay.front();
-       _delay.pop_front();
-       do_emit_video (to_do.first, to_do.second);
+       _last_video[Eyes::LEFT] = {};
+       _last_video[Eyes::RIGHT] = {};
+       _last_video[Eyes::BOTH] = {};
 }
 
-
 void
-Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
+Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time)
 {
        if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-               for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
-                       _active_texts[i].clear_before (time);
-               }
+               std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
        }
 
        auto subtitles = open_subtitles_for_frame (time);
@@ -1415,25 +1282,20 @@ Player::set_dcp_decode_reduction (optional<int> reduction)
 {
        Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
 
-       {
-               boost::mutex::scoped_lock lm (_mutex);
-
-               if (reduction == _dcp_decode_reduction) {
-                       lm.unlock ();
-                       Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
-                       return;
-               }
-
-               _dcp_decode_reduction = reduction;
-               setup_pieces_unlocked ();
+       if (reduction == _dcp_decode_reduction.load()) {
+               Change(ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
+               return;
        }
 
+       _dcp_decode_reduction = reduction;
+       setup_pieces();
+
        Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
 }
 
 
 optional<DCPTime>
-Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
+Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
 {
        boost::mutex::scoped_lock lm (_mutex);
 
@@ -1449,7 +1311,7 @@ Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
 
 
 optional<ContentTime>
-Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
+Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
 {
        boost::mutex::scoped_lock lm (_mutex);