Make EmptyVideo work with stereo a little better.
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 5404dfac4bd3fb9d51b721e83669c5ff0cbc8e1a..392c929a31f7923f505207d2db72c69c1330b649 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
 #include "atmos_decoder.h"
-#include "player.h"
-#include "film.h"
 #include "audio_buffers.h"
+#include "audio_content.h"
+#include "audio_decoder.h"
+#include "audio_processor.h"
+#include "compose.hpp"
+#include "config.h"
 #include "content_audio.h"
+#include "content_video.h"
 #include "dcp_content.h"
+#include "dcp_decoder.h"
 #include "dcpomatic_log.h"
-#include "job.h"
+#include "decoder.h"
+#include "decoder_factory.h"
+#include "ffmpeg_content.h"
+#include "film.h"
+#include "frame_rate_change.h"
 #include "image.h"
-#include "raw_image_proxy.h"
-#include "ratio.h"
+#include "image_decoder.h"
+#include "job.h"
 #include "log.h"
-#include "render_text.h"
-#include "config.h"
-#include "content_video.h"
+#include "maths_util.h"
+#include "piece.h"
+#include "player.h"
 #include "player_video.h"
-#include "frame_rate_change.h"
-#include "audio_processor.h"
 #include "playlist.h"
+#include "ratio.h"
+#include "raw_image_proxy.h"
 #include "referenced_reel_asset.h"
-#include "decoder_factory.h"
-#include "decoder.h"
-#include "video_decoder.h"
-#include "audio_decoder.h"
+#include "render_text.h"
+#include "shuffler.h"
 #include "text_content.h"
 #include "text_decoder.h"
-#include "ffmpeg_content.h"
-#include "audio_content.h"
-#include "dcp_decoder.h"
-#include "image_decoder.h"
-#include "compose.hpp"
-#include "shuffler.h"
 #include "timer.h"
+#include "video_decoder.h"
 #include <dcp/reel.h>
+#include <dcp/reel_closed_caption_asset.h>
+#include <dcp/reel_picture_asset.h>
 #include <dcp/reel_sound_asset.h>
 #include <dcp/reel_subtitle_asset.h>
-#include <dcp/reel_picture_asset.h>
-#include <dcp/reel_closed_caption_asset.h>
-#include <stdint.h>
 #include <algorithm>
 #include <iostream>
+#include <stdint.h>
 
 #include "i18n.h"
 
@@ -70,6 +72,7 @@ using std::dynamic_pointer_cast;
 using std::list;
 using std::make_pair;
 using std::make_shared;
+using std::make_shared;
 using std::max;
 using std::min;
 using std::min;
@@ -77,7 +80,6 @@ using std::pair;
 using std::shared_ptr;
 using std::vector;
 using std::weak_ptr;
-using std::make_shared;
 using boost::optional;
 using boost::scoped_ptr;
 #if BOOST_VERSION >= 106100
@@ -142,20 +144,6 @@ Player::setup_pieces ()
 }
 
 
-bool
-have_video (shared_ptr<const Content> content)
-{
-       return static_cast<bool>(content->video) && content->video->use();
-}
-
-
-bool
-have_audio (shared_ptr<const Content> content)
-{
-       return static_cast<bool>(content->audio);
-}
-
-
 void
 Player::setup_pieces_unlocked ()
 {
@@ -164,8 +152,19 @@ Player::setup_pieces_unlocked ()
        auto old_pieces = _pieces;
        _pieces.clear ();
 
-       _shuffler.reset (new Shuffler());
-       _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+       auto playlist_content = playlist()->content();
+       bool const have_threed = std::any_of(
+               playlist_content.begin(),
+               playlist_content.end(),
+               [](shared_ptr<const Content> c) {
+                       return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
+               });
+
+
+       if (have_threed) {
+               _shuffler.reset(new Shuffler());
+               _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+       }
 
        for (auto i: playlist()->content()) {
 
@@ -217,7 +216,7 @@ Player::setup_pieces_unlocked ()
                _pieces.push_back (piece);
 
                if (decoder->video) {
-                       if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
+                       if (have_threed) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
                                decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
                        } else {
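
For context, the Shuffler is a store-and-forward buffer that re-emits frames in strict L, R, L, R order.  A minimal sketch of that behaviour, assuming a simplified interface (the MiniShuffler name and shape are invented; the real class in shuffler.cc also bounds its buffer and copes with seeks):

    #include <deque>
    #include <functional>

    /* Illustrative sketch only; not the real Shuffler interface */
    enum class Eyes { LEFT, RIGHT };

    struct Frame {
        int index;
        Eyes eyes;
    };

    class MiniShuffler
    {
    public:
        std::function<void (Frame)> emit;

        /* Store the frame, then emit as many stored frames as we can
           in strict L, R, L, R... order. */
        void video (Frame frame)
        {
            _store.push_back (frame);
            bool progress = true;
            while (progress) {
                progress = false;
                for (auto i = _store.begin(); i != _store.end(); ++i) {
                    if (i->eyes == _next) {
                        emit (*i);
                        _next = _next == Eyes::LEFT ? Eyes::RIGHT : Eyes::LEFT;
                        _store.erase (i);
                        progress = true;
                        break;
                    }
                }
            }
        }

    private:
        std::deque<Frame> _store;
        Eyes _next = Eyes::LEFT;
    };
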
@@ -259,28 +258,28 @@ Player::setup_pieces_unlocked ()
                }
        }
 
+       auto ignore_overlap = [](shared_ptr<VideoContent> v) {
+               return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
+       };
+
        for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
-               if (auto video = (*i)->content->video) {
-                       if (video->use() && video->frame_type() != VideoFrameType::THREE_D_LEFT && video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
-                               /* Look for content later in the content list with in-use video that overlaps this */
-                               auto period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
-                               auto j = i;
-                               ++j;
-                               for (; j != _pieces.end(); ++j) {
-                                       if ((*j)->content->video && (*j)->content->video->use()) {
-                                               (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
-                                       }
+               if (ignore_overlap((*i)->content->video)) {
+                       /* Look for content later in the content list with in-use video that overlaps this */
+                       auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
+                       for (auto j = std::next(i); j != _pieces.end(); ++j) {
+                               if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
+                                       (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
                                }
                        }
                }
        }
 
-       _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
-       _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
+       _black = EmptyVideo (_film, playlist(), _playback_length);
+       _silent = EmptyAudio (_film, playlist(), _playback_length);
 
-       _last_video_time = boost::optional<dcpomatic::DCPTime>();
-       _last_video_eyes = Eyes::BOTH;
-       _last_audio_time = boost::optional<dcpomatic::DCPTime>();
+       _next_video_time = boost::none;
+       _next_video_eyes = Eyes::BOTH;
+       _next_audio_time = boost::none;
 }
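
The ignore_video test above leans on DCPTimePeriod::overlap().  A simplified model of that operation, with plain integers standing in for DCPTime (the real type lives in dcpomatic_time.h; this Period struct is illustrative only):

    #include <boost/optional.hpp>
    #include <algorithm>
    #include <cstdint>

    /* Illustrative stand-in for DCPTimePeriod */
    struct Period
    {
        int64_t from;
        int64_t to;

        /* Intersection of two half-open periods, or none if they are disjoint */
        boost::optional<Period> overlap (Period const& other) const
        {
            auto const f = std::max (from, other.from);
            auto const t = std::min (to, other.to);
            if (f >= t) {
                return {};
            }
            return Period { f, t };
        }
    };

For example, a piece spanning [0, 100) with later in-use video at [50, 150) gets ignore_video = [50, 100): the earlier piece's frames are suppressed where the later content will supply them.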
 
 
@@ -565,6 +564,10 @@ Player::get_reel_assets ()
                        continue;
                }
 
+               if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
+                       continue;
+               }
+
                scoped_ptr<DCPDecoder> decoder;
                try {
                        decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
@@ -572,19 +575,21 @@ Player::get_reel_assets ()
                        return reel_assets;
                }
 
+               auto const frame_rate = _film->video_frame_rate();
                DCPOMATIC_ASSERT (dcp->video_frame_rate());
-               double const cfr = dcp->video_frame_rate().get();
-               Frame const trim_start = dcp->trim_start().frames_round(cfr);
-               Frame const trim_end = dcp->trim_end().frames_round(cfr);
-               int const ffr = _film->video_frame_rate ();
+               /* We should only be referencing if the DCP rate is the same as the film rate */
+               DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);
+
+               Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
+               Frame const trim_end = dcp->trim_end().frames_round(frame_rate);
 
                /* position in the asset from the start */
                int64_t offset_from_start = 0;
               /* position in the asset from the end */
                int64_t offset_from_end = 0;
-               for (auto k: decoder->reels()) {
+               for (auto reel: decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
-                       offset_from_end += k->main_picture()->actual_duration();
+                       offset_from_end += reel->main_picture()->actual_duration();
                }
 
                for (auto reel: decoder->reels()) {
@@ -596,22 +601,22 @@ Player::get_reel_assets ()
                        Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
                        Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
 
-                       auto const from = max(DCPTime(), content->position() + DCPTime::from_frames(offset_from_start, ffr) - DCPTime::from_frames(trim_start, cfr));
+                       auto const from = max(DCPTime(), content->position() + DCPTime::from_frames(offset_from_start, frame_rate) - DCPTime::from_frames(trim_start, frame_rate));
                        if (dcp->reference_video()) {
-                               maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
+                               maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
                        }
 
                        if (dcp->reference_audio()) {
-                               maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
+                               maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
                        }
 
                        if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
-                               maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
+                               maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
                        }
 
                        if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
                                for (auto caption: reel->closed_captions()) {
-                                       maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, ffr);
+                                       maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
                                }
                        }
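
To make the per-reel trim arithmetic concrete, here is a worked example with invented numbers: two 1000-frame reels, 100 frames trimmed from the start of the DCP and 50 from its end.  The offset updates at the bottom of the loop are assumed from the surrounding code, which this excerpt truncates:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    /* Worked example with invented numbers; mirrors the arithmetic above */
    int main ()
    {
        int64_t const trim_start = 100;
        int64_t const trim_end = 50;
        int64_t const reels[] = { 1000, 1000 };

        int64_t offset_from_start = 0;
        int64_t offset_from_end = 2000;  /* total duration of all reels */

        for (auto reel_duration: reels) {
            auto const reel_trim_start = std::min (reel_duration, std::max (int64_t(0), trim_start - offset_from_start));
            auto const reel_trim_end = std::min (reel_duration, std::max (int64_t(0), reel_duration - (offset_from_end - trim_end)));
            std::cout << reel_trim_start << " " << reel_trim_end << "\n";
            offset_from_start += reel_duration;
            offset_from_end -= reel_duration;
        }

        /* Prints "100 0" then "0 50": the start trim lands entirely on the
           first reel and the end trim entirely on the last. */
        return 0;
    }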
 
@@ -696,37 +701,40 @@ Player::pass ()
                earliest_content->done = earliest_content->decoder->pass ();
                auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
-                       /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
+                       /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
                           to `hide' the fact that no audio was emitted during the referenced DCP (though
                           we need to behave as though it was).
                        */
-                       _last_audio_time = dcp->end (_film);
+                       _next_audio_time = dcp->end (_film);
                }
                break;
        }
        case BLACK:
+       {
                LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
-               emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
+               auto period = _black.period_at_position();
+               emit_video (black_player_video_frame(period.second), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
+       }
        case SILENT:
        {
                LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
                DCPTimePeriod period (_silent.period_at_position());
-               if (_last_audio_time) {
+               if (_next_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
                           or after this silence.  Bodge the start time of the silence to fix it.
                           I think this is nothing to worry about since we will just add or
                           remove a little silence at the end of some content.
                        */
-                       int64_t const error = labs(period.from.get() - _last_audio_time->get());
+                       int64_t const error = labs(period.from.get() - _next_audio_time->get());
                        /* Let's not worry about less than a frame at 24fps */
                        int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
                        if (error >= too_much_error) {
                                _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
                        }
                        DCPOMATIC_ASSERT (error < too_much_error);
-                       period.from = *_last_audio_time;
+                       period.from = *_next_audio_time;
                }
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
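
A sketch of the silence start-time bodge in isolation, assuming (from dcpomatic_time.h) that DCPTime counts 96000 units per second, so one frame at 24fps is 4000 units; plain integers stand in for DCPTime:

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    /* Illustrative only: snap the start of a silent period to the end of the
       last emitted audio, provided the discrepancy is under one frame at 24fps. */
    int64_t bodge_silence_start (int64_t period_from, int64_t next_audio_time)
    {
        int64_t const error = std::llabs (period_from - next_audio_time);
        int64_t const too_much_error = 96000 / 24;  /* 4000 units */
        assert (error < too_much_error);
        return next_audio_time;
    }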
@@ -786,26 +794,41 @@ Player::pass ()
        LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
        auto audio = _audio_merger.pull (pull_to);
        for (auto i = audio.begin(); i != audio.end(); ++i) {
-               if (_last_audio_time && i->second < *_last_audio_time) {
+               if (_next_audio_time && i->second < *_next_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
-                       auto cut = discard_audio (i->first, i->second, *_last_audio_time);
+                       auto cut = discard_audio (i->first, i->second, *_next_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
-               } else if (_last_audio_time && i->second > *_last_audio_time) {
+               } else if (_next_audio_time && i->second > *_next_audio_time) {
                        /* There's a gap between this data and the last we emitted; fill with silence */
-                       fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
+                       fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
                }
 
                emit_audio (i->first, i->second);
        }
 
        if (done) {
-               _shuffler->flush ();
+               if (_shuffler) {
+                       _shuffler->flush ();
+               }
                for (auto const& i: _delay) {
                        do_emit_video(i.first, i.second);
                }
+
+               /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
+                * However, if we have L and R video files, and one is shorter than the other,
+                * the fill code in ::video mostly takes care of filling in the gaps.
+                * But since it fills at the point when it knows there is more video coming
+                * at time t (so it should fill any gap up to t) it can't do anything right at the
+                * end.  This is particularly bad news if the last frame emitted is a LEFT
+                * eye, as the MXF writer will complain about the 3D sequence being wrong.
+                * Here's a hack to work around that particular case.
+                */
+               if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
+                       do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
+               }
        }
 
        return done;
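
The hack relies on how the expected eye is tracked: judging from its uses in this file, increment_eyes() cycles LEFT to RIGHT and everything else back to LEFT, so a stream whose final real frame was a LEFT eye leaves _next_video_eyes at RIGHT.  A sketch of that assumed behaviour:

    enum class Eyes { LEFT, RIGHT, BOTH };

    /* Assumed: after a LEFT eye we expect its RIGHT partner; after a
       RIGHT (or a 2D BOTH frame) we expect the next frame's LEFT. */
    Eyes increment_eyes (Eyes e)
    {
        return e == Eyes::LEFT ? Eyes::RIGHT : Eyes::LEFT;
    }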
@@ -860,13 +883,13 @@ Player::open_subtitles_for_frame (DCPTime time) const
 
 
 void
-Player::video (weak_ptr<Piece> wp, ContentVideo video)
+Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
 {
        if (_suspended) {
                return;
        }
 
-       auto piece = wp.lock ();
+       auto piece = weak_piece.lock ();
        if (!piece) {
                return;
        }
@@ -882,13 +905,17 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 
        /* Time of the first frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);
-       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+       if (video.eyes == Eyes::BOTH) {
+               LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+       } else {
+               LOG_DEBUG_PLAYER("Received video frame %1 %2 at %3", (video.eyes == Eyes::LEFT ? "L" : "R"), video.frame, to_string(time));
+       }
 
        /* Discard if it's before the content's period or the last accurate seek.  We can't discard
           if it's after the content's period here as in that case we still need to fill any gap between
           `now' and the end of the content's period.
        */
-       if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
+       if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
                return;
        }
 
@@ -901,12 +928,12 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
        */
        DCPTime fill_to = min (time, piece->content->end(_film));
 
-       if (_last_video_time) {
-               DCPTime fill_from = max (*_last_video_time, piece->content->position());
+       if (_next_video_time) {
+               DCPTime fill_from = max (*_next_video_time, piece->content->position());
 
                /* Fill if we have more than half a frame to do */
                if ((fill_to - fill_from) > one_video_frame() / 2) {
-                       auto last = _last_video.find (wp);
+                       auto last = _last_video.find (weak_piece);
                        if (_film->three_d()) {
                                auto fill_to_eyes = video.eyes;
                                if (fill_to_eyes == Eyes::BOTH) {
@@ -917,7 +944,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                                        fill_to_eyes = Eyes::LEFT;
                                }
                                auto j = fill_from;
-                               auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
+                               auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
                                if (eyes == Eyes::BOTH) {
                                        eyes = Eyes::LEFT;
                                }
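
The fill loop this hunk truncates emits black frames with alternating eyes, advancing the clock only after each RIGHT eye so that every L/R pair shares one timestamp.  A compressed sketch of that pattern, with integers standing in for DCPTime and an invented emit callback:

    #include <cstdint>
    #include <functional>

    enum class Eyes { LEFT, RIGHT };

    /* Illustrative sketch of the alternating-eye fill */
    void fill_black_3d (int64_t from, int64_t to, int64_t frame_duration, Eyes first,
                        std::function<void (Eyes, int64_t)> emit_black)
    {
        auto eyes = first;
        for (auto t = from; t < to; ) {
            emit_black (eyes, t);
            if (eyes == Eyes::RIGHT) {
                t += frame_duration;  /* both eyes of this frame are done */
                eyes = Eyes::LEFT;
            } else {
                eyes = Eyes::RIGHT;
            }
        }
    }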
@@ -950,7 +977,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 
        auto const content_video = piece->content->video;
 
-       _last_video[wp] = std::make_shared<PlayerVideo>(
+       _last_video[weak_piece] = std::make_shared<PlayerVideo>(
                video.image,
                content_video->actual_crop(),
                content_video->fade (_film, video.frame),
@@ -973,7 +1000,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
        DCPTime t = time;
        for (int i = 0; i < frc.repeat; ++i) {
                if (t < piece->content->end(_film)) {
-                       emit_video (_last_video[wp], t);
+                       emit_video (_last_video[weak_piece], t);
                }
                t += one_video_frame ();
        }
@@ -981,7 +1008,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 
 
 void
-Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
+Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
 {
        if (_suspended) {
                return;
@@ -989,7 +1016,7 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
 
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
 
-       auto piece = wp.lock ();
+       auto piece = weak_piece.lock ();
        if (!piece) {
                return;
        }
@@ -1028,12 +1055,28 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
 
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
 
-       /* Gain */
-
-       if (content->gain() != 0) {
-               auto gain = make_shared<AudioBuffers>(content_audio.audio);
-               gain->apply_gain (content->gain());
-               content_audio.audio = gain;
+       /* Gain and fade */
+
+       auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
+       if (content->gain() != 0 || !fade_coeffs.empty()) {
+               auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
+               if (!fade_coeffs.empty()) {
+                       /* Apply both fade and gain */
+                       DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
+                       auto const channels = gain_buffers->channels();
+                       auto const frames = fade_coeffs.size();
+                       auto data = gain_buffers->data();
+                       auto const gain = db_to_linear (content->gain());
+                       for (auto channel = 0; channel < channels; ++channel) {
+                               for (auto frame = 0U; frame < frames; ++frame) {
+                                       data[channel][frame] *= gain * fade_coeffs[frame];
+                               }
+                       }
+               } else {
+                       /* Just apply gain */
+                       gain_buffers->apply_gain (content->gain());
+               }
+               content_audio.audio = gain_buffers;
        }
 
        /* Remap */
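
The fused loop above multiplies every sample by one combined coefficient.  A standalone sketch, assuming db_to_linear() (from the newly included maths_util.h) is the usual 10^(dB / 20) conversion, so a gain of -6 dB comes out at roughly 0.5:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    /* Assumed to match maths_util.h's conversion */
    float db_to_linear (float db)
    {
        return std::pow (10.0f, db / 20.0f);
    }

    /* Apply a fixed gain (in dB) and a per-frame fade coefficient in one pass */
    void apply_gain_and_fade (std::vector<std::vector<float>>& channels,
                              std::vector<float> const& fade_coeffs, float gain_db)
    {
        auto const gain = db_to_linear (gain_db);
        for (auto& channel: channels) {
            for (size_t frame = 0; frame < fade_coeffs.size(); ++frame) {
                channel[frame] *= gain * fade_coeffs[frame];
            }
        }
    }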
@@ -1055,58 +1098,61 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
 
 
 void
-Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
+Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
 {
        if (_suspended) {
                return;
        }
 
-       auto piece = wp.lock ();
-       auto text = wc.lock ();
-       if (!piece || !text) {
+       auto piece = weak_piece.lock ();
+       auto content = weak_content.lock ();
+       if (!piece || !content) {
                return;
        }
 
-       /* Apply content's subtitle offsets */
-       subtitle.sub.rectangle.x += text->x_offset ();
-       subtitle.sub.rectangle.y += text->y_offset ();
+       PlayerText ps;
+       for (auto& sub: subtitle.subs)
+       {
+               /* Apply content's subtitle offsets */
+               sub.rectangle.x += content->x_offset ();
+               sub.rectangle.y += content->y_offset ();
 
-       /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
-       subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
-       subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
+               /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
+               sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
+               sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
 
-       /* Apply content's subtitle scale */
-       subtitle.sub.rectangle.width *= text->x_scale ();
-       subtitle.sub.rectangle.height *= text->y_scale ();
+               /* Apply content's subtitle scale */
+               sub.rectangle.width *= content->x_scale ();
+               sub.rectangle.height *= content->y_scale ();
 
-       PlayerText ps;
-       auto image = subtitle.sub.image;
+               auto image = sub.image;
 
-       /* We will scale the subtitle up to fit _video_container_size */
-       int const width = subtitle.sub.rectangle.width * _video_container_size.width;
-       int const height = subtitle.sub.rectangle.height * _video_container_size.height;
-       if (width == 0 || height == 0) {
-               return;
-       }
+               /* We will scale the subtitle up to fit _video_container_size */
+               int const width = sub.rectangle.width * _video_container_size.width;
+               int const height = sub.rectangle.height * _video_container_size.height;
+               if (width == 0 || height == 0) {
+                       continue;
+               }
 
-       dcp::Size scaled_size (width, height);
-       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), subtitle.sub.rectangle));
-       DCPTime from (content_time_to_dcp (piece, subtitle.from()));
+               dcp::Size scaled_size (width, height);
+               ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
+       }
 
-       _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
+       DCPTime from(content_time_to_dcp(piece, subtitle.from()));
+       _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
 }
 
 
 void
-Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
+Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
 {
        if (_suspended) {
                return;
        }
 
-       auto piece = wp.lock ();
-       auto text = wc.lock ();
-       if (!piece || !text) {
+       auto piece = weak_piece.lock ();
+       auto content = weak_content.lock ();
+       if (!piece || !content) {
                return;
        }
 
@@ -1118,10 +1164,10 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Co
        }
 
        for (auto s: subtitle.subs) {
-               s.set_h_position (s.h_position() + text->x_offset ());
-               s.set_v_position (s.v_position() + text->y_offset ());
-               float const xs = text->x_scale();
-               float const ys = text->y_scale();
+               s.set_h_position (s.h_position() + content->x_offset());
+               s.set_v_position (s.v_position() + content->y_offset());
+               float const xs = content->x_scale();
+               float const ys = content->y_scale();
                float size = s.size();
 
                /* Adjust size to express the common part of the scaling;
@@ -1138,31 +1184,31 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Co
                }
 
                s.set_in (dcp::Time(from.seconds(), 1000));
-               ps.string.push_back (StringText (s, text->outline_width()));
-               ps.add_fonts (text->fonts ());
+               ps.string.push_back (StringText (s, content->outline_width()));
+               ps.add_fonts (content->fonts ());
        }
 
-       _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
+       _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
 }
 
 
 void
-Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
+Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
 {
        if (_suspended) {
                return;
        }
 
-       auto text = wc.lock ();
-       if (!text) {
+       auto content = weak_content.lock ();
+       if (!content) {
                return;
        }
 
-       if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
+       if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
                return;
        }
 
-       shared_ptr<Piece> piece = wp.lock ();
+       auto piece = weak_piece.lock ();
        if (!piece) {
                return;
        }
@@ -1173,11 +1219,11 @@ Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Conte
                return;
        }
 
-       auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
+       auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
 
-       bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
-       if (text->use() && !always && !text->burn()) {
-               Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
+       bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
+       if (content->use() && !always && !content->burn()) {
+               Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
        }
 }
 
@@ -1228,13 +1274,13 @@ Player::seek (DCPTime time, bool accurate)
        }
 
        if (accurate) {
-               _last_video_time = time;
-               _last_video_eyes = Eyes::LEFT;
-               _last_audio_time = time;
+               _next_video_time = time;
+               _next_video_eyes = Eyes::LEFT;
+               _next_audio_time = time;
        } else {
-               _last_video_time = optional<DCPTime>();
-               _last_video_eyes = optional<Eyes>();
-               _last_audio_time = optional<DCPTime>();
+               _next_video_time = boost::none;
+               _next_video_eyes = boost::none;
+               _next_audio_time = boost::none;
        }
 
        _black.set_position (time);
@@ -1257,15 +1303,15 @@ Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
                }
        }
 
-       /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
+       /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
           player before the video that requires them.
        */
        _delay.push_back (make_pair (pv, time));
 
        if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-               _last_video_time = time + one_video_frame();
+               _next_video_time = time + one_video_frame();
        }
-       _last_video_eyes = increment_eyes (pv->eyes());
+       _next_video_eyes = increment_eyes (pv->eyes());
 
        if (_delay.size() < 3) {
                return;
@@ -1291,6 +1337,7 @@ Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
                pv->set_text (subtitles.get ());
        }
 
+       LOG_DEBUG_PLAYER("Player --> Video %1 %2", to_string(time), static_cast<int>(pv->eyes()));
        Video (pv, time);
 }
 
@@ -1299,14 +1346,14 @@ void
 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
 {
        /* Log if the assert below is about to fail */
-       if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
-               _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
+       if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
+               _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
        }
 
        /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
-       DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
+       DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
        Audio (data, time, _film->audio_frame_rate());
-       _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
+       _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
 }
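
Where the half-a-sample figure comes from, assuming DCPTime's 96000-units-per-second base from dcpomatic_time.h: one sample at 48kHz spans two units, so requiring the error to be strictly below 2 units tolerates at most one unit (half a sample) of rounding drift.

    #include <cstdint>

    int64_t const TIME_HZ = 96000;      /* assumed DCPTime units per second */
    int64_t const SAMPLE_RATE = 48000;
    static_assert (TIME_HZ / SAMPLE_RATE == 2, "one 48kHz sample spans two DCPTime units");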
 
 
@@ -1377,7 +1424,7 @@ Player::set_dcp_decode_reduction (optional<int> reduction)
 
 
 optional<DCPTime>
-Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
+Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
 {
        boost::mutex::scoped_lock lm (_mutex);
 
@@ -1392,6 +1439,22 @@ Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
 }
 
 
+optional<ContentTime>
+Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
+{
+       boost::mutex::scoped_lock lm (_mutex);
+
+       for (auto i: _pieces) {
+               if (i->content == content) {
+                       return dcp_to_content_time (i, t);
+               }
+       }
+
+       /* We couldn't find this content; perhaps things are being changed over */
+       return {};
+}
+
+
 shared_ptr<const Playlist>
 Player::playlist () const
 {
@@ -1400,12 +1463,22 @@ Player::playlist () const
 
 
 void
-Player::atmos (weak_ptr<Piece>, ContentAtmos data)
+Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
 {
        if (_suspended) {
                return;
        }
 
-       Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
+       auto piece = weak_piece.lock ();
+       DCPOMATIC_ASSERT (piece);
+
+       auto const vfr = _film->video_frame_rate();
+
+       DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
+       if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
+               return;
+       }
+
+       Atmos (data.data, dcp_time, data.metadata);
 }