Use enum class for VideoRange.
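The rename from VIDEO_RANGE_FULL to VideoRange::FULL visible in black_player_video_frame() below is the enum-class conversion the title describes. A minimal sketch of the idea, assuming a VideoRange type declared in a shared header; the VIDEO enumerator and the exact declaration are illustrative, since only FULL appears in this diff:

	// Scoped enum: values must be qualified (VideoRange::FULL) and no longer
	// convert implicitly to int, so mixing them up with other enums or with
	// magic numbers becomes a compile error.
	enum class VideoRange {
		FULL,   // was the unscoped VIDEO_RANGE_FULL
		VIDEO   // assumed second enumerator, not shown in this diff
	};

	// Call sites change from the unscoped to the qualified spelling:
	VideoRange const r = VideoRange::FULL;   // previously: VIDEO_RANGE_FULL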
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 657c1a8b4ed9e2d168a2ffdbc156b46704824545..cbfea55b6d5a9cf8e153d84ac73fd076d55e0ade 100644
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
 
 */
 
 
+#include "atmos_decoder.h"
 #include "player.h"
 #include "film.h"
 #include "audio_buffers.h"
 #include "content_audio.h"
 #include "dcp_content.h"
+#include "dcpomatic_log.h"
 #include "job.h"
 #include "image.h"
 #include "raw_image_proxy.h"
@@ -54,7 +56,6 @@
 #include <dcp/reel_subtitle_asset.h>
 #include <dcp/reel_picture_asset.h>
 #include <dcp/reel_closed_caption_asset.h>
-#include <boost/foreach.hpp>
 #include <stdint.h>
 #include <algorithm>
 #include <iostream>
@@ -71,11 +72,14 @@ using std::pair;
 using std::map;
 using std::make_pair;
 using std::copy;
-using boost::shared_ptr;
-using boost::weak_ptr;
-using boost::dynamic_pointer_cast;
+using std::shared_ptr;
+using std::weak_ptr;
+using std::dynamic_pointer_cast;
 using boost::optional;
 using boost::scoped_ptr;
+#if BOOST_VERSION >= 106100
+using namespace boost::placeholders;
+#endif
 using namespace dcpomatic;
 
 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
@@ -83,11 +87,11 @@ int const PlayerProperty::PLAYLIST = 701;
 int const PlayerProperty::FILM_CONTAINER = 702;
 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
+int const PlayerProperty::PLAYBACK_LENGTH = 705;
 
 
-Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
+Player::Player (shared_ptr<const Film> film)
        : _film (film)
-       , _playlist (playlist)
-       , _suspended (false)
+       , _suspended (0)
        , _ignore_video (false)
        , _ignore_audio (false)
        , _ignore_text (false)
@@ -97,13 +101,36 @@ Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
        , _shuffler (0)
+{
+       construct ();
+}
+
+Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
+       : _film (film)
+       , _playlist (playlist_)
+       , _suspended (0)
+       , _ignore_video (false)
+       , _ignore_audio (false)
+       , _ignore_text (false)
+       , _always_burn_open_subtitles (false)
+       , _fast (false)
+       , _tolerant (film->tolerant())
+       , _play_referenced (false)
+       , _audio_merger (_film->audio_frame_rate())
+       , _shuffler (0)
+{
+       construct ();
+}
+
+void
+Player::construct ()
 {
        _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
        /* The butler must hear about this first, so since we are proxying this through to the butler we must
           be first.
        */
-       _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
-       _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
+       _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
+       _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
        set_video_container_size (_film->frame_size ());
 
        film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
@@ -124,21 +151,24 @@ Player::setup_pieces ()
        setup_pieces_unlocked ();
 }
 
+
 bool
-have_video (shared_ptr<Piece> piece)
+have_video (shared_ptr<const Content> content)
 {
-       return piece->decoder && piece->decoder->video;
+       return static_cast<bool>(content->video) && content->video->use();
 }
 
 bool
-have_audio (shared_ptr<Piece> piece)
+have_audio (shared_ptr<const Content> content)
 {
-       return piece->decoder && piece->decoder->audio;
+       return static_cast<bool>(content->audio);
 }
 
 void
 Player::setup_pieces_unlocked ()
 {
+       _playback_length = _playlist ? _playlist->length(_film) : _film->length();
+
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();
 
@@ -146,7 +176,7 @@ Player::setup_pieces_unlocked ()
        _shuffler = new Shuffler();
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
 
-       BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+       for (auto i: playlist()->content()) {
 
                if (!i->paths_valid ()) {
                        continue;
@@ -158,7 +188,7 @@ Player::setup_pieces_unlocked ()
                }
 
                shared_ptr<Decoder> old_decoder;
-               BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
+               for (auto j: old_pieces) {
                        if (j->content == i) {
                                old_decoder = j->decoder;
                                break;
@@ -166,12 +196,9 @@ Player::setup_pieces_unlocked ()
                }
 
                shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
-               FrameRateChange frc (_film, i);
+               DCPOMATIC_ASSERT (decoder);
 
 
-               if (!decoder) {
-                       /* Not something that we can decode; e.g. Atmos content */
-                       continue;
-               }
+               FrameRateChange frc (_film, i);
 
                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
@@ -182,7 +209,7 @@ Player::setup_pieces_unlocked ()
                }
 
                if (_ignore_text) {
-                       BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
+                       for (auto i: decoder->text) {
                                i->set_ignore (true);
                        }
                }
@@ -226,45 +253,54 @@ Player::setup_pieces_unlocked ()
 
                        ++j;
                }
+
+               if (decoder->atmos) {
+                       decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
+               }
        }
 
        _stream_states.clear ();
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (i->content->audio) {
-                       BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
+                       for (auto j: i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }
 
-       _black = Empty (_film, _pieces, bind(&have_video, _1));
-       _silent = Empty (_film, _pieces, bind(&have_audio, _1));
+       _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
+       _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
 
        _last_video_time = DCPTime ();
        _last_video_eyes = EYES_BOTH;
        _last_audio_time = DCPTime ();
-
-       /* Cached value to save recalculating it on every ::pass */
-       _film_length = _film->length ();
 }
 
 void
 Player::playlist_content_change (ChangeType type, int property, bool frequent)
 {
-       if (type == CHANGE_TYPE_PENDING) {
-               boost::mutex::scoped_lock lm (_mutex);
-               /* The player content is probably about to change, so we can't carry on
-                  until that has happened and we've rebuilt our pieces.  Stop pass()
-                  and seek() from working until then.
-               */
-               _suspended = true;
-       } else if (type == CHANGE_TYPE_DONE) {
-               /* A change in our content has gone through.  Re-build our pieces. */
-               setup_pieces ();
-               _suspended = false;
-       } else if (type == CHANGE_TYPE_CANCELLED) {
-               boost::mutex::scoped_lock lm (_mutex);
-               _suspended = false;
+       if (property == VideoContentProperty::CROP) {
+               if (type == CHANGE_TYPE_DONE) {
+                       dcp::Size const vcs = video_container_size();
+                       boost::mutex::scoped_lock lm (_mutex);
+                       for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
+                               i->first->reset_metadata (_film, vcs);
+                       }
+               }
+       } else {
+               if (type == CHANGE_TYPE_PENDING) {
+                       /* The player content is probably about to change, so we can't carry on
+                          until that has happened and we've rebuilt our pieces.  Stop pass()
+                          and seek() from working until then.
+                       */
+                       ++_suspended;
+               } else if (type == CHANGE_TYPE_DONE) {
+                       /* A change in our content has gone through.  Re-build our pieces. */
+                       setup_pieces ();
+                       --_suspended;
+               } else if (type == CHANGE_TYPE_CANCELLED) {
+                       --_suspended;
+               }
        }
 
        Change (type, property, frequent);
@@ -346,9 +382,10 @@ Player::black_player_video_frame (Eyes eyes) const
                        eyes,
                        PART_WHOLE,
                        PresetColourConversion::all().front().conversion,
-                       VIDEO_RANGE_FULL,
-                       boost::weak_ptr<Content>(),
-                       boost::optional<Frame>()
+                       VideoRange::FULL,
+                       std::weak_ptr<Content>(),
+                       boost::optional<Frame>(),
+                       false
                )
        );
 }
@@ -410,20 +447,18 @@ Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
        return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
 }
 
-list<shared_ptr<Font> >
+vector<FontData>
 Player::get_subtitle_fonts ()
 {
        boost::mutex::scoped_lock lm (_mutex);
 
-       list<shared_ptr<Font> > fonts;
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-               BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
-                       /* XXX: things may go wrong if there are duplicate font IDs
-                          with different font files.
-                       */
-                       list<shared_ptr<Font> > f = j->fonts ();
-                       copy (f.begin(), f.end(), back_inserter (fonts));
-               }
+       vector<FontData> fonts;
+       for (auto i: _pieces) {
+               /* XXX: things may go wrong if there are duplicate font IDs
+                  with different font files.
+               */
+               vector<FontData> f = i->decoder->fonts ();
+               copy (f.begin(), f.end(), back_inserter(fonts));
        }
 
        return fonts;
@@ -499,7 +534,7 @@ Player::get_reel_assets ()
 
        list<ReferencedReelAsset> a;
 
-       BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+       for (auto i: playlist()->content()) {
                shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
                if (!j) {
                        continue;
@@ -522,12 +557,12 @@ Player::get_reel_assets ()
                int64_t offset_from_start = 0;
                /* position in the asset from the end */
                int64_t offset_from_end = 0;
-               BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
+               for (auto k: decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += k->main_picture()->actual_duration();
                }
 
-               BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
+               for (auto k: decoder->reels()) {
 
                        /* Assume that main picture duration is the length of the reel */
                        int64_t const reel_duration = k->main_picture()->actual_duration();
@@ -550,7 +585,7 @@ Player::get_reel_assets ()
                        }
 
                        if (j->reference_text (TEXT_CLOSED_CAPTION)) {
-                               BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
+                               for (auto l: k->closed_captions()) {
                                        maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
                                }
                        }
@@ -567,15 +602,15 @@ bool
 Player::pass ()
 {
        boost::mutex::scoped_lock lm (_mutex);
-       DCPOMATIC_ASSERT (_film_length);
 
        if (_suspended) {
                /* We can't pass in this state */
+               LOG_DEBUG_PLAYER_NC ("Player is suspended");
                return false;
        }
 
-       if (*_film_length == DCPTime()) {
-               /* Special case of an empty Film; just give one black frame */
+       if (_playback_length == DCPTime()) {
+               /* Special; just give one black frame */
                emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
                return true;
        }
@@ -585,7 +620,7 @@ Player::pass ()
        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (i->done) {
                        continue;
                }
@@ -631,6 +666,7 @@ Player::pass ()
        switch (which) {
        case CONTENT:
        {
+               LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
                shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
@@ -643,11 +679,13 @@ Player::pass ()
                break;
        }
        case BLACK:
+               LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
                emit_video (black_player_video_frame(EYES_BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
+               LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
                DCPTimePeriod period (_silent.period_at_position());
                if (_last_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
@@ -681,7 +719,7 @@ Player::pass ()
        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of the _silent.
        */
-       DCPTime pull_to = *_film_length;
+       DCPTime pull_to = _playback_length;
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_to) {
                        pull_to = i->second.last_push_end;
@@ -691,6 +729,7 @@ Player::pass ()
                pull_to = _silent.position();
        }
 
+       LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
@@ -725,13 +764,13 @@ Player::open_subtitles_for_frame (DCPTime time) const
        list<PositionImage> captions;
        int const vfr = _film->video_frame_rate();
 
-       BOOST_FOREACH (
-               PlayerText j,
+       for (
+               auto j:
                _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {
 
                /* Bitmap subtitles */
-               BOOST_FOREACH (BitmapText i, j.bitmap) {
+               for (auto i: j.bitmap) {
                        if (!i.image) {
                                continue;
                        }
@@ -772,6 +811,10 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                return;
        }
 
+       if (!piece->content->video->use()) {
+               return;
+       }
+
        FrameRateChange frc (_film, piece->content);
        if (frc.skip && (video.frame % 2) == 1) {
                return;
@@ -779,6 +822,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 
        /* Time of the first frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);
+       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
 
        /* Discard if it's before the content's period or the last accurate seek.  We can't discard
           if it's after the content's period here as in that case we still need to fill any gap between
@@ -815,10 +859,12 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                                }
                                while (j < fill_to || eyes != fill_to_eyes) {
                                        if (last != _last_video.end()) {
+                                               LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
                                                shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
                                                copy->set_eyes (eyes);
                                                emit_video (copy, j);
                                        } else {
+                                               LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
                                                emit_video (black_player_video_frame(eyes), j);
                                        }
                                        if (eyes == EYES_RIGHT) {
@@ -843,16 +889,15 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                        video.image,
                        piece->content->video->crop (),
                        piece->content->video->fade (_film, video.frame),
-                       piece->content->video->scale().size (
-                               piece->content->video, _video_container_size, _film->frame_size ()
-                               ),
+                       scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
                        _video_container_size,
                        video.eyes,
                        video.part,
                        piece->content->video->colour_conversion(),
                        piece->content->video->range(),
                        piece->content,
-                       video.frame
+                       video.frame,
+                       false
                        )
                );
 
@@ -882,6 +927,8 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
 
        /* Compute time in the DCP */
        DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
+       LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
+
        /* And the end of this block in the DCP */
        DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
 
@@ -902,9 +949,7 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
                if (remaining_frames == 0) {
                        return;
                }
-               shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
-               cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
-               content_audio.audio = cut;
+               content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
        }
 
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
@@ -957,9 +1002,16 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
 
        PlayerText ps;
        shared_ptr<Image> image = subtitle.sub.image;
+
        /* We will scale the subtitle up to fit _video_container_size */
-       dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
-       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
+       int const width = subtitle.sub.rectangle.width * _video_container_size.width;
+       int const height = subtitle.sub.rectangle.height * _video_container_size.height;
+       if (width == 0 || height == 0) {
+               return;
+       }
+
+       dcp::Size scaled_size (width, height);
+       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
        DCPTime from (content_time_to_dcp (piece, subtitle.from()));
 
        _active_texts[text->type()].add_from (wc, ps, from);
@@ -981,7 +1033,7 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Co
                return;
        }
 
-       BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
+       for (auto s: subtitle.subs) {
                s.set_h_position (s.h_position() + text->x_offset ());
                s.set_v_position (s.v_position() + text->y_offset ());
                float const xs = text->x_scale();
@@ -1044,6 +1096,7 @@ void
 Player::seek (DCPTime time, bool accurate)
 {
        boost::mutex::scoped_lock lm (_mutex);
+       LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
 
        if (_suspended) {
                /* We can't seek in this state */
@@ -1065,7 +1118,7 @@ Player::seek (DCPTime time, bool accurate)
                _active_texts[i].clear ();
        }
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (time < i->content->position()) {
                        /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
                           we must seek this (following) content accurately, otherwise when we come to the end of the current
@@ -1190,8 +1243,7 @@ Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTi
        if (remaining_frames <= 0) {
                return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
        }
-       shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
-       cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
+       shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
        return make_pair(cut, time + discard_time);
 }
 
@@ -1221,7 +1273,7 @@ Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
 {
        boost::mutex::scoped_lock lm (_mutex);
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (i->content == content) {
                        return content_time_to_dcp (i, t);
                }
@@ -1230,3 +1282,18 @@ Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
        /* We couldn't find this content; perhaps things are being changed over */
        return optional<DCPTime>();
 }
+
+
+shared_ptr<const Playlist>
+Player::playlist () const
+{
+       return _playlist ? _playlist : _film->playlist();
+}
+
+
+void
+Player::atmos (weak_ptr<Piece>, ContentAtmos data)
+{
+       Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
+}
+