Use icu to remove diacritics from strings.
diff --git a/src/lib/player.cc b/src/lib/player.cc
index d5a558184196100a6ebc94eb660e6b30e0b186f5..b696f04c196d9cb6c7472448dd77718c4fba768d 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
 
 */
 
+#include "atmos_decoder.h"
 #include "player.h"
 #include "film.h"
 #include "audio_buffers.h"
 #include "content_audio.h"
 #include "dcp_content.h"
+#include "dcpomatic_log.h"
 #include "job.h"
 #include "image.h"
 #include "raw_image_proxy.h"
 #include <dcp/reel_subtitle_asset.h>
 #include <dcp/reel_picture_asset.h>
 #include <dcp/reel_closed_caption_asset.h>
-#include <boost/foreach.hpp>
 #include <stdint.h>
 #include <algorithm>
 #include <iostream>
 
 #include "i18n.h"
 
-using std::list;
+using std::copy;
 using std::cout;
-using std::min;
+using std::dynamic_pointer_cast;
+using std::list;
+using std::make_pair;
+using std::make_shared;
+using std::map;
 using std::max;
 using std::min;
-using std::vector;
+using std::min;
 using std::pair;
-using std::map;
-using std::make_pair;
-using std::copy;
-using boost::shared_ptr;
-using boost::weak_ptr;
-using boost::dynamic_pointer_cast;
+using std::shared_ptr;
+using std::vector;
+using std::weak_ptr;
 using boost::optional;
 using boost::scoped_ptr;
+#if BOOST_VERSION >= 106100
+using namespace boost::placeholders;
+#endif
 using namespace dcpomatic;
 
 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
@@ -83,30 +88,39 @@ int const PlayerProperty::PLAYLIST = 701;
 int const PlayerProperty::FILM_CONTAINER = 702;
 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
+int const PlayerProperty::PLAYBACK_LENGTH = 705;
+
+Player::Player (shared_ptr<const Film> film)
+       : _film (film)
+       , _suspended (0)
+       , _tolerant (film->tolerant())
+       , _audio_merger (_film->audio_frame_rate())
+{
+       construct ();
+}
 
-Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
+Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
        : _film (film)
-       , _playlist (playlist)
+       , _playlist (playlist_)
        , _suspended (0)
-       , _ignore_video (false)
-       , _ignore_audio (false)
-       , _ignore_text (false)
-       , _always_burn_open_subtitles (false)
-       , _fast (false)
        , _tolerant (film->tolerant())
-       , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
-       , _shuffler (0)
+{
+       construct ();
+}
+
+void
+Player::construct ()
 {
        _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
        /* The butler must hear about this first, so since we are proxying this through to the butler we must
           be first.
        */
-       _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
-       _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
+       _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
+       _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
        set_video_container_size (_film->frame_size ());
 
-       film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
+       film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
 
        setup_pieces ();
        seek (DCPTime (), true);
@@ -124,29 +138,32 @@ Player::setup_pieces ()
        setup_pieces_unlocked ();
 }
 
+
 bool
-have_video (shared_ptr<Piece> piece)
+have_video (shared_ptr<const Content> content)
 {
-       return piece->decoder && piece->decoder->video;
+       return static_cast<bool>(content->video) && content->video->use();
 }
 
 bool
-have_audio (shared_ptr<Piece> piece)
+have_audio (shared_ptr<const Content> content)
 {
-       return piece->decoder && piece->decoder->audio;
+       return static_cast<bool>(content->audio);
 }
 
 void
 Player::setup_pieces_unlocked ()
 {
-       list<shared_ptr<Piece> > old_pieces = _pieces;
+       _playback_length = _playlist ? _playlist->length(_film) : _film->length();
+
+       auto old_pieces = _pieces;
        _pieces.clear ();
 
        delete _shuffler;
        _shuffler = new Shuffler();
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
 
-       BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+       for (auto i: playlist()->content()) {
 
                if (!i->paths_valid ()) {
                        continue;
@@ -158,20 +175,17 @@ Player::setup_pieces_unlocked ()
                }
 
                shared_ptr<Decoder> old_decoder;
-               BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
+               for (auto j: old_pieces) {
                        if (j->content == i) {
                                old_decoder = j->decoder;
                                break;
                        }
                }
 
-               shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
-               FrameRateChange frc (_film, i);
+               auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
+               DCPOMATIC_ASSERT (decoder);
 
-               if (!decoder) {
-                       /* Not something that we can decode; e.g. Atmos content */
-                       continue;
-               }
+               FrameRateChange frc (_film, i);
 
                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
@@ -182,12 +196,12 @@ Player::setup_pieces_unlocked ()
                }
 
                if (_ignore_text) {
-                       BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
+                       for (auto i: decoder->text) {
                                i->set_ignore (true);
                        }
                }
 
-               shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
+               auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp) {
                        dcp->set_decode_referenced (_play_referenced);
                        if (_play_referenced) {
@@ -195,15 +209,15 @@ Player::setup_pieces_unlocked ()
                        }
                }
 
-               shared_ptr<Piece> piece (new Piece (i, decoder, frc));
+               auto piece = make_shared<Piece>(i, decoder, frc);
                _pieces.push_back (piece);
 
                if (decoder->video) {
-                       if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
+                       if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
-                               decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
+                               decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
                        } else {
-                               decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
+                               decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
                }
 
@@ -211,7 +225,7 @@ Player::setup_pieces_unlocked ()
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }
 
-               list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
+               auto j = decoder->text.begin();
 
                while (j != decoder->text.end()) {
                        (*j)->BitmapStart.connect (
@@ -226,43 +240,54 @@ Player::setup_pieces_unlocked ()
 
                        ++j;
                }
+
+               if (decoder->atmos) {
+                       decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
+               }
        }
 
        _stream_states.clear ();
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (i->content->audio) {
-                       BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
+                       for (auto j: i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }
 
-       _black = Empty (_film, _pieces, bind(&have_video, _1));
-       _silent = Empty (_film, _pieces, bind(&have_audio, _1));
+       _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
+       _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
 
        _last_video_time = DCPTime ();
-       _last_video_eyes = EYES_BOTH;
+       _last_video_eyes = Eyes::BOTH;
        _last_audio_time = DCPTime ();
-
-       /* Cached value to save recalculating it on every ::pass */
-       _film_length = _film->length ();
 }
 
 void
 Player::playlist_content_change (ChangeType type, int property, bool frequent)
 {
-       if (type == CHANGE_TYPE_PENDING) {
-               /* The player content is probably about to change, so we can't carry on
-                  until that has happened and we've rebuilt our pieces.  Stop pass()
-                  and seek() from working until then.
-               */
-               ++_suspended;
-       } else if (type == CHANGE_TYPE_DONE) {
-               /* A change in our content has gone through.  Re-build our pieces. */
-               setup_pieces ();
-               --_suspended;
-       } else if (type == CHANGE_TYPE_CANCELLED) {
-               --_suspended;
+       if (property == VideoContentProperty::CROP) {
+               if (type == ChangeType::DONE) {
+                       dcp::Size const vcs = video_container_size();
+                       boost::mutex::scoped_lock lm (_mutex);
+                       for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
+                               i->first->reset_metadata (_film, vcs);
+                       }
+               }
+       } else {
+               if (type == ChangeType::PENDING) {
+                       /* The player content is probably about to change, so we can't carry on
+                          until that has happened and we've rebuilt our pieces.  Stop pass()
+                          and seek() from working until then.
+                       */
+                       ++_suspended;
+               } else if (type == ChangeType::DONE) {
+                       /* A change in our content has gone through.  Re-build our pieces. */
+                       setup_pieces ();
+                       --_suspended;
+               } else if (type == ChangeType::CANCELLED) {
+                       --_suspended;
+               }
        }
 
        Change (type, property, frequent);
@@ -271,14 +296,14 @@ Player::playlist_content_change (ChangeType type, int property, bool frequent)
 void
 Player::set_video_container_size (dcp::Size s)
 {
-       Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+       Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
 
        {
                boost::mutex::scoped_lock lm (_mutex);
 
                if (s == _video_container_size) {
                        lm.unlock ();
-                       Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+                       Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
                        return;
                }
 
@@ -288,13 +313,13 @@ Player::set_video_container_size (dcp::Size s)
                _black_image->make_black ();
        }
 
-       Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
+       Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
 }
 
 void
 Player::playlist_change (ChangeType type)
 {
-       if (type == CHANGE_TYPE_DONE) {
+       if (type == ChangeType::DONE) {
                setup_pieces ();
        }
        Change (type, PlayerProperty::PLAYLIST, false);
@@ -308,23 +333,23 @@ Player::film_change (ChangeType type, Film::Property p)
           last time we were run.
        */
 
-       if (p == Film::CONTAINER) {
+       if (p == Film::Property::CONTAINER) {
                Change (type, PlayerProperty::FILM_CONTAINER, false);
-       } else if (p == Film::VIDEO_FRAME_RATE) {
+       } else if (p == Film::Property::VIDEO_FRAME_RATE) {
                /* Pieces contain a FrameRateChange which contains the DCP frame rate,
                   so we need new pieces here.
                */
-               if (type == CHANGE_TYPE_DONE) {
+               if (type == ChangeType::DONE) {
                        setup_pieces ();
                }
                Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
-       } else if (p == Film::AUDIO_PROCESSOR) {
-               if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
+       } else if (p == Film::Property::AUDIO_PROCESSOR) {
+               if (type == ChangeType::DONE && _film->audio_processor ()) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
                }
-       } else if (p == Film::AUDIO_CHANNELS) {
-               if (type == CHANGE_TYPE_DONE) {
+       } else if (p == Film::Property::AUDIO_CHANNELS) {
+               if (type == ChangeType::DONE) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_merger.clear ();
                }
@@ -334,21 +359,19 @@ Player::film_change (ChangeType type, Film::Property p)
 shared_ptr<PlayerVideo>
 Player::black_player_video_frame (Eyes eyes) const
 {
-       return shared_ptr<PlayerVideo> (
-               new PlayerVideo (
-                       shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
-                       Crop (),
-                       optional<double> (),
-                       _video_container_size,
-                       _video_container_size,
-                       eyes,
-                       PART_WHOLE,
-                       PresetColourConversion::all().front().conversion,
-                       VIDEO_RANGE_FULL,
-                       boost::weak_ptr<Content>(),
-                       boost::optional<Frame>(),
-                       false
-               )
+       return std::make_shared<PlayerVideo> (
+               std::make_shared<const RawImageProxy>(_black_image),
+               Crop(),
+               optional<double>(),
+               _video_container_size,
+               _video_container_size,
+               eyes,
+               Part::WHOLE,
+               PresetColourConversion::all().front().conversion,
+               VideoRange::FULL,
+               std::weak_ptr<Content>(),
+               boost::optional<Frame>(),
+               false
        );
 }
 
@@ -373,14 +396,14 @@ DCPTime
 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
 {
        /* See comment in dcp_to_content_video */
-       DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
+       auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
        return d + piece->content->position();
 }
 
 Frame
 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
 {
-       DCPTime s = t - piece->content->position ();
+       auto s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        /* See notes in dcp_to_content_video */
        return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
@@ -398,7 +421,7 @@ Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
 ContentTime
 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
 {
-       DCPTime s = t - piece->content->position ();
+       auto s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
 }
@@ -406,23 +429,21 @@ Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
 DCPTime
 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
 {
-       return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
+       return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
 }
 
-list<shared_ptr<Font> >
+vector<FontData>
 Player::get_subtitle_fonts ()
 {
        boost::mutex::scoped_lock lm (_mutex);
 
-       list<shared_ptr<Font> > fonts;
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-               BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
-                       /* XXX: things may go wrong if there are duplicate font IDs
-                          with different font files.
-                       */
-                       list<shared_ptr<Font> > f = j->fonts ();
-                       copy (f.begin(), f.end(), back_inserter (fonts));
-               }
+       vector<FontData> fonts;
+       for (auto i: _pieces) {
+               /* XXX: things may go wrong if there are duplicate font IDs
+                  with different font files.
+               */
+               auto f = i->decoder->fonts ();
+               copy (f.begin(), f.end(), back_inserter(fonts));
        }
 
        return fonts;
@@ -498,7 +519,7 @@ Player::get_reel_assets ()
 
        list<ReferencedReelAsset> a;
 
-       BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+       for (auto i: playlist()->content()) {
                shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
                if (!j) {
                        continue;
@@ -521,12 +542,12 @@ Player::get_reel_assets ()
                int64_t offset_from_start = 0;
                /* position in the asset from the end */
                int64_t offset_from_end = 0;
-               BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
+               for (auto k: decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += k->main_picture()->actual_duration();
                }
 
-               BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
+               for (auto k: decoder->reels()) {
 
                        /* Assume that main picture duration is the length of the reel */
                        int64_t const reel_duration = k->main_picture()->actual_duration();
@@ -544,12 +565,12 @@ Player::get_reel_assets ()
                                maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
                        }
 
-                       if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
+                       if (j->reference_text (TextType::OPEN_SUBTITLE)) {
                                maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
                        }
 
-                       if (j->reference_text (TEXT_CLOSED_CAPTION)) {
-                               BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
+                       if (j->reference_text (TextType::CLOSED_CAPTION)) {
+                               for (auto l: k->closed_captions()) {
                                        maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
                                }
                        }
@@ -566,16 +587,16 @@ bool
 Player::pass ()
 {
        boost::mutex::scoped_lock lm (_mutex);
-       DCPOMATIC_ASSERT (_film_length);
 
        if (_suspended) {
                /* We can't pass in this state */
+               LOG_DEBUG_PLAYER_NC ("Player is suspended");
                return false;
        }
 
-       if (*_film_length == DCPTime()) {
-               /* Special case of an empty Film; just give one black frame */
-               emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
+       if (_playback_length == DCPTime()) {
+               /* Special; just give one black frame */
+               emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
                return true;
        }
 
@@ -584,7 +605,7 @@ Player::pass ()
        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (i->done) {
                        continue;
                }
@@ -630,6 +651,7 @@ Player::pass ()
        switch (which) {
        case CONTENT:
        {
+               LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
                shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
@@ -642,11 +664,13 @@ Player::pass ()
                break;
        }
        case BLACK:
-               emit_video (black_player_video_frame(EYES_BOTH), _black.position());
+               LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
+               emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
+               LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
                DCPTimePeriod period (_silent.period_at_position());
                if (_last_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
@@ -680,7 +704,7 @@ Player::pass ()
        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of the _silent.
        */
-       DCPTime pull_to = *_film_length;
+       DCPTime pull_to = _playback_length;
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_to) {
                        pull_to = i->second.last_push_end;
@@ -690,6 +714,7 @@ Player::pass ()
                pull_to = _silent.position();
        }
 
+       LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
@@ -724,13 +749,13 @@ Player::open_subtitles_for_frame (DCPTime time) const
        list<PositionImage> captions;
        int const vfr = _film->video_frame_rate();
 
-       BOOST_FOREACH (
-               PlayerText j,
-               _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
+       for (
+               auto j:
+               _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {
 
                /* Bitmap subtitles */
-               BOOST_FOREACH (BitmapText i, j.bitmap) {
+               for (auto i: j.bitmap) {
                        if (!i.image) {
                                continue;
                        }
@@ -771,6 +796,10 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                return;
        }
 
+       if (!piece->content->video->use()) {
+               return;
+       }
+
        FrameRateChange frc (_film, piece->content);
        if (frc.skip && (video.frame % 2) == 1) {
                return;
@@ -778,6 +807,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 
        /* Time of the first frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);
+       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
 
        /* Discard if it's before the content's period or the last accurate seek.  We can't discard
           if it's after the content's period here as in that case we still need to fill any gap between
@@ -800,27 +830,29 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                        LastVideoMap::const_iterator last = _last_video.find (wp);
                        if (_film->three_d()) {
                                Eyes fill_to_eyes = video.eyes;
-                               if (fill_to_eyes == EYES_BOTH) {
-                                       fill_to_eyes = EYES_LEFT;
+                               if (fill_to_eyes == Eyes::BOTH) {
+                                       fill_to_eyes = Eyes::LEFT;
                                }
                                if (fill_to == piece->content->end(_film)) {
                                        /* Don't fill after the end of the content */
-                                       fill_to_eyes = EYES_LEFT;
+                                       fill_to_eyes = Eyes::LEFT;
                                }
                                DCPTime j = fill_from;
-                               Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
-                               if (eyes == EYES_BOTH) {
-                                       eyes = EYES_LEFT;
+                               Eyes eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
+                               if (eyes == Eyes::BOTH) {
+                                       eyes = Eyes::LEFT;
                                }
                                while (j < fill_to || eyes != fill_to_eyes) {
                                        if (last != _last_video.end()) {
+                                               LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
                                                shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
                                                copy->set_eyes (eyes);
                                                emit_video (copy, j);
                                        } else {
+                                               LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
                                                emit_video (black_player_video_frame(eyes), j);
                                        }
-                                       if (eyes == EYES_RIGHT) {
+                                       if (eyes == Eyes::RIGHT) {
                                                j += one_video_frame();
                                        }
                                        eyes = increment_eyes (eyes);
@@ -830,7 +862,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                                        if (last != _last_video.end()) {
                                                emit_video (last->second, j);
                                        } else {
-                                               emit_video (black_player_video_frame(EYES_BOTH), j);
+                                               emit_video (black_player_video_frame(Eyes::BOTH), j);
                                        }
                                }
                        }
@@ -842,9 +874,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                        video.image,
                        piece->content->video->crop (),
                        piece->content->video->fade (_film, video.frame),
-                       piece->content->video->scale().size (
-                               piece->content->video, _video_container_size, _film->frame_size ()
-                               ),
+                       scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
                        _video_container_size,
                        video.eyes,
                        video.part,
@@ -882,6 +912,8 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
 
        /* Compute time in the DCP */
        DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
+       LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
+
        /* And the end of this block in the DCP */
        DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
 
@@ -964,10 +996,10 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
        }
 
        dcp::Size scaled_size (width, height);
-       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
+       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
        DCPTime from (content_time_to_dcp (piece, subtitle.from()));
 
-       _active_texts[text->type()].add_from (wc, ps, from);
+       _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
 }
 
 void
@@ -986,7 +1018,7 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Co
                return;
        }
 
-       BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
+       for (auto s: subtitle.subs) {
                s.set_h_position (s.h_position() + text->x_offset ());
                s.set_v_position (s.v_position() + text->y_offset ());
                float const xs = text->x_scale();
@@ -1011,7 +1043,7 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Co
                ps.add_fonts (text->fonts ());
        }
 
-       _active_texts[text->type()].add_from (wc, ps, from);
+       _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
 }
 
 void
@@ -1022,7 +1054,7 @@ Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Conte
                return;
        }
 
-       if (!_active_texts[text->type()].have(wc)) {
+       if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
                return;
        }
 
@@ -1037,9 +1069,9 @@ Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Conte
                return;
        }
 
-       pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
+       pair<PlayerText, DCPTime> from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
 
-       bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
+       bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
        if (text->use() && !always && !text->burn()) {
                Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
        }
@@ -1049,6 +1081,7 @@ void
 Player::seek (DCPTime time, bool accurate)
 {
        boost::mutex::scoped_lock lm (_mutex);
+       LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
 
        if (_suspended) {
                /* We can't seek in this state */
@@ -1066,11 +1099,11 @@ Player::seek (DCPTime time, bool accurate)
        }
 
        _audio_merger.clear ();
-       for (int i = 0; i < TEXT_COUNT; ++i) {
+       for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
                _active_texts[i].clear ();
        }
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (time < i->content->position()) {
                        /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
                           we must seek this (following) content accurately, otherwise when we come to the end of the current
@@ -1091,7 +1124,7 @@ Player::seek (DCPTime time, bool accurate)
 
        if (accurate) {
                _last_video_time = time;
-               _last_video_eyes = EYES_LEFT;
+               _last_video_eyes = Eyes::LEFT;
                _last_audio_time = time;
        } else {
                _last_video_time = optional<DCPTime>();
@@ -1113,7 +1146,7 @@ Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
        */
        _delay.push_back (make_pair (pv, time));
 
-       if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
+       if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
                _last_video_time = time + one_video_frame();
        }
        _last_video_eyes = increment_eyes (pv->eyes());
@@ -1130,13 +1163,13 @@ Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 void
 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 {
-       if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
-               for (int i = 0; i < TEXT_COUNT; ++i) {
+       if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+               for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
                        _active_texts[i].clear_before (time);
                }
        }
 
-       optional<PositionImage> subtitles = open_subtitles_for_frame (time);
+       auto subtitles = open_subtitles_for_frame (time);
        if (subtitles) {
                pv->set_text (subtitles.get ());
        }
@@ -1202,14 +1235,14 @@ Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTi
 void
 Player::set_dcp_decode_reduction (optional<int> reduction)
 {
-       Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
+       Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
 
        {
                boost::mutex::scoped_lock lm (_mutex);
 
                if (reduction == _dcp_decode_reduction) {
                        lm.unlock ();
-                       Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
+                       Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
                        return;
                }
 
@@ -1217,7 +1250,7 @@ Player::set_dcp_decode_reduction (optional<int> reduction)
                setup_pieces_unlocked ();
        }
 
-       Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
+       Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
 }
 
 optional<DCPTime>
@@ -1225,12 +1258,27 @@ Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
 {
        boost::mutex::scoped_lock lm (_mutex);
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (i->content == content) {
                        return content_time_to_dcp (i, t);
                }
        }
 
        /* We couldn't find this content; perhaps things are being changed over */
-       return optional<DCPTime>();
+       return {};
+}
+
+
+shared_ptr<const Playlist>
+Player::playlist () const
+{
+       return _playlist ? _playlist : _film->playlist();
+}
+
+
+void
+Player::atmos (weak_ptr<Piece>, ContentAtmos data)
+{
+       Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
 }
+