Use enum class for VideoRange.
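
As context for this change, here is a minimal sketch of the scoped enumeration the
diff switches to (the real declaration lives elsewhere in the tree and may differ;
the enumerator comments are assumptions):

    // Before: unscoped constants such as VIDEO_RANGE_FULL were used directly.
    // After: a scoped enum, so call sites read VideoRange::FULL instead.
    enum class VideoRange {
        FULL,  // full-range levels (e.g. 0-255 for 8-bit)
        VIDEO  // video/limited-range levels (e.g. 16-235 for 8-bit)
    };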
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 07447d531db7036ed61ef1ef414d18e9cc1f7685..cbfea55b6d5a9cf8e153d84ac73fd076d55e0ade 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
 
 */
 
+#include "atmos_decoder.h"
 #include "player.h"
 #include "film.h"
 #include "audio_buffers.h"
 #include "content_audio.h"
 #include "dcp_content.h"
+#include "dcpomatic_log.h"
 #include "job.h"
 #include "image.h"
 #include "raw_image_proxy.h"
@@ -54,7 +56,6 @@
 #include <dcp/reel_subtitle_asset.h>
 #include <dcp/reel_picture_asset.h>
 #include <dcp/reel_closed_caption_asset.h>
-#include <boost/foreach.hpp>
 #include <stdint.h>
 #include <algorithm>
 #include <iostream>
@@ -71,11 +72,14 @@ using std::pair;
 using std::map;
 using std::make_pair;
 using std::copy;
-using boost::shared_ptr;
-using boost::weak_ptr;
-using boost::dynamic_pointer_cast;
+using std::shared_ptr;
+using std::weak_ptr;
+using std::dynamic_pointer_cast;
 using boost::optional;
 using boost::scoped_ptr;
+#if BOOST_VERSION >= 106100
+using namespace boost::placeholders;
+#endif
 using namespace dcpomatic;
 
 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
@@ -85,9 +89,8 @@ int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
 int const PlayerProperty::PLAYBACK_LENGTH = 705;
 
-Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist, DCPTime playback_length)
+Player::Player (shared_ptr<const Film> film)
        : _film (film)
-       , _playlist (playlist)
        , _suspended (0)
        , _ignore_video (false)
        , _ignore_audio (false)
@@ -98,14 +101,36 @@ Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
        , _shuffler (0)
-       , _playback_length (playback_length)
+{
+       construct ();
+}
+
+Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
+       : _film (film)
+       , _playlist (playlist_)
+       , _suspended (0)
+       , _ignore_video (false)
+       , _ignore_audio (false)
+       , _ignore_text (false)
+       , _always_burn_open_subtitles (false)
+       , _fast (false)
+       , _tolerant (film->tolerant())
+       , _play_referenced (false)
+       , _audio_merger (_film->audio_frame_rate())
+       , _shuffler (0)
+{
+       construct ();
+}
+
+void
+Player::construct ()
 {
        _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
        /* The butler must hear about this first, so since we are proxying this through to the butler we must
           be first.
        */
-       _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
-       _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
+       _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
+       _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
        set_video_container_size (_film->frame_size ());
 
        film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
@@ -127,19 +152,10 @@ Player::setup_pieces ()
 }
 
 
-void
-Player::set_playback_length (DCPTime len)
-{
-       Change (CHANGE_TYPE_PENDING, PlayerProperty::PLAYBACK_LENGTH, false);
-       _playback_length = len;
-       Change (CHANGE_TYPE_DONE, PlayerProperty::PLAYBACK_LENGTH, false);
-       setup_pieces ();
-}
-
 bool
 have_video (shared_ptr<const Content> content)
 {
-       return static_cast<bool>(content->video);
+       return static_cast<bool>(content->video) && content->video->use();
 }
 
 bool
@@ -151,6 +167,8 @@ have_audio (shared_ptr<const Content> content)
 void
 Player::setup_pieces_unlocked ()
 {
+       _playback_length = _playlist ? _playlist->length(_film) : _film->length();
+
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();
 
@@ -158,7 +176,7 @@ Player::setup_pieces_unlocked ()
        _shuffler = new Shuffler();
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
 
-       BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+       for (auto i: playlist()->content()) {
 
                if (!i->paths_valid ()) {
                        continue;
@@ -170,7 +188,7 @@ Player::setup_pieces_unlocked ()
                }
 
                shared_ptr<Decoder> old_decoder;
-               BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
+               for (auto j: old_pieces) {
                        if (j->content == i) {
                                old_decoder = j->decoder;
                                break;
@@ -178,12 +196,9 @@ Player::setup_pieces_unlocked ()
                }
 
                shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
-               FrameRateChange frc (_film, i);
+               DCPOMATIC_ASSERT (decoder);
 
-               if (!decoder) {
-                       /* Not something that we can decode; e.g. Atmos content */
-                       continue;
-               }
+               FrameRateChange frc (_film, i);
 
                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
@@ -194,7 +209,7 @@ Player::setup_pieces_unlocked ()
                }
 
                if (_ignore_text) {
-                       BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
+                       for (auto i: decoder->text) {
                                i->set_ignore (true);
                        }
                }
@@ -238,19 +253,23 @@ Player::setup_pieces_unlocked ()
 
                        ++j;
                }
+
+               if (decoder->atmos) {
+                       decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
+               }
        }
 
        _stream_states.clear ();
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (i->content->audio) {
-                       BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
+                       for (auto j: i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }
 
-       _black = Empty (_film, _playlist, bind(&have_video, _1), _playback_length);
-       _silent = Empty (_film, _playlist, bind(&have_audio, _1), _playback_length);
+       _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
+       _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
 
        _last_video_time = DCPTime ();
        _last_video_eyes = EYES_BOTH;
@@ -260,18 +279,28 @@ Player::setup_pieces_unlocked ()
 void
 Player::playlist_content_change (ChangeType type, int property, bool frequent)
 {
-       if (type == CHANGE_TYPE_PENDING) {
-               /* The player content is probably about to change, so we can't carry on
-                  until that has happened and we've rebuilt our pieces.  Stop pass()
-                  and seek() from working until then.
-               */
-               ++_suspended;
-       } else if (type == CHANGE_TYPE_DONE) {
-               /* A change in our content has gone through.  Re-build our pieces. */
-               setup_pieces ();
-               --_suspended;
-       } else if (type == CHANGE_TYPE_CANCELLED) {
-               --_suspended;
+       if (property == VideoContentProperty::CROP) {
+               if (type == CHANGE_TYPE_DONE) {
+                       dcp::Size const vcs = video_container_size();
+                       boost::mutex::scoped_lock lm (_mutex);
+                       for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
+                               i->first->reset_metadata (_film, vcs);
+                       }
+               }
+       } else {
+               if (type == CHANGE_TYPE_PENDING) {
+                       /* The player content is probably about to change, so we can't carry on
+                          until that has happened and we've rebuilt our pieces.  Stop pass()
+                          and seek() from working until then.
+                       */
+                       ++_suspended;
+               } else if (type == CHANGE_TYPE_DONE) {
+                       /* A change in our content has gone through.  Re-build our pieces. */
+                       setup_pieces ();
+                       --_suspended;
+               } else if (type == CHANGE_TYPE_CANCELLED) {
+                       --_suspended;
+               }
        }
 
        Change (type, property, frequent);
@@ -353,8 +382,8 @@ Player::black_player_video_frame (Eyes eyes) const
                        eyes,
                        PART_WHOLE,
                        PresetColourConversion::all().front().conversion,
-                       VIDEO_RANGE_FULL,
-                       boost::weak_ptr<Content>(),
+                       VideoRange::FULL,
+                       std::weak_ptr<Content>(),
                        boost::optional<Frame>(),
                        false
                )
@@ -418,20 +447,18 @@ Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
        return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
 }
 
-list<shared_ptr<Font> >
+vector<FontData>
 Player::get_subtitle_fonts ()
 {
        boost::mutex::scoped_lock lm (_mutex);
 
-       list<shared_ptr<Font> > fonts;
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-               BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
-                       /* XXX: things may go wrong if there are duplicate font IDs
-                          with different font files.
-                       */
-                       list<shared_ptr<Font> > f = j->fonts ();
-                       copy (f.begin(), f.end(), back_inserter (fonts));
-               }
+       vector<FontData> fonts;
+       for (auto i: _pieces) {
+               /* XXX: things may go wrong if there are duplicate font IDs
+                  with different font files.
+               */
+               vector<FontData> f = i->decoder->fonts ();
+               copy (f.begin(), f.end(), back_inserter(fonts));
        }
 
        return fonts;
@@ -507,7 +534,7 @@ Player::get_reel_assets ()
 
        list<ReferencedReelAsset> a;
 
-       BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+       for (auto i: playlist()->content()) {
                shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
                if (!j) {
                        continue;
@@ -530,12 +557,12 @@ Player::get_reel_assets ()
                int64_t offset_from_start = 0;
                /* position in the asset from the end */
                int64_t offset_from_end = 0;
-               BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
+               for (auto k: decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += k->main_picture()->actual_duration();
                }
 
-               BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
+               for (auto k: decoder->reels()) {
 
                        /* Assume that main picture duration is the length of the reel */
                        int64_t const reel_duration = k->main_picture()->actual_duration();
@@ -558,7 +585,7 @@ Player::get_reel_assets ()
                        }
 
                        if (j->reference_text (TEXT_CLOSED_CAPTION)) {
-                               BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
+                               for (auto l: k->closed_captions()) {
                                        maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
                                }
                        }
@@ -578,6 +605,7 @@ Player::pass ()
 
        if (_suspended) {
                /* We can't pass in this state */
+               LOG_DEBUG_PLAYER_NC ("Player is suspended");
                return false;
        }
 
@@ -592,7 +620,7 @@ Player::pass ()
        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (i->done) {
                        continue;
                }
@@ -638,6 +666,7 @@ Player::pass ()
        switch (which) {
        case CONTENT:
        {
+               LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
                shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
@@ -650,11 +679,13 @@ Player::pass ()
                break;
        }
        case BLACK:
+               LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
                emit_video (black_player_video_frame(EYES_BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
+               LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
                DCPTimePeriod period (_silent.period_at_position());
                if (_last_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
@@ -698,6 +729,7 @@ Player::pass ()
                pull_to = _silent.position();
        }
 
+       LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
@@ -732,13 +764,13 @@ Player::open_subtitles_for_frame (DCPTime time) const
        list<PositionImage> captions;
        int const vfr = _film->video_frame_rate();
 
-       BOOST_FOREACH (
-               PlayerText j,
+       for (
+               auto j:
                _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {
 
                /* Bitmap subtitles */
-               BOOST_FOREACH (BitmapText i, j.bitmap) {
+               for (auto i: j.bitmap) {
                        if (!i.image) {
                                continue;
                        }
@@ -779,6 +811,10 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                return;
        }
 
+       if (!piece->content->video->use()) {
+               return;
+       }
+
        FrameRateChange frc (_film, piece->content);
        if (frc.skip && (video.frame % 2) == 1) {
                return;
@@ -786,6 +822,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 
        /* Time of the first frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);
+       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
 
        /* Discard if it's before the content's period or the last accurate seek.  We can't discard
           if it's after the content's period here as in that case we still need to fill any gap between
@@ -822,10 +859,12 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                                }
                                while (j < fill_to || eyes != fill_to_eyes) {
                                        if (last != _last_video.end()) {
+                                               LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
                                                shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
                                                copy->set_eyes (eyes);
                                                emit_video (copy, j);
                                        } else {
+                                               LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
                                                emit_video (black_player_video_frame(eyes), j);
                                        }
                                        if (eyes == EYES_RIGHT) {
@@ -850,9 +889,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                        video.image,
                        piece->content->video->crop (),
                        piece->content->video->fade (_film, video.frame),
-                       piece->content->video->scale().size (
-                               piece->content->video, _video_container_size, _film->frame_size ()
-                               ),
+                       scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
                        _video_container_size,
                        video.eyes,
                        video.part,
@@ -890,6 +927,8 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
 
        /* Compute time in the DCP */
        DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
+       LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
+
        /* And the end of this block in the DCP */
        DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
 
@@ -972,7 +1011,7 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
        }
 
        dcp::Size scaled_size (width, height);
-       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
+       ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
        DCPTime from (content_time_to_dcp (piece, subtitle.from()));
 
        _active_texts[text->type()].add_from (wc, ps, from);
@@ -994,7 +1033,7 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, Co
                return;
        }
 
-       BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
+       for (auto s: subtitle.subs) {
                s.set_h_position (s.h_position() + text->x_offset ());
                s.set_v_position (s.v_position() + text->y_offset ());
                float const xs = text->x_scale();
@@ -1057,6 +1096,7 @@ void
 Player::seek (DCPTime time, bool accurate)
 {
        boost::mutex::scoped_lock lm (_mutex);
+       LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
 
        if (_suspended) {
                /* We can't seek in this state */
@@ -1078,7 +1118,7 @@ Player::seek (DCPTime time, bool accurate)
                _active_texts[i].clear ();
        }
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (time < i->content->position()) {
                        /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
                           we must seek this (following) content accurately, otherwise when we come to the end of the current
@@ -1233,7 +1273,7 @@ Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
 {
        boost::mutex::scoped_lock lm (_mutex);
 
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+       for (auto i: _pieces) {
                if (i->content == content) {
                        return content_time_to_dcp (i, t);
                }
@@ -1242,3 +1282,18 @@ Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
        /* We couldn't find this content; perhaps things are being changed over */
        return optional<DCPTime>();
 }
+
+
+shared_ptr<const Playlist>
+Player::playlist () const
+{
+       return _playlist ? _playlist : _film->playlist();
+}
+
+
+void
+Player::atmos (weak_ptr<Piece>, ContentAtmos data)
+{
+       Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
+}
+
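
The constructor split above means a Player can be built either from the film's own
playlist or from an explicit one, with playlist() falling back to _film->playlist()
when none was given. A rough usage sketch (the helper and variable names below are
illustrative only, not part of the change):

    #include "film.h"
    #include "player.h"
    #include "playlist.h"
    #include <memory>

    std::shared_ptr<Player>
    make_player (std::shared_ptr<const Film> film, std::shared_ptr<const Playlist> playlist)
    {
        if (playlist) {
            /* Play an explicit playlist (e.g. a subset of content) against the film */
            return std::make_shared<Player>(film, playlist);
        }
        /* Play the whole film; the Player derives its playlist from the film itself */
        return std::make_shared<Player>(film);
    }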