X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fplayer.cc;h=e6e8aca86ea198bedaa1e5e4992732639ec4ee5e;hb=dd9be86db6cde0afa5da0d1d1ac43b42e05dca26;hp=62527e3ebbf3b60408bd2cc967ffb568f529ddf8;hpb=df48c75c38dd788835a93540aea243a2dac4bb10;p=dcpomatic.git

diff --git a/src/lib/player.cc b/src/lib/player.cc
index 62527e3eb..e6e8aca86 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -18,11 +18,13 @@
 
 */
 
+#include "atmos_decoder.h"
 #include "player.h"
 #include "film.h"
 #include "audio_buffers.h"
 #include "content_audio.h"
 #include "dcp_content.h"
+#include "dcpomatic_log.h"
 #include "job.h"
 #include "image.h"
 #include "raw_image_proxy.h"
@@ -71,11 +73,14 @@ using std::pair;
 using std::map;
 using std::make_pair;
 using std::copy;
-using boost::shared_ptr;
-using boost::weak_ptr;
-using boost::dynamic_pointer_cast;
+using std::shared_ptr;
+using std::weak_ptr;
+using std::dynamic_pointer_cast;
 using boost::optional;
 using boost::scoped_ptr;
+#if BOOST_VERSION >= 106100
+using namespace boost::placeholders;
+#endif
 using namespace dcpomatic;
 
 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
@@ -83,10 +88,10 @@ int const PlayerProperty::PLAYLIST = 701;
 int const PlayerProperty::FILM_CONTAINER = 702;
 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
+int const PlayerProperty::PLAYBACK_LENGTH = 705;
 
-Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
+Player::Player (shared_ptr<const Film> film)
 	: _film (film)
-	, _playlist (playlist)
 	, _suspended (0)
 	, _ignore_video (false)
 	, _ignore_audio (false)
@@ -97,13 +102,36 @@ Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist
 	, _play_referenced (false)
 	, _audio_merger (_film->audio_frame_rate())
 	, _shuffler (0)
+{
+	construct ();
+}
+
+Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
+	: _film (film)
+	, _playlist (playlist_)
+	, _suspended (0)
+	, _ignore_video (false)
+	, _ignore_audio (false)
+	, _ignore_text (false)
+	, _always_burn_open_subtitles (false)
+	, _fast (false)
+	, _tolerant (film->tolerant())
+	, _play_referenced (false)
+	, _audio_merger (_film->audio_frame_rate())
+	, _shuffler (0)
+{
+	construct ();
+}
+
+void
+Player::construct ()
 {
 	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
 	/* The butler must hear about this first, so since we are proxying this through to
 	   the butler we must be first.
 	*/
-	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
-	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
+	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
+	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
 	set_video_container_size (_film->frame_size ());
 
 	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
@@ -124,21 +152,24 @@ Player::setup_pieces ()
 	setup_pieces_unlocked ();
 }
 
+
 bool
-have_video (shared_ptr<Piece> piece)
+have_video (shared_ptr<const Content> content)
 {
-	return piece->decoder && piece->decoder->video;
+	return static_cast<bool>(content->video) && content->video->use();
 }
 
 bool
-have_audio (shared_ptr<Piece> piece)
+have_audio (shared_ptr<const Content> content)
 {
-	return piece->decoder && piece->decoder->audio;
+	return static_cast<bool>(content->audio);
 }
 
 void
 Player::setup_pieces_unlocked ()
 {
+	_playback_length = _playlist ? _playlist->length(_film) : _film->length();
+
 	list<shared_ptr<Piece> > old_pieces = _pieces;
 	_pieces.clear ();
 
@@ -146,7 +177,7 @@ Player::setup_pieces_unlocked ()
 	_shuffler = new Shuffler();
 	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
 
-	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
+	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
 
 		if (!i->paths_valid ()) {
 			continue;
@@ -166,12 +197,9 @@ Player::setup_pieces_unlocked ()
 		}
 
 		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
-		FrameRateChange frc (_film, i);
+		DCPOMATIC_ASSERT (decoder);
 
-		if (!decoder) {
-			/* Not something that we can decode; e.g. Atmos content */
-			continue;
-		}
+		FrameRateChange frc (_film, i);
 
 		if (decoder->video && _ignore_video) {
 			decoder->video->set_ignore (true);
@@ -226,6 +254,10 @@ Player::setup_pieces_unlocked ()
 
 			++j;
 		}
+
+		if (decoder->atmos) {
+			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
+		}
 	}
 
 	_stream_states.clear ();
@@ -237,32 +269,39 @@ Player::setup_pieces_unlocked ()
 		}
 	}
 
-	_black = Empty (_film, _pieces, bind(&have_video, _1));
-	_silent = Empty (_film, _pieces, bind(&have_audio, _1));
+	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
+	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
 
 	_last_video_time = DCPTime ();
 	_last_video_eyes = EYES_BOTH;
 	_last_audio_time = DCPTime ();
-
-	/* Cached value to save recalculating it on every ::pass */
-	_film_length = _film->length ();
 }
 
 void
 Player::playlist_content_change (ChangeType type, int property, bool frequent)
 {
-	if (type == CHANGE_TYPE_PENDING) {
-		/* The player content is probably about to change, so we can't carry on
-		   until that has happened and we've rebuilt our pieces.  Stop pass()
-		   and seek() from working until then.
-		*/
-		++_suspended;
-	} else if (type == CHANGE_TYPE_DONE) {
-		/* A change in our content has gone through.  Re-build our pieces. */
-		setup_pieces ();
-		--_suspended;
-	} else if (type == CHANGE_TYPE_CANCELLED) {
-		--_suspended;
+	if (property == VideoContentProperty::CROP) {
+		if (type == CHANGE_TYPE_DONE) {
+			dcp::Size const vcs = video_container_size();
+			boost::mutex::scoped_lock lm (_mutex);
+			for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
+				i->first->reset_metadata (_film, vcs);
+			}
+		}
+	} else {
+		if (type == CHANGE_TYPE_PENDING) {
+			/* The player content is probably about to change, so we can't carry on
+			   until that has happened and we've rebuilt our pieces.  Stop pass()
+			   and seek() from working until then.
+			*/
+			++_suspended;
+		} else if (type == CHANGE_TYPE_DONE) {
+			/* A change in our content has gone through.  Re-build our pieces. */
+			setup_pieces ();
+			--_suspended;
+		} else if (type == CHANGE_TYPE_CANCELLED) {
+			--_suspended;
+		}
 	}
 
 	Change (type, property, frequent);
@@ -345,8 +384,9 @@ Player::black_player_video_frame (Eyes eyes) const
 			PART_WHOLE,
 			PresetColourConversion::all().front().conversion,
 			VIDEO_RANGE_FULL,
-			boost::weak_ptr<Content>(),
-			boost::optional<Frame>()
+			std::weak_ptr<Content>(),
+			boost::optional<Frame>(),
+			false
 		)
 	);
 }
@@ -408,20 +448,18 @@ Player::content_time_to_dcp (shared_ptr<Piece> piece, ContentTime t) const
 	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
 }
 
-list<shared_ptr<Font> >
+vector<FontData>
 Player::get_subtitle_fonts ()
 {
 	boost::mutex::scoped_lock lm (_mutex);
 
-	list<shared_ptr<Font> > fonts;
+	vector<FontData> fonts;
 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-		BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
-			/* XXX: things may go wrong if there are duplicate font IDs
-			   with different font files.
-			*/
-			list<shared_ptr<Font> > f = j->fonts ();
-			copy (f.begin(), f.end(), back_inserter (fonts));
-		}
+		/* XXX: things may go wrong if there are duplicate font IDs
+		   with different font files.
+		*/
+ */ + vector f = i->decoder->fonts (); + copy (f.begin(), f.end(), back_inserter(fonts)); } return fonts; @@ -497,7 +535,7 @@ Player::get_reel_assets () list a; - BOOST_FOREACH (shared_ptr i, _playlist->content ()) { + BOOST_FOREACH (shared_ptr i, playlist()->content()) { shared_ptr j = dynamic_pointer_cast (i); if (!j) { continue; @@ -565,15 +603,15 @@ bool Player::pass () { boost::mutex::scoped_lock lm (_mutex); - DCPOMATIC_ASSERT (_film_length); if (_suspended) { /* We can't pass in this state */ + LOG_DEBUG_PLAYER_NC ("Player is suspended"); return false; } - if (*_film_length == DCPTime()) { - /* Special case of an empty Film; just give one black frame */ + if (_playback_length == DCPTime()) { + /* Special; just give one black frame */ emit_video (black_player_video_frame(EYES_BOTH), DCPTime()); return true; } @@ -629,6 +667,7 @@ Player::pass () switch (which) { case CONTENT: { + LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0)); earliest_content->done = earliest_content->decoder->pass (); shared_ptr dcp = dynamic_pointer_cast(earliest_content->content); if (dcp && !_play_referenced && dcp->reference_audio()) { @@ -641,11 +680,13 @@ Player::pass () break; } case BLACK: + LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position())); emit_video (black_player_video_frame(EYES_BOTH), _black.position()); _black.set_position (_black.position() + one_video_frame()); break; case SILENT: { + LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position())); DCPTimePeriod period (_silent.period_at_position()); if (_last_audio_time) { /* Sometimes the thing that happened last finishes fractionally before @@ -679,7 +720,7 @@ Player::pass () /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one of our streams, or the position of the _silent. */ - DCPTime pull_to = *_film_length; + DCPTime pull_to = _playback_length; for (map::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) { if (!i->second.piece->done && i->second.last_push_end < pull_to) { pull_to = i->second.last_push_end; @@ -689,6 +730,7 @@ Player::pass () pull_to = _silent.position(); } + LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to)); list, DCPTime> > audio = _audio_merger.pull (pull_to); for (list, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) { if (_last_audio_time && i->second < *_last_audio_time) { @@ -770,6 +812,10 @@ Player::video (weak_ptr wp, ContentVideo video) return; } + if (!piece->content->video->use()) { + return; + } + FrameRateChange frc (_film, piece->content); if (frc.skip && (video.frame % 2) == 1) { return; @@ -777,6 +823,7 @@ Player::video (weak_ptr wp, ContentVideo video) /* Time of the first frame we will emit */ DCPTime const time = content_video_to_dcp (piece, video.frame); + LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time)); /* Discard if it's before the content's period or the last accurate seek. 
 	   We can't discard if it's after the content's period here as in that case we still need to fill any gap between
@@ -813,10 +860,12 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 	}
 	while (j < fill_to || eyes != fill_to_eyes) {
 		if (last != _last_video.end()) {
+			LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
 			shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
 			copy->set_eyes (eyes);
 			emit_video (copy, j);
 		} else {
+			LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
 			emit_video (black_player_video_frame(eyes), j);
 		}
 		if (eyes == EYES_RIGHT) {
@@ -841,16 +890,15 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 			video.image,
 			piece->content->video->crop (),
 			piece->content->video->fade (_film, video.frame),
-			piece->content->video->scale().size (
-				piece->content->video, _video_container_size, _film->frame_size ()
-			),
+			scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
 			_video_container_size,
 			video.eyes,
 			video.part,
 			piece->content->video->colour_conversion(),
 			piece->content->video->range(),
 			piece->content,
-			video.frame
+			video.frame,
+			false
 		)
 	);
@@ -880,6 +928,8 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
 	/* Compute time in the DCP */
 	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
+	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
+
 	/* And the end of this block in the DCP */
 	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
@@ -900,9 +950,7 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
 	if (remaining_frames == 0) {
 		return;
 	}
-	shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
-	cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
-	content_audio.audio = cut;
+	content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
 	}
 
 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
@@ -955,8 +1003,15 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
 	PlayerText ps;
 	shared_ptr<Image> image = subtitle.sub.image;
+
 	/* We will scale the subtitle up to fit _video_container_size */
-	dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
+	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
+	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
+	if (width == 0 || height == 0) {
+		return;
+	}
+
+	dcp::Size scaled_size (width, height);
 	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
 	DCPTime from (content_time_to_dcp (piece, subtitle.from()));
@@ -1042,6 +1097,7 @@ void
 Player::seek (DCPTime time, bool accurate)
 {
 	boost::mutex::scoped_lock lm (_mutex);
+	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
"" : "in"); if (_suspended) { /* We can't seek in this state */ @@ -1188,8 +1244,7 @@ Player::discard_audio (shared_ptr audio, DCPTime time, DCPTi if (remaining_frames <= 0) { return make_pair(shared_ptr(), DCPTime()); } - shared_ptr cut (new AudioBuffers (audio->channels(), remaining_frames)); - cut->copy_from (audio.get(), remaining_frames, discard_frames, 0); + shared_ptr cut (new AudioBuffers(audio, remaining_frames, discard_frames)); return make_pair(cut, time + discard_time); } @@ -1228,3 +1283,18 @@ Player::content_time_to_dcp (shared_ptr content, ContentTime t) /* We couldn't find this content; perhaps things are being changed over */ return optional(); } + + +shared_ptr +Player::playlist () const +{ + return _playlist ? _playlist : _film->playlist(); +} + + +void +Player::atmos (weak_ptr, ContentAtmos data) +{ + Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata); +} +