X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fplayer.cc;h=46bc497a23e9709de2190c5d86d1503fd80186f4;hb=6e922efb725dfb918fe4380eed81d837e85ed15a;hp=5de089ba91aa9ad297bbe299a2b2ad9b076ec76e;hpb=76e543bd7c85054ff857781707fa570f2b159360;p=dcpomatic.git

diff --git a/src/lib/player.cc b/src/lib/player.cc
index 5de089ba9..46bc497a2 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -70,7 +70,6 @@ using std::dynamic_pointer_cast;
 using std::list;
 using std::make_pair;
 using std::make_shared;
-using std::map;
 using std::max;
 using std::min;
 using std::min;
@@ -95,11 +94,12 @@ int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
 int const PlayerProperty::PLAYBACK_LENGTH = 705;


-Player::Player (shared_ptr<const Film> film)
+Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
 	: _film (film)
 	, _suspended (0)
 	, _tolerant (film->tolerant())
 	, _audio_merger (_film->audio_frame_rate())
+	, _subtitle_alignment (subtitle_alignment)
 {
 	construct ();
 }
@@ -331,7 +331,7 @@ Player::set_video_container_size (dcp::Size s)

 	_video_container_size = s;

-	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
+	_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
 	_black_image->make_black ();
 }

@@ -557,61 +557,61 @@ Player::get_reel_assets ()
 {
 	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

-	list<ReferencedReelAsset> a;
+	list<ReferencedReelAsset> reel_assets;

-	for (auto i: playlist()->content()) {
-		auto j = dynamic_pointer_cast<DCPContent> (i);
-		if (!j) {
+	for (auto content: playlist()->content()) {
+		auto dcp = dynamic_pointer_cast<DCPContent>(content);
+		if (!dcp) {
 			continue;
 		}

 		scoped_ptr<DCPDecoder> decoder;
 		try {
-			decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
+			decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
 		} catch (...) {
-			return a;
+			return reel_assets;
 		}

-		DCPOMATIC_ASSERT (j->video_frame_rate ());
-		double const cfr = j->video_frame_rate().get();
-		Frame const trim_start = j->trim_start().frames_round (cfr);
-		Frame const trim_end = j->trim_end().frames_round (cfr);
+		DCPOMATIC_ASSERT (dcp->video_frame_rate());
+		double const cfr = dcp->video_frame_rate().get();
+		Frame const trim_start = dcp->trim_start().frames_round(cfr);
+		Frame const trim_end = dcp->trim_end().frames_round(cfr);

 		int const ffr = _film->video_frame_rate ();

 		/* position in the asset from the start */
 		int64_t offset_from_start = 0;
 		/* position in the asset from the end */
 		int64_t offset_from_end = 0;
 		for (auto k: decoder->reels()) {
 			/* Assume that main picture duration is the length of the reel */
 			offset_from_end += k->main_picture()->actual_duration();
 		}

-		for (auto k: decoder->reels()) {
+		for (auto reel: decoder->reels()) {

 			/* Assume that main picture duration is the length of the reel */
-			int64_t const reel_duration = k->main_picture()->actual_duration();
+			int64_t const reel_duration = reel->main_picture()->actual_duration();

 			/* See doc/design/trim_reels.svg */
 			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
 			Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

-			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
-			if (j->reference_video ()) {
-				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
+			auto const from = content->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
+			if (dcp->reference_video()) {
+				maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
 			}

-			if (j->reference_audio ()) {
-				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
+			if (dcp->reference_audio()) {
+				maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
 			}

-			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
-				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
+			if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
+				maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
 			}

-			if (j->reference_text (TextType::CLOSED_CAPTION)) {
-				for (auto l: k->closed_captions()) {
-					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
+			if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
+				for (auto caption: reel->closed_captions()) {
+					maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, ffr);
 				}
 			}

@@ -620,7 +620,7 @@ Player::get_reel_assets ()
 		}
 	}

-	return a;
+	return reel_assets;
 }


@@ -743,10 +743,38 @@ Player::pass ()

 	/* Emit any audio that is ready */

 	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
-	   of our streams, or the position of the _silent.
+	   of our streams, or the position of the _silent.  First, though, we choose only streams that are less than
+	   ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
+	   behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
+	   that will never come, causing bugs like #2101.
 	*/
-	auto pull_to = _playback_length;
+	constexpr int ignore_streams_behind = 5;
+
+	using state_pair = std::pair<AudioStreamPtr, StreamState>;
+
+	/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
+	auto latest_last_push_end = std::max_element(
+		_stream_states.begin(),
+		_stream_states.end(),
+		[](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
+		);
+
+	if (latest_last_push_end != _stream_states.end()) {
+		LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
+	}
+
+	/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
+	std::map<AudioStreamPtr, StreamState> alive_stream_states;
 	for (auto const& i: _stream_states) {
+		if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
+			alive_stream_states.insert(i);
+		} else {
+			LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
+		}
+	}
+
+	auto pull_to = _playback_length;
+	for (auto const& i: alive_stream_states) {
 		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
 			pull_to = i.second.last_push_end;
 		}
@@ -827,7 +855,7 @@ Player::open_subtitles_for_frame (DCPTime time) const
 		return {};
 	}

-	return merge (captions);
+	return merge (captions, _subtitle_alignment);
 }


@@ -920,16 +948,23 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 		}
 	}

+	auto const content_video = piece->content->video;
+
 	_last_video[wp] = std::make_shared<PlayerVideo>(
 		video.image,
-		piece->content->video->crop (),
-		piece->content->video->fade (_film, video.frame),
-		scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
+		content_video->actual_crop(),
+		content_video->fade (_film, video.frame),
+		scale_for_display(
+			content_video->scaled_size(_film->frame_size()),
+			_video_container_size,
+			_film->frame_size(),
+			content_video->pixel_quanta()
+			),
 		_video_container_size,
 		video.eyes,
 		video.part,
-		piece->content->video->colour_conversion(),
-		piece->content->video->range(),
+		content_video->colour_conversion(),
+		content_video->range(),
 		piece->content,
 		video.frame,
 		false
@@ -1055,7 +1090,7 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, C
 	}

 	dcp::Size scaled_size (width, height);
-	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
+	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), subtitle.sub.rectangle));
 	DCPTime from (content_time_to_dcp (piece, subtitle.from()));
 	_active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
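
Review note: the reel_trim_start / reel_trim_end arithmetic in get_reel_assets()
is easiest to sanity-check with numbers.  Below is a self-contained example with
invented figures (two reels of 100 and 200 frames, trims of 120 and 50 frames);
doc/design/trim_reels.svg has the real derivation, and the per-reel updates of
offset_from_start / offset_from_end are assumed from context, since those lines
fall between the hunks above.

// Worked example of the reel-trimming arithmetic, with invented numbers.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

int main ()
{
	using std::max;
	using std::min;

	std::vector<int64_t> const reel_durations = { 100, 200 };  // frames in each reel
	int64_t const trim_start = 120;  // frames trimmed from the start of the content
	int64_t const trim_end = 50;     // frames trimmed from the end

	int64_t offset_from_start = 0;
	int64_t offset_from_end = 0;
	for (auto duration: reel_durations) {
		offset_from_end += duration;
	}

	for (auto reel_duration: reel_durations) {
		auto const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
		auto const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
		printf(
			"play frames %lld to %lld of this reel\n",
			static_cast<long long>(reel_trim_start),
			static_cast<long long>(reel_duration - reel_trim_end)
			);
		offset_from_start += reel_duration;
		offset_from_end -= reel_duration;
	}

	return 0;
}

This prints "play frames 100 to 100" for the first reel (it lies wholly inside
the start trim, so none of it is used) and "play frames 20 to 150" for the
second, leaving 130 frames, which matches 300 - 120 - 50.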
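
Review note: the new pull_to calculation in Player::pass() is the heart of this
change, so here is a minimal sketch of the idea on its own.  It is illustrative
only, not dcpomatic code: StreamState is reduced to a plain struct, streams are
keyed by string rather than AudioStreamPtr, times are plain seconds rather than
DCPTime, and the free function pull_to() is a hypothetical stand-in for the
member logic.

// Sketch only: the "ignore streams that have fallen behind" rule from
// Player::pass(), using std types throughout.
#include <algorithm>
#include <map>
#include <string>

struct StreamState {
	double last_push_end = 0;  // seconds of audio this stream has pushed so far
	bool done = false;
};

// Time up to which audio can safely be emitted: the earliest last_push_end of
// any "alive" stream, where a stream is alive if it is less than
// ignore_streams_behind seconds behind the leading stream.
double
pull_to (std::map<std::string, StreamState> const& streams, double playback_length)
{
	constexpr double ignore_streams_behind = 5;

	auto leader = std::max_element(
		streams.begin(),
		streams.end(),
		[](auto const& a, auto const& b) { return a.second.last_push_end < b.second.last_push_end; }
		);

	auto t = playback_length;
	for (auto const& i: streams) {
		if (leader->second.last_push_end - i.second.last_push_end >= ignore_streams_behind) {
			/* Assume this stream has finished; without this check one
			   stalled stream would withhold audio forever (bug #2101). */
			continue;
		}
		if (!i.second.done && i.second.last_push_end < t) {
			t = i.second.last_push_end;
		}
	}
	return t;
}

For example, with one stream at 100s and another stuck at 3s, the second is 97s
behind the leader and is ignored, so pull_to is 100s rather than 3s; with
streams at 100s and 98s, both count and pull_to is 98s.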