From 2537a2d959a5872c2e75b322022a7679d24c7e60 Mon Sep 17 00:00:00 2001
From: Carl Hetherington <cth@carlh.net>
Date: Tue, 2 Jan 2018 21:09:36 +0000
Subject: [PATCH] A previous commit took care to make Decoder::position() not
 be updated if the data that was emitted from the decoder was not taken by
 the player.

This means that when the decoder moves into its end trim the position will
stay where it is (since the player does not take the data).

I can't see the point of doing this; the only use of Decoder::position() is
to decide what to pass() next (I think).  It is also inconvenient, because we
would like to check Decoder::position() to decide whether to stop passing a
decoder once it is in its end trim (not doing so causes #1154).
---
 src/lib/audio_decoder.cc |  5 +++--
 src/lib/audio_decoder.h  |  4 ++--
 src/lib/player.cc        | 31 +++++++++----------------------
 src/lib/player.h         |  4 ++--
 src/lib/video_decoder.cc | 26 +++++++++++---------------
 src/lib/video_decoder.h  |  4 ++--
 6 files changed, 29 insertions(+), 45 deletions(-)

diff --git a/src/lib/audio_decoder.cc b/src/lib/audio_decoder.cc
index 5df4047db..4bd6535d8 100644
--- a/src/lib/audio_decoder.cc
+++ b/src/lib/audio_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2017 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -98,7 +98,8 @@ AudioDecoder::emit (AudioStreamPtr stream, shared_ptr<const AudioBuffers> data,
 		data = ro;
 	}
 
-	_positions[stream] += Data(stream, ContentAudio (data, _positions[stream])).get_value_or(0);
+	Data(stream, ContentAudio (data, _positions[stream]));
+	_positions[stream] += data->frames();
 }
 
 /** @return Time just after the last thing that was emitted from a given stream */
diff --git a/src/lib/audio_decoder.h b/src/lib/audio_decoder.h
index ba1520ef5..69655cceb 100644
--- a/src/lib/audio_decoder.h
+++ b/src/lib/audio_decoder.h
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2017 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -54,7 +54,7 @@ public:
 	ContentTime stream_position (AudioStreamPtr stream) const;
 
 	/** @return Number of frames of data that were accepted */
-	boost::signals2::signal<Frame (AudioStreamPtr, ContentAudio)> Data;
+	boost::signals2::signal<void (AudioStreamPtr, ContentAudio)> Data;
 
 private:
 	void silence (int milliseconds);
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 0bc460465..9e0561a70 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -638,17 +638,17 @@ Player::subtitles_for_frame (DCPTime time) const
 	return merge (subtitles);
 }
 
-bool
+void
 Player::video (weak_ptr<Piece> wp, ContentVideo video)
 {
 	shared_ptr<Piece> piece = wp.lock ();
 	if (!piece) {
-		return false;
+		return;
 	}
 
 	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
 	if (frc.skip && (video.frame % 2) == 1) {
-		return false;
+		return;
 	}
 
 	/* Time of the first frame we will emit */
@@ -659,7 +659,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 		time < piece->content->position() ||
 		time >= piece->content->end() ||
 		(_last_video_time && time < *_last_video_time)) {
-		return false;
+		return;
 	}
 
 	/* Fill gaps that we discover now that we have some video which needs to be emitted */
@@ -697,21 +697,16 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 		emit_video (_last_video[wp], t);
 		t += one_video_frame ();
 	}
-
-	return true;
 }
 
-/** @return Number of input frames that were `accepted'.  This is the number of frames passed in
- *  unless some were discarded at the end of the block.
- */
-Frame
+void
 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
 {
 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
 
 	shared_ptr<Piece> piece = wp.lock ();
 	if (!piece) {
-		return 0;
+		return;
 	}
 
 	shared_ptr<AudioContent> content = piece->content->audio;
@@ -722,33 +717,26 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
 	/* And the end of this block in the DCP */
 	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
 
-	/* We consider frames trimmed off the beginning to nevertheless be `accepted'; it's only frames trimmed
-	   off the end that are considered as discarded.  This logic is necessary to ensure correct reel lengths,
-	   although the precise details escape me at the moment.
-	*/
-	Frame accepted = content_audio.audio->frames();
-
 	/* Remove anything that comes before the start or after the end of the content */
 	if (time < piece->content->position()) {
 		pair<shared_ptr<const AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
 		if (!cut.first) {
 			/* This audio is entirely discarded */
-			return accepted;
+			return;
 		}
 		content_audio.audio = cut.first;
 		time = cut.second;
 	} else if (time > piece->content->end()) {
 		/* Discard it all */
-		return 0;
+		return;
 	} else if (end > piece->content->end()) {
 		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
 		if (remaining_frames == 0) {
-			return 0;
+			return;
 		}
 		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
 		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
 		content_audio.audio = cut;
-		accepted = content_audio.audio->frames();
 	}
 
 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
@@ -776,7 +764,6 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
 	_audio_merger.push (content_audio.audio, time);
 	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
 	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
-	return accepted;
 }
 
 void
diff --git a/src/lib/player.h b/src/lib/player.h
index ed55b6284..9dd5afd26 100644
--- a/src/lib/player.h
+++ b/src/lib/player.h
@@ -105,8 +105,8 @@ private:
 	ContentTime dcp_to_content_time (boost::shared_ptr<Piece> piece, DCPTime t) const;
 	DCPTime content_time_to_dcp (boost::shared_ptr<Piece> piece, ContentTime t) const;
 	boost::shared_ptr<PlayerVideo> black_player_video_frame () const;
-	bool video (boost::weak_ptr<Piece>, ContentVideo);
-	Frame audio (boost::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
+	void video (boost::weak_ptr<Piece>, ContentVideo);
+	void audio (boost::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
 	void image_subtitle_start (boost::weak_ptr<Piece>, ContentImageSubtitle);
 	void text_subtitle_start (boost::weak_ptr<Piece>, ContentTextSubtitle);
 	void subtitle_stop (boost::weak_ptr<Piece>, ContentTime);
diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index b9ead52c8..3625e074f 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -59,11 +59,9 @@ VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
 		return;
 	}
 
-	optional<bool> taken;
-
 	switch (_content->video->frame_type ()) {
 	case VIDEO_FRAME_TYPE_2D:
-		taken = Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
+		Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
 		break;
 	case VIDEO_FRAME_TYPE_3D:
 	{
@@ -71,35 +69,33 @@ VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
 		   frame this one is.
 		*/
 		bool const same = (_last_emitted && _last_emitted.get() == frame);
-		taken = Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+		Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
 		_last_emitted = frame;
 		break;
 	}
 	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
-		taken = Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+		Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
 		frame /= 2;
 		break;
 	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-		taken = Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
-		taken = Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+		Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
+		Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
 		break;
 	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-		taken = Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
-		taken = Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+		Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
+		Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
 		break;
 	case VIDEO_FRAME_TYPE_3D_LEFT:
-		taken = Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+		Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
 		break;
 	case VIDEO_FRAME_TYPE_3D_RIGHT:
-		taken = Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+		Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
 		break;
 	default:
 		DCPOMATIC_ASSERT (false);
 	}
 
-	if (taken.get_value_or(false)) {
-		_position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
-	}
+	_position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
 }
 
 void
diff --git a/src/lib/video_decoder.h b/src/lib/video_decoder.h
index c5e2ea4cf..959ff7ac7 100644
--- a/src/lib/video_decoder.h
+++ b/src/lib/video_decoder.h
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -60,7 +60,7 @@ public:
 	void emit (boost::shared_ptr<const ImageProxy>, Frame frame);
 
 	/** @return true if the emitted data was accepted, false if not */
-	boost::signals2::signal<bool (ContentVideo)> Data;
+	boost::signals2::signal<void (ContentVideo)> Data;
 
 private:
 	boost::shared_ptr<const Content> _content;
-- 
2.30.2
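
Note on the reasoning in the commit message: once Decoder::position() advances
whenever data is emitted (whether or not the player keeps it), the caller can
compare position() against the start of the content's end trim to decide when
to stop calling pass() on a decoder (the #1154 situation).  The sketch below is
purely illustrative and is not DCP-o-matic code: FakeDecoder, pass_earliest(),
finished(), the seconds-based times and the 0.04s step are invented stand-ins
for the real Decoder/Player API, shown only to make the argument concrete.

#include <limits>
#include <memory>
#include <vector>

/* Stand-in for a decoder whose position advances every time it emits data. */
struct FakeDecoder
{
	double position = 0;   /* seconds of content emitted so far */
	double length = 0;     /* total length of the content, in seconds */
	double trim_end = 0;   /* amount trimmed off the end, in seconds */

	/* True once everything before the end trim has been emitted */
	bool finished () const {
		return position >= (length - trim_end);
	}

	/* Decode a little more; position advances whether or not the output
	   is kept, which is the behaviour this patch restores.
	*/
	void pass () {
		position += 0.04;
	}
};

/* Pass the unfinished decoder with the earliest position; return true when
   every decoder has reached its end trim and there is nothing left to do.
*/
bool
pass_earliest (std::vector<std::shared_ptr<FakeDecoder>> const& decoders)
{
	std::shared_ptr<FakeDecoder> earliest;
	double earliest_position = std::numeric_limits<double>::max ();
	for (auto const& d: decoders) {
		if (!d->finished() && d->position < earliest_position) {
			earliest = d;
			earliest_position = d->position;
		}
	}

	if (!earliest) {
		return true;
	}

	earliest->pass ();
	return false;
}

If position() stopped advancing as soon as emissions fell inside the end trim,
finished() here would never become true and pass_earliest() would keep
selecting the same decoder forever, which is the failure mode described above.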