summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorCarl Hetherington <cth@carlh.net>2018-01-03 15:44:05 +0000
committerCarl Hetherington <cth@carlh.net>2018-01-03 15:44:05 +0000
commitefa79a3db4a041cda3d93dde444e5dca9b84b976 (patch)
tree8a7c9ebfef941d30ef0c6505f5614aaa6e3cc253 /src
parentac9648e018f06d11be985f55984f6afe8b6bb97e (diff)
parent65c3b3a815545fef4d0373bb588eb815b8b544b0 (diff)
Fix alpha blending with offset; should help with #1155.
Diffstat (limited to 'src')
-rw-r--r--src/lib/audio_decoder.cc5
-rw-r--r--src/lib/audio_decoder.h4
-rw-r--r--src/lib/image.cc94
-rw-r--r--src/lib/player.cc41
-rw-r--r--src/lib/player.h4
-rw-r--r--src/lib/video_decoder.cc26
-rw-r--r--src/lib/video_decoder.h4
7 files changed, 91 insertions, 87 deletions
diff --git a/src/lib/audio_decoder.cc b/src/lib/audio_decoder.cc
index 5df4047db..4bd6535d8 100644
--- a/src/lib/audio_decoder.cc
+++ b/src/lib/audio_decoder.cc
@@ -1,5 +1,5 @@
/*
- Copyright (C) 2012-2017 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
@@ -98,7 +98,8 @@ AudioDecoder::emit (AudioStreamPtr stream, shared_ptr<const AudioBuffers> data,
data = ro;
}
- _positions[stream] += Data(stream, ContentAudio (data, _positions[stream])).get_value_or(0);
+ Data(stream, ContentAudio (data, _positions[stream]));
+ _positions[stream] += data->frames();
}
/** @return Time just after the last thing that was emitted from a given stream */
diff --git a/src/lib/audio_decoder.h b/src/lib/audio_decoder.h
index ba1520ef5..69655cceb 100644
--- a/src/lib/audio_decoder.h
+++ b/src/lib/audio_decoder.h
@@ -1,5 +1,5 @@
/*
- Copyright (C) 2012-2017 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
@@ -54,7 +54,7 @@ public:
ContentTime stream_position (AudioStreamPtr stream) const;
/** @return Number of frames of data that were accepted */
- boost::signals2::signal<Frame (AudioStreamPtr, ContentAudio)> Data;
+ boost::signals2::signal<void (AudioStreamPtr, ContentAudio)> Data;
private:
void silence (int milliseconds);
diff --git a/src/lib/image.cc b/src/lib/image.cc
index 33a0077db..b85451fb3 100644
--- a/src/lib/image.cc
+++ b/src/lib/image.cc
@@ -436,40 +436,6 @@ Image::make_transparent ()
memset (data()[0], 0, sample_size(0).height * stride()[0]);
}
-template <class T>
-void
-component (
- int n,
- Image* base,
- shared_ptr<const Image> other,
- shared_ptr<const Image> rgba,
- int start_base_x, int start_base_y,
- int start_other_x, int start_other_y
- )
-{
- dcp::Size const base_size = base->sample_size(n);
- dcp::Size const other_size = other->sample_size(n);
- int const bhf = base->horizontal_factor(n);
- int const bvf = base->vertical_factor(n);
- int const ohf = other->horizontal_factor(n);
- int const ovf = other->vertical_factor(n);
- for (int by = start_base_y / bvf, oy = start_other_y / ovf, ry = start_other_y; by < base_size.height && oy < other_size.height; ++by, ++oy, ry += ovf) {
- /* base image */
- T* bp = ((T*) (base->data()[n] + by * base->stride()[n])) + start_base_x / bhf;
- /* overlay image */
- T* op = ((T*) (other->data()[n] + oy * other->stride()[n]));
- /* original RGBA for alpha channel */
- uint8_t* rp = rgba->data()[0] + ry * rgba->stride()[0];
- for (int bx = start_base_x / bhf, ox = start_other_x / ohf; bx < base_size.width && ox < other_size.width; ++bx, ++ox) {
- float const alpha = float (rp[3]) / 255;
- *bp = *op * alpha + *bp * (1 - alpha);
- ++bp;
- ++op;
- rp += 4 * ohf;
- }
- }
-}
-
void
Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
{
@@ -590,18 +556,66 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
case AV_PIX_FMT_YUV420P:
{
shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
- component<uint8_t> (0, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
- component<uint8_t> (1, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
- component<uint8_t> (2, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+ dcp::Size const ts = size();
+ dcp::Size const os = yuv->size();
+ for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
+ int const hty = ty / 2;
+ int const hoy = oy / 2;
+ uint8_t* tY = data()[0] + (ty * stride()[0]) + start_tx;
+ uint8_t* tU = data()[1] + (hty * stride()[1]) + start_tx / 2;
+ uint8_t* tV = data()[2] + (hty * stride()[2]) + start_tx / 2;
+ uint8_t* oY = yuv->data()[0] + (oy * yuv->stride()[0]) + start_ox;
+ uint8_t* oU = yuv->data()[1] + (hoy * yuv->stride()[1]) + start_ox / 2;
+ uint8_t* oV = yuv->data()[2] + (hoy * yuv->stride()[2]) + start_ox / 2;
+ uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
+ for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
+ float const a = float(alpha[3]) / 255;
+ *tY = *oY * a + *tY * (1 - a);
+ *tU = *oU * a + *tU * (1 - a);
+ *tV = *oV * a + *tV * (1 - a);
+ ++tY;
+ ++oY;
+ if (tx % 2) {
+ ++tU;
+ ++tV;
+ }
+ if (ox % 2) {
+ ++oU;
+ ++oV;
+ }
+ alpha += 4;
+ }
+ }
break;
}
case AV_PIX_FMT_YUV420P10:
case AV_PIX_FMT_YUV422P10LE:
{
shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
- component<uint16_t> (0, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
- component<uint8_t> (1, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
- component<uint8_t> (2, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+ dcp::Size const ts = size();
+ dcp::Size const os = yuv->size();
+ for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
+ uint16_t* tY = (uint16_t *) (data()[0] + (ty * stride()[0])) + start_tx;
+ uint8_t* tU = data()[1] + (ty * stride()[1]) + start_tx;
+ uint8_t* tV = data()[2] + (ty * stride()[2]) + start_tx;
+ uint16_t* oY = (uint16_t *) (yuv->data()[0] + (oy * yuv->stride()[0])) + start_ox;
+ uint8_t* oU = yuv->data()[1] + (oy * yuv->stride()[1]) + start_ox;
+ uint8_t* oV = yuv->data()[2] + (oy * yuv->stride()[2]) + start_ox;
+ uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
+ for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
+ float const a = float(alpha[3]) / 255;
+ *tY = *oY * a + *tY * (1 - a);
+ *tU = *oU * a + *tU * (1 - a);
+ *tV = *oV * a + *tV * (1 - a);
+ ++tY;
+ ++tU;
+ ++tV;
+ ++oY;
+ ++oU;
+ ++oV;
+ alpha += 4;
+ }
+ }
break;
}
default:
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 0bc460465..df0b955a8 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -526,8 +526,14 @@ Player::pass ()
optional<DCPTime> earliest_time;
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- if (!i->done) {
- DCPTime const t = content_time_to_dcp (i, i->decoder->position());
+ if (i->done) {
+ continue;
+ }
+
+ DCPTime const t = content_time_to_dcp (i, i->decoder->position());
+ if (t > i->content->end()) {
+ i->done = true;
+ } else {
/* Given two choices at the same time, pick the one with a subtitle so we see it before
the video.
*/
@@ -638,17 +644,17 @@ Player::subtitles_for_frame (DCPTime time) const
return merge (subtitles);
}
-bool
+void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
shared_ptr<Piece> piece = wp.lock ();
if (!piece) {
- return false;
+ return;
}
FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
if (frc.skip && (video.frame % 2) == 1) {
- return false;
+ return;
}
/* Time of the first frame we will emit */
@@ -659,7 +665,7 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
time < piece->content->position() ||
time >= piece->content->end() ||
(_last_video_time && time < *_last_video_time)) {
- return false;
+ return;
}
/* Fill gaps that we discover now that we have some video which needs to be emitted */
@@ -697,21 +703,16 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
emit_video (_last_video[wp], t);
t += one_video_frame ();
}
-
- return true;
}
-/** @return Number of input frames that were `accepted'. This is the number of frames passed in
- * unless some were discarded at the end of the block.
- */
-Frame
+void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
shared_ptr<Piece> piece = wp.lock ();
if (!piece) {
- return 0;
+ return;
}
shared_ptr<AudioContent> content = piece->content->audio;
@@ -722,33 +723,26 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
/* And the end of this block in the DCP */
DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
- /* We consider frames trimmed off the beginning to nevertheless be `accepted'; it's only frames trimmed
- off the end that are considered as discarded. This logic is necessary to ensure correct reel lengths,
- although the precise details escape me at the moment.
- */
- Frame accepted = content_audio.audio->frames();
-
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
if (!cut.first) {
/* This audio is entirely discarded */
- return accepted;
+ return;
}
content_audio.audio = cut.first;
time = cut.second;
} else if (time > piece->content->end()) {
/* Discard it all */
- return 0;
+ return;
} else if (end > piece->content->end()) {
Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
if (remaining_frames == 0) {
- return 0;
+ return;
}
shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
content_audio.audio = cut;
- accepted = content_audio.audio->frames();
}
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
@@ -776,7 +770,6 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
_audio_merger.push (content_audio.audio, time);
DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
- return accepted;
}
void
diff --git a/src/lib/player.h b/src/lib/player.h
index ed55b6284..9dd5afd26 100644
--- a/src/lib/player.h
+++ b/src/lib/player.h
@@ -105,8 +105,8 @@ private:
ContentTime dcp_to_content_time (boost::shared_ptr<const Piece> piece, DCPTime t) const;
DCPTime content_time_to_dcp (boost::shared_ptr<const Piece> piece, ContentTime t) const;
boost::shared_ptr<PlayerVideo> black_player_video_frame () const;
- bool video (boost::weak_ptr<Piece>, ContentVideo);
- Frame audio (boost::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
+ void video (boost::weak_ptr<Piece>, ContentVideo);
+ void audio (boost::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
void image_subtitle_start (boost::weak_ptr<Piece>, ContentImageSubtitle);
void text_subtitle_start (boost::weak_ptr<Piece>, ContentTextSubtitle);
void subtitle_stop (boost::weak_ptr<Piece>, ContentTime);
diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index b9ead52c8..3625e074f 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -1,5 +1,5 @@
/*
- Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
@@ -59,11 +59,9 @@ VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
return;
}
- optional<bool> taken;
-
switch (_content->video->frame_type ()) {
case VIDEO_FRAME_TYPE_2D:
- taken = Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
+ Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
break;
case VIDEO_FRAME_TYPE_3D:
{
@@ -71,35 +69,33 @@ VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
frame this one is.
*/
bool const same = (_last_emitted && _last_emitted.get() == frame);
- taken = Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+ Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
_last_emitted = frame;
break;
}
case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- taken = Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+ Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
frame /= 2;
break;
case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
- taken = Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
- taken = Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
break;
case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
- taken = Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
- taken = Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
break;
case VIDEO_FRAME_TYPE_3D_LEFT:
- taken = Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
break;
case VIDEO_FRAME_TYPE_3D_RIGHT:
- taken = Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
break;
default:
DCPOMATIC_ASSERT (false);
}
- if (taken.get_value_or(false)) {
- _position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
- }
+ _position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
}
void
diff --git a/src/lib/video_decoder.h b/src/lib/video_decoder.h
index c5e2ea4cf..959ff7ac7 100644
--- a/src/lib/video_decoder.h
+++ b/src/lib/video_decoder.h
@@ -1,5 +1,5 @@
/*
- Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
@@ -60,7 +60,7 @@ public:
void emit (boost::shared_ptr<const ImageProxy>, Frame frame);
/** @return true if the emitted data was accepted, false if not */
- boost::signals2::signal<bool (ContentVideo)> Data;
+ boost::signals2::signal<void (ContentVideo)> Data;
private:
boost::shared_ptr<const Content> _content;