_pts_offset = {};
}
- if (c->audio) {
+ if (c->has_mapped_audio()) {
audio = make_shared<AudioDecoder>(this, c->audio, fast);
}
decode_and_process_video_packet (packet);
} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
decode_and_process_subtitle_packet (packet);
- } else {
+ } else if (audio) {
decode_and_process_audio_packet (packet);
}
*/
static
shared_ptr<AudioBuffers>
-deinterleave_audio(shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
+deinterleave_audio(AVFrame* frame)
{
auto format = static_cast<AVSampleFormat>(frame->format);
auto audio = make_shared<AudioBuffers>(channels, frames);
auto data = audio->data();
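+ /* Nothing to deinterleave if the frame has no samples */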
+ if (frames == 0) {
+ return audio;
+ }
+
switch (format) {
case AV_SAMPLE_FMT_U8:
{
case AV_SAMPLE_FMT_FLTP:
{
auto p = reinterpret_cast<float**> (frame->data);
- DCPOMATIC_ASSERT(channels <= stream->channels());
for (int i = 0; i < channels; ++i) {
memcpy (data[i], p[i], frames * sizeof(float));
}
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
auto frame = audio_frame (stream);
- auto data = deinterleave_audio(stream, frame);
+ auto data = deinterleave_audio(frame);
auto const time_base = stream->stream(_format_context)->time_base;
void
FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet)
{
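+ /* Give up if there is no codec context to decode this subtitle packet with */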
+ auto context = subtitle_codec_context();
+ if (!context) {
+ return;
+ }
+
int got_subtitle;
AVSubtitle sub;
- if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
+ if (avcodec_decode_subtitle2(context, &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
return;
}
*/
ContentTime from;
from = sub_period.from + _pts_offset;
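+ /* We now have a current subtitle, even if we do not yet know when it ends */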
+ _have_current_subtitle = true;
if (sub_period.to) {
_current_subtitle_to = *sub_period.to + _pts_offset;
} else {
_current_subtitle_to = optional<ContentTime>();
- _have_current_subtitle = true;
}
ContentBitmapText bitmap_text(from);
/* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
(i.e. first byte B, second G, third R, fourth A)
*/
- auto const palette = rect->pict.data[1];
+ auto const* palette = rect->pict.data[1];
#else
/* Start of the first line in the subtitle */
auto sub_p = rect->data[0];
if (target_height == 0 && video_codec_context()) {
target_height = video_codec_context()->height;
}
- DCPOMATIC_ASSERT (target_width);
- DCPOMATIC_ASSERT (target_height);
+
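+ /* Adjust for any crop applied to the video, so the subtitle lands in the right place on the cropped image */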
+ int x_offset = 0;
+ int y_offset = 0;
+ if (_ffmpeg_content->video && _ffmpeg_content->video->use()) {
+ auto const crop = _ffmpeg_content->video->actual_crop();
+ target_width -= crop.left + crop.right;
+ target_height -= crop.top + crop.bottom;
+ x_offset = -crop.left;
+ y_offset = -crop.top;
+ }
+
+ DCPOMATIC_ASSERT(target_width > 0);
+ DCPOMATIC_ASSERT(target_height > 0);
+
dcpomatic::Rect<double> const scaled_rect (
- static_cast<double>(rect->x) / target_width,
- static_cast<double>(rect->y) / target_height,
+ static_cast<double>(rect->x + x_offset) / target_width,
+ static_cast<double>(rect->y + y_offset) / target_height,
static_cast<double>(rect->w) / target_width,
static_cast<double>(rect->h) / target_height
);
}
sub::RawSubtitle base;
- auto raw = sub::SSAReader::parse_line (
- base,
- text,
- _ffmpeg_content->video->size().width,
- _ffmpeg_content->video->size().height,
- sub::Colour(1, 1, 1)
- );
+ auto video_size = _ffmpeg_content->video->size();
+ DCPOMATIC_ASSERT(video_size);
+
+ sub::SSAReader::Context context(video_size->width, video_size->height, sub::Colour(1, 1, 1));
+ auto const raw = sub::SSAReader::parse_line(base, text, context);
for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {
only_text()->emit_plain_start (from, i);