FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
: FFmpeg (c)
, Decoder (film)
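+ /* The filter graphs are built from the content's filters and its video frame rate,
+    expressed as a rational; 24fps is assumed if the rate is not yet known.
+  */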
+ , _filter_graphs(c->filters(), dcp::Fraction(lrint(_ffmpeg_content->video_frame_rate().get_value_or(24) * 1000), 1000))
{
if (c->video && c->video->use()) {
video = make_shared<VideoDecoder>(this, c);
}
if (c->only_text()) {
- /* XXX: this time here should be the time of the first subtitle, not 0 */
- text.push_back (make_shared<TextDecoder>(this, c->only_text(), ContentTime()));
+ text.push_back (make_shared<TextDecoder>(this, c->only_text()));
+ /* XXX: we should be calling maybe_set_position() on this TextDecoder, but we can't easily find
+ * the time of the first subtitle at this point.
+ */
}
for (auto i: c->ffmpeg_audio_streams()) {
full_length = full_length.ceil (frc.source);
if (video) {
double const vfr = _ffmpeg_content->video_frame_rate().get();
- auto const f = full_length.frames_round (vfr);
- auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
- while (v < f) {
- video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
- ++v;
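+ /* Fill the gap between the last frame emitted and the end of the content with black frames. */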
+ auto v = video->position(film()).get_value_or(ContentTime()) + ContentTime::from_frames(1, vfr);
+ while (v < full_length) {
+ video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
+ v += ContentTime::from_frames(1, vfr);
}
}
-/** @param data pointer to array of pointers to buffers.
- *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
- */
+/** @param stream Stream which the frame comes from.
+ *  @param frame Frame whose data is an array of pointers to buffers.
+ *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
+ */
+static
shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (AVFrame* frame)
+deinterleave_audio(shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
{
auto format = static_cast<AVSampleFormat>(frame->format);
/* XXX: can't we use swr_convert() to do the format conversion? */
- int const channels = frame->channels;
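+ /* AVFrame::channels is deprecated; newer FFmpeg carries the channel count in the AVChannelLayout. */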
+ int const channels = frame->ch_layout.nb_channels;
int const frames = frame->nb_samples;
int const total_samples = frames * channels;
- auto audio = make_shared<AudioBuffers>(channels, frames);
+ /* Allocate a buffer for every channel in the stream; the frame may contain fewer, and the
+    extra channels are made silent below.
+  */
+ auto audio = make_shared<AudioBuffers>(stream->channels(), frames);
case AV_SAMPLE_FMT_FLTP:
{
auto p = reinterpret_cast<float**> (frame->data);
- DCPOMATIC_ASSERT (frame->channels <= channels);
+ DCPOMATIC_ASSERT(channels <= stream->channels());
/* Sometimes there aren't as many channels in the frame as in the stream */
- for (int i = 0; i < frame->channels; ++i) {
+ for (int i = 0; i < channels; ++i) {
memcpy (data[i], p[i], frames * sizeof(float));
}
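+ /* Silence any stream channels that this frame did not supply. */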
- for (int i = frame->channels; i < channels; ++i) {
+ for (int i = channels; i < stream->channels(); ++i) {
audio->make_silent (i);
}
}
AVSEEK_FLAG_BACKWARD
);
- {
- /* Force re-creation of filter graphs to reset them and hence to make sure
- they don't have any pre-seek frames knocking about.
- */
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
- _filter_graphs.clear ();
- }
+ /* Force re-creation of filter graphs to reset them and hence to make sure
+ they don't have any pre-seek frames knocking about.
+ */
+ _filter_graphs.clear();
if (video_codec_context ()) {
avcodec_flush_buffers (video_codec_context());
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
auto frame = audio_frame (stream);
- auto data = deinterleave_audio (frame);
+ auto data = deinterleave_audio(stream, frame);
+
+ auto const time_base = stream->stream(_format_context)->time_base;
ContentTime ct;
if (frame->pts == AV_NOPTS_VALUE) {
} else {
ct = ContentTime::from_seconds (
frame->best_effort_timestamp *
- av_q2d (stream->stream(_format_context)->time_base))
+ av_q2d(time_base))
+ _pts_offset;
+ LOG_DEBUG_PLAYER(
+ "Process audio with timestamp %1 (BET %2, timebase %3/%4, (PTS offset %5)",
+ to_string(ct),
+ frame->best_effort_timestamp,
+ time_base.num,
+ time_base.den,
+ to_string(_pts_offset)
+ );
}
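+ /* Predict the time at which the next frame on this stream should start. */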
_next_time[stream] = ct + ContentTime::from_frames(data->frames(), stream->frame_rate());
data->frames(),
stream->id(),
frame->best_effort_timestamp,
- av_q2d(stream->stream(_format_context)->time_base),
+ av_q2d(time_base),
to_string(_pts_offset)
);
}
auto context = _codec_context[stream->index(_format_context)];
auto frame = audio_frame (stream);
+ LOG_DEBUG_PLAYER("Send audio packet on stream %1", stream->index(_format_context));
int r = avcodec_send_packet (context, packet);
if (r < 0) {
LOG_WARNING("avcodec_send_packet returned %1 for an audio packet", r);
r = avcodec_receive_frame (context, frame);
if (r == AVERROR(EAGAIN)) {
/* More input is required */
+ LOG_DEBUG_PLAYER_NC("EAGAIN after trying to receive audio frame");
return;
}
void
FFmpegDecoder::process_video_frame ()
{
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
-
- shared_ptr<VideoFilterGraph> graph;
-
- auto i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process(dcp::Size(_video_frame->width, _video_frame->height), (AVPixelFormat) _video_frame->format)) {
- ++i;
- }
-
- if (i == _filter_graphs.end ()) {
- dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
- graph = make_shared<VideoFilterGraph>(dcp::Size(_video_frame->width, _video_frame->height), (AVPixelFormat) _video_frame->format, vfr);
- graph->setup (_ffmpeg_content->filters ());
- _filter_graphs.push_back (graph);
- LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _video_frame->width, _video_frame->height, _video_frame->format);
- } else {
- graph = *i;
- }
-
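+ /* get() returns the cached graph matching this size and pixel format, creating one if necessary. */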
+ auto graph = _filter_graphs.get(dcp::Size(_video_frame->width, _video_frame->height), static_cast<AVPixelFormat>(_video_frame->format));
auto images = graph->process (_video_frame);
for (auto const& i: images) {
video->emit (
film(),
make_shared<RawImageProxy>(image),
- llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
+ ContentTime::from_seconds(pts)
);
} else {
LOG_WARNING_NC ("Dropping frame without PTS");
_have_current_subtitle = true;
}
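+ /* Gather every bitmap rect in this subtitle into one ContentBitmapText so they are emitted together. */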
+ ContentBitmapText bitmap_text(from);
for (unsigned int i = 0; i < sub.num_rects; ++i) {
auto const rect = sub.rects[i];
case SUBTITLE_NONE:
break;
case SUBTITLE_BITMAP:
- process_bitmap_subtitle (rect, from);
+ bitmap_text.subs.push_back(process_bitmap_subtitle(rect));
break;
case SUBTITLE_TEXT:
cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
}
}
+ if (!bitmap_text.subs.empty()) {
+ only_text()->emit_bitmap_start(bitmap_text);
+ }
+
if (_current_subtitle_to) {
only_text()->emit_stop (*_current_subtitle_to);
}
}
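+/** Convert a single AVSubtitleRect to a BitmapText, which the caller collects and emits. */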
-void
-FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
+BitmapText
+FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect)
{
/* Note BGRA is expressed little-endian, so the first byte in the word is B, second
G, third R, fourth A.
static_cast<double>(rect->h) / target_height
);
- only_text()->emit_bitmap_start (from, image, scaled_rect);
+ return { image, scaled_rect };
}
base,
text,
_ffmpeg_content->video->size().width,
- _ffmpeg_content->video->size().height
+ _ffmpeg_content->video->size().height,
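+ /* sub::Colour(1, 1, 1) is white, presumably the fallback for text which specifies no colour. */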
+ sub::Colour(1, 1, 1)
);
for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {