FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
: FFmpeg (c)
, Decoder (film)
+ , _filter_graphs(c->filters(), dcp::Fraction(lrint(_ffmpeg_content->video_frame_rate().get_value_or(24) * 1000), 1000))
{
if (c->video && c->video->use()) {
video = make_shared<VideoDecoder>(this, c);
}
-bool
+FFmpegDecoder::FlushResult
FFmpegDecoder::flush ()
{
- /* Flush video and audio once */
+ LOG_DEBUG_PLAYER("Flush FFmpeg decoder: current state %1", static_cast<int>(_flush_state));
+
+ switch (_flush_state) {
+ case FlushState::CODECS:
+ if (flush_codecs() == FlushResult::DONE) {
+ LOG_DEBUG_PLAYER_NC("Finished flushing codecs");
+ _flush_state = FlushState::AUDIO_DECODER;
+ }
+ break;
+ case FlushState::AUDIO_DECODER:
+ if (audio) {
+ audio->flush();
+ }
+ LOG_DEBUG_PLAYER_NC("Finished flushing audio decoder");
+ _flush_state = FlushState::FILL;
+ break;
+ case FlushState::FILL:
+ if (flush_fill() == FlushResult::DONE) {
+ LOG_DEBUG_PLAYER_NC("Finished flushing fills");
+ return FlushResult::DONE;
+ }
+ break;
+ }
+
+ return FlushResult::AGAIN;
+}
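
/* The state machine above relies on FlushState/FlushResult declarations
 * in the header.  A minimal sketch of what they might look like; the
 * enumerator names are taken from the hunks in this patch, but the exact
 * layout is an assumption:
 *
 *   enum class FlushState { CODECS, AUDIO_DECODER, FILL };
 *   enum class FlushResult { DONE, AGAIN };
 *   FlushState _flush_state = FlushState::CODECS;
 */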
+
+/** @return DONE if we have finished flushing the codecs, otherwise AGAIN */
+FFmpegDecoder::FlushResult
+FFmpegDecoder::flush_codecs()
+{
bool did_something = false;
if (video) {
		if (decode_and_process_video_packet(nullptr)) {
			did_something = true;
		}
}
- if (did_something) {
- /* We want to be called again */
- return false;
- }
+ return did_something ? FlushResult::AGAIN : FlushResult::DONE;
+}
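
/* flush() is now incremental rather than one-shot: each call does a
 * bounded amount of work in the current state and returns AGAIN until
 * the whole flush has completed.  A minimal sketch of the calling
 * contract, assuming access to a decoder instance:
 *
 *   while (decoder.flush() == FFmpegDecoder::FlushResult::AGAIN) {
 *       // each call emits at most a little more video, audio or padding
 *   }
 *
 * (pass(), further down, calls flush() once per pass and converts the
 * result back to its bool return value.)
 */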
+
+FFmpegDecoder::FlushResult
+FFmpegDecoder::flush_fill()
+{
/* Make sure all streams are the same length and round up to the next video frame */
+ bool did_something = false;
+
auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
full_length = full_length.ceil (frc.source);
- if (video) {
+ if (video && !video->ignore()) {
double const vfr = _ffmpeg_content->video_frame_rate().get();
auto const f = full_length.frames_round (vfr);
- auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
- while (v < f) {
- video->emit (film(), make_shared<const RawImageProxy>(_black_image), v);
- ++v;
+ auto const v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
+ if (v < f) {
+ video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
+ did_something = true;
}
}
- for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
- auto a = audio->stream_position(film(), i);
- /* Unfortunately if a is 0 that really means that we don't know the stream position since
- there has been no data on it since the last seek. In this case we'll just do nothing
- here. I'm not sure if that's the right idea.
- */
- if (a > ContentTime()) {
- while (a < full_length) {
+ if (audio && !audio->ignore()) {
+ for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
+ auto const a = audio->stream_position(film(), i);
+ /* Unfortunately if a is 0 that really means that we don't know the stream position since
+ there has been no data on it since the last seek. In this case we'll just do nothing
+ here. I'm not sure if that's the right idea.
+ */
+ if (a > ContentTime() && a < full_length) {
+ LOG_DEBUG_PLAYER("Flush inserts silence at %1", to_string(a));
auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
silence->make_silent ();
audio->emit (film(), i, silence, a, true);
- a += to_do;
+ did_something = true;
}
}
}
- if (audio) {
- audio->flush ();
- }
-
- return true;
+ return did_something ? FlushResult::AGAIN : FlushResult::DONE;
}
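
/* A worked example of the incremental fill above, assuming (as in
 * AudioDecoder) that emit() advances the recorded stream position: with
 * a 48kHz stream finishing 0.25s short of full_length, each call emits
 * min(full_length - a, 0.1s) of silence, i.e. 4800 frames for a full
 * 0.1s chunk, so three calls (0.1s + 0.1s + 0.05s) pad the stream out
 * and the fourth finds nothing left to do and returns DONE.
 */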
Hence it makes sense to continue here in that case.
*/
if (r < 0 && r != AVERROR_INVALIDDATA) {
+ LOG_DEBUG_PLAYER("FFpmegDecoder::pass flushes because av_read_frame returned %1", r);
if (r != AVERROR_EOF) {
/* Maybe we should fail here, but for now we'll just finish off instead */
char buf[256];
av_strerror(r, buf, sizeof(buf));
LOG_ERROR(N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
}
av_packet_free (&packet);
- return flush ();
+ return flush() == FlushResult::DONE;
}
int const si = packet->stream_index;
/** @param data pointer to array of pointers to buffers.
* Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
*/
+static
shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (AVFrame* frame)
+deinterleave_audio(shared_ptr<FFmpegAudioStream> stream, AVFrame* frame)
{
auto format = static_cast<AVSampleFormat>(frame->format);
case AV_SAMPLE_FMT_FLTP:
{
auto p = reinterpret_cast<float**> (frame->data);
- DCPOMATIC_ASSERT (frame->channels <= channels);
- /* Sometimes there aren't as many channels in the frame as in the stream */
- for (int i = 0; i < frame->channels; ++i) {
+ DCPOMATIC_ASSERT(channels <= stream->channels());
+ for (int i = 0; i < channels; ++i) {
memcpy (data[i], p[i], frames * sizeof(float));
}
- for (int i = frame->channels; i < channels; ++i) {
- audio->make_silent (i);
- }
}
break;
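
/* AV_SAMPLE_FMT_FLTP is planar, so each channel lives in its own buffer
 * (frame->data[i]) and one memcpy per channel suffices.  A packed format
 * such as AV_SAMPLE_FMT_FLT keeps all channels interleaved in
 * frame->data[0] and needs a de-interleaving loop instead; a sketch,
 * using the surrounding data/frames/channels locals:
 *
 *   auto p = reinterpret_cast<float*>(frame->data[0]);
 *   for (int i = 0; i < frames; ++i) {
 *       for (int j = 0; j < channels; ++j) {
 *           data[j][i] = *p++;
 *       }
 *   }
 */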
AVSEEK_FLAG_BACKWARD
);
- {
- /* Force re-creation of filter graphs to reset them and hence to make sure
- they don't have any pre-seek frames knocking about.
- */
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
- _filter_graphs.clear ();
- }
+ /* Force re-creation of filter graphs to reset them and hence to make sure
+ they don't have any pre-seek frames knocking about.
+ */
+ _filter_graphs.clear();
if (video_codec_context ()) {
avcodec_flush_buffers (video_codec_context());
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
auto frame = audio_frame (stream);
- auto data = deinterleave_audio (frame);
+ auto data = deinterleave_audio(stream, frame);
auto const time_base = stream->stream(_format_context)->time_base;
void
FFmpegDecoder::process_video_frame ()
{
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
-
- shared_ptr<VideoFilterGraph> graph;
-
- auto i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process(dcp::Size(_video_frame->width, _video_frame->height), (AVPixelFormat) _video_frame->format)) {
- ++i;
- }
-
- if (i == _filter_graphs.end ()) {
- dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
- graph = make_shared<VideoFilterGraph>(dcp::Size(_video_frame->width, _video_frame->height), (AVPixelFormat) _video_frame->format, vfr);
- graph->setup (_ffmpeg_content->filters ());
- _filter_graphs.push_back (graph);
- LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _video_frame->width, _video_frame->height, _video_frame->format);
- } else {
- graph = *i;
- }
-
+ auto graph = _filter_graphs.get(dcp::Size(_video_frame->width, _video_frame->height), static_cast<AVPixelFormat>(_video_frame->format));
auto images = graph->process (_video_frame);
for (auto const& i: images) {