#include "image.h"
#include "util.h"
#include "log.h"
+#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
+#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
-#include <boost/foreach.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <iomanip>
#include "i18n.h"
-#define LOG_GENERAL(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
-#define LOG_ERROR(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_ERROR);
-#define LOG_WARNING_NC(...) _log->log (__VA_ARGS__, LogEntry::TYPE_WARNING);
-#define LOG_WARNING(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);
-
using std::cout;
using std::string;
using std::vector;
using std::pair;
using std::max;
using std::map;
-using boost::shared_ptr;
+using std::shared_ptr;
+using std::make_shared;
using boost::is_any_of;
using boost::split;
using boost::optional;
-using boost::dynamic_pointer_cast;
+using std::dynamic_pointer_cast;
using dcp::Size;
+using namespace dcpomatic;
+
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
: FFmpeg (c)
- , _log (log)
+ , Decoder (film)
, _have_current_subtitle (false)
{
- if (c->video) {
- video.reset (new VideoDecoder (this, c, log));
- _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate());
+ if (c->video && c->video->use()) {
+ video = make_shared<VideoDecoder>(this, c);
+ _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
/* It doesn't matter what size or pixel format this is, it just needs to be black */
_black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
_black_image->make_black ();
} else {
- _pts_offset = ContentTime ();
+ _pts_offset = {};
}
if (c->audio) {
- audio.reset (new AudioDecoder (this, c->audio, log, fast));
+ audio = make_shared<AudioDecoder>(this, c->audio, fast);
}
if (c->only_text()) {
/* XXX: this time here should be the time of the first subtitle, not 0 */
- text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), log, ContentTime())));
+ text.push_back (make_shared<TextDecoder>(this, c->only_text(), ContentTime()));
}
_next_time.resize (_format_context->nb_streams);
}
+
void
FFmpegDecoder::flush ()
{
/* XXX: should we reset _packet.data and size after each *_decode_* call? */
- while (video && decode_video_packet ()) {}
+ while (video && decode_video_packet()) {}
if (audio) {
decode_audio_packet ();
/* Make sure all streams are the same length and round up to the next video frame */
- FrameRateChange const frc = _ffmpeg_content->film()->active_frame_rate_change(_ffmpeg_content->position());
- ContentTime full_length (_ffmpeg_content->full_length(), frc);
+ auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
+ ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
full_length = full_length.ceil (frc.source);
if (video) {
double const vfr = _ffmpeg_content->video_frame_rate().get();
- Frame const f = full_length.frames_round (vfr);
- Frame v = video->position().frames_round (vfr) + 1;
+ auto const f = full_length.frames_round (vfr);
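+ /* Start from the frame after the last one we emitted */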
+ auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
while (v < f) {
- video->emit (shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
+ video->emit (film(), make_shared<const RawImageProxy>(_black_image), v);
++v;
}
}
- BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
- ContentTime a = audio->stream_position(i);
+ for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
+ auto a = audio->stream_position(film(), i);
/* Unfortunately if a is 0 that really means that we don't know the stream position since
there has been no data on it since the last seek. In this case we'll just do nothing
here. I'm not sure if that's the right idea.
*/
if (a > ContentTime()) {
while (a < full_length) {
- ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
- shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
+ auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
+ auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
silence->make_silent ();
- audio->emit (i, silence, a);
+ audio->emit (film(), i, silence, a, true);
a += to_do;
}
}
}
}
+
bool
FFmpegDecoder::pass ()
{
}
int const si = _packet.stream_index;
- shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
+ auto fc = _ffmpeg_content;
- if (_video_stream && si == _video_stream.get() && !video->ignore()) {
+ if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
decode_video_packet ();
} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
decode_subtitle_packet ();
return false;
}
+
/** @param data pointer to array of pointers to buffers.
* Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
*/
{
DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
+DCPOMATIC_DISABLE_WARNINGS
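+ /* AVStream::codec is deprecated in newer FFmpeg, so silence the warning while we still use it */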
int const size = av_samples_get_buffer_size (
0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
);
+DCPOMATIC_ENABLE_WARNINGS
+
+ /* XXX: can't we just use _frame->nb_samples directly here? */
+ /* XXX: can't we use swr_convert() to do the format conversion? */
/* Deinterleave and convert to float */
int const total_samples = size / bytes_per_audio_sample (stream);
int const channels = stream->channels();
int const frames = total_samples / channels;
- shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
- float** data = audio->data();
+ auto audio = make_shared<AudioBuffers>(channels, frames);
+ auto data = audio->data();
switch (audio_sample_format (stream)) {
case AV_SAMPLE_FMT_U8:
case AV_SAMPLE_FMT_FLTP:
{
float** p = reinterpret_cast<float**> (_frame->data);
+ DCPOMATIC_ASSERT (_frame->channels <= channels);
/* Sometimes there aren't as many channels in the _frame as in the stream */
for (int i = 0; i < _frame->channels; ++i) {
memcpy (data[i], p[i], frames * sizeof(float));
return audio;
}
+
AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
+DCPOMATIC_DISABLE_WARNINGS
return stream->stream (_format_context)->codec->sample_fmt;
+DCPOMATIC_ENABLE_WARNINGS
}
+
int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
return av_get_bytes_per_sample (audio_sample_format (stream));
}
+
void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
we don't really know what the seek will give us.
*/
- ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
+ auto pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
time -= pre_roll;
/* XXX: it seems debatable whether PTS should be used here...
if (_video_stream) {
stream = _video_stream;
} else {
- shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
+ DCPOMATIC_ASSERT (_ffmpeg_content->audio);
+ auto s = dynamic_pointer_cast<FFmpegAudioStream>(_ffmpeg_content->audio->stream());
if (s) {
stream = s->index (_format_context);
}
DCPOMATIC_ASSERT (stream);
- ContentTime u = time - _pts_offset;
+ auto u = time - _pts_offset;
if (u < ContentTime ()) {
u = ContentTime ();
}
AVSEEK_FLAG_BACKWARD
);
+ {
+ /* Force re-creation of filter graphs to reset them and hence to make sure
+ they don't have any pre-seek frames knocking about.
+ */
+ boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+ _filter_graphs.clear ();
+ }
+
if (video_codec_context ()) {
avcodec_flush_buffers (video_codec_context());
}
- BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
+DCPOMATIC_DISABLE_WARNINGS
+ for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
avcodec_flush_buffers (i->stream(_format_context)->codec);
}
+DCPOMATIC_ENABLE_WARNINGS
if (subtitle_codec_context ()) {
avcodec_flush_buffers (subtitle_codec_context ());
}
_have_current_subtitle = false;
+
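+ /* We no longer know where we are in any of the streams, so clear the per-stream timestamp estimates */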
+ for (auto& i: _next_time) {
+ i = optional<ContentTime>();
+ }
}
+
void
FFmpegDecoder::decode_audio_packet ()
{
int const stream_index = copy_packet.stream_index;
/* XXX: inefficient */
- vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
- vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
+ auto streams = ffmpeg_content()->ffmpeg_audio_streams();
+ auto stream = streams.begin ();
while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
++stream;
}
return;
}
+DCPOMATIC_DISABLE_WARNINGS
while (copy_packet.size > 0) {
int frame_finished;
shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);
ContentTime ct;
- if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
+ if (_frame->pts == AV_NOPTS_VALUE) {
/* In some streams we see not every frame coming through with a timestamp; for those
that have AV_NOPTS_VALUE we need to work out the timestamp ourselves. This is
particularly noticeable with TrueHD streams (see #1111).
*/
- ct = *_next_time[stream_index];
+ if (_next_time[stream_index]) {
+ ct = *_next_time[stream_index];
+ }
} else {
ct = ContentTime::from_seconds (
av_frame_get_best_effort_timestamp (_frame) *
_next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());
- if (ct < ContentTime ()) {
+ if (ct < ContentTime()) {
/* Discard audio data that comes before time 0 */
- Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
+ auto const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
data->move (data->frames() - remove, remove, 0);
data->set_frames (data->frames() - remove);
ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
to_string(_pts_offset)
);
}
+DCPOMATIC_ENABLE_WARNINGS
/* Give this data provided there is some, and its time is sane */
if (ct >= ContentTime() && data->frames() > 0) {
- audio->emit (*stream, data, ct);
+ audio->emit (film(), *stream, data, ct);
}
}
}
}
+
bool
FFmpegDecoder::decode_video_packet ()
{
DCPOMATIC_ASSERT (_video_stream);
int frame_finished;
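+ /* avcodec_decode_video2() is deprecated in newer FFmpeg; keep using it for now and silence the warning */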
+DCPOMATIC_DISABLE_WARNINGS
if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
return false;
}
+DCPOMATIC_ENABLE_WARNINGS
boost::mutex::scoped_lock lm (_filter_graphs_mutex);
shared_ptr<VideoFilterGraph> graph;
- list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
+ auto i = _filter_graphs.begin();
while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
++i;
}
if (i == _filter_graphs.end ()) {
dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
- graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
+ graph = make_shared<VideoFilterGraph>(dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr);
graph->setup (_ffmpeg_content->filters ());
_filter_graphs.push_back (graph);
LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
graph = *i;
}
- list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);
+ auto images = graph->process (_frame);
- for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {
+ for (auto const& i: images) {
- shared_ptr<Image> image = i->first;
+ auto image = i.first;
- if (i->second != AV_NOPTS_VALUE) {
- double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
+ if (i.second != AV_NOPTS_VALUE) {
+ double const pts = i.second * av_q2d(_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds();
video->emit (
- shared_ptr<ImageProxy> (new RawImageProxy (image)),
- llrint(pts * _ffmpeg_content->active_video_frame_rate())
+ film(),
+ make_shared<RawImageProxy>(image),
+ llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
);
} else {
LOG_WARNING_NC ("Dropping frame without PTS");
return true;
}
+
void
FFmpegDecoder::decode_subtitle_packet ()
{
/* Subtitle PTS (within the source, not taking into account any of the
source that we may have chopped off for the DCP).
*/
- FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
+ auto sub_period = subtitle_period (sub);
ContentTime from;
from = sub_period.from + _pts_offset;
if (sub_period.to) {
}
for (unsigned int i = 0; i < sub.num_rects; ++i) {
- AVSubtitleRect const * rect = sub.rects[i];
+ auto const rect = sub.rects[i];
switch (rect->type) {
case SUBTITLE_NONE:
avsubtitle_free (&sub);
}
+
void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
/* Note BGRA is expressed little-endian, so the first byte in the word is B, second
G, third R, fourth A.
*/
- shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));
+ auto image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true);
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
/* Start of the first line in the subtitle */
- uint8_t* sub_p = rect->pict.data[0];
- /* sub_p looks up into a BGRA palette which is here
+ auto sub_p = rect->pict.data[0];
+ /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
(i.e. first byte B, second G, third R, fourth A)
*/
- uint32_t const * palette = (uint32_t *) rect->pict.data[1];
+ auto const* palette = rect->pict.data[1];
#else
/* Start of the first line in the subtitle */
- uint8_t* sub_p = rect->data[0];
- /* sub_p looks up into a BGRA palette which is here
- (i.e. first byte B, second G, third R, fourth A)
+ auto sub_p = rect->data[0];
+ /* sub_p looks up into a BGRA palette which is at rect->data[1].
+ (first byte B, second G, third R, fourth A)
*/
- uint32_t const * palette = (uint32_t *) rect->data[1];
+ auto const* palette = rect->data[1];
#endif
/* And the stream has a map of those palette colours to colours
chosen by the user; created a `mapped' palette from those settings.
*/
- map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
+ auto colour_map = ffmpeg_content()->subtitle_stream()->colours();
vector<RGBA> mapped_palette (rect->nb_colors);
for (int i = 0; i < rect->nb_colors; ++i) {
- RGBA c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
- map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
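+ /* Palette entries are four bytes each: B, G, R, A */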
+ RGBA c (palette[2], palette[1], palette[0], palette[3]);
+ auto j = colour_map.find (c);
if (j != colour_map.end ()) {
mapped_palette[i] = j->second;
} else {
*/
mapped_palette[i] = c;
}
+ palette += 4;
}
/* Start of the output data */
- uint32_t* out_p = (uint32_t *) image->data()[0];
+ auto out_p = image->data()[0];
for (int y = 0; y < rect->h; ++y) {
- uint8_t* sub_line_p = sub_p;
- uint32_t* out_line_p = out_p;
+ auto sub_line_p = sub_p;
+ auto out_line_p = out_p;
for (int x = 0; x < rect->w; ++x) {
- RGBA const p = mapped_palette[*sub_line_p++];
- /* XXX: this seems to be wrong to me (isn't the output image BGRA?) but it looks right on screen */
- *out_line_p++ = (p.a << 24) | (p.b << 16) | (p.g << 8) | p.r;
+ auto const p = mapped_palette[*sub_line_p++];
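+ /* The output image is BGRA, so write the bytes in that order */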
+ *out_line_p++ = p.b;
+ *out_line_p++ = p.g;
+ *out_line_p++ = p.r;
+ *out_line_p++ = p.a;
}
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
sub_p += rect->pict.linesize[0];
#else
sub_p += rect->linesize[0];
#endif
- out_p += image->stride()[0] / sizeof (uint32_t);
+ out_p += image->stride()[0];
}
- int const target_width = subtitle_codec_context()->width;
- int const target_height = subtitle_codec_context()->height;
+ int target_width = subtitle_codec_context()->width;
+ if (target_width == 0 && video_codec_context()) {
+ /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
+ know if it's supposed to mean something from FFmpeg's point of view.
+ */
+ target_width = video_codec_context()->width;
+ }
+ int target_height = subtitle_codec_context()->height;
+ if (target_height == 0 && video_codec_context()) {
+ target_height = video_codec_context()->height;
+ }
+ DCPOMATIC_ASSERT (target_width);
+ DCPOMATIC_ASSERT (target_height);
dcpomatic::Rect<double> const scaled_rect (
- static_cast<double> (rect->x) / target_width,
- static_cast<double> (rect->y) / target_height,
- static_cast<double> (rect->w) / target_width,
- static_cast<double> (rect->h) / target_height
+ static_cast<double>(rect->x) / target_width,
+ static_cast<double>(rect->y) / target_height,
+ static_cast<double>(rect->w) / target_width,
+ static_cast<double>(rect->h) / target_height
);
only_text()->emit_bitmap_start (from, image, scaled_rect);
}
+
void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
{
}
sub::RawSubtitle base;
- list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
+ auto raw = sub::SSAReader::parse_line (
base,
text,
_ffmpeg_content->video->size().width,
_ffmpeg_content->video->size().height
);
- BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
+ for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {
only_text()->emit_plain_start (from, i);
}
}