using std::vector;
using std::stringstream;
using std::list;
+using std::min;
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
}
/* NOTE(review): unmerged diff residue below — '-' lines are the old
   version, '+' lines the replacement.  The patch changes pass() from
   bool to void, strips debug couts, and locks _film (now a weak_ptr)
   before logging.  At least one hunk gap exists: the
   avcodec_decode_subtitle2 call that fills `sub` is missing before the
   `sub.num_rects` test.  Reconcile against upstream before building. */
-bool
+void
FFmpegDecoder::pass ()
{
- cout << "ffd pass.\n";
int r = av_read_frame (_format_context, &_packet);
- cout << "A " << r << "\n";
if (r < 0) {
if (r != AVERROR_EOF) {
/* Maybe we should fail here, but for now we'll just finish off instead */
char buf[256];
av_strerror (r, buf, sizeof(buf));
/* Patch: _film is now a weak_ptr; lock and assert before use */
- _film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
}
/* Get any remaining frames */
decode_audio_packet ();
}
- return true;
+ return;
}
avcodec_get_frame_defaults (_frame);
/* Dispatch the packet to the matching decoder, if that kind of
   decoding is enabled */
if (_packet.stream_index == _video_stream && _decode_video) {
- cout << "dvp\n";
decode_video_packet ();
- cout << "ok.\n";
} else if (_ffmpeg_content->audio_stream() && _packet.stream_index == _ffmpeg_content->audio_stream()->id && _decode_audio) {
decode_audio_packet ();
} else if (_ffmpeg_content->subtitle_stream() && _packet.stream_index == _ffmpeg_content->subtitle_stream()->id && _decode_subtitles) {
/* NOTE(review): hunk gap — the subtitle decode that declares and
   fills `sub` is missing from this view */
if (sub.num_rects > 0) {
shared_ptr<TimedSubtitle> ts;
try {
/* Patch: emit_subtitle() renamed to subtitle() */
- emit_subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub)));
+ subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub)));
} catch (...) {
/* some problem with the subtitle; we probably didn't understand it */
}
} else {
- emit_subtitle (shared_ptr<TimedSubtitle> ());
+ subtitle (shared_ptr<TimedSubtitle> ());
}
avsubtitle_free (&sub);
}
}
- cout << "out.\n";
av_free_packet (&_packet);
- return false;
}
/** @param data pointer to array of pointers to buffers.
}
/** @return The size of video frames in this content, as reported by the
 *  video codec context (no cropping or scaling applied here).
 *
 *  NOTE(review): reconstructed from unapplied diff residue — the patch
 *  renames native_size() to video_size(); the '-'/'+' marker lines have
 *  been resolved in favour of the new version.
 */
libdcp::Size
FFmpegDecoder::video_size () const
{
	return libdcp::Size (_video_codec_context->width, _video_codec_context->height);
}
/* NOTE(review): every line below carries a '-' diff prefix — these
   accessors (pixel_format, time_base_numerator/denominator,
   sample_aspect_ratio_numerator/denominator) are DELETED by the
   unapplied patch.  Remove this residue entirely when reconciling;
   check that no remaining callers use them. */
-PixelFormat
-FFmpegDecoder::pixel_format () const
-{
- return _video_codec_context->pix_fmt;
-}
-
-int
-FFmpegDecoder::time_base_numerator () const
-{
- return _video_codec_context->time_base.num;
-}
-
-int
-FFmpegDecoder::time_base_denominator () const
-{
- return _video_codec_context->time_base.den;
-}
-
-int
-FFmpegDecoder::sample_aspect_ratio_numerator () const
-{
- return _video_codec_context->sample_aspect_ratio.num;
-}
-
-int
-FFmpegDecoder::sample_aspect_ratio_denominator () const
-{
- return _video_codec_context->sample_aspect_ratio.den;
-}
-
string
FFmpegDecoder::stream_name (AVStream* s) const
{
/* NOTE(review): hunk-join artifact — this return statement
   (av_get_bytes_per_sample on the audio sample format) looks like the
   body of a bytes-per-audio-sample function, not stream_name(); the
   real body of stream_name() is missing from this view.  Confirm
   against upstream before reconciling. */
return av_get_bytes_per_sample (audio_sample_format ());
}
-bool
+void
FFmpegDecoder::seek (Time t)
{
- return do_seek (t, false, false);
+ do_seek (t, false, false);
}
-bool
+void
FFmpegDecoder::seek_back ()
{
- if (last_content_time() < 2.5) {
- return true;
+ if (next() < 2.5) {
+ return;
}
- return do_seek (last_content_time() - 2.5 * TIME_HZ / video_frame_rate(), true, true);
+ do_seek (next() - 2.5 * TIME_HZ / video_frame_rate(), true, true);
}
-bool
+void
FFmpegDecoder::seek_forward ()
{
- if (last_content_time() >= (video_length() - video_frame_rate())) {
- return true;
+ if (next() >= (video_length() - video_frame_rate())) {
+ return;
}
- return do_seek (last_content_time() - 0.5 * TIME_HZ / video_frame_rate(), true, true);
+ do_seek (next() - 0.5 * TIME_HZ / video_frame_rate(), true, true);
}
/* NOTE(review): unmerged diff residue — the patch changes the return
   type to void, drops the result of av_seek_frame, and removes debug
   couts.  A hunk gap exists between the `if (_subtitle_codec_context)`
   line and the `while (1)` loop (presumably a subtitle-codec flush and
   an `accurate`-seek branch are missing from this view — confirm
   against upstream). */
-bool
+void
FFmpegDecoder::do_seek (Time t, bool backwards, bool accurate)
{
/* Convert t (in TIME_HZ units) into the video stream's time_base units */
int64_t const vt = t / (av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ);
-
- cout << "seek to " << vt << " (acc=" << accurate << ") (sec " << (vt * av_q2d (_format_context->streams[_video_stream]->time_base)) << "\n";
-
- int const r = av_seek_frame (_format_context, _video_stream, vt, backwards ? AVSEEK_FLAG_BACKWARD : 0);
+ av_seek_frame (_format_context, _video_stream, vt, backwards ? AVSEEK_FLAG_BACKWARD : 0);
avcodec_flush_buffers (_video_codec_context);
if (_subtitle_codec_context) {
/* NOTE(review): hunk gap here — loop body below is truncated */
while (1) {
int r = av_read_frame (_format_context, &_packet);
if (r < 0) {
- return true;
+ return;
}
avcodec_get_frame_defaults (_frame);
}
}
- cout << "seek ok.\n";
- return r < 0;
+ return;
}
/* NOTE(review): unmerged diff residue with hunk gaps.  The function is
   declared void here, yet un-marked `return false` / `return true`
   statements remain — either the signature context line or the returns
   belong to the other side of the diff; reconcile before building.
   Missing context: the filter-graph search loop's `while` condition
   (before `++i`) and the `bet != AV_NOPTS_VALUE` test (before the
   dangling `} else {`). */
void
FFmpegDecoder::decode_video_packet ()
{
int frame_finished;
- cout << "avc decode v2\n";
/* Bail out if the decoder errored or produced no complete frame */
if (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
return false;
}
- cout << "done that.\n";
/* _filter_graphs is shared state; hold the mutex while searching it */
boost::mutex::scoped_lock lm (_filter_graphs_mutex);
- cout << "got lock.\n";
-
shared_ptr<FilterGraph> graph;
list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
/* NOTE(review): hunk gap — the loop condition matching an existing
   graph against the frame's size/format is missing from this view */
++i;
}
- cout << "found graph.\n";
-
if (i == _filter_graphs.end ()) {
/* No matching graph yet; build one for this frame's size and pixel
   format and remember it */
graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
_filter_graphs.push_back (graph);
/* Patch: _film is now a weak_ptr; lock and assert before logging */
- _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
+
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
} else {
graph = *i;
}
- cout << "pushed in.\n";
list<shared_ptr<Image> > images = graph->process (_frame);
- cout << "got " << images.size() << "\n";
for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
/* NOTE(review): hunk gap — the `bet != AV_NOPTS_VALUE` check that this
   `else` below pairs with is missing from this view */
/* XXX: may need to insert extra frames / remove frames here ...
(as per old Matcher)
*/
- cout << "emitting.\n";
- emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ);
- cout << "emitted.\n";
+ Time const t = bet * av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ;
+ video (*i, false, t);
} else {
- _film->log()->log ("Dropping frame without PTS");
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ film->log()->log ("Dropping frame without PTS");
}
}
return true;
}
+
+Time
+FFmpegDecoder::next () const
+{
+ return min (_next_video, _next_audio);
+}