* @param e Encoder to use.
*/
-ABTranscoder::ABTranscoder (
- shared_ptr<Film> a, shared_ptr<Film> b, DecodeOptions o, Job* j, shared_ptr<Encoder> e)
+ABTranscoder::ABTranscoder (shared_ptr<Film> a, shared_ptr<Film> b, shared_ptr<Job> j)
: _film_a (a)
, _film_b (b)
+ , _player_a (_film_a->player ())
+ , _player_b (_film_b->player ())
, _job (j)
- , _encoder (e)
+ , _encoder (new Encoder (_film_a))
, _combiner (new Combiner (a->log()))
{
- if (_film_a->has_audio ()) {
- _matcher.reset (new Matcher (_film_a->log(), _film_a->audio_frame_rate(), _film_a->video_frame_rate()));
- _delay_line.reset (new DelayLine (_film_a->log(), _film_a->audio_channels(), _film_a->audio_delay() * _film_a->audio_frame_rate() / 1000));
- _gain.reset (new Gain (_film_a->log(), _film_a->audio_gain()));
- }
- _da = decoder_factory (_film_a, o);
- _db = decoder_factory (_film_b, o);
-
- shared_ptr<AudioStream> st = _film_a->audio_stream();
- _matcher.reset (new Matcher (_film_a->log(), st->sample_rate(), _film_a->source_frame_rate()));
- _delay_line.reset (new DelayLine (_film_a->log(), _film_a->audio_delay() / 1000.0f));
++ _matcher.reset (new Matcher (_film_a->log(), _film_a->audio_frame_rate(), _film_a->video_frame_rate()));
++ _delay_line.reset (new DelayLine (_film_a->log(), _film_a->audio_delay() * _film_a->audio_frame_rate() / 1000));
+ _gain.reset (new Gain (_film_a->log(), _film_a->audio_gain()));
- _player_a->Video.connect (bind (&Combiner::process_video, _combiner, _1, _2, _3));
- _player_b->Video.connect (bind (&Combiner::process_video_b, _combiner, _1, _2, _3));
- /* Set up the decoder to use the film's set streams */
- _da.video->set_subtitle_stream (_film_a->subtitle_stream ());
- _db.video->set_subtitle_stream (_film_a->subtitle_stream ());
- _da.audio->set_audio_stream (_film_a->audio_stream ());
-
- _da.video->Video.connect (bind (&Combiner::process_video, _combiner, _1, _2, _3, _4));
- _db.video->Video.connect (bind (&Combiner::process_video_b, _combiner, _1, _2, _3, _4));
++ _player_a->Video.connect (bind (&Combiner::process_video, _combiner, _1, _2, _3, _4));
++ _player_b->Video.connect (bind (&Combiner::process_video_b, _combiner, _1, _2, _3, _4));
- if (_matcher) {
- _combiner->connect_video (_matcher);
- _matcher->connect_video (_encoder);
- } else {
- _combiner->connect_video (_encoder);
- }
+ _combiner->connect_video (_delay_line);
+ _delay_line->connect_video (_matcher);
+ _matcher->connect_video (_encoder);
- if (_matcher && _delay_line) {
- _player_a->connect_audio (_delay_line);
- _delay_line->connect_audio (_matcher);
- _matcher->connect_audio (_gain);
- _gain->connect_audio (_encoder);
- }
- _da.audio->connect_audio (_delay_line);
++ _player_a->connect_audio (_delay_line);
+ _delay_line->connect_audio (_matcher);
+ _matcher->connect_audio (_gain);
+ _gain->connect_audio (_encoder);
}
void
/** @class AudioDecoder.
* @brief Parent class for audio decoders.
*/
- class AudioDecoder : public AudioSource, public virtual Decoder
+ class AudioDecoder : public TimedAudioSource, public virtual Decoder
{
public:
- AudioDecoder (boost::shared_ptr<Film>, DecodeOptions);
-
- virtual void set_audio_stream (boost::shared_ptr<AudioStream>);
-
- /** @return Audio stream that we are using */
- boost::shared_ptr<AudioStream> audio_stream () const {
- return _audio_stream;
- }
-
- /** @return All available audio streams */
- std::vector<boost::shared_ptr<AudioStream> > audio_streams () const {
- return _audio_streams;
- }
-
-protected:
- /** Audio stream that we are using */
- boost::shared_ptr<AudioStream> _audio_stream;
- /** All available audio streams */
- std::vector<boost::shared_ptr<AudioStream> > _audio_streams;
+ AudioDecoder (boost::shared_ptr<const Film>);
};
#endif
void
AudioSource::connect_audio (shared_ptr<AudioSink> s)
{
- Audio.connect (bind (&AudioSink::process_audio, s, _1));
+ Audio.connect (bind (process_audio_proxy, weak_ptr<AudioSink> (s), _1));
}
+
+ void
+ TimedAudioSource::connect_audio (shared_ptr<TimedAudioSink> s)
+ {
+ Audio.connect (bind (&TimedAudioSink::process_audio, s, _1, _2));
+ }
virtual bool pass () = 0;
virtual bool seek (double);
- virtual bool seek_to_last ();
+ virtual void seek_back () {}
+ virtual void seek_forward () {}
+
+ boost::signals2::signal<void()> OutputChanged;
protected:
- /** our Film */
- boost::shared_ptr<Film> _film;
- /** our decode options */
- DecodeOptions _opt;
+ boost::shared_ptr<const Film> _film;
private:
virtual void film_changed (Film::Property) {}
--- /dev/null
- shared_ptr<FFmpegDecoder> decoder (new FFmpegDecoder (film, shared_from_this (), true, false, false, true));
+/*
+ Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include <libcxml/cxml.h>
+#include "ffmpeg_content.h"
+#include "ffmpeg_decoder.h"
+#include "compose.hpp"
+#include "job.h"
+#include "util.h"
+#include "log.h"
+
+#include "i18n.h"
+
+using std::string;
+using std::stringstream;
+using std::vector;
+using std::list;
+using std::cout;
+using boost::shared_ptr;
+using boost::lexical_cast;
+
+int const FFmpegContentProperty::SUBTITLE_STREAMS = 100;
+int const FFmpegContentProperty::SUBTITLE_STREAM = 101;
+int const FFmpegContentProperty::AUDIO_STREAMS = 102;
+int const FFmpegContentProperty::AUDIO_STREAM = 103;
+
+FFmpegContent::FFmpegContent (boost::filesystem::path f)
+ : Content (f)
+ , VideoContent (f)
+ , AudioContent (f)
+{
+
+}
+
+FFmpegContent::FFmpegContent (shared_ptr<const cxml::Node> node)
+ : Content (node)
+ , VideoContent (node)
+ , AudioContent (node)
+{
+ list<shared_ptr<cxml::Node> > c = node->node_children ("SubtitleStream");
+ for (list<shared_ptr<cxml::Node> >::const_iterator i = c.begin(); i != c.end(); ++i) {
+ _subtitle_streams.push_back (FFmpegSubtitleStream (*i));
+ if ((*i)->optional_number_child<int> ("Selected")) {
+ _subtitle_stream = _subtitle_streams.back ();
+ }
+ }
+
+ c = node->node_children ("AudioStream");
+ for (list<shared_ptr<cxml::Node> >::const_iterator i = c.begin(); i != c.end(); ++i) {
+ _audio_streams.push_back (FFmpegAudioStream (*i));
+ if ((*i)->optional_number_child<int> ("Selected")) {
+ _audio_stream = _audio_streams.back ();
+ }
+ }
+}
+
+FFmpegContent::FFmpegContent (FFmpegContent const & o)
+ : Content (o)
+ , VideoContent (o)
+ , AudioContent (o)
+ , _subtitle_streams (o._subtitle_streams)
+ , _subtitle_stream (o._subtitle_stream)
+ , _audio_streams (o._audio_streams)
+ , _audio_stream (o._audio_stream)
+{
+
+}
+
+void
+FFmpegContent::as_xml (xmlpp::Node* node) const
+{
+ node->add_child("Type")->add_child_text ("FFmpeg");
+ Content::as_xml (node);
+ VideoContent::as_xml (node);
+
+ boost::mutex::scoped_lock lm (_mutex);
+
+ for (vector<FFmpegSubtitleStream>::const_iterator i = _subtitle_streams.begin(); i != _subtitle_streams.end(); ++i) {
+ xmlpp::Node* t = node->add_child("SubtitleStream");
+ if (_subtitle_stream && *i == _subtitle_stream.get()) {
+ t->add_child("Selected")->add_child_text("1");
+ }
+ i->as_xml (t);
+ }
+
+ for (vector<FFmpegAudioStream>::const_iterator i = _audio_streams.begin(); i != _audio_streams.end(); ++i) {
+ xmlpp::Node* t = node->add_child("AudioStream");
+ if (_audio_stream && *i == _audio_stream.get()) {
+ t->add_child("Selected")->add_child_text("1");
+ }
+ i->as_xml (t);
+ }
+}
+
+void
+FFmpegContent::examine (shared_ptr<Film> film, shared_ptr<Job> job, bool quick)
+{
+ job->set_progress_unknown ();
+
+ Content::examine (film, job, quick);
+
++ shared_ptr<FFmpegDecoder> decoder (new FFmpegDecoder (film, shared_from_this (), true, false, false));
+
+ ContentVideoFrame video_length = 0;
+ if (quick) {
+ video_length = decoder->video_length ();
+ film->log()->log (String::compose ("Video length obtained from header as %1 frames", decoder->video_length ()));
+ } else {
+ while (!decoder->pass ()) {
+ /* keep going */
+ }
+
+ video_length = decoder->video_frame ();
+ film->log()->log (String::compose ("Video length examined as %1 frames", decoder->video_frame ()));
+ }
+
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+
+ _video_length = video_length;
+
+ _subtitle_streams = decoder->subtitle_streams ();
+ if (!_subtitle_streams.empty ()) {
+ _subtitle_stream = _subtitle_streams.front ();
+ }
+
+ _audio_streams = decoder->audio_streams ();
+ if (!_audio_streams.empty ()) {
+ _audio_stream = _audio_streams.front ();
+ }
+ }
+
+ take_from_video_decoder (decoder);
+
+ signal_changed (VideoContentProperty::VIDEO_LENGTH);
+ signal_changed (FFmpegContentProperty::SUBTITLE_STREAMS);
+ signal_changed (FFmpegContentProperty::SUBTITLE_STREAM);
+ signal_changed (FFmpegContentProperty::AUDIO_STREAMS);
+ signal_changed (FFmpegContentProperty::AUDIO_STREAM);
+ signal_changed (AudioContentProperty::AUDIO_CHANNELS);
+}
+
+string
+FFmpegContent::summary () const
+{
+ return String::compose (_("Movie: %1"), file().filename().string());
+}
+
+string
+FFmpegContent::information () const
+{
+ if (video_length() == 0 || video_frame_rate() == 0) {
+ return "";
+ }
+
+ stringstream s;
+
+ s << String::compose (_("%1 frames; %2 frames per second"), video_length(), video_frame_rate()) << "\n";
+ s << VideoContent::information ();
+
+ return s.str ();
+}
+
+void
+FFmpegContent::set_subtitle_stream (FFmpegSubtitleStream s)
+{
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+ _subtitle_stream = s;
+ }
+
+ signal_changed (FFmpegContentProperty::SUBTITLE_STREAM);
+}
+
+void
+FFmpegContent::set_audio_stream (FFmpegAudioStream s)
+{
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+ _audio_stream = s;
+ }
+
+ signal_changed (FFmpegContentProperty::AUDIO_STREAM);
+}
+
+ContentAudioFrame
+FFmpegContent::audio_length () const
+{
+ if (!_audio_stream) {
+ return 0;
+ }
+
+ return video_frames_to_audio_frames (_video_length, audio_frame_rate(), video_frame_rate());
+}
+
+int
+FFmpegContent::audio_channels () const
+{
+ if (!_audio_stream) {
+ return 0;
+ }
+
+ return _audio_stream->channels;
+}
+
+int
+FFmpegContent::audio_frame_rate () const
+{
+ if (!_audio_stream) {
+ return 0;
+ }
+
+ return _audio_stream->frame_rate;
+}
+
+bool
+operator== (FFmpegSubtitleStream const & a, FFmpegSubtitleStream const & b)
+{
+ return a.id == b.id;
+}
+
+bool
+operator== (FFmpegAudioStream const & a, FFmpegAudioStream const & b)
+{
+ return a.id == b.id;
+}
+
+FFmpegAudioStream::FFmpegAudioStream (shared_ptr<const cxml::Node> node)
+{
+ name = node->string_child ("Name");
+ id = node->number_child<int> ("Id");
+ frame_rate = node->number_child<int> ("FrameRate");
+ channels = node->number_child<int64_t> ("Channels");
+}
+
+void
+FFmpegAudioStream::as_xml (xmlpp::Node* root) const
+{
+ root->add_child("Name")->add_child_text (name);
+ root->add_child("Id")->add_child_text (lexical_cast<string> (id));
+ root->add_child("FrameRate")->add_child_text (lexical_cast<string> (frame_rate));
+ root->add_child("Channels")->add_child_text (lexical_cast<string> (channels));
+}
+
+/** Construct an FFmpegSubtitleStream from an XML node.
+ * @param node XML node (a <SubtitleStream> element from the film metadata).
+ */
+FFmpegSubtitleStream::FFmpegSubtitleStream (shared_ptr<const cxml::Node> node)
+{
+ name = node->string_child ("Name");
+ id = node->number_child<int> ("Id");
+}
+
+void
+FFmpegSubtitleStream::as_xml (xmlpp::Node* root) const
+{
+ root->add_child("Name")->add_child_text (name);
+ root->add_child("Id")->add_child_text (lexical_cast<string> (id));
+}
+
+shared_ptr<Content>
+FFmpegContent::clone () const
+{
+ return shared_ptr<Content> (new FFmpegContent (*this));
+}
using boost::dynamic_pointer_cast;
using libdcp::Size;
-FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, DecodeOptions o)
- : Decoder (f, o)
- , VideoDecoder (f, o)
- , AudioDecoder (f, o)
+boost::mutex FFmpegDecoder::_mutex;
+
- FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> f, shared_ptr<const FFmpegContent> c, bool video, bool audio, bool subtitles, bool video_sync)
++FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> f, shared_ptr<const FFmpegContent> c, bool video, bool audio, bool subtitles)
+ : Decoder (f)
+ , VideoDecoder (f)
+ , AudioDecoder (f)
+ , _ffmpeg_content (c)
, _format_context (0)
, _video_stream (-1)
, _frame (0)
, _audio_codec (0)
, _subtitle_codec_context (0)
, _subtitle_codec (0)
- , _video_sync (video_sync)
+ , _decode_video (video)
+ , _decode_audio (audio)
+ , _decode_subtitles (subtitles)
{
setup_general ();
setup_video ();
_packet.data = 0;
_packet.size = 0;
-
+
/* XXX: should we reset _packet.data and size after each *_decode_* call? */
-
+
int frame_finished;
-
- if (_opt.decode_video) {
+
+ if (_decode_video) {
while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
- filter_and_emit_video (_frame);
+ filter_and_emit_video ();
}
}
-
- if (_audio_stream && _opt.decode_audio) {
+
+ if (_ffmpeg_content->audio_stream() && _decode_audio) {
decode_audio_packet ();
}
-
+
return true;
}
_film->log()->log (String::compose (N_("Used only %1 bytes of %2 in packet"), r, _packet.size));
}
- if (_video_sync) {
- out_with_sync ();
- } else {
- filter_and_emit_video (_frame);
- }
+ filter_and_emit_video ();
}
- } else if (ffa && _packet.stream_index == ffa->id() && _opt.decode_audio) {
+ } else if (_ffmpeg_content->audio_stream() && _packet.stream_index == _ffmpeg_content->audio_stream()->id && _decode_audio) {
decode_audio_packet ();
- } else if (_ffmpeg_content->subtitle_stream() && _packet.stream_index == _ffmpeg_content->subtitle_stream()->id && _decode_subtitles && _first_video) {
- } else if (_subtitle_stream && _packet.stream_index == _subtitle_stream->id() && _opt.decode_subtitles) {
++ } else if (_ffmpeg_content->subtitle_stream() && _packet.stream_index == _ffmpeg_content->subtitle_stream()->id && _decode_subtitles) {
int got_subtitle;
AVSubtitle sub;
return av_get_bytes_per_sample (audio_sample_format ());
}
-void
-FFmpegDecoder::set_audio_stream (shared_ptr<AudioStream> s)
-{
- AudioDecoder::set_audio_stream (s);
- setup_audio ();
-}
-
-void
-FFmpegDecoder::set_subtitle_stream (shared_ptr<SubtitleStream> s)
-{
- VideoDecoder::set_subtitle_stream (s);
- setup_subtitle ();
- OutputChanged ();
-}
-
void
- FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
+ FFmpegDecoder::filter_and_emit_video ()
{
boost::mutex::scoped_lock lm (_filter_graphs_mutex);
bool
FFmpegDecoder::seek (double p)
{
- /* This use of AVSEEK_FLAG_BACKWARD is a bit of a hack; without it, if we ask for a seek to the same place as last time
- (used when we change decoder parameters and want to re-fetch the frame) we end up going forwards rather than
- staying in the same place.
- */
- bool const backwards = (p == last_content_time());
-
+ return do_seek (p, false, false);
+ }
+
-bool
-FFmpegDecoder::seek_to_last ()
-{
- /* This AVSEEK_FLAG_BACKWARD in do_seek is a bit of a hack; without it, if we ask for a seek to the same place as last time
- (used when we change decoder parameters and want to re-fetch the frame) we end up going forwards rather than
- staying in the same place.
- */
- return do_seek (last_source_time(), true, false);
-}
-
+ void
+ FFmpegDecoder::seek_back ()
+ {
- do_seek (last_source_time() - 2.5 / frames_per_second (), true, true);
++ do_seek (last_content_time() - 2.5 / video_frame_rate(), true, true);
+ }
+
+ void
+ FFmpegDecoder::seek_forward ()
+ {
- do_seek (last_source_time() - 0.5 / frames_per_second(), true, true);
++ do_seek (last_content_time() - 0.5 / video_frame_rate(), true, true);
+ }
+
+ bool
+ FFmpegDecoder::do_seek (double p, bool backwards, bool accurate)
+ {
int64_t const vt = p / av_q2d (_format_context->streams[_video_stream]->time_base);
int const r = av_seek_frame (_format_context, _video_stream, vt, backwards ? AVSEEK_FLAG_BACKWARD : 0);
if (_subtitle_codec_context) {
avcodec_flush_buffers (_subtitle_codec_context);
}
-
- return r < 0;
- }
- void
- FFmpegDecoder::out_with_sync ()
- {
- /* Where we are in the output, in seconds */
- double const out_pts_seconds = video_frame() / video_frame_rate();
-
- /* Where we are in the source, in seconds */
- double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base)
- * av_frame_get_best_effort_timestamp(_frame);
-
- _film->log()->log (
- String::compose (N_("Source video frame ready; source at %1, output at %2"), source_pts_seconds, out_pts_seconds),
- Log::VERBOSE
- );
-
- if (!_first_video) {
- _first_video = source_pts_seconds;
- }
-
- /* Difference between where we are and where we should be */
- double const delta = source_pts_seconds - _first_video.get() - out_pts_seconds;
- double const one_frame = 1 / video_frame_rate();
-
- /* Insert frames if required to get out_pts_seconds up to pts_seconds */
- if (delta > one_frame) {
- int const extra = rint (delta / one_frame);
- for (int i = 0; i < extra; ++i) {
- repeat_last_video (frame_time ());
- _film->log()->log (
- String::compose (
- N_("Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)"),
- out_pts_seconds, video_frame(), source_pts_seconds, video_frame_rate()
- )
- );
+ if (accurate) {
+ while (1) {
+ int r = av_read_frame (_format_context, &_packet);
+ if (r < 0) {
+ return true;
+ }
+
+ avcodec_get_frame_defaults (_frame);
+
+ if (_packet.stream_index == _video_stream) {
+ int finished = 0;
+ int const r = avcodec_decode_video2 (_video_codec_context, _frame, &finished, &_packet);
+ if (r >= 0 && finished) {
+ int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
+ if (bet > vt) {
+ break;
+ }
+ }
+ }
+
+ av_free_packet (&_packet);
}
}
-
- if (delta > -one_frame) {
- /* Process this frame */
- filter_and_emit_video (_frame);
- } else {
- /* Otherwise we are omitting a frame to keep things right */
- _film->log()->log (String::compose (N_("Frame removed at %1s"), out_pts_seconds));
- }
+
+ return r < 0;
}
-shared_ptr<FFmpegAudioStream>
-FFmpegAudioStream::create (string t, optional<int> v)
-{
- if (!v) {
- /* version < 1; no type in the string, and there's only FFmpeg streams anyway */
- return shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream (t, v));
- }
-
- stringstream s (t);
- string type;
- s >> type;
- if (type != N_("ffmpeg")) {
- return shared_ptr<FFmpegAudioStream> ();
- }
-
- return shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream (t, v));
-}
-
-FFmpegAudioStream::FFmpegAudioStream (string t, optional<int> version)
-{
- stringstream n (t);
-
- int name_index = 4;
- if (!version) {
- name_index = 2;
- int channels;
- n >> _id >> channels;
- _channel_layout = av_get_default_channel_layout (channels);
- _sample_rate = 0;
- } else {
- string type;
- /* Current (marked version 1) */
- n >> type >> _id >> _sample_rate >> _channel_layout;
- assert (type == N_("ffmpeg"));
- }
-
- for (int i = 0; i < name_index; ++i) {
- size_t const s = t.find (' ');
- if (s != string::npos) {
- t = t.substr (s + 1);
- }
- }
-
- _name = t;
-}
-
-string
-FFmpegAudioStream::to_string () const
-{
- return String::compose (N_("ffmpeg %1 %2 %3 %4"), _id, _sample_rate, _channel_layout, _name);
-}
-
void
FFmpegDecoder::film_changed (Film::Property p)
{
}
/** @return Length (in video frames) according to our content's header */
-SourceFrame
-FFmpegDecoder::length () const
+ContentVideoFrame
+FFmpegDecoder::video_length () const
{
- return (double(_format_context->duration) / AV_TIME_BASE) * frames_per_second();
+ return (double(_format_context->duration) / AV_TIME_BASE) * video_frame_rate();
}
- double
- FFmpegDecoder::frame_time () const
- {
- return av_frame_get_best_effort_timestamp(_frame) * av_q2d (_format_context->streams[_video_stream]->time_base);
- }
-
void
FFmpegDecoder::decode_audio_packet ()
{
0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1
);
- assert (_audio_codec_context->channels == _film->audio_channels());
+ assert (_audio_codec_context->channels == _ffmpeg_content->audio_channels());
- Audio (deinterleave_audio (_frame->data, data_size));
+ Audio (deinterleave_audio (_frame->data, data_size), source_pts_seconds);
}
- }
-
- if (decode_result >= 0) {
+
copy_packet.data += decode_result;
copy_packet.size -= decode_result;
}
class FFmpegDecoder : public VideoDecoder, public AudioDecoder
{
public:
- FFmpegDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const FFmpegContent>, bool video, bool audio, bool subtitles, bool video_sync);
- FFmpegDecoder (boost::shared_ptr<Film>, DecodeOptions);
++ FFmpegDecoder (boost::shared_ptr<const Film>, boost::shared_ptr<const FFmpegContent>, bool video, bool audio, bool subtitles);
~FFmpegDecoder ();
- float frames_per_second () const;
+ float video_frame_rate () const;
libdcp::Size native_size () const;
- SourceFrame length () const;
+ ContentVideoFrame video_length () const;
int time_base_numerator () const;
int time_base_denominator () const;
int sample_aspect_ratio_numerator () const;
int sample_aspect_ratio_denominator () const;
- void set_audio_stream (boost::shared_ptr<AudioStream>);
- void set_subtitle_stream (boost::shared_ptr<SubtitleStream>);
+ std::vector<FFmpegSubtitleStream> subtitle_streams () const {
+ return _subtitle_streams;
+ }
+
+ std::vector<FFmpegAudioStream> audio_streams () const {
+ return _audio_streams;
+ }
bool seek (double);
- bool seek_to_last ();
+ void seek_forward ();
+ void seek_back ();
+ bool pass ();
private:
PixelFormat pixel_format () const;
AVSampleFormat audio_sample_format () const;
int bytes_per_audio_sample () const;
++ bool do_seek (double, bool, bool);
- void out_with_sync ();
- void filter_and_emit_video (AVFrame *);
- double frame_time () const;
+ void filter_and_emit_video ();
void setup_general ();
void setup_video ();
AVPacket _packet;
- boost::optional<double> _first_video;
- boost::optional<double> _first_audio;
-
std::list<boost::shared_ptr<FilterGraph> > _filter_graphs;
boost::mutex _filter_graphs_mutex;
- bool _video_sync;
+
+ std::vector<FFmpegSubtitleStream> _subtitle_streams;
+ std::vector<FFmpegAudioStream> _audio_streams;
+
+ bool _decode_video;
+ bool _decode_audio;
+ bool _decode_subtitles;
+
+ /* It would appear (though not completely verified) that one must have
+ a mutex around calls to avcodec_open* and avcodec_close... and here
+ it is.
+ */
+ static boost::mutex _mutex;
};
}
}
- _sndfile_stream = SndfileStream::create ();
-
if (must_exist) {
read_metadata ();
+ } else {
+ write_metadata ();
}
_log.reset (new FileLog (file ("log")));
void
Film::write_metadata () const
{
+ ContentList the_content = content ();
+
boost::mutex::scoped_lock lm (_state_mutex);
+ LocaleGuard lg;
boost::filesystem::create_directories (directory());
Film::read_metadata ()
{
boost::mutex::scoped_lock lm (_state_mutex);
+ LocaleGuard lg;
- _external_audio.clear ();
- _content_audio_streams.clear ();
- _subtitle_streams.clear ();
-
- boost::optional<int> version;
-
- /* Backward compatibility things */
- boost::optional<int> audio_sample_rate;
- boost::optional<int> audio_stream_index;
- boost::optional<int> subtitle_stream_index;
-
- ifstream f (file ("metadata").c_str());
- if (!f.good()) {
- throw OpenFileError (file ("metadata"));
+ if (boost::filesystem::exists (file ("metadata")) && !boost::filesystem::exists (file ("metadata.xml"))) {
+ throw StringError (_("This film was created with an older version of DCP-o-matic, and unfortunately it cannot be loaded into this version. You will need to create a new Film, re-add your content and set it up again. Sorry!"));
}
-
- multimap<string, string> kv = read_key_value (f);
- /* We need version before anything else */
- multimap<string, string>::iterator v = kv.find ("version");
- if (v != kv.end ()) {
- version = atoi (v->second.c_str());
- }
+ cxml::File f (file ("metadata.xml"), "Metadata");
- for (multimap<string, string>::const_iterator i = kv.begin(); i != kv.end(); ++i) {
- string const k = i->first;
- string const v = i->second;
+ _name = f.string_child ("Name");
+ _use_dci_name = f.bool_child ("UseDCIName");
+ _trust_content_headers = f.bool_child ("TrustContentHeaders");
- if (k == "audio_sample_rate") {
- audio_sample_rate = atoi (v.c_str());
+ {
+ optional<string> c = f.optional_string_child ("DCPContentType");
+ if (c) {
+ _dcp_content_type = DCPContentType::from_dci_name (c.get ());
}
+ }
- /* User-specified stuff */
- if (k == "name") {
- _name = v;
- } else if (k == "use_dci_name") {
- _use_dci_name = (v == "1");
- } else if (k == "content") {
- _content = v;
- } else if (k == "trust_content_header") {
- _trust_content_header = (v == "1");
- } else if (k == "dcp_content_type") {
- if (version < 3) {
- _dcp_content_type = DCPContentType::from_pretty_name (v);
- } else {
- _dcp_content_type = DCPContentType::from_dci_name (v);
- }
- } else if (k == "format") {
- _format = Format::from_metadata (v);
- } else if (k == "left_crop") {
- _crop.left = atoi (v.c_str ());
- } else if (k == "right_crop") {
- _crop.right = atoi (v.c_str ());
- } else if (k == "top_crop") {
- _crop.top = atoi (v.c_str ());
- } else if (k == "bottom_crop") {
- _crop.bottom = atoi (v.c_str ());
- } else if (k == "filter") {
- _filters.push_back (Filter::from_id (v));
- } else if (k == "scaler") {
- _scaler = Scaler::from_id (v);
- } else if ( ((!version || version < 2) && k == "dcp_trim_start") || k == "trim_start") {
- _trim_start = atoi (v.c_str ());
- } else if ( ((!version || version < 2) && k == "dcp_trim_end") || k == "trim_end") {
- _trim_end = atoi (v.c_str ());
- } else if (k == "trim_type") {
- if (v == "cpl") {
- _trim_type = CPL;
- } else if (v == "encode") {
- _trim_type = ENCODE;
- }
- } else if (k == "dcp_ab") {
- _dcp_ab = (v == "1");
- } else if (k == "selected_content_audio_stream" || (!version && k == "selected_audio_stream")) {
- if (!version) {
- audio_stream_index = atoi (v.c_str ());
- } else {
- _content_audio_stream = audio_stream_factory (v, version);
- }
- } else if (k == "external_audio") {
- _external_audio.push_back (v);
- } else if (k == "use_content_audio") {
- _use_content_audio = (v == "1");
- } else if (k == "audio_gain") {
- _audio_gain = atof (v.c_str ());
- } else if (k == "audio_delay") {
- _audio_delay = atoi (v.c_str ());
- } else if (k == "still_duration") {
- _still_duration = atoi (v.c_str ());
- } else if (k == "selected_subtitle_stream") {
- if (!version) {
- subtitle_stream_index = atoi (v.c_str ());
- } else {
- _subtitle_stream = subtitle_stream_factory (v, version);
- }
- } else if (k == "with_subtitles") {
- _with_subtitles = (v == "1");
- } else if (k == "subtitle_offset") {
- _subtitle_offset = atoi (v.c_str ());
- } else if (k == "subtitle_scale") {
- _subtitle_scale = atof (v.c_str ());
- } else if (k == "colour_lut") {
- _colour_lut = atoi (v.c_str ());
- } else if (k == "j2k_bandwidth") {
- _j2k_bandwidth = atoi (v.c_str ());
- } else if (k == "dci_date") {
- _dci_date = boost::gregorian::from_undelimited_string (v);
- } else if (k == "dcp_frame_rate") {
- _dcp_frame_rate = atoi (v.c_str ());
+ {
+ optional<string> c = f.optional_string_child ("Format");
+ if (c) {
+ _format = Format::from_id (c.get ());
}
+ }
- _dci_metadata.read (k, v);
-
- /* Cached stuff */
- if (k == "width") {
- _size.width = atoi (v.c_str ());
- } else if (k == "height") {
- _size.height = atoi (v.c_str ());
- } else if (k == "length") {
- int const vv = atoi (v.c_str ());
- if (vv) {
- _length = vv;
- }
- } else if (k == "content_digest") {
- _content_digest = v;
- } else if (k == "content_audio_stream" || (!version && k == "audio_stream")) {
- _content_audio_streams.push_back (audio_stream_factory (v, version));
- } else if (k == "external_audio_stream") {
- _sndfile_stream = audio_stream_factory (v, version);
- } else if (k == "subtitle_stream") {
- _subtitle_streams.push_back (subtitle_stream_factory (v, version));
- } else if (k == "source_frame_rate") {
- _source_frame_rate = atof (v.c_str ());
- } else if (version < 4 && k == "frames_per_second") {
- _source_frame_rate = atof (v.c_str ());
- /* Fill in what would have been used for DCP frame rate by the older version */
- _dcp_frame_rate = best_dcp_frame_rate (_source_frame_rate);
+ {
+ optional<string> c = f.optional_string_child ("TrimType");
+ if (!c || c.get() == "CPL") {
+ _trim_type = CPL;
+ } else if (c && c.get() == "Encode") {
+ _trim_type = ENCODE;
}
}
}
- int
- VariableFormat::ratio_as_integer (shared_ptr<const Film> f) const
- {
- return rint (ratio_as_float (f) * 100);
- }
-
float
- VariableFormat::ratio_as_float (shared_ptr<const Film> f) const
+ VariableFormat::ratio (shared_ptr<const Film> f) const
{
- libdcp::Size const c = f->cropped_size (f->size ());
+ libdcp::Size const c = f->cropped_size (f->video_size ());
return float (c.width) / c.height;
}
, _dci_name (d)
{}
- /** @return the aspect ratio multiplied by 100
- * (e.g. 239 for Cinemascope 2.39:1)
- */
- virtual int ratio_as_integer (boost::shared_ptr<const Film> f) const = 0;
-
- /** @return the ratio as a floating point number */
- virtual float ratio_as_float (boost::shared_ptr<const Film> f) const = 0;
-
- /** @return the ratio of the container (including any padding) as a floating point number */
- float container_ratio_as_float () const;
+ /** @return the ratio of the container (including any padding) */
+ float container_ratio () const;
- int dcp_padding (boost::shared_ptr<const Film> f) const;
+ int dcp_padding (boost::shared_ptr<const Film>) const;
/** @return size in pixels of the images that we should
* put in a DCP for this ratio. This size will not correspond
bool
ImageMagickDecoder::pass ()
{
- if (_iter == _files.end()) {
- if (video_frame() >= _film->still_duration_in_frames()) {
- return true;
- }
+ if (_position < 0 || _position >= _imagemagick_content->video_length ()) {
+ return true;
+ }
- if (have_last_video ()) {
- repeat_last_video (double (_position) / 24);
- emit_video (_image, true, double (video_frame()) / frames_per_second());
++ if (_image) {
++ emit_video (_image, true, double (_position) / 24);
+ _position++;
return false;
}
- Magick::Image* magick_image = new Magick::Image (_film->content_path ());
+ Magick::Image* magick_image = new Magick::Image (_imagemagick_content->file().string ());
libdcp::Size size = native_size ();
-- shared_ptr<Image> image (new SimpleImage (PIX_FMT_RGB24, size, false));
++ _image.reset (new SimpleImage (PIX_FMT_RGB24, size, false));
using namespace MagickCore;
-- uint8_t* p = image->data()[0];
++ uint8_t* p = _image->data()[0];
for (int y = 0; y < size.height; ++y) {
for (int x = 0; x < size.width; ++x) {
Magick::Color c = magick_image->pixelColor (x, y);
delete magick_image;
- image = image->crop (_film->crop(), true);
-
- emit_video (image, double (_position) / 24);
- _image = image->crop (_film->crop(), true);
-
- emit_video (_image, false, double (video_frame()) / frames_per_second());
++ _image = _image->crop (_film->crop(), true);
++ emit_video (_image, false, double (_position) / 24);
- ++_iter;
+ ++_position;
return false;
}
}
private:
- void film_changed (Film::Property);
-
- std::list<std::string> _files;
- std::list<std::string>::iterator _iter;
-
+ boost::shared_ptr<const ImageMagickContent> _imagemagick_content;
+ boost::shared_ptr<Image> _image;
+ ContentVideoFrame _position;
};
}
void
- Matcher::process_video (shared_ptr<Image> i, bool same, shared_ptr<Subtitle> s)
-Matcher::process_video (boost::shared_ptr<Image> image, bool same, boost::shared_ptr<Subtitle> sub, double t)
++Matcher::process_video (shared_ptr<Image> image, bool same, boost::shared_ptr<Subtitle> sub, double t)
{
- Video (i, same, s);
- _video_frames++;
+ _pixel_format = image->pixel_format ();
+ _size = image->size ();
- _pixel_format = i->pixel_format ();
- _size = i->size ();
+ _log->log(String::compose("Matcher video @ %1 [audio=%2, video=%3, pending_audio=%4]", t, _audio_frames, _video_frames, _pending_audio.size()));
+
+ if (!_first_input) {
+ _first_input = t;
+ }
+
+ bool const this_is_first_video = !_had_first_video;
+ _had_first_video = true;
+
+ if (this_is_first_video && _had_first_audio) {
+ /* First video since we got audio */
+ fix_start (t);
+ }
+
+ /* Video before audio is fine, since we can make up an arbitrary difference
+ with audio samples (contrasting with video which is quantised to frames)
+ */
+
+ /* Difference between where this video is and where it should be */
+ double const delta = t - _first_input.get() - _video_frames / _frames_per_second;
+ double const one_frame = 1 / _frames_per_second;
+
+ if (delta > one_frame) {
+ /* Insert frames to make up the difference */
+ int const extra = rint (delta / one_frame);
+ for (int i = 0; i < extra; ++i) {
+ repeat_last_video ();
+ _log->log (String::compose ("Extra video frame inserted at %1s", _video_frames / _frames_per_second));
+ }
+ }
+
+ if (delta > -one_frame) {
+ Video (image, same, sub);
+ ++_video_frames;
+ } else {
+ /* We are omitting a frame to keep things right */
+ _log->log (String::compose ("Frame removed at %1s", t));
+ }
+
+ _last_image = image;
+ _last_subtitle = sub;
}
void
- Matcher::process_audio (shared_ptr<AudioBuffers> b)
-Matcher::process_audio (boost::shared_ptr<AudioBuffers> b, double t)
++Matcher::process_audio (shared_ptr<AudioBuffers> b, double t)
{
- Audio (b);
- _audio_frames += b->frames ();
-
_channels = b->channels ();
+
+ _log->log (String::compose ("Matcher audio @ %1 [video=%2, audio=%3, pending_audio=%4]", t, _video_frames, _audio_frames, _pending_audio.size()));
+
+ if (!_first_input) {
+ _first_input = t;
+ }
+
+ bool const this_is_first_audio = _had_first_audio;
+ _had_first_audio = true;
+
+ if (!_had_first_video) {
+ /* No video yet; we must postpone these data until we have some */
+ _pending_audio.push_back (AudioRecord (b, t));
+ } else if (this_is_first_audio && !_had_first_video) {
+ /* First audio since we got video */
+ _pending_audio.push_back (AudioRecord (b, t));
+ fix_start (_first_input.get ());
+ } else {
+ /* Normal running. We assume audio time stamps are consecutive */
+ Audio (b);
+ _audio_frames += b->frames ();
+ }
}
void
--- /dev/null
- , _video_sync (true)
+/*
+ Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include "player.h"
+#include "film.h"
+#include "ffmpeg_decoder.h"
+#include "ffmpeg_content.h"
+#include "imagemagick_decoder.h"
+#include "imagemagick_content.h"
+#include "sndfile_decoder.h"
+#include "sndfile_content.h"
+#include "playlist.h"
+#include "job.h"
+
+using std::list;
+using std::cout;
+using boost::shared_ptr;
+using boost::weak_ptr;
+using boost::dynamic_pointer_cast;
+
+Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
+ : _film (f)
+ , _playlist (p)
+ , _video (true)
+ , _audio (true)
+ , _subtitles (true)
+ , _have_valid_decoders (false)
- Audio (_audio_buffers);
+{
+ _playlist->Changed.connect (bind (&Player::playlist_changed, this));
+ _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2));
+}
+
+void
+Player::disable_video ()
+{
+ _video = false;
+}
+
+void
+Player::disable_audio ()
+{
+ _audio = false;
+}
+
+void
+Player::disable_subtitles ()
+{
+ _subtitles = false;
+}
+
+bool
+Player::pass ()
+{
+ if (!_have_valid_decoders) {
+ setup_decoders ();
+ _have_valid_decoders = true;
+ }
+
+ bool done = true;
+
+ if (_video_decoder != _video_decoders.end ()) {
+ if ((*_video_decoder)->pass ()) {
+ _video_decoder++;
+ }
+
+ if (_video_decoder != _video_decoders.end ()) {
+ done = false;
+ }
+ }
+
+ if (_playlist->audio_from() == Playlist::AUDIO_SNDFILE) {
+ for (list<shared_ptr<SndfileDecoder> >::iterator i = _sndfile_decoders.begin(); i != _sndfile_decoders.end(); ++i) {
+ if (!(*i)->pass ()) {
+ done = false;
+ }
+ }
+
- Player::process_video (shared_ptr<Image> i, bool same, shared_ptr<Subtitle> s)
++ Audio (_audio_buffers, _audio_time.get());
+ _audio_buffers.reset ();
++ _audio_time = boost::none;
+ }
+
+ return done;
+}
+
+void
+Player::set_progress (shared_ptr<Job> job)
+{
+ /* Assume progress can be divined from how far through the video we are */
+
+ if (_video_decoder == _video_decoders.end() || !_playlist->video_length()) {
+ return;
+ }
+
+ ContentVideoFrame p = 0;
+ list<shared_ptr<VideoDecoder> >::iterator i = _video_decoders.begin ();
+ while (i != _video_decoders.end() && i != _video_decoder) {
+ p += (*i)->video_length ();
+ }
+
+ job->set_progress (float ((*_video_decoder)->video_frame ()) / _playlist->video_length ());
+}
+
+void
- Video (i, same, s);
++Player::process_video (shared_ptr<Image> i, bool same, shared_ptr<Subtitle> s, double t)
+{
- Player::process_audio (weak_ptr<const AudioContent> c, shared_ptr<AudioBuffers> b)
++ /* XXX: this time will need mangling to add on the offset of the start of the content */
++ Video (i, same, s, t);
+}
+
+void
- Audio (_audio_buffers);
++Player::process_audio (weak_ptr<const AudioContent> c, shared_ptr<AudioBuffers> b, double t)
+{
++ /* XXX: this time will need mangling to add on the offset of the start of the content */
+ AudioMapping mapping = _film->audio_mapping ();
+ if (!_audio_buffers) {
+ _audio_buffers.reset (new AudioBuffers (mapping.dcp_channels(), b->frames ()));
+ _audio_buffers->make_silent ();
++ _audio_time = t;
+ }
+
+ for (int i = 0; i < b->channels(); ++i) {
+ list<libdcp::Channel> dcp = mapping.content_to_dcp (AudioMapping::Channel (c, i));
+ for (list<libdcp::Channel>::iterator j = dcp.begin(); j != dcp.end(); ++j) {
+ _audio_buffers->accumulate (b, i, static_cast<int> (*j));
+ }
+ }
+
+ if (_playlist->audio_from() == Playlist::AUDIO_FFMPEG) {
+ /* We can just emit this audio now as it will all be here */
- _subtitles,
- _video_sync
++ Audio (_audio_buffers, t);
+ _audio_buffers.reset ();
++ _audio_time = boost::none;
+ }
+}
+
+/** @return true on error */
+bool
+Player::seek (double t)
+{
+ if (!_have_valid_decoders) {
+ setup_decoders ();
+ _have_valid_decoders = true;
+ }
+
+ /* Find the decoder that contains this position */
+ _video_decoder = _video_decoders.begin ();
+ while (_video_decoder != _video_decoders.end ()) {
+ double const this_length = double ((*_video_decoder)->video_length()) / _film->video_frame_rate ();
+ if (t < this_length) {
+ break;
+ }
+ t -= this_length;
+ ++_video_decoder;
+ }
+
+ if (_video_decoder != _video_decoders.end()) {
+ (*_video_decoder)->seek (t);
+ } else {
+ return true;
+ }
+
+ /* XXX: don't seek audio because we don't need to... */
+
+ return false;
+}
+
++
++void
++Player::seek_back ()
++{
++ /* XXX */
++}
++
++void
++Player::seek_forward ()
++{
++ /* XXX */
++}
++
++
+void
+Player::setup_decoders ()
+{
+ _video_decoders.clear ();
+ _video_decoder = _video_decoders.end ();
+ _sndfile_decoders.clear ();
+
+ if (_video) {
+ list<shared_ptr<const VideoContent> > vc = _playlist->video ();
+ for (list<shared_ptr<const VideoContent> >::iterator i = vc.begin(); i != vc.end(); ++i) {
+
+ shared_ptr<VideoDecoder> d;
+
+ /* XXX: into content? */
+
+ shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
+ if (fc) {
+ shared_ptr<FFmpegDecoder> fd (
+ new FFmpegDecoder (
+ _film, fc, _video,
+ _audio && _playlist->audio_from() == Playlist::AUDIO_FFMPEG,
- fd->Audio.connect (bind (&Player::process_audio, this, fc, _1));
++ _subtitles
+ )
+ );
+
+ if (_playlist->audio_from() == Playlist::AUDIO_FFMPEG) {
- d->Audio.connect (bind (&Player::process_audio, this, *i, _1));
++ fd->Audio.connect (bind (&Player::process_audio, this, fc, _1, _2));
+ }
+
+ d = fd;
+ }
+
+ shared_ptr<const ImageMagickContent> ic = dynamic_pointer_cast<const ImageMagickContent> (*i);
+ if (ic) {
+ d.reset (new ImageMagickDecoder (_film, ic));
+ }
+
+ d->connect_video (shared_from_this ());
+ _video_decoders.push_back (d);
+ }
+
+ _video_decoder = _video_decoders.begin ();
+ }
+
+ if (_audio && _playlist->audio_from() == Playlist::AUDIO_SNDFILE) {
+ list<shared_ptr<const SndfileContent> > sc = _playlist->sndfile ();
+ for (list<shared_ptr<const SndfileContent> >::iterator i = sc.begin(); i != sc.end(); ++i) {
+ shared_ptr<SndfileDecoder> d (new SndfileDecoder (_film, *i));
+ _sndfile_decoders.push_back (d);
- void
- Player::disable_video_sync ()
- {
- _video_sync = false;
- }
-
++ d->Audio.connect (bind (&Player::process_audio, this, *i, _1, _2));
+ }
+ }
+}
+
+double
+Player::last_video_time () const
+{
+ double t = 0;
+ for (list<shared_ptr<VideoDecoder> >::const_iterator i = _video_decoders.begin(); i != _video_decoder; ++i) {
+ t += (*i)->video_length() / (*i)->video_frame_rate ();
+ }
+
+ return t + (*_video_decoder)->last_content_time ();
+}
+
+void
+Player::content_changed (weak_ptr<Content> w, int p)
+{
+ shared_ptr<Content> c = w.lock ();
+ if (!c) {
+ return;
+ }
+
+ if (p == VideoContentProperty::VIDEO_LENGTH) {
+ if (dynamic_pointer_cast<FFmpegContent> (c)) {
+ /* FFmpeg content length changes are serious; we need new decoders */
+ _have_valid_decoders = false;
+ }
+ }
+}
+
+void
+Player::playlist_changed ()
+{
+ _have_valid_decoders = false;
+}
--- /dev/null
- class Player : public VideoSource, public AudioSource, public VideoSink, public boost::enable_shared_from_this<Player>
+/*
+ Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef DCPOMATIC_PLAYER_H
+#define DCPOMATIC_PLAYER_H
+
+#include <list>
+#include <boost/shared_ptr.hpp>
+#include <boost/enable_shared_from_this.hpp>
+#include "video_source.h"
+#include "audio_source.h"
+#include "video_sink.h"
+#include "audio_sink.h"
+
+class VideoDecoder;
+class SndfileDecoder;
+class Job;
+class Film;
+class Playlist;
+class AudioContent;
+
- void disable_video_sync ();
++class Player : public TimedVideoSource, public TimedAudioSource, public TimedVideoSink, public boost::enable_shared_from_this<Player>
+{
+public:
+ Player (boost::shared_ptr<const Film>, boost::shared_ptr<const Playlist>);
+
+ void disable_video ();
+ void disable_audio ();
+ void disable_subtitles ();
- void process_video (boost::shared_ptr<Image> i, bool same, boost::shared_ptr<Subtitle> s);
- void process_audio (boost::weak_ptr<const AudioContent>, boost::shared_ptr<AudioBuffers>);
+
+ bool pass ();
+ void set_progress (boost::shared_ptr<Job>);
+ bool seek (double);
++ void seek_back ();
++ void seek_forward ();
+
+ double last_video_time () const;
+
+private:
-
- bool _video_sync;
++ void process_video (boost::shared_ptr<Image> i, bool same, boost::shared_ptr<Subtitle> s, double);
++ void process_audio (boost::weak_ptr<const AudioContent>, boost::shared_ptr<AudioBuffers>, double);
+ void setup_decoders ();
+ void playlist_changed ();
+ void content_changed (boost::weak_ptr<Content>, int);
+
+ boost::shared_ptr<const Film> _film;
+ boost::shared_ptr<const Playlist> _playlist;
+
+ bool _video;
+ bool _audio;
+ bool _subtitles;
+
+ bool _have_valid_decoders;
+ std::list<boost::shared_ptr<VideoDecoder> > _video_decoders;
+ std::list<boost::shared_ptr<VideoDecoder> >::iterator _video_decoder;
+ std::list<boost::shared_ptr<SndfileDecoder> > _sndfile_decoders;
+
+ boost::shared_ptr<AudioBuffers> _audio_buffers;
++ boost::optional<double> _audio_time;
+};
+
+#endif
using std::min;
using std::cout;
using boost::shared_ptr;
-using boost::optional;
-SndfileDecoder::SndfileDecoder (shared_ptr<Film> f, DecodeOptions o)
- : Decoder (f, o)
- , AudioDecoder (f, o)
+SndfileDecoder::SndfileDecoder (shared_ptr<const Film> f, shared_ptr<const SndfileContent> c)
+ : Decoder (f)
+ , AudioDecoder (f)
+ , _sndfile_content (c)
{
- sf_count_t frames;
- vector<SNDFILE*> sf = open_files (frames);
- close_files (sf);
-}
-
-vector<SNDFILE*>
-SndfileDecoder::open_files (sf_count_t & frames)
-{
- vector<string> const files = _film->external_audio ();
-
- int N = 0;
- for (size_t i = 0; i < files.size(); ++i) {
- if (!files[i].empty()) {
- N = i + 1;
- }
+ _sndfile = sf_open (_sndfile_content->file().string().c_str(), SFM_READ, &_info);
+ if (!_sndfile) {
+ throw DecodeError (_("could not open audio file for reading"));
}
- if (N == 0) {
- return vector<SNDFILE*> ();
- }
-
- bool first = true;
- frames = 0;
-
- vector<SNDFILE*> sndfiles;
- for (size_t i = 0; i < (size_t) N; ++i) {
- if (files[i].empty ()) {
- sndfiles.push_back (0);
- } else {
- SF_INFO info;
- SNDFILE* s = sf_open (files[i].c_str(), SFM_READ, &info);
- if (!s) {
- throw DecodeError (_("could not open external audio file for reading"));
- }
-
- if (info.channels != 1) {
- throw DecodeError (_("external audio files must be mono"));
- }
-
- sndfiles.push_back (s);
++ _done = 0;
+ _remaining = _info.frames;
+}
- if (first) {
- shared_ptr<SndfileStream> st (
- new SndfileStream (
- info.samplerate, av_get_default_channel_layout (N)
- )
- );
-
- _audio_streams.push_back (st);
- _audio_stream = st;
- frames = info.frames;
- first = false;
- } else {
- if (info.frames != frames) {
- throw DecodeError (_("external audio files have differing lengths"));
- }
- }
- }
+SndfileDecoder::~SndfileDecoder ()
+{
+ if (_sndfile) {
+ sf_close (_sndfile);
}
-
- return sndfiles;
}
bool
/* Do things in half second blocks as I think there may be limits
to what FFmpeg (and in particular the resampler) can cope with.
*/
- sf_count_t const block = _audio_stream->sample_rate() / 2;
- shared_ptr<AudioBuffers> audio (new AudioBuffers (_audio_stream->channels(), block));
- sf_count_t done = 0;
- while (frames > 0) {
- sf_count_t const this_time = min (block, frames);
- for (size_t i = 0; i < sndfiles.size(); ++i) {
- if (!sndfiles[i]) {
- audio->make_silent (i);
- } else {
- sf_read_float (sndfiles[i], audio->data(i), block);
- }
- }
-
- audio->set_frames (this_time);
- Audio (audio, double(done) / _audio_stream->sample_rate());
- done += this_time;
- frames -= this_time;
- }
-
- close_files (sndfiles);
-
- return true;
-}
-
-void
-SndfileDecoder::close_files (vector<SNDFILE*> const & sndfiles)
-{
- for (size_t i = 0; i < sndfiles.size(); ++i) {
- sf_close (sndfiles[i]);
- }
-}
-
-shared_ptr<SndfileStream>
-SndfileStream::create ()
-{
- return shared_ptr<SndfileStream> (new SndfileStream);
-}
-
-shared_ptr<SndfileStream>
-SndfileStream::create (string t, optional<int> v)
-{
- if (!v) {
- /* version < 1; no type in the string, and there's only FFmpeg streams anyway */
- return shared_ptr<SndfileStream> ();
- }
-
- stringstream s (t);
- string type;
- s >> type;
- if (type != N_("external")) {
- return shared_ptr<SndfileStream> ();
- }
-
- return shared_ptr<SndfileStream> (new SndfileStream (t, v));
+ sf_count_t const block = _sndfile_content->audio_frame_rate() / 2;
+ sf_count_t const this_time = min (block, _remaining);
+
+ shared_ptr<AudioBuffers> audio (new AudioBuffers (_sndfile_content->audio_channels(), this_time));
+ sf_read_float (_sndfile, audio->data(0), this_time);
+ audio->set_frames (this_time);
- Audio (audio);
++ Audio (audio, double(_done) / audio_frame_rate());
++ _done += this_time;
+ _remaining -= this_time;
+
+ return (_remaining == 0);
}
-SndfileStream::SndfileStream (string t, optional<int> v)
+int
+SndfileDecoder::audio_channels () const
{
- assert (v);
-
- stringstream s (t);
- string type;
- s >> type >> _sample_rate >> _channel_layout;
+ return _info.channels;
}
-SndfileStream::SndfileStream ()
+ContentAudioFrame
+SndfileDecoder::audio_length () const
{
-
+ return _info.frames;
}
-string
-SndfileStream::to_string () const
+int
+SndfileDecoder::audio_frame_rate () const
{
- return String::compose (N_("external %1 %2"), _sample_rate, _channel_layout);
+ return _info.samplerate;
}
bool pass ();
+ int audio_channels () const;
+ ContentAudioFrame audio_length () const;
+ int audio_frame_rate () const;
+
private:
- std::vector<SNDFILE*> open_files (sf_count_t &);
- void close_files (std::vector<SNDFILE*> const &);
+ SNDFILE* open_file (sf_count_t &);
+ void close_file (SNDFILE*);
+
+ boost::shared_ptr<const SndfileContent> _sndfile_content;
+ SNDFILE* _sndfile;
+ SF_INFO _info;
++ ContentAudioFrame _done;
+ ContentAudioFrame _remaining;
};
* @param j Job that we are running under, or 0.
* @param e Encoder to use.
*/
-Transcoder::Transcoder (shared_ptr<Film> f, DecodeOptions o, Job* j, shared_ptr<Encoder> e)
+Transcoder::Transcoder (shared_ptr<Film> f, shared_ptr<Job> j)
: _job (j)
- , _encoder (e)
- , _decoders (decoder_factory (f, o))
+ , _player (f->player ())
+ , _encoder (new Encoder (f))
{
- if (f->has_audio ()) {
- _matcher.reset (new Matcher (f->log(), f->audio_frame_rate(), f->video_frame_rate()));
- _delay_line.reset (new DelayLine (f->log(), f->audio_channels(), f->audio_delay() * f->audio_frame_rate() / 1000));
- _gain.reset (new Gain (f->log(), f->audio_gain()));
- }
- assert (_encoder);
-
- shared_ptr<AudioStream> st = f->audio_stream();
- _matcher.reset (new Matcher (f->log(), st->sample_rate(), f->source_frame_rate()));
- _delay_line.reset (new DelayLine (f->log(), f->audio_delay() / 1000.0f));
++ _matcher.reset (new Matcher (f->log(), f->audio_frame_rate(), f->video_frame_rate()));
++ _delay_line.reset (new DelayLine (f->log(), f->audio_delay() * f->audio_frame_rate() / 1000));
+ _gain.reset (new Gain (f->log(), f->audio_gain()));
- /* Set up the decoder to use the film's set streams */
- _decoders.video->set_subtitle_stream (f->subtitle_stream ());
- _decoders.audio->set_audio_stream (f->audio_stream ());
+ if (!f->with_subtitles ()) {
+ _player->disable_subtitles ();
+ }
- if (_matcher) {
- _player->connect_video (_matcher);
- _matcher->connect_video (_encoder);
- } else {
- _player->connect_video (_encoder);
- }
- _decoders.video->connect_video (_delay_line);
++ _player->connect_video (_delay_line);
+ _delay_line->connect_video (_matcher);
+ _matcher->connect_video (_encoder);
- if (_matcher && _delay_line && f->has_audio ()) {
- _player->connect_audio (_delay_line);
- _delay_line->connect_audio (_matcher);
- _matcher->connect_audio (_gain);
- _gain->connect_audio (_encoder);
- }
- _decoders.audio->connect_audio (_delay_line);
++ _player->connect_audio (_delay_line);
+ _delay_line->connect_audio (_matcher);
+ _matcher->connect_audio (_gain);
+ _gain->connect_audio (_encoder);
}
-/** Run the decoder, passing its output to the encoder, until the decoder
- * has no more data to present.
- */
void
Transcoder::go ()
{
_encoder->process_begin ();
- try {
- bool done[2] = { false, false };
-
- while (1) {
- if (!done[0]) {
- done[0] = _decoders.video->pass ();
- if (_job) {
- _decoders.video->set_progress (_job);
- }
- }
-
- if (!done[1] && _decoders.audio && dynamic_pointer_cast<Decoder> (_decoders.audio) != dynamic_pointer_cast<Decoder> (_decoders.video)) {
- done[1] = _decoders.audio->pass ();
- } else {
- done[1] = true;
- }
-
- if (done[0] && done[1]) {
- break;
- }
+ while (1) {
+ if (_player->pass ()) {
+ break;
}
-
- } catch (...) {
- _encoder->process_end ();
- throw;
+ _player->set_progress (_job);
}
-
+
- if (_delay_line) {
- _delay_line->process_end ();
- }
- if (_matcher) {
- _matcher->process_end ();
- }
- if (_gain) {
- _gain->process_end ();
- }
+ _delay_line->process_end ();
+ _matcher->process_end ();
+ _gain->process_end ();
_encoder->process_end ();
}
+
+float
+Transcoder::current_encoding_rate () const
+{
+ return _encoder->current_encoding_rate ();
+}
+
+int
+Transcoder::video_frames_out () const
+{
+ return _encoder->video_frames_out ();
+}
float** _data;
};
-class AudioMapping
-{
-public:
- AudioMapping (int);
-
- boost::optional<libdcp::Channel> source_to_dcp (int c) const;
- boost::optional<int> dcp_to_source (libdcp::Channel c) const;
- int dcp_channels () const;
-
-private:
- int _source_channels;
-};
-
-extern int64_t video_frames_to_audio_frames (SourceFrame v, float audio_sample_rate, float frames_per_second);
-extern bool still_image_file (std::string);
+extern int64_t video_frames_to_audio_frames (ContentVideoFrame v, float audio_sample_rate, float frames_per_second);
extern std::pair<std::string, int> cpu_info ();
+ class LocaleGuard
+ {
+ public:
+ LocaleGuard ();
+ ~LocaleGuard ();
+
+ private:
+ char* _old;
+ };
+
+
#endif
sub = _timed_subtitle->subtitle ();
}
- signal_video (image, false, sub, t);
- }
-
- bool
- VideoDecoder::have_last_video () const
- {
- return _last_image;
- }
-
- /** Called by subclasses to repeat the last video frame that we
- * passed to emit_video(). If emit_video hasn't yet been called,
- * we will generate a black frame.
- */
- void
- VideoDecoder::repeat_last_video (double t)
- {
- if (!_last_image) {
- _last_image.reset (new SimpleImage (pixel_format(), native_size(), true));
- _last_image->make_black ();
- }
-
- signal_video (_last_image, true, _last_subtitle, t);
- }
-
- /** Emit our signal to say that some video data is ready.
- * @param image Video frame.
- * @param same true if `image' is the same as the last one we emitted.
- * @param sub Subtitle for this frame, or 0.
- */
- void
- VideoDecoder::signal_video (shared_ptr<Image> image, bool same, shared_ptr<Subtitle> sub, double t)
- {
+ TIMING (N_("Decoder emits %1"), _video_frame);
- Video (image, same, sub);
+ Video (image, same, sub, t);
++_video_frame;
-
- _last_source_time = t;
+
- _last_image = image;
- _last_subtitle = sub;
+ _last_content_time = t;
}
/** Set up the current subtitle. This will be put onto frames that
*/
-#ifndef DVDOMATIC_VIDEO_DECODER_H
-#define DVDOMATIC_VIDEO_DECODER_H
+#ifndef DCPOMATIC_VIDEO_DECODER_H
+#define DCPOMATIC_VIDEO_DECODER_H
#include "video_source.h"
-#include "stream.h"
#include "decoder.h"
- class VideoDecoder : public VideoSource, public virtual Decoder
+class VideoContent;
+
+ class VideoDecoder : public TimedVideoSource, public virtual Decoder
{
public:
- VideoDecoder (boost::shared_ptr<Film>, DecodeOptions);
+ VideoDecoder (boost::shared_ptr<const Film>);
- /** @return video frames per second, or 0 if unknown */
- virtual float frames_per_second () const = 0;
+ /** @return video frame rate second, or 0 if unknown */
+ virtual float video_frame_rate () const = 0;
/** @return native size in pixels */
virtual libdcp::Size native_size () const = 0;
- /** @return length (in source video frames), according to our content's header */
- virtual SourceFrame length () const = 0;
+ /** @return length according to our content's header */
+ virtual ContentVideoFrame video_length () const = 0;
virtual int time_base_numerator () const = 0;
virtual int time_base_denominator () const = 0;
virtual PixelFormat pixel_format () const = 0;
- void emit_video (boost::shared_ptr<Image>, double);
+ void emit_video (boost::shared_ptr<Image>, bool, double);
void emit_subtitle (boost::shared_ptr<TimedSubtitle>);
- bool have_last_video () const;
- void repeat_last_video (double);
- /** Subtitle stream to use when decoding */
- boost::shared_ptr<SubtitleStream> _subtitle_stream;
- /** Subtitle streams that this decoder's content has */
- std::vector<boost::shared_ptr<SubtitleStream> > _subtitle_streams;
-
private:
- void signal_video (boost::shared_ptr<Image>, bool, boost::shared_ptr<Subtitle>, double);
-
int _video_frame;
- double _last_source_time;
+ double _last_content_time;
boost::shared_ptr<TimedSubtitle> _timed_subtitle;
-
- boost::shared_ptr<Image> _last_image;
- boost::shared_ptr<Subtitle> _last_subtitle;
};
#endif
void
VideoSource::connect_video (shared_ptr<VideoSink> s)
{
- Video.connect (bind (&VideoSink::process_video, s, _1, _2, _3));
+ /* If we bind, say, a Playlist (as the VideoSink) to a Decoder (which is owned
+ by the Playlist) we create a cycle. Use a weak_ptr to break it.
+ */
+ Video.connect (bind (process_video_proxy, boost::weak_ptr<VideoSink> (s), _1, _2, _3));
}
+
+ void
+ TimedVideoSource::connect_video (shared_ptr<TimedVideoSink> s)
+ {
+ Video.connect (bind (&TimedVideoSink::process_video, s, _1, _2, _3, _4));
+ }
update_from_raw ();
break;
case Film::CONTENT:
-- {
- DecodeOptions o;
- o.decode_audio = false;
- o.decode_subtitles = true;
- o.video_sync = false;
-
- try {
- _decoders = decoder_factory (_film, o);
- } catch (StringError& e) {
- error_dialog (this, wxString::Format (_("Could not open content file (%s)"), std_to_wx(e.what()).data()));
- return;
- }
-
- if (_decoders.video == 0) {
- break;
- }
- _decoders.video->Video.connect (bind (&FilmViewer::process_video, this, _1, _2, _3, _4));
- _decoders.video->OutputChanged.connect (boost::bind (&FilmViewer::decoder_changed, this));
- _decoders.video->set_subtitle_stream (_film->subtitle_stream());
calculate_sizes ();
get_frame ();
_panel->Refresh ();
- _slider->Show (_film->content_type() == VIDEO);
- _play_button->Show (_film->content_type() == VIDEO);
_v_sizer->Layout ();
break;
-- }
case Film::WITH_SUBTITLES:
case Film::SUBTITLE_OFFSET:
case Film::SUBTITLE_SCALE:
return;
}
- _player->disable_video_sync ();
+ _player = f->player ();
+ _player->disable_audio ();
- _player->Video.connect (bind (&FilmViewer::process_video, this, _1, _2, _3));
+ /* Don't disable subtitles here as we may need them, and it's nice to be able to turn them
+ on and off without needing obtain a new Player.
+ */
+
++ _player->Video.connect (bind (&FilmViewer::process_video, this, _1, _2, _3, _4));
+
_film->Changed.connect (boost::bind (&FilmViewer::film_changed, this, _1));
+ _film->ContentChanged.connect (boost::bind (&FilmViewer::film_content_changed, this, _1, _2));
film_changed (Film::CONTENT);
film_changed (Film::FORMAT);
raw_to_display ();
_got_frame = true;
- double const fps = _decoders.video->frames_per_second ();
+
++ double const fps = _film->video_frame_rate ();
+ _frame->SetLabel (wxString::Format (wxT("%d"), int (rint (t * fps))));
+
+ double w = t;
+ int const h = (w / 3600);
+ w -= h * 3600;
+ int const m = (w / 60);
+ w -= m * 60;
+ int const s = floor (w);
+ w -= s;
+ int const f = rint (w * fps);
+ _timecode->SetLabel (wxString::Format (wxT("%02d:%02d:%02d:%02d"), h, m, s, f));
}
+/** Get a new _raw_frame from the decoder and then do
+ * raw_to_display ().
+ */
void
FilmViewer::get_frame ()
{
_play_button->Enable (!a);
}
- if (!_decoders.video) {
+void
+FilmViewer::film_content_changed (weak_ptr<Content>, int p)
+{
+ if (p == VideoContentProperty::VIDEO_LENGTH) {
+ /* Force an update to our frame */
+ wxScrollEvent ev;
+ slider_moved (ev);
+ }
+}
++
+ void
+ FilmViewer::back_clicked (wxCommandEvent &)
+ {
- _decoders.video->seek_back ();
++ if (!_player) {
+ return;
+ }
+
- if (!_decoders.video) {
++ _player->seek_back ();
+ get_frame ();
+ _panel->Refresh ();
+ _panel->Update ();
+ }
+
+ void
+ FilmViewer::forward_clicked (wxCommandEvent &)
+ {
- _decoders.video->seek_forward ();
++ if (!_player) {
+ return;
+ }
+
++ _player->seek_forward ();
+ get_frame ();
+ _panel->Refresh ();
+ _panel->Update ();
+ }
void raw_to_display ();
void get_frame ();
void active_jobs_changed (bool);
+ void back_clicked (wxCommandEvent &);
+ void forward_clicked (wxCommandEvent &);
boost::shared_ptr<Film> _film;
+ boost::shared_ptr<Player> _player;
wxSizer* _v_sizer;
wxPanel* _panel;
f = Format::from_nickname ("Scope");
BOOST_CHECK (f);
- // BOOST_CHECK_EQUAL (f->ratio_as_integer(shared_ptr<const Film> ()), 239);
+ BOOST_CHECK_EQUAL (f->dcp_size().width, 2048);
+ BOOST_CHECK_EQUAL (f->dcp_size().height, 858);
}
-/* Test VariableFormat-based scaling of content */
-BOOST_AUTO_TEST_CASE (scaling_test)
-{
- shared_ptr<Film> film (new Film (test_film_dir ("scaling_test").string(), false));
-
- /* 4:3 ratio */
- film->set_size (libdcp::Size (320, 240));
-
- /* This format should preserve aspect ratio of the source */
- Format const * format = Format::from_id ("var-185");
-
- /* We should have enough padding that the result is 4:3,
- which would be 1440 pixels.
- */
- BOOST_CHECK_EQUAL (format->dcp_padding (film), (1998 - 1440) / 2);
-
- /* This crops it to 1.291666667 */
- film->set_left_crop (5);
- film->set_right_crop (5);
-
- /* We should now have enough padding that the result is 1.29166667,
- which would be 1395 pixels.
- */
- BOOST_CHECK_EQUAL (format->dcp_padding (film), rint ((1998 - 1395) / 2.0));
-}
-
BOOST_AUTO_TEST_CASE (util_test)
{
string t = "Hello this is a string \"with quotes\" and indeed without them";
import os
import sys
-APPNAME = 'dvdomatic'
+APPNAME = 'dcpomatic'
- VERSION = '0.84pre'
+ VERSION = '0.85pre'
def options(opt):
opt.load('compiler_cxx')
conf.env.append_value('CXXFLAGS', '-O2')
if not conf.options.static:
- conf.check_cfg(package = 'libdcp', atleast_version = '0.41', args = '--cflags --libs', uselib_store = 'DCP', mandatory = True)
+ conf.check_cfg(package = 'libdcp', atleast_version = '0.45', args = '--cflags --libs', uselib_store = 'DCP', mandatory = True)
+ conf.check_cfg(package = 'libcxml', atleast_version = '0.01', args = '--cflags --libs', uselib_store = 'CXML', mandatory = True)
conf.check_cfg(package = 'libavformat', args = '--cflags --libs', uselib_store = 'AVFORMAT', mandatory = True)
conf.check_cfg(package = 'libavfilter', args = '--cflags --libs', uselib_store = 'AVFILTER', mandatory = True)
conf.check_cfg(package = 'libavcodec', args = '--cflags --libs', uselib_store = 'AVCODEC', mandatory = True)