bool done = true;
if (_video_decoder != _video_decoders.end ()) {
+
+ /* Run video decoder; this may also produce audio */
+
if ((*_video_decoder)->pass ()) {
_video_decoder++;
}
if (_video_decoder != _video_decoders.end ()) {
done = false;
}
- }
+
+ } else if (!_video && _playlist->audio_from() == Playlist::AUDIO_FFMPEG && _sequential_audio_decoder != _audio_decoders.end ()) {
- if (_playlist->audio_from() == Playlist::AUDIO_SNDFILE) {
- for (list<shared_ptr<SndfileDecoder> >::iterator i = _sndfile_decoders.begin(); i != _sndfile_decoders.end(); ++i) {
+ /* We're not producing video, so we may need to run FFmpeg content to get the audio */
+
+ if ((*_sequential_audio_decoder)->pass ()) {
+ _sequential_audio_decoder++;
+ }
+
+ if (_sequential_audio_decoder != _audio_decoders.end ()) {
+ done = false;
+ }
+
+ } else if (_playlist->audio_from() == Playlist::AUDIO_SNDFILE) {
+
+ /* We're getting audio from SndfileContent */
+
+ for (list<shared_ptr<AudioDecoder> >::iterator i = _audio_decoders.begin(); i != _audio_decoders.end(); ++i) {
if (!(*i)->pass ()) {
done = false;
}
{
_video_decoders.clear ();
_video_decoder = _video_decoders.end ();
- _sndfile_decoders.clear ();
+ _audio_decoders.clear ();
if (_video) {
list<shared_ptr<const VideoContent> > vc = _playlist->video ();
_video_decoder = _video_decoders.begin ();
}
- if (_audio && _playlist->audio_from() == Playlist::AUDIO_SNDFILE) {
- list<shared_ptr<const SndfileContent> > sc = _playlist->sndfile ();
- for (list<shared_ptr<const SndfileContent> >::iterator i = sc.begin(); i != sc.end(); ++i) {
- shared_ptr<SndfileDecoder> d (new SndfileDecoder (_film, *i));
- _sndfile_decoders.push_back (d);
- d->Audio.connect (bind (&Player::process_audio, this, *i, _1, _2));
+ if (_playlist->audio_from() == Playlist::AUDIO_FFMPEG && !_video) {
+
+ /* If we're getting audio from FFmpegContent but not the video, we need a set
+ of decoders for the audio.
+ */
+
+ list<shared_ptr<const AudioContent> > ac = _playlist->audio ();
+ for (list<shared_ptr<const AudioContent> >::iterator i = ac.begin(); i != ac.end(); ++i) {
+
+ shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
+ assert (fc);
+
+ shared_ptr<AudioDecoder> d (
+ new FFmpegDecoder (
+ _film, fc, _video,
+ _audio && _playlist->audio_from() == Playlist::AUDIO_FFMPEG,
+ _subtitles
+ )
+ );
+
+ d->Audio.connect (bind (&Player::process_audio, this, fc, _1, _2));
+ _audio_decoders.push_back (d);
+ }
+
+ _sequential_audio_decoder = _audio_decoders.begin ();
+ }
+
+ if (_playlist->audio_from() == Playlist::AUDIO_SNDFILE) {
+
+ list<shared_ptr<const AudioContent> > ac = _playlist->audio ();
+ for (list<shared_ptr<const AudioContent> >::iterator i = ac.begin(); i != ac.end(); ++i) {
+
+ shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
+ assert (sc);
+
+ shared_ptr<AudioDecoder> d (new SndfileDecoder (_film, sc));
+ d->Audio.connect (bind (&Player::process_audio, this, sc, _1, _2));
+ _audio_decoders.push_back (d);
}
}
}
_audio_from = AUDIO_FFMPEG;
_video.clear ();
- _sndfile.clear ();
+ _audio.clear ();
for (list<boost::signals2::connection>::iterator i = _content_connections.begin(); i != _content_connections.end(); ++i) {
i->disconnect ();
_content_connections.clear ();
for (ContentList::const_iterator i = content.begin(); i != content.end(); ++i) {
+
+ /* Video is video */
shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*i);
if (vc) {
_video.push_back (vc);
}
-
+
+ /* FFmpegContent is audio if we are doing AUDIO_FFMPEG */
+ shared_ptr<FFmpegContent> fc = dynamic_pointer_cast<FFmpegContent> (*i);
+ if (fc && _audio_from == AUDIO_FFMPEG) {
+ _audio.push_back (fc);
+ }
+
+ /* SndfileContent trumps FFmpegContent for audio */
shared_ptr<SndfileContent> sc = dynamic_pointer_cast<SndfileContent> (*i);
if (sc) {
- _sndfile.push_back (sc);
- _audio_from = AUDIO_SNDFILE;
+ if (_audio_from == AUDIO_FFMPEG) {
+				/* This is our first SndfileContent; clear any FFmpegContent and
+ say that we are using Sndfile.
+ */
+ _audio.clear ();
+ _audio_from = AUDIO_SNDFILE;
+ }
+
+ _audio.push_back (sc);
}
_content_connections.push_back ((*i)->Changed.connect (bind (&Playlist::content_changed, this, _1, _2)));
Changed ();
}
+/** @return Total length of our audio, in frames at the audio content's frame rate */
ContentAudioFrame
Playlist::audio_length () const
{
	ContentAudioFrame len = 0;
-	
+
	switch (_audio_from) {
	case AUDIO_FFMPEG:
+		/* FFmpeg content plays sequentially, so the total is the sum of the lengths */
+		for (list<shared_ptr<const AudioContent> >::const_iterator i = _audio.begin(); i != _audio.end(); ++i) {
+			len += (*i)->audio_length ();
		}
		break;
	case AUDIO_SNDFILE:
-		for (list<shared_ptr<const SndfileContent> >::const_iterator i = _sndfile.begin(); i != _sndfile.end(); ++i) {
-			len += (*i)->audio_length ();
+		/* Sndfile content plays simultaneously, so the total is the longest single piece */
+		for (list<shared_ptr<const AudioContent> >::const_iterator i = _audio.begin(); i != _audio.end(); ++i) {
+			len = max (len, (*i)->audio_length ());
		}
		break;
	}
	return len;
}
+/** @return number of audio channels */
int
Playlist::audio_channels () const
{
switch (_audio_from) {
case AUDIO_FFMPEG:
- for (list<shared_ptr<const VideoContent> >::const_iterator i = _video.begin(); i != _video.end(); ++i) {
- shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
- if (fc) {
- channels = max (channels, fc->audio_channels ());
- }
+ /* FFmpeg audio is sequential, so use the maximum channel count */
+ for (list<shared_ptr<const AudioContent> >::const_iterator i = _audio.begin(); i != _audio.end(); ++i) {
+ channels = max (channels, (*i)->audio_channels ());
}
break;
case AUDIO_SNDFILE:
- for (list<shared_ptr<const SndfileContent> >::const_iterator i = _sndfile.begin(); i != _sndfile.end(); ++i) {
+ /* Sndfile audio is simultaneous, so it's the sum of the channel counts */
+ for (list<shared_ptr<const AudioContent> >::const_iterator i = _audio.begin(); i != _audio.end(); ++i) {
channels += (*i)->audio_channels ();
}
break;
+/** @return Frame rate of our audio in frames per second, or 0 if we have no audio content */
int
Playlist::audio_frame_rate () const
{
-	/* XXX: assuming that all content has the same rate */
-
-	switch (_audio_from) {
-	case AUDIO_FFMPEG:
-	{
-		shared_ptr<const FFmpegContent> fc = first_ffmpeg ();
-		if (fc) {
-			return fc->audio_frame_rate ();
-		}
-		break;
-	}
-	case AUDIO_SNDFILE:
-		return _sndfile.front()->audio_frame_rate ();
+	if (_audio.empty ()) {
+		return 0;
	}
-	return 0;
+	/* XXX: assuming that all content has the same rate */
+	return _audio.front()->audio_frame_rate ();
}
float
+/** @return true if the playlist contains any audio content */
bool
Playlist::has_audio () const
{
-	if (!_sndfile.empty ()) {
-		return true;
-	}
-
-	for (list<shared_ptr<const VideoContent> >::const_iterator i = _video.begin(); i != _video.end(); ++i) {
-		shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
-		if (fc && fc->audio_stream ()) {
-			return true;
-		}
-	}
-
-	return false;
+	return !_audio.empty ();
}
void
ContentChanged (c, p);
}
-shared_ptr<const FFmpegContent>
-Playlist::first_ffmpeg () const
-{
- for (list<shared_ptr<const VideoContent> >::const_iterator i = _video.begin(); i != _video.end(); ++i) {
- shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
- if (fc) {
- return fc;
- }
- }
-
- return shared_ptr<const FFmpegContent> ();
-}
-
-
AudioMapping
Playlist::default_audio_mapping () const
{
AudioMapping m;
+ if (_audio.empty ()) {
+ return m;
+ }
switch (_audio_from) {
case AUDIO_FFMPEG:
{
- shared_ptr<const FFmpegContent> fc = first_ffmpeg ();
- if (!fc) {
- break;
- }
-
/* XXX: assumes all the same */
- if (fc->audio_channels() == 1) {
+ if (_audio.front()->audio_channels() == 1) {
/* Map mono sources to centre */
- m.add (AudioMapping::Channel (fc, 0), libdcp::CENTRE);
+ m.add (AudioMapping::Channel (_audio.front(), 0), libdcp::CENTRE);
} else {
- int const N = min (fc->audio_channels (), MAX_AUDIO_CHANNELS);
+ int const N = min (_audio.front()->audio_channels (), MAX_AUDIO_CHANNELS);
/* Otherwise just start with a 1:1 mapping */
for (int i = 0; i < N; ++i) {
- m.add (AudioMapping::Channel (fc, i), (libdcp::Channel) i);
+ m.add (AudioMapping::Channel (_audio.front(), i), (libdcp::Channel) i);
}
}
break;
case AUDIO_SNDFILE:
{
int n = 0;
- for (list<shared_ptr<const SndfileContent> >::const_iterator i = _sndfile.begin(); i != _sndfile.end(); ++i) {
+ for (list<shared_ptr<const AudioContent> >::const_iterator i = _audio.begin(); i != _audio.end(); ++i) {
for (int j = 0; j < (*i)->audio_channels(); ++j) {
m.add (AudioMapping::Channel (*i, j), (libdcp::Channel) n);
++n;
{
string t;
- switch (_audio_from) {
- case AUDIO_FFMPEG:
- for (list<shared_ptr<const VideoContent> >::const_iterator i = _video.begin(); i != _video.end(); ++i) {
- shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
- if (fc) {
- t += (*i)->digest ();
- t += lexical_cast<string> (fc->audio_stream()->id);
- }
- }
- break;
- case AUDIO_SNDFILE:
- for (list<shared_ptr<const SndfileContent> >::const_iterator i = _sndfile.begin(); i != _sndfile.end(); ++i) {
- t += (*i)->digest ();
+ for (list<shared_ptr<const AudioContent> >::const_iterator i = _audio.begin(); i != _audio.end(); ++i) {
+ t += (*i)->digest ();
+
+ shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
+ if (fc) {
+ t += lexical_cast<string> (fc->audio_stream()->id);
}
- break;
}
return md5_digest (t.c_str(), t.length());
class Job;
class Film;
+/** @class Playlist
+ * @brief A set of content files (video and audio), with knowledge of how they should be arranged into
+ * a DCP.
+ *
+ * This class holds Content objects, and it knows how they should be arranged. At the moment
+ * the ordering is implicit; video content is placed sequentially, and audio content is taken
+ * from the video unless any sound-only files are present. If sound-only files exist, they
+ * are played simultaneously (i.e. they can be split up into multiple files for different channels)
+ */
+
class Playlist
{
public:
return _video;
}
- std::list<boost::shared_ptr<const SndfileContent> > sndfile () const {
- return _sndfile;
+ std::list<boost::shared_ptr<const AudioContent> > audio () const {
+ return _audio;
}
std::string audio_digest () const;
private:
void content_changed (boost::weak_ptr<Content>, int);
- boost::shared_ptr<const FFmpegContent> first_ffmpeg () const;
-
+
+ /** where we should get our audio from */
AudioFrom _audio_from;
+ /** all our content which contains video */
std::list<boost::shared_ptr<const VideoContent> > _video;
- std::list<boost::shared_ptr<const SndfileContent> > _sndfile;
+ /** all our content which contains audio. This may contain the same objects
+ * as _video for FFmpegContent.
+ */
+ std::list<boost::shared_ptr<const AudioContent> > _audio;
std::list<boost::signals2::connection> _content_connections;
};