optional<ContentTime>
AudioDecoder::position () const
{
+	/* Our position is the minimum of the positions recorded for each stream
+	 * (via set_position); unset if no stream has reported a position yet.
+	 */
-	optional<ContentTime> pos;
-	for (StreamMap::const_iterator i = _streams.begin(); i != _streams.end(); ++i) {
-		if (!pos || (i->second->position() && i->second->position().get() < pos.get())) {
-			pos = i->second->position();
+	optional<ContentTime> p;
+	for (map<AudioStreamPtr, ContentTime>::const_iterator i = _positions.begin(); i != _positions.end(); ++i) {
+		if (!p || i->second < *p) {
+			p = i->second;
		}
	}
-	return pos;
+
+	return p;
+}
+
+/** Record the current decode position of one of our streams.
+ *  Called by the decoders (e.g. DCP / FFmpeg) as they emit audio.
+ */
+void
+AudioDecoder::set_position (AudioStreamPtr stream, ContentTime time)
+{
+	_positions[stream] = time;
}
public:
AudioDecoder (Decoder* parent, boost::shared_ptr<const AudioContent>, boost::shared_ptr<Log> log);
+ boost::optional<ContentTime> position () const;
+ void set_position (AudioStreamPtr stream, ContentTime position);
+
void set_fast ();
void flush ();
void emit (AudioStreamPtr stream, boost::shared_ptr<const AudioBuffers>, ContentTime);
- boost::signals2::signal<void (ContentAudio)> Data;
+ boost::signals2::signal<void (AudioStreamPtr, ContentAudio)> Data;
boost::optional<ContentTime> position () const;
private:
/** An AudioDecoderStream object to manage each stream in _audio_content */
- typedef std::map<AudioStreamPtr, boost::shared_ptr<AudioDecoderStream> > StreamMap;
- StreamMap _streams;
+ std::map<AudioStreamPtr, boost::shared_ptr<AudioDecoderStream> > _streams;
+ std::map<AudioStreamPtr, ContentTime> _positions;
};
#endif
--- /dev/null
+/*
+ Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include "audio_merger.h"
+#include "dcpomatic_time.h"
+
+using std::pair;
+using std::min;
+using std::max;
+using std::make_pair;
+using boost::shared_ptr;
+
+/** @param channels Number of channels in the merged output.
+ *  @param frame_rate Audio frame rate of the merged output.
+ */
+AudioMerger::AudioMerger (int channels, int frame_rate)
+	: _buffers (new AudioBuffers (channels, 0))
+	, _last_pull (0)
+	, _frame_rate (frame_rate)
+{
+
+}
+
+/** Pull audio up to a given time; after this call, no more data can be pushed
+ *  before the specified time.
+ *  @param time Time to pull up to.
+ *  @return Buffers containing (time - last-pull-time) frames, and the time of
+ *  the first frame in those buffers.
+ */
+pair<shared_ptr<AudioBuffers>, DCPTime>
+AudioMerger::pull (DCPTime time)
+{
+	/* push() already requires this; pulling backwards would make to_return negative */
+	DCPOMATIC_ASSERT (time >= _last_pull);
+
+	/* Number of frames to return */
+	Frame const to_return = time.frames_floor (_frame_rate) - _last_pull.frames_floor (_frame_rate);
+	shared_ptr<AudioBuffers> out (new AudioBuffers (_buffers->channels(), to_return));
+
+	/* And this is how many we will get from our buffer */
+	Frame const to_return_from_buffers = min (to_return, Frame (_buffers->frames()));
+
+	/* Frame 0 of _buffers corresponds to _last_pull (see push()), so the data we
+	 * have goes at the *front* of the return buffer...
+	 */
+	out->copy_from (_buffers.get(), to_return_from_buffers, 0, 0);
+	/* ...and any gap at the end is silence */
+	out->make_silent (to_return_from_buffers, to_return - to_return_from_buffers);
+
+	DCPTime out_time = _last_pull;
+	_last_pull = time;
+
+	/* And remove the data we're returning from our buffers */
+	if (_buffers->frames() > to_return_from_buffers) {
+		_buffers->move (to_return_from_buffers, 0, _buffers->frames() - to_return_from_buffers);
+	}
+	_buffers->set_frames (_buffers->frames() - to_return_from_buffers);
+
+	return make_pair (out, out_time);
+}
+
+/** Accumulate some audio into our buffer at a given time.
+ *  Data entirely before the last pull time may not be pushed (we assert on this).
+ */
+void
+AudioMerger::push (boost::shared_ptr<const AudioBuffers> audio, DCPTime time)
+{
+	DCPOMATIC_ASSERT (time >= _last_pull);
+
+	/* Frame 0 of _buffers corresponds to _last_pull, hence the write offset of
+	 * (frame - _last_pull) below; grow the buffer if this push extends past its end.
+	 */
+	Frame const frame = time.frames_floor (_frame_rate);
+	Frame after = max (Frame (_buffers->frames()), frame + audio->frames() - _last_pull.frames_floor (_frame_rate));
+	_buffers->ensure_size (after);
+	_buffers->accumulate_frames (audio.get(), 0, frame - _last_pull.frames_floor (_frame_rate), audio->frames ());
+	_buffers->set_frames (after);
+}
--- /dev/null
+/*
+ Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef DCPOMATIC_AUDIO_MERGER_H
+#define DCPOMATIC_AUDIO_MERGER_H
+
+#include "audio_buffers.h"
+#include "dcpomatic_time.h"
+#include "util.h"
+
+/** @class AudioMerger
+ *  @brief A class that accepts audio data (with overlaps) at arbitrary times
+ *  and delivers it as a single merged stream, in order, when pulled.
+ */
+class AudioMerger
+{
+public:
+	AudioMerger (int channels, int frame_rate);
+
+	/** Pull audio up to a given time; after this call, no more data can be pushed
+	 *  before the specified time.
+	 */
+	std::pair<boost::shared_ptr<AudioBuffers>, DCPTime> pull (DCPTime time);
+	void push (boost::shared_ptr<const AudioBuffers> audio, DCPTime time);
+
+private:
+	/** Merged data not yet pulled; frame 0 corresponds to _last_pull */
+	boost::shared_ptr<AudioBuffers> _buffers;
+	/** Time that the last pull() call pulled up to */
+	DCPTime _last_pull;
+	int _frame_rate;
+};
+
+#endif
}
}
+ video->set_position (_next);
+ audio->set_position (_dcp_content->audio->stream(), _next);
+ subtitle->set_position (_next);
_next += ContentTime::from_frames (1, vfr);
if ((*_reel)->main_picture ()) {
DecoderPart (Decoder* parent, boost::shared_ptr<Log> log);
virtual ~DecoderPart () {}
+ virtual boost::optional<ContentTime> position () const = 0;
+
void set_ignore () {
_ignore = true;
}
return _ignore;
}
- virtual boost::optional<ContentTime> position () const = 0;
-
protected:
Decoder* _parent;
boost::shared_ptr<Log> _log;
LOG_WARNING ("Crazy timestamp %1", to_string (ct));
}
+ audio->set_position (*stream, ct);
+
/* Give this data provided there is some, and its time is sane */
if (ct >= ContentTime() && data->frames() > 0) {
audio->emit (*stream, data, ct);
, _always_burn_subtitles (false)
, _fast (false)
, _play_referenced (false)
+ , _audio_merger (_film->audio_channels(), _film->audio_frame_rate())
{
_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
}
if (decoder->audio) {
- decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1));
+ decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
}
if (decoder->subtitle) {
return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
+/** Convert a frame index in a piece's resampled audio to the corresponding DCP time,
+ *  accounting for the content's start trim and position; clamped to be non-negative.
+ */
+DCPTime
+Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
+{
+	/* See comment in dcp_to_content_video */
+	DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start (), piece->frc);
+	return max (DCPTime (), d + piece->content->position ());
+}
+
ContentTime
Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
{
shared_ptr<Piece> earliest;
DCPTime earliest_position;
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- /* Convert i->decoder->position() to DCPTime and work out the earliest */
+ DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
+ if (t < earliest_position) {
+ earliest_position = t;
+ earliest = i;
+ }
}
earliest->decoder->pass ();
return;
}
- /* Get subs to burn in and burn them in */
+ /* XXX: get subs to burn in and burn them in */
/* Fill gaps */
- DCPTime time = content_video_to_dcp (piece, video.frame.index());
+ DCPTime const time = content_video_to_dcp (piece, video.frame.index());
- dcp::Size image_size = piece->content->video->scale().size (
- piece->content->video, _video_container_size, _film->frame_size ()
- );
+ for (DCPTime i = _last_video_time; i < time; i += DCPTime::from_frames (1, _film->video_frame_rate())) {
+ if (_playlist->video_content_at(i) && _last_video) {
+ Video (_last_video->clone (i));
+ } else {
+ Video (black_player_video_frame (i));
+ }
+ }
- Video (
- shared_ptr<PlayerVideo> (
- new PlayerVideo (
- video.image,
- time,
- piece->content->video->crop (),
- piece->content->video->fade (video.frame.index()),
- image_size,
- _video_container_size,
- video.frame.eyes(),
- video.part,
- piece->content->video->colour_conversion ()
- )
+ _last_video.reset (
+ new PlayerVideo (
+ video.image,
+ time,
+ piece->content->video->crop (),
+ piece->content->video->fade (video.frame.index()),
+ piece->content->video->scale().size (
+ piece->content->video, _video_container_size, _film->frame_size ()
+ ),
+ _video_container_size,
+ video.frame.eyes(),
+ video.part,
+ piece->content->video->colour_conversion ()
)
);
+ _last_video_time = time;
+
+ Video (_last_video);
}
void
-Player::audio (weak_ptr<Piece> piece, ContentAudio video)
+/** Handler for audio data emitted by a piece's decoder: apply gain, remap to
+ *  DCP channels, run any audio processor and push into the merger.
+ */
+Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
-	/* gain, remap, processor */
-	/* Put into merge buffer */
+	shared_ptr<Piece> piece = wp.lock ();
+	if (!piece) {
+		/* The piece has gone; nothing to do */
+		return;
+	}
+
+	shared_ptr<AudioContent> content = piece->content->audio;
+	DCPOMATIC_ASSERT (content);
+
+	shared_ptr<AudioBuffers> audio = content_audio.audio;
+
+	/* Gain */
+	if (content->gain() != 0) {
+		shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
+		gain->apply_gain (content->gain ());
+		audio = gain;
+	}
+
+	/* XXX: end-trimming used to be checked here */
+
+	/* Compute time in the DCP.  delay() is in milliseconds, so divide by 1000.0:
+	 * integer division by 1000 would truncate any delay of under a whole second to zero.
+	 */
+	DCPTime const time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
+
+	/* Remap channels */
+	shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
+	dcp_mapped->make_silent ();
+
+	AudioMapping map = stream->mapping ();
+	for (int i = 0; i < map.input_channels(); ++i) {
+		for (int j = 0; j < dcp_mapped->channels(); ++j) {
+			/* Look the gain up once and reuse it */
+			float const g = map.get (i, static_cast<dcp::Channel> (j));
+			if (g > 0) {
+				dcp_mapped->accumulate_channel (
+					audio.get(),
+					i,
+					static_cast<dcp::Channel> (j),
+					g
+					);
+			}
+		}
+	}
+
+	audio = dcp_mapped;
+
+	if (_audio_processor) {
+		audio = _audio_processor->run (audio, _film->audio_channels ());
+	}
+
+	_audio_merger.push (audio, time);
}
void
Player::image_subtitle (weak_ptr<Piece> piece, ContentImageSubtitle subtitle)
{
- /* Store for video to see */
+ /* XXX: Store for video to see */
}
void
Player::text_subtitle (weak_ptr<Piece> piece, ContentTextSubtitle subtitle)
{
- /* Store for video to see, or emit */
+ /* XXX: Store for video to see, or emit */
}
void
Player::seek (DCPTime time, bool accurate)
{
+ /* XXX: seek decoders */
+
if (accurate) {
- _last_video = time - DCPTime::from_frames (1, _film->video_frame_rate ());
+ _last_video_time = time - DCPTime::from_frames (1, _film->video_frame_rate ());
}
}
#include "content_video.h"
#include "content_audio.h"
#include "content_subtitle.h"
+#include "audio_stream.h"
+#include "audio_merger.h"
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <list>
Frame dcp_to_content_video (boost::shared_ptr<const Piece> piece, DCPTime t) const;
DCPTime content_video_to_dcp (boost::shared_ptr<const Piece> piece, Frame f) const;
Frame dcp_to_resampled_audio (boost::shared_ptr<const Piece> piece, DCPTime t) const;
+ DCPTime resampled_audio_to_dcp (boost::shared_ptr<const Piece> piece, Frame f) const;
ContentTime dcp_to_content_subtitle (boost::shared_ptr<const Piece> piece, DCPTime t) const;
DCPTime content_subtitle_to_dcp (boost::shared_ptr<const Piece> piece, ContentTime t) const;
boost::shared_ptr<PlayerVideo> black_player_video_frame (DCPTime) const;
std::list<boost::shared_ptr<Piece> > overlaps (DCPTime from, DCPTime to, boost::function<bool (Content *)> valid);
void video (boost::weak_ptr<Piece>, ContentVideo);
- void audio (boost::weak_ptr<Piece>, ContentAudio);
+ void audio (boost::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
void image_subtitle (boost::weak_ptr<Piece>, ContentImageSubtitle);
void text_subtitle (boost::weak_ptr<Piece>, ContentTextSubtitle);
/** true if we should `play' (i.e output) referenced DCP data (e.g. for preview) */
bool _play_referenced;
- DCPTime _last_video;
+ boost::shared_ptr<PlayerVideo> _last_video;
+ DCPTime _last_video_time;
+
+ AudioMerger _audio_merger;
boost::shared_ptr<AudioProcessor> _audio_processor;
{
return p == AV_PIX_FMT_XYZ12LE ? AV_PIX_FMT_XYZ12LE : AV_PIX_FMT_RGB48LE;
}
+
+/** @return A copy of this PlayerVideo, identical except for the given time */
+shared_ptr<PlayerVideo>
+PlayerVideo::clone (DCPTime time) const
+{
+	shared_ptr<PlayerVideo> c (new PlayerVideo (*this));
+	c->_time = time;
+	return c;
+}
bool same (boost::shared_ptr<const PlayerVideo> other) const;
+ boost::shared_ptr<PlayerVideo> clone (DCPTime time) const;
+
private:
boost::shared_ptr<const ImageProxy> _in;
DCPTime _time;
return best_summary;
}
+
+/** @return true if any piece of content with video overlaps the given DCP time,
+ *  i.e. position() <= time < end().
+ */
+bool
+Playlist::video_content_at (DCPTime time) const
+{
+	BOOST_FOREACH (shared_ptr<Content> i, _content) {
+		if (i->video && i->position() <= time && time < i->end()) {
+			return true;
+		}
+	}
+
+	return false;
+}
void move_later (boost::shared_ptr<Content>);
ContentList content () const;
+ bool video_content_at (DCPTime time) const;
std::string video_identifier () const;
boost::shared_ptr<Log> log
);
+ void set_position (ContentTime position) {
+ _position = position;
+ }
+
+ boost::optional<ContentTime> position () const {
+ return _position;
+ }
+
void emit_image (ContentTimePeriod period, boost::shared_ptr<Image>, dcpomatic::Rect<double>);
void emit_text (ContentTimePeriod period, std::list<dcp::SubtitleString>);
void emit_text (ContentTimePeriod period, sub::Subtitle const & subtitle);
private:
boost::shared_ptr<const SubtitleContent> _content;
+ boost::optional<ContentTime> _position;
};
#endif
friend struct ffmpeg_pts_offset_test;
friend void ffmpeg_decoder_sequential_test_one (boost::filesystem::path file, float fps, int gaps, int video_length);
+ void set_position (ContentTime position) {
+ _position = position;
+ }
+
+ boost::optional<ContentTime> position () const {
+ return _position;
+ }
+
void emit (boost::shared_ptr<const ImageProxy>, Frame frame);
boost::signals2::signal<void (ContentVideo)> Data;
private:
boost::shared_ptr<const Content> _content;
boost::optional<Frame> _last_emitted;
+ boost::optional<ContentTime> _position;
};
#endif
audio_filter.cc
audio_filter_graph.cc
audio_mapping.cc
+ audio_merger.cc
audio_point.cc
audio_processor.cc
audio_stream.cc