#include "ffmpeg_content.h"
#include "still_image_decoder.h"
#include "still_image_content.h"
+#include "moving_image_decoder.h"
+#include "moving_image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "job.h"
#include "image.h"
#include "ratio.h"
+#include "resampler.h"
#include "log.h"
#include "scaler.h"
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
-//#define DEBUG_PLAYER 1
-
class Piece
{
public:
Piece (shared_ptr<Content> c)
: content (c)
- , video_position (c->start ())
- , audio_position (c->start ())
+ , video_position (c->position ())
+ , audio_position (c->position ())
{}
Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
: content (c)
, decoder (d)
- , video_position (c->start ())
- , audio_position (c->start ())
+ , video_position (c->position ())
+ , audio_position (c->position ())
{}
shared_ptr<Content> content;
Time audio_position;
};
-#ifdef DEBUG_PLAYER
-std::ostream& operator<<(std::ostream& s, Piece const & p)
-{
- if (dynamic_pointer_cast<FFmpegContent> (p.content)) {
- s << "\tffmpeg ";
- } else if (dynamic_pointer_cast<StillImageContent> (p.content)) {
- s << "\tstill image";
- } else if (dynamic_pointer_cast<SndfileContent> (p.content)) {
- s << "\tsndfile ";
- }
-
- s << " at " << p.content->start() << " until " << p.content->end();
-
- return s;
-}
-#endif
-
Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
: _film (f)
, _playlist (p)
, _have_valid_pieces (false)
, _video_position (0)
, _audio_position (0)
- , _audio_buffers (f->audio_channels(), 0)
+ /* The AudioMerger replaces the old flat _audio_buffers: it accepts audio
+    pushed at arbitrary times and hands back fully-mixed contiguous spans,
+    converting between Time and frame counts via the two bound helpers. */
+ , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1))
+ , _last_emit_was_black (false)
{
- _playlist->Changed.connect (bind (&Player::playlist_changed, this));
- _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
- _film->Changed.connect (bind (&Player::film_changed, this, _1));
- set_video_container_size (_film->container()->size (_film->full_frame ()));
+ /* Store the signal connections in members (presumably
+    boost::signals2::scoped_connection, so they disconnect automatically when
+    this Player is destroyed — TODO confirm against the header). */
+ _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
+ _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
+ _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
+ /* Size the video output container to the film's chosen ratio, fitted
+    within the film's full frame. */
+ set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ()));
}
void
_have_valid_pieces = true;
}
-#ifdef DEBUG_PLAYER
- cout << "= PASS\n";
-#endif
-
Time earliest_t = TIME_MAX;
shared_ptr<Piece> earliest;
enum {
}
if (!earliest) {
-#ifdef DEBUG_PLAYER
- cout << "no earliest piece.\n";
-#endif
-
flush ();
return true;
}
switch (type) {
case VIDEO:
if (earliest_t > _video_position) {
-#ifdef DEBUG_PLAYER
- cout << "no video here; emitting black frame (earliest=" << earliest_t << ", video_position=" << _video_position << ").\n";
-#endif
emit_black ();
} else {
-#ifdef DEBUG_PLAYER
- cout << "Pass " << *earliest << "\n";
-#endif
earliest->decoder->pass ();
}
break;
case AUDIO:
if (earliest_t > _audio_position) {
-#ifdef DEBUG_PLAYER
- cout << "no audio here; emitting silence.\n";
-#endif
emit_silence (_film->time_to_audio_frames (earliest_t - _audio_position));
} else {
-#ifdef DEBUG_PLAYER
- cout << "Pass " << *earliest << "\n";
-#endif
earliest->decoder->pass ();
+
+ if (earliest->decoder->done()) {
+ shared_ptr<AudioContent> ac = dynamic_pointer_cast<AudioContent> (earliest->content);
+ assert (ac);
+ shared_ptr<Resampler> re = resampler (ac, false);
+ if (re) {
+ shared_ptr<const AudioBuffers> b = re->flush ();
+ if (b->frames ()) {
+ process_audio (earliest, b, ac->audio_length ());
+ }
+ }
+ }
}
break;
}
-#ifdef DEBUG_PLAYER
- cout << "\tpost pass " << _video_position << " " << _audio_position << "\n";
-#endif
+ if (_audio) {
+ Time audio_done_up_to = TIME_MAX;
+ for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
+ if (dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
+ audio_done_up_to = min (audio_done_up_to, (*i)->audio_position);
+ }
+ }
+ TimedAudioBuffers<Time> tb = _audio_merger.pull (audio_done_up_to);
+ Audio (tb.audio, tb.time);
+ _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
+ }
+
return false;
}
void
Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame)
{
+ /* Keep a note of what came in so that we can repeat it if required */
+ _last_process_video.weak_piece = weak_piece;
+ _last_process_video.image = image;
+ _last_process_video.eyes = eyes;
+ _last_process_video.same = same;
+ _last_process_video.frame = frame;
+
shared_ptr<Piece> piece = weak_piece.lock ();
if (!piece) {
return;
return;
}
- shared_ptr<Image> work_image = image->crop (content->crop(), true);
+ Time const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
+ if (content->trimmed (relative_time)) {
+ return;
+ }
- libdcp::Size const image_size = content->ratio()->size (_video_container_size);
-
- work_image = work_image->scale_and_convert_to_rgb (image_size, _film->scaler(), true);
+ /* Convert to RGB first, as FFmpeg doesn't seem to like handling YUV images with odd widths */
+ shared_ptr<Image> work_image = image->scale (image->size (), _film->scaler(), PIX_FMT_RGB24, true);
- Time time = content->start() + (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
+ work_image = work_image->crop (content->crop(), true);
+
+ float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
+ libdcp::Size image_size = fit_ratio_within (ratio, _video_container_size);
+ work_image = work_image->scale (image_size, _film->scaler(), PIX_FMT_RGB24, true);
+
+ Time time = content->position() + relative_time - content->trim_start ();
+
if (_film->with_subtitles () && _out_subtitle.image && time >= _out_subtitle.from && time <= _out_subtitle.to) {
work_image->alpha_blend (_out_subtitle.image, _out_subtitle.position);
}
_last_video = piece->content;
#endif
- Video (work_image, eyes, same, time);
+ Video (work_image, eyes, content->colour_conversion(), same, time);
time += TIME_HZ / _film->video_frame_rate();
if (frc.repeat) {
- Video (work_image, eyes, true, time);
+ Video (work_image, eyes, content->colour_conversion(), true, time);
time += TIME_HZ / _film->video_frame_rate();
}
+ _last_emit_was_black = false;
+
_video_position = piece->video_position = time;
}
shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
assert (content);
+ /* Gain */
+ if (content->audio_gain() != 0) {
+ shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
+ gain->apply_gain (content->audio_gain ());
+ audio = gain;
+ }
+
+ /* Resample */
+ if (content->content_audio_frame_rate() != content->output_audio_frame_rate()) {
+ shared_ptr<Resampler> r = resampler (content, true);
+ pair<shared_ptr<const AudioBuffers>, AudioContent::Frame> ro = r->run (audio, frame);
+ audio = ro.first;
+ frame = ro.second;
+ }
+
+ Time const relative_time = _film->audio_frames_to_time (frame);
+
+ if (content->trimmed (relative_time)) {
+ return;
+ }
+
+ Time time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time;
+
/* Remap channels */
shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
dcp_mapped->make_silent ();
audio = dcp_mapped;
- Time time = content->start()
- + _film->audio_frames_to_time (frame)
- + (content->audio_delay() * TIME_HZ / 1000);
-
/* We must cut off anything that comes before the start of all time */
if (time < 0) {
int const frames = - time * _film->audio_frame_rate() / TIME_HZ;
time = 0;
}
- /* The time of this audio may indicate that some of our buffered audio is not going to
- be added to any more, so it can be emitted.
- */
-
- if (time > _audio_position) {
- /* We can emit some audio from our buffers; this is how many frames */
- OutputAudioFrame const N = _film->time_to_audio_frames (time - _audio_position);
- if (N > _audio_buffers.frames()) {
- /* We need some extra silence before whatever is in the buffers */
- _audio_buffers.ensure_size (N);
- _audio_buffers.move (0, N - _audio_buffers.frames(), _audio_buffers.frames ());
- _audio_buffers.make_silent (0, _audio_buffers.frames());
- _audio_buffers.set_frames (N);
- }
- assert (N <= _audio_buffers.frames());
-
- /* XXX: not convinced that a copy is necessary here */
- shared_ptr<AudioBuffers> emit (new AudioBuffers (_audio_buffers.channels(), N));
- emit->copy_from (&_audio_buffers, N, 0, 0);
- Audio (emit, _audio_position);
- _audio_position = piece->audio_position = _audio_position + _film->audio_frames_to_time (N);
-
- /* And remove it from our buffers */
- if (_audio_buffers.frames() > N) {
- _audio_buffers.move (N, 0, _audio_buffers.frames() - N);
- }
- _audio_buffers.set_frames (_audio_buffers.frames() - N);
- }
-
- /* Now accumulate the new audio into our buffers */
- _audio_buffers.ensure_size (_audio_buffers.frames() + audio->frames());
- _audio_buffers.accumulate_frames (audio.get(), 0, 0, audio->frames ());
- _audio_buffers.set_frames (_audio_buffers.frames() + audio->frames());
+ _audio_merger.push (audio, time);
+ piece->audio_position += _film->audio_frames_to_time (audio->frames ());
}
void
Player::flush ()
{
- if (_audio_buffers.frames() > 0) {
- shared_ptr<AudioBuffers> emit (new AudioBuffers (_audio_buffers.channels(), _audio_buffers.frames()));
- emit->copy_from (&_audio_buffers, _audio_buffers.frames(), 0, 0);
- Audio (emit, _audio_position);
- _audio_position += _film->audio_frames_to_time (_audio_buffers.frames ());
- _audio_buffers.set_frames (0);
+ TimedAudioBuffers<Time> tb = _audio_merger.flush ();
+ if (tb.audio) {
+ Audio (tb.audio, tb.time);
+ _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
}
while (_video_position < _audio_position) {
continue;
}
- Time s = t - vc->start ();
+ Time s = t - vc->position ();
s = max (static_cast<Time> (0), s);
- s = min (vc->length(), s);
+ s = min (vc->length_after_trim(), s);
- (*i)->video_position = (*i)->audio_position = vc->start() + s;
+ (*i)->video_position = (*i)->audio_position = vc->position() + s;
FrameRateConversion frc (vc->video_frame_rate(), _film->video_frame_rate());
/* Here we are converting from time (in the DCP) to a frame number in the content.
Hence we need to use the DCP's frame rate and the double/skip correction, not
the source's rate.
*/
- VideoContent::Frame f = s * _film->video_frame_rate() / (frc.factor() * TIME_HZ);
+ VideoContent::Frame f = (s + vc->trim_start ()) * _film->video_frame_rate() / (frc.factor() * TIME_HZ);
dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (f, accurate);
}
piece->decoder = id;
}
+ shared_ptr<const MovingImageContent> mc = dynamic_pointer_cast<const MovingImageContent> (*i);
+ if (mc) {
+ shared_ptr<MovingImageDecoder> md;
+
+ if (!md) {
+ md.reset (new MovingImageDecoder (_film, mc));
+ md->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
+ }
+
+ piece->decoder = md;
+ }
+
shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
if (sc) {
shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
_pieces.push_back (piece);
}
-
-#ifdef DEBUG_PLAYER
- cout << "=== Player setup:\n";
- for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
- cout << *(i->get()) << "\n";
- }
-#endif
}
void
}
if (
- property == ContentProperty::START || property == ContentProperty::LENGTH ||
- property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_RATIO
+ property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
+ property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END
) {
_have_valid_pieces = false;
Changed (frequent);
} else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {
+
update_subtitle ();
Changed (frequent);
- } else if (property == VideoContentProperty::VIDEO_FRAME_TYPE) {
+
+ } else if (
+ property == VideoContentProperty::VIDEO_FRAME_TYPE || property == VideoContentProperty::VIDEO_CROP ||
+ property == VideoContentProperty::VIDEO_RATIO
+ ) {
+
Changed (frequent);
}
}
_black_frame->make_black ();
}
+/** Look up (and cache) the Resampler for a piece of audio content.
+ *  @param c Audio content whose resampler is wanted.
+ *  @param create If true, make and cache a new Resampler when none exists;
+ *  if false, return a null pointer in that case.
+ *  @return The resampler, or a null shared_ptr.
+ */
+shared_ptr<Resampler>
+Player::resampler (shared_ptr<AudioContent> c, bool create)
+{
+ map<shared_ptr<AudioContent>, shared_ptr<Resampler> >::iterator i = _resamplers.find (c);
+ if (i != _resamplers.end ()) {
+ /* Already have one cached for this content */
+ return i->second;
+ }
+
+ if (!create) {
+ return shared_ptr<Resampler> ();
+ }
+
+ /* Converts from the content's native audio rate to its output rate */
+ shared_ptr<Resampler> r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()));
+ _resamplers[c] = r;
+ return r;
+}
+
void
Player::emit_black ()
{
_last_video.reset ();
#endif
- /* XXX: use same here */
- Video (_black_frame, EYES_BOTH, false, _video_position);
+ Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
_video_position += _film->video_frames_to_time (1);
+ _last_emit_was_black = true;
}
void
Player::emit_silence (OutputAudioFrame most)
{
+ if (most == 0) {
+ return;
+ }
+
OutputAudioFrame N = min (most, _film->audio_frame_rate() / 2);
shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), N));
silence->make_silent ();
_out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
_out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
- _out_subtitle.image = _in_subtitle.image->scale (libdcp::Size (scaled_size.width, scaled_size.height), Scaler::from_id ("bicubic"), true);
- _out_subtitle.from = _in_subtitle.from + piece->content->start ();
- _out_subtitle.to = _in_subtitle.to + piece->content->start ();
+ _out_subtitle.image = _in_subtitle.image->scale (
+ scaled_size,
+ Scaler::from_id ("bicubic"),
+ _in_subtitle.image->pixel_format (),
+ true
+ );
+ _out_subtitle.from = _in_subtitle.from + piece->content->position ();
+ _out_subtitle.to = _in_subtitle.to + piece->content->position ();
+}
+
+/** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
+ * @return false if this could not be done.
+ */
+bool
+Player::repeat_last_video ()
+{
+ /* Nothing has been through process_video() yet, so there is nothing
+    to repeat. */
+ if (!_last_process_video.image) {
+ return false;
+ }
+
+ /* Re-run process_video with the stashed arguments; the current crop,
+    ratio, scaler and subtitle settings are applied afresh inside
+    process_video, so the re-emitted frame reflects them. */
+ process_video (
+ _last_process_video.weak_piece,
+ _last_process_video.image,
+ _last_process_video.eyes,
+ _last_process_video.same,
+ _last_process_video.frame
+ );
+
+ return true;
}