2 Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include "ffmpeg_decoder.h"
24 #include "ffmpeg_content.h"
25 #include "still_image_decoder.h"
26 #include "still_image_content.h"
27 #include "moving_image_decoder.h"
28 #include "moving_image_content.h"
29 #include "sndfile_decoder.h"
30 #include "sndfile_content.h"
31 #include "subtitle_content.h"
36 #include "resampler.h"
47 using boost::shared_ptr;
48 using boost::weak_ptr;
49 using boost::dynamic_pointer_cast;
51 //#define DEBUG_PLAYER 1
/* NOTE(review): listing is elided here — the enclosing class/struct head for
   Piece is not visible; comments describe only the visible lines. */
/* A Piece pairs a playlist Content with the Decoder that produces its data,
   plus the timeline positions up to which video and audio have been emitted
   (both initialised to the content's start position). */
56 Piece (shared_ptr<Content> c)
58 , video_position (c->position ())
59 , audio_position (c->position ())
/* Alternative constructor when the decoder is already known. */
62 Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
65 , video_position (c->position ())
66 , audio_position (c->position ())
/* The content this piece represents */
69 shared_ptr<Content> content;
/* The decoder that will produce this piece's data */
70 shared_ptr<Decoder> decoder;
/** Debug helper: write a human-readable description of a Piece to a stream,
 *  naming its concrete content type and its start/end times on the timeline.
 *  (NOTE(review): listing elided — the strings printed per type are not visible.)
 */
76 std::ostream& operator<<(std::ostream& s, Piece const & p)
78 if (dynamic_pointer_cast<FFmpegContent> (p.content)) {
80 } else if (dynamic_pointer_cast<StillImageContent> (p.content)) {
82 } else if (dynamic_pointer_cast<SndfileContent> (p.content)) {
86 s << " at " << p.content->position() << " until " << p.content->end();
/** Construct a Player for a film and its playlist.
 *  Wires up change notifications from the playlist and film, sets up the
 *  audio merger with the film's channel count and time<->frame conversions,
 *  and sizes the video container from the film's container ratio.
 *  (NOTE(review): listing elided — some initialiser-list entries are missing.)
 */
92 Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
97 , _have_valid_pieces (false)
100 , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1))
101 , _last_emit_was_black (false)
/* Rebuild pieces when the playlist itself changes; react per-property when a
   content item changes; react to film-level property changes. */
103 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
104 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
105 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
106 set_video_container_size (_film->container()->size (_film->full_frame ()));
/* NOTE(review): listing elided — only the function heads are visible; the
   bodies presumably clear the _video/_audio enable flags; verify. */
/** Stop this Player producing video output. */
110 Player::disable_video ()
/** Stop this Player producing audio output. */
116 Player::disable_audio ()
/* NOTE(review): listing elided (function head and some lines missing);
   comments describe only the visible code.  This appears to be the body of
   Player::pass(): advance playback by one step, emitting video/audio. */
/* Lazily (re)build the piece list if something invalidated it. */
124 if (!_have_valid_pieces) {
126 _have_valid_pieces = true;
/* Find the piece with the earliest un-emitted video or audio position. */
133 Time earliest_t = TIME_MAX;
134 shared_ptr<Piece> earliest;
140 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
/* Skip decoders that have finished */
141 if ((*i)->decoder->done ()) {
145 if (_video && dynamic_pointer_cast<VideoDecoder> ((*i)->decoder)) {
146 if ((*i)->video_position < earliest_t) {
147 earliest_t = (*i)->video_position;
153 if (_audio && dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
154 if ((*i)->audio_position < earliest_t) {
155 earliest_t = (*i)->audio_position;
164 cout << "no earliest piece.\n";
/* Gap before the next video: emit a black frame instead of decoding. */
173 if (earliest_t > _video_position) {
175 cout << "no video here; emitting black frame (earliest=" << earliest_t << ", video_position=" << _video_position << ").\n";
180 cout << "Pass video " << *earliest << "\n";
182 earliest->decoder->pass ();
/* Gap before the next audio: emit silence up to earliest_t. */
187 if (earliest_t > _audio_position) {
189 cout << "no audio here (none until " << earliest_t << "); emitting silence.\n";
191 emit_silence (_film->time_to_audio_frames (earliest_t - _audio_position));
194 cout << "Pass audio " << *earliest << "\n";
196 earliest->decoder->pass ();
/* When an audio decoder finishes, flush its resampler (if one exists) so no
   buffered samples are lost. */
198 if (earliest->decoder->done()) {
199 shared_ptr<AudioContent> ac = dynamic_pointer_cast<AudioContent> (earliest->content);
/* create=false: only fetch an existing resampler, don't make one */
201 shared_ptr<Resampler> re = resampler (ac, false);
203 shared_ptr<const AudioBuffers> b = re->flush ();
205 process_audio (earliest, b, ac->audio_length ());
/* Pull from the merger only up to the point that every audio decoder has
   reached, so that late-arriving audio can still be mixed in. */
214 Time audio_done_up_to = TIME_MAX;
215 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
216 if (dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
217 audio_done_up_to = min (audio_done_up_to, (*i)->audio_position);
221 TimedAudioBuffers<Time> tb = _audio_merger.pull (audio_done_up_to);
222 Audio (tb.audio, tb.time);
223 _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
227 cout << "\tpost pass _video_position=" << _video_position << " _audio_position=" << _audio_position << "\n";
/** Handle a decoded video frame from one of our pieces: apply frame-rate
 *  conversion, trimming, crop/scale, subtitle overlay and padding into the
 *  container, then emit it via the Video signal.
 *  (NOTE(review): listing elided — some lines, including early returns and
 *  the frc.repeat branch structure, are not visible.)
 *  @param weak_piece Piece this frame came from.
 *  @param image Decoded image.
 *  @param eyes Which eye(s) this frame is for (3D).
 *  @param same true if this frame is the same as the last one emitted.
 *  @param frame Frame index within the content.
 */
234 Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame)
236 shared_ptr<Piece> piece = weak_piece.lock ();
241 shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
244 FrameRateConversion frc (content->video_frame_rate(), _film->video_frame_rate());
/* When skipping (content rate higher than DCP rate) drop every other frame */
245 if (frc.skip && (frame % 2) == 1) {
249 Time const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
250 if (content->trimmed (relative_time)) {
254 /* Convert to RGB first, as FFmpeg doesn't seem to like handling YUV images with odd widths */
255 shared_ptr<Image> work_image = image->scale (image->size (), _film->scaler(), PIX_FMT_RGB24, true);
257 work_image = work_image->crop (content->crop(), true);
259 libdcp::Size const image_size = content->ratio()->size (_video_container_size);
261 work_image = work_image->scale (image_size, _film->scaler(), PIX_FMT_RGB24, true);
/* Position on the DCP timeline: content start plus content-relative time,
   less any trim at the start. */
263 Time time = content->position() + relative_time - content->trim_start ();
/* Burn in the current subtitle if one is active at this time */
265 if (_film->with_subtitles () && _out_subtitle.image && time >= _out_subtitle.from && time <= _out_subtitle.to) {
266 work_image->alpha_blend (_out_subtitle.image, _out_subtitle.position);
/* Pad the scaled image into the centre of the container if it is smaller */
269 if (image_size != _video_container_size) {
270 assert (image_size.width <= _video_container_size.width);
271 assert (image_size.height <= _video_container_size.height);
272 shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
274 im->copy (work_image, Position<int> ((_video_container_size.width - image_size.width) / 2, (_video_container_size.height - image_size.height) / 2));
278 #ifdef DCPOMATIC_DEBUG
279 _last_video = piece->content;
282 Video (work_image, eyes, content->colour_conversion(), same, time);
283 time += TIME_HZ / _film->video_frame_rate();
/* Second emission — presumably the frc "repeat" (frame-doubling) branch;
   the surrounding conditional is elided, verify against the full source. */
286 Video (work_image, eyes, content->colour_conversion(), true, time);
287 time += TIME_HZ / _film->video_frame_rate();
290 _last_emit_was_black = false;
/* Record how far this piece (and the player) has got with video */
292 _video_position = piece->video_position = time;
/** Handle decoded audio from one of our pieces: apply gain, resampling,
 *  trimming, delay, channel mapping to DCP channels and start-of-time
 *  clipping, then push the result into the audio merger.
 *  (NOTE(review): listing elided — reassignments of `audio` after the gain /
 *  resample / map / trim steps are not all visible.)
 *  @param weak_piece Piece the audio came from.
 *  @param audio Decoded audio buffers.
 *  @param frame Frame index of the start of this audio within the content.
 */
296 Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers> audio, AudioContent::Frame frame)
298 shared_ptr<Piece> piece = weak_piece.lock ();
303 shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
/* Apply the content's gain, copying first so the decoder's buffer is untouched */
307 if (content->audio_gain() != 0) {
308 shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
309 gain->apply_gain (content->audio_gain ());
/* Resample if the content's native rate differs from its output rate */
314 if (content->content_audio_frame_rate() != content->output_audio_frame_rate()) {
315 shared_ptr<Resampler> r = resampler (content, true);
316 pair<shared_ptr<const AudioBuffers>, AudioContent::Frame> ro = r->run (audio, frame);
321 Time const relative_time = _film->audio_frames_to_time (frame);
323 if (content->trimmed (relative_time)) {
/* Timeline position including the content's audio delay (ms -> Time units) */
327 Time time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time;
/* Remap content channels into the film's DCP channel layout */
330 shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
331 dcp_mapped->make_silent ();
332 list<pair<int, libdcp::Channel> > map = content->audio_mapping().content_to_dcp ();
333 for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
334 if (i->first < audio->channels() && i->second < dcp_mapped->channels()) {
335 dcp_mapped->accumulate_channel (audio.get(), i->first, i->second);
341 /* We must cut off anything that comes before the start of all time */
/* `time` is negative here; compute how many frames precede time zero */
343 int const frames = - time * _film->audio_frame_rate() / TIME_HZ;
344 if (frames >= audio->frames ()) {
348 shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->channels(), audio->frames() - frames));
349 trimmed->copy_from (audio.get(), audio->frames() - frames, frames, 0);
355 _audio_merger.push (audio, time);
356 piece->audio_position += _film->audio_frames_to_time (audio->frames ());
/* NOTE(review): listing elided — the function head is missing; this appears
   to be a flush routine: emit everything left in the audio merger, then pad
   with black frames / silence until video and audio positions agree. */
362 TimedAudioBuffers<Time> tb = _audio_merger.flush ();
364 Audio (tb.audio, tb.time);
365 _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
/* Body elided — presumably emits black frames to catch video up; verify. */
368 while (_video_position < _audio_position) {
372 while (_audio_position < _video_position) {
373 emit_silence (_film->time_to_audio_frames (_video_position - _audio_position));
378 /** Seek so that the next pass() will yield (approximately) the requested frame.
379 * Pass accurate = true to try harder to get close to the request.
380 * @return true on error
383 Player::seek (Time t, bool accurate)
/* Make sure the piece list is up to date before seeking */
385 if (!_have_valid_pieces) {
387 _have_valid_pieces = true;
390 if (_pieces.empty ()) {
/* Seek each video piece to the content-relative time, clamped to its extent */
394 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
395 shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> ((*i)->content);
400 Time s = t - vc->position ();
401 s = max (static_cast<Time> (0), s);
402 s = min (vc->length_after_trim(), s);
404 (*i)->video_position = (*i)->audio_position = vc->position() + s;
406 FrameRateConversion frc (vc->video_frame_rate(), _film->video_frame_rate());
407 /* Here we are converting from time (in the DCP) to a frame number in the content.
408 Hence we need to use the DCP's frame rate and the double/skip correction, not
/* Convert DCP time (plus start trim) to a content frame index */
411 VideoContent::Frame f = (s + vc->trim_start ()) * _film->video_frame_rate() / (frc.factor() * TIME_HZ);
412 dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (f, accurate);
415 _video_position = _audio_position = t;
417 /* XXX: don't seek audio because we don't need to... */
/** Rebuild the list of Pieces from the playlist: create a decoder of the
 *  appropriate type for each content item and connect its signals to our
 *  process_* handlers.  Old StillImageDecoders are re-used where possible.
 *  (NOTE(review): listing elided — the branches that assign each decoder to
 *  piece->decoder, and some conditionals, are not visible.)
 */
421 Player::setup_pieces ()
/* Keep the old pieces so decoders can be re-used below */
423 list<shared_ptr<Piece> > old_pieces = _pieces;
427 ContentList content = _playlist->content ();
428 sort (content.begin(), content.end(), ContentSorter ());
430 for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
432 shared_ptr<Piece> piece (new Piece (*i));
434 /* XXX: into content? */
/* FFmpeg content: decoder provides video, audio and subtitles */
436 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
438 shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));
440 fd->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
441 fd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
442 fd->Subtitle.connect (bind (&Player::process_subtitle, this, piece, _1, _2, _3, _4));
/* Still images: video only */
447 shared_ptr<const StillImageContent> ic = dynamic_pointer_cast<const StillImageContent> (*i);
449 shared_ptr<StillImageDecoder> id;
451 /* See if we can re-use an old StillImageDecoder */
452 for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
453 shared_ptr<StillImageDecoder> imd = dynamic_pointer_cast<StillImageDecoder> ((*j)->decoder);
454 if (imd && imd->content() == ic) {
/* No re-usable decoder found: make a new one */
460 id.reset (new StillImageDecoder (_film, ic));
461 id->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
/* Moving image sequences: video only */
467 shared_ptr<const MovingImageContent> mc = dynamic_pointer_cast<const MovingImageContent> (*i);
469 shared_ptr<MovingImageDecoder> md;
472 md.reset (new MovingImageDecoder (_film, mc));
473 md->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
/* Sound files: audio only */
479 shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
481 shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
482 sd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
487 _pieces.push_back (piece);
491 cout << "=== Player setup:\n";
492 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
493 cout << *(i->get()) << "\n";
/** Respond to a property change on a piece of content.
 *  Position/length/trim/crop/ratio changes invalidate the piece list;
 *  subtitle offset/scale and video frame type get lighter handling
 *  (bodies elided in this listing — verify against the full source).
 *  @param w Content that changed.
 *  @param property Which property changed.
 *  @param frequent true if this change is part of a rapid sequence (e.g. a slider drag).
 */
499 Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
501 shared_ptr<Content> c = w.lock ();
507 property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
508 property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END ||
509 property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_RATIO
/* These properties change the timeline layout, so rebuild on next pass() */
512 _have_valid_pieces = false;
515 } else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {
518 } else if (property == VideoContentProperty::VIDEO_FRAME_TYPE) {
/** Respond to a change in the playlist: the piece list must be rebuilt
 *  before the next pass().
 */
524 Player::playlist_changed ()
526 _have_valid_pieces = false;
/** Set the size of the "container" into which all emitted video is placed,
 *  and rebuild the cached black frame to match.
 *  @param s New container size.
 */
531 Player::set_video_container_size (libdcp::Size s)
533 _video_container_size = s;
534 _black_frame.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
535 _black_frame->make_black ();
/** Find (or optionally create) the Resampler for a piece of audio content.
 *  @param c Audio content.
 *  @param create If true, make a new Resampler when none exists; if false,
 *  return an empty pointer in that case.
 *  (NOTE(review): listing elided — the insertion of `r` into _resamplers and
 *  the returns are not all visible.)
 */
538 shared_ptr<Resampler>
539 Player::resampler (shared_ptr<AudioContent> c, bool create)
541 map<shared_ptr<AudioContent>, shared_ptr<Resampler> >::iterator i = _resamplers.find (c);
542 if (i != _resamplers.end ()) {
547 return shared_ptr<Resampler> ();
550 shared_ptr<Resampler> r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()));
/** Emit one black frame at the current video position and advance it by one
 *  frame.  `same` is true for consecutive black frames so downstream can
 *  skip re-encoding identical images.
 */
556 Player::emit_black ()
558 #ifdef DCPOMATIC_DEBUG
559 _last_video.reset ();
562 Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
563 _video_position += _film->video_frames_to_time (1);
564 _last_emit_was_black = true;
/** Emit silence at the current audio position and advance it.
 *  At most half a second is emitted per call (N is clamped to
 *  audio_frame_rate() / 2), so callers needing more invoke this repeatedly.
 *  @param most Maximum number of audio frames of silence to emit.
 */
568 Player::emit_silence (OutputAudioFrame most)
574 OutputAudioFrame N = min (most, _film->audio_frame_rate() / 2);
575 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), N));
576 silence->make_silent ();
577 Audio (silence, _audio_position);
578 _audio_position += _film->audio_frames_to_time (N);
/** Respond to a change in a Film property.
 *  (NOTE(review): body of the if is elided — presumably emits a Changed-type
 *  notification; verify against the full source.)
 *  @param p Property that changed.
 */
582 Player::film_changed (Film::Property p)
584 /* Here we should notice Film properties that affect our output, and
585 alert listeners that our output now would be different to how it was
586 last time we were run.
/* These three properties all alter the rendered image */
589 if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER) {
/** Handle a decoded subtitle image from one of our pieces: stash it (with its
 *  placement rectangle and display period) as the current "in" subtitle.
 *  @param weak_piece Piece the subtitle came from.
 *  @param image Subtitle image.
 *  @param rect Placement rectangle, in proportions of the video size.
 *  @param from Time at which the subtitle should appear.
 *  @param to Time at which it should disappear.
 */
595 Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
597 _in_subtitle.piece = weak_piece;
598 _in_subtitle.image = image;
599 _in_subtitle.rect = rect;
600 _in_subtitle.from = from;
601 _in_subtitle.to = to;
/** Rebuild _out_subtitle from _in_subtitle: apply the content's subtitle
 *  offset and scale, compute the on-screen position within the video
 *  container, scale the image, and convert the display times from
 *  content-relative to timeline-absolute.
 *  (NOTE(review): listing elided — some lines, e.g. the scale() size
 *  argument, are not visible.)
 */
607 Player::update_subtitle ()
609 shared_ptr<Piece> piece = _in_subtitle.piece.lock ();
/* No incoming subtitle image means no outgoing one either */
614 if (!_in_subtitle.image) {
615 _out_subtitle.image.reset ();
619 shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
622 dcpomatic::Rect<double> in_rect = _in_subtitle.rect;
623 libdcp::Size scaled_size;
/* Apply the user's vertical offset */
625 in_rect.y += sc->subtitle_offset ();
627 /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
628 scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
629 scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();
631 /* Then we need a corrective translation, consisting of two parts:
633 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
634 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
636 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
637 * (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
638 * (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
640 * Combining these two translations gives these expressions.
643 _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
644 _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
646 _out_subtitle.image = _in_subtitle.image->scale (
648 Scaler::from_id ("bicubic"),
649 _in_subtitle.image->pixel_format (),
/* Convert content-relative subtitle times to timeline-absolute ones */
652 _out_subtitle.from = _in_subtitle.from + piece->content->position ();
653 _out_subtitle.to = _in_subtitle.to + piece->content->position ();