2 Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
51 #include <dcp/reel_sound_asset.h>
52 #include <dcp/reel_subtitle_asset.h>
53 #include <dcp/reel_picture_asset.h>
54 #include <boost/foreach.hpp>
/* Convenience macro: write a general-type entry to the film's log */
#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
73 using boost::shared_ptr;
74 using boost::weak_ptr;
75 using boost::dynamic_pointer_cast;
76 using boost::optional;
77 using boost::scoped_ptr;
80 has_video (Content* c)
82 return static_cast<bool>(c->video);
86 has_audio (Content* c)
88 return static_cast<bool>(c->audio);
92 has_subtitle (Content* c)
94 return static_cast<bool>(c->subtitle);
/** Construct a Player to play @p playlist in the context of @p film.
 *  @param film Film being played.
 *  @param playlist Playlist to take content from.
 *
 *  NOTE(review): the first member initialiser (presumably `_film (film)')
 *  and the opening brace are not visible in this view of the file — confirm
 *  against the full source.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
, _playlist (playlist)
, _have_valid_pieces (false)
, _ignore_video (false)
, _ignore_audio (false)
, _always_burn_subtitles (false)
, _play_referenced (false)
/* Watch for changes to the film, the playlist and its content so that our
   cached state can be invalidated / listeners told when necessary.
*/
_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
/* Start with the container size taken from the film */
set_video_container_size (_film->frame_size ());
/* Pick up the film's audio processor (if any); film_changed() clones it */
film_changed (Film::AUDIO_PROCESSOR);
/** Rebuild _pieces from the current playlist content: one Piece (content +
 *  decoder + FrameRateChange) per usable content item.
 *
 *  NOTE(review): several lines (the opening brace, `continue' statements and
 *  closing braces) are not visible in this view of the file.
 */
Player::setup_pieces ()
BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing or otherwise invalid */
if (!i->paths_valid ()) {
shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
/* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags by telling the sub-decoders to discard output */
if (decoder->video && _ignore_video) {
decoder->video->set_ignore ();
if (decoder->audio && _ignore_audio) {
decoder->audio->set_ignore ();
if (decoder->audio && _fast) {
decoder->audio->set_fast ();
/* DCP decoders must decode referenced assets when we are playing them */
shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
if (dcp && _play_referenced) {
dcp->set_decode_referenced ();
_pieces.push_back (shared_ptr<Piece> (new Piece (i, decoder, frc)));
/* _pieces now reflects the playlist again */
_have_valid_pieces = true;
/** Handle a change to a property of some content in the playlist.
 *  @param w Content that changed.
 *  @param property Property that changed.
 *  @param frequent true if this change is likely to happen frequently.
 *
 *  NOTE(review): the `if (...)' wrappers around the two property groups and
 *  any Changed emissions are not visible in this view of the file.
 */
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
shared_ptr<Content> c = w.lock ();
/* Properties whose change invalidates our Piece list, requiring
   setup_pieces() to run again.
*/
property == ContentProperty::POSITION ||
property == ContentProperty::LENGTH ||
property == ContentProperty::TRIM_START ||
property == ContentProperty::TRIM_END ||
property == ContentProperty::PATH ||
property == VideoContentProperty::FRAME_TYPE ||
property == DCPContentProperty::CAN_BE_PLAYED ||
property == SubtitleContentProperty::COLOUR ||
property == SubtitleContentProperty::OUTLINE ||
property == SubtitleContentProperty::OUTLINE_COLOUR ||
property == FFmpegContentProperty::SUBTITLE_STREAM
_have_valid_pieces = false;
/* Properties which presumably change our output without requiring new
   pieces (no invalidation is visible for this group).
*/
property == ContentProperty::VIDEO_FRAME_RATE ||
property == SubtitleContentProperty::USE ||
property == SubtitleContentProperty::X_OFFSET ||
property == SubtitleContentProperty::Y_OFFSET ||
property == SubtitleContentProperty::X_SCALE ||
property == SubtitleContentProperty::Y_SCALE ||
property == SubtitleContentProperty::FONTS ||
property == VideoContentProperty::CROP ||
property == VideoContentProperty::SCALE ||
property == VideoContentProperty::FADE_IN ||
property == VideoContentProperty::FADE_OUT ||
property == VideoContentProperty::COLOUR_CONVERSION
202 Player::set_video_container_size (dcp::Size s)
204 _video_container_size = s;
206 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
207 _black_image->make_black ();
/** Handle a wholesale change to the playlist: our Piece list is now stale.
 *  NOTE(review): some lines of this function (possibly a Changed emission)
 *  are not visible in this view of the file — confirm.
 */
Player::playlist_changed ()
_have_valid_pieces = false;
/** Handle a change to a Film property.
 *  @param p Property that changed.
 *  NOTE(review): the bodies of some branches (e.g. CONTAINER) are not
 *  visible in this view of the file.
 */
Player::film_changed (Film::Property p)
/* Here we should notice Film properties that affect our output, and
alert listeners that our output now would be different to how it was
last time we were run.
if (p == Film::CONTAINER) {
} else if (p == Film::VIDEO_FRAME_RATE) {
/* Pieces contain a FrameRateChange which contains the DCP frame rate,
so we need new pieces here.
_have_valid_pieces = false;
} else if (p == Film::AUDIO_PROCESSOR) {
if (_film->audio_processor ()) {
/* Get our own processor instance running at the film's audio rate */
_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/** Scale and position image subtitles for the video container.
 *  @param subs Image subtitles whose rectangles are expressed as
 *  proportions of the container.
 *  @return Positioned images ready to be merged onto video.
 *  NOTE(review): the constructor call the scaled image/position are passed
 *  to (and the final return) are not visible in this view of the file.
 */
Player::transform_image_subtitles (list<ImageSubtitle> subs) const
list<PositionImage> all;
for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
/* We will scale the subtitle up to fit _video_container_size */
dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
/* Then we need a corrective translation, consisting of two parts:
* 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
* rect.x * _video_container_size.width and rect.y * _video_container_size.height.
* 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
* (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
* (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
* Combining these two translations gives these expressions.
*/
dcp::YUV_TO_RGB_REC601,
i->image->pixel_format (),
/* Pixel position of the subtitle within the container */
lrint (_video_container_size.width * i->rectangle.x),
lrint (_video_container_size.height * i->rectangle.y)
/** Make a PlayerVideo of plain black covering the whole container.
 *  @param time Time of the frame within the DCP.
 *  NOTE(review): several constructor arguments (crop, eyes, part etc.) are
 *  not visible in this view of the file.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (DCPTime time) const
return shared_ptr<PlayerVideo> (
/* Wrap the pre-built black image (see set_video_container_size) */
shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
_video_container_size,
_video_container_size,
/* Any colour conversion will do as the frame is all-black */
PresetColourConversion::all().front().conversion
/** @return All PlayerVideos at the given time. There may be none if the content
* at `time' is a DCP which we are passing through (i.e. referring to by reference)
* or 2 if we have 3D.
*
* NOTE(review): many lines (braces, `continue's, the PlayerVideo constructor
* arguments and the final return) are not visible in this view of the file.
*/
list<shared_ptr<PlayerVideo> >
Player::get_video (DCPTime time, bool accurate)
/* Rebuild the piece list if it has been invalidated */
if (!_have_valid_pieces) {
/* Find subtitles for possible burn-in */
PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true, accurate);
list<PositionImage> sub_images;
/* Image subtitles */
list<PositionImage> c = transform_image_subtitles (ps.image);
copy (c.begin(), c.end(), back_inserter (sub_images));
/* Text subtitles (rendered to an image) */
if (!ps.text.empty ()) {
list<PositionImage> s = render_subtitles (ps.text, ps.fonts, _video_container_size);
copy (s.begin (), s.end (), back_inserter (sub_images));
/* Merge everything into a single overlay, if there is anything */
optional<PositionImage> subtitles;
if (!sub_images.empty ()) {
subtitles = merge (sub_images);
/* Find pieces containing video which is happening now */
list<shared_ptr<Piece> > ov = overlaps (
time + DCPTime::from_frames (1, _film->video_frame_rate ()),
list<shared_ptr<PlayerVideo> > pvf;
/* No video content at this time */
pvf.push_back (black_player_video_frame (time));
/* Some video content at this time */
shared_ptr<Piece> last = *(ov.rbegin ());
VideoFrameType const last_type = last->content->video->frame_type ();
/* Get video from appropriate piece(s) */
BOOST_FOREACH (shared_ptr<Piece> piece, ov) {
shared_ptr<VideoDecoder> decoder = piece->decoder->video;
DCPOMATIC_ASSERT (decoder);
/* Skip DCP video which we are passing through by reference */
shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (piece->content);
if (dcp_content && dcp_content->reference_video () && !_play_referenced) {
/* always use the last video */
/* with a corresponding L/R eye if appropriate */
(last_type == VIDEO_FRAME_TYPE_3D_LEFT && piece->content->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) ||
(last_type == VIDEO_FRAME_TYPE_3D_RIGHT && piece->content->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT);
/* We want to use this piece */
list<ContentVideo> content_video = decoder->get (dcp_to_content_video (piece, time), accurate);
if (content_video.empty ()) {
pvf.push_back (black_player_video_frame (time));
/* Size the content will be scaled to inside the container */
dcp::Size image_size = piece->content->video->scale().size (
piece->content->video, _video_container_size, _film->frame_size ()
for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
shared_ptr<PlayerVideo> (
piece->content->video->crop (),
piece->content->video->fade (i->frame.index()),
_video_container_size,
piece->content->video->colour_conversion ()
/* Discard unused video */
decoder->get (dcp_to_content_video (piece, time), accurate);
/* Attach the merged subtitle overlay to each frame we are returning */
BOOST_FOREACH (shared_ptr<PlayerVideo> p, pvf) {
p->set_subtitle (subtitles.get ());
/** @return Audio data or 0 if the only audio data here is referenced DCP data */
/* NOTE(review): many lines (braces, `continue's, the declaration of
   `offset', accumulate_channel arguments and the final return) are not
   visible in this view of the file.
*/
shared_ptr<AudioBuffers>
Player::get_audio (DCPTime time, DCPTime length, bool accurate)
/* Rebuild the piece list if it has been invalidated */
if (!_have_valid_pieces) {
Frame const length_frames = length.frames_round (_film->audio_frame_rate ());
/* Start with silence of the requested length and mix into it */
shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
audio->make_silent ();
list<shared_ptr<Piece> > ov = overlaps (time, time + length, has_audio);
/* Check whether all the audio in this period comes from referenced DCPs */
bool all_referenced = true;
BOOST_FOREACH (shared_ptr<Piece> i, ov) {
shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (i->content);
if (i->content->audio && (!dcp_content || !dcp_content->reference_audio ())) {
/* There is audio content which is not from a DCP or not set to be referenced */
all_referenced = false;
if (all_referenced && !_play_referenced) {
return shared_ptr<AudioBuffers> ();
BOOST_FOREACH (shared_ptr<Piece> i, ov) {
DCPOMATIC_ASSERT (i->content->audio);
shared_ptr<AudioDecoder> decoder = i->decoder->audio;
DCPOMATIC_ASSERT (decoder);
/* The time that we should request from the content */
DCPTime request = time - DCPTime::from_seconds (i->content->audio->delay() / 1000.0);
Frame request_frames = length_frames;
if (request < DCPTime ()) {
/* We went off the start of the content, so we will need to offset
the stuff we get back.
*/
request_frames += request.frames_round (_film->audio_frame_rate ());
if (request_frames < 0) {
request = DCPTime ();
Frame const content_frame = dcp_to_resampled_audio (i, request);
BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams ()) {
if (j->channels() == 0) {
/* Some content (e.g. DCPs) can have streams with no channels */
/* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
ContentAudio all = decoder->get (j, content_frame, request_frames, accurate);
/* Apply the content's gain, if any */
if (i->content->audio->gain() != 0) {
shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
gain->apply_gain (i->content->audio->gain ());
/* Remap the stream's channels into the DCP's channel layout */
shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
dcp_mapped->make_silent ();
AudioMapping map = j->mapping ();
for (int i = 0; i < map.input_channels(); ++i) {
for (int j = 0; j < _film->audio_channels(); ++j) {
if (map.get (i, j) > 0) {
dcp_mapped->accumulate_channel (
/* Run the film's audio processor, if there is one */
if (_audio_processor) {
dcp_mapped = _audio_processor->run (dcp_mapped, _film->audio_channels ());
all.audio = dcp_mapped;
/* Mix what we got into the output buffer */
audio->accumulate_frames (
content_frame - all.frame,
offset.frames_round (_film->audio_frame_rate()),
min (Frame (all.audio->frames()), request_frames)
523 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
525 DCPTime s = t - piece->content->position ();
526 s = min (piece->content->length_after_trim(), s);
527 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
529 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
530 then convert that ContentTime to frames at the content's rate. However this fails for
531 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
532 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
534 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
536 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
540 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
542 /* See comment in dcp_to_content_video */
543 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
544 return max (DCPTime (), d + piece->content->position ());
548 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
550 DCPTime s = t - piece->content->position ();
551 s = min (piece->content->length_after_trim(), s);
552 /* See notes in dcp_to_content_video */
553 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
557 Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
559 DCPTime s = t - piece->content->position ();
560 s = min (piece->content->length_after_trim(), s);
561 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
565 Player::content_subtitle_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
567 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** @param burnt true to return only subtitles to be burnt, false to return only
 * subtitles that should not be burnt. This parameter will be ignored if
 * _always_burn_subtitles is true; in this case, all subtitles will be returned.
 *
 * NOTE(review): the return type, many braces/`continue's and the final
 * return are not visible in this view of the file.
 */
Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt, bool accurate)
list<shared_ptr<Piece> > subs = overlaps (time, time + length, has_subtitle);
PlayerSubtitles ps (time, length);
for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
/* Skip unused subtitles and those with the wrong burn setting */
if (!(*j)->content->subtitle->use () || (!_always_burn_subtitles && (burnt != (*j)->content->subtitle->burn ()))) {
/* Skip DCP subtitles which we are passing through by reference */
shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> ((*j)->content);
if (dcp_content && dcp_content->reference_subtitle () && !_play_referenced) {
shared_ptr<SubtitleDecoder> subtitle_decoder = (*j)->decoder->subtitle;
ContentTime const from = dcp_to_content_subtitle (*j, time);
/* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());
list<ContentImageSubtitle> image = subtitle_decoder->get_image (ContentTimePeriod (from, to), starting, accurate);
for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {
/* Apply content's subtitle offsets */
i->sub.rectangle.x += (*j)->content->subtitle->x_offset ();
i->sub.rectangle.y += (*j)->content->subtitle->y_offset ();
/* Apply content's subtitle scale */
i->sub.rectangle.width *= (*j)->content->subtitle->x_scale ();
i->sub.rectangle.height *= (*j)->content->subtitle->y_scale ();
/* Apply a corrective translation to keep the subtitle centred after that scale */
i->sub.rectangle.x -= i->sub.rectangle.width * ((*j)->content->subtitle->x_scale() - 1);
i->sub.rectangle.y -= i->sub.rectangle.height * ((*j)->content->subtitle->y_scale() - 1);
ps.image.push_back (i->sub);
list<ContentTextSubtitle> text = subtitle_decoder->get_text (ContentTimePeriod (from, to), starting, accurate);
BOOST_FOREACH (ContentTextSubtitle& ts, text) {
BOOST_FOREACH (dcp::SubtitleString s, ts.subs) {
/* Apply the content's offsets and scales to the text subtitle */
s.set_h_position (s.h_position() + (*j)->content->subtitle->x_offset ());
s.set_v_position (s.v_position() + (*j)->content->subtitle->y_offset ());
float const xs = (*j)->content->subtitle->x_scale();
float const ys = (*j)->content->subtitle->y_scale();
float size = s.size();
/* Adjust size to express the common part of the scaling;
e.g. if xs = ys = 0.5 we scale size by 2.
*/
if (xs > 1e-5 && ys > 1e-5) {
size *= 1 / min (1 / xs, 1 / ys);
/* Then express aspect ratio changes */
if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
s.set_aspect_adjust (xs / ys);
/* Convert the subtitle's period into DCP time (editable units of 1/1000s) */
s.set_in (dcp::Time(content_subtitle_to_dcp (*j, ts.period().from).seconds(), 1000));
s.set_out (dcp::Time(content_subtitle_to_dcp (*j, ts.period().to).seconds(), 1000));
ps.text.push_back (s);
ps.add_fonts ((*j)->content->subtitle->fonts ());
/** @return All fonts used by subtitle content in the playlist.
 *  NOTE(review): the setup_pieces() call, closing braces and the final
 *  return are not visible in this view of the file.
 */
list<shared_ptr<Font> >
Player::get_subtitle_fonts ()
/* Rebuild the piece list if it has been invalidated */
if (!_have_valid_pieces) {
list<shared_ptr<Font> > fonts;
BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
if (p->content->subtitle) {
/* XXX: things may go wrong if there are duplicate font IDs
with different font files.
*/
list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
copy (f.begin(), f.end(), back_inserter (fonts));
667 /** Set this player never to produce any video data */
669 Player::set_ignore_video ()
671 _ignore_video = true;
674 /** Set this player never to produce any audio data */
676 Player::set_ignore_audio ()
678 _ignore_audio = true;
681 /** Set whether or not this player should always burn text subtitles into the image,
682 * regardless of the content settings.
683 * @param burn true to always burn subtitles, false to obey content settings.
686 Player::set_always_burn_subtitles (bool burn)
688 _always_burn_subtitles = burn;
695 _have_valid_pieces = false;
699 Player::set_play_referenced ()
701 _play_referenced = true;
702 _have_valid_pieces = false;
/** @return Details of the DCP reel assets which are referenced (rather than
 *  re-wrapped) by content in the playlist.
 *  NOTE(review): the declaration of `offset', the asset arguments passed to
 *  ReferencedReelAsset, various braces and the final return are not visible
 *  in this view of the file.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
list<ReferencedReelAsset> a;
BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
scoped_ptr<DCPDecoder> decoder;
decoder.reset (new DCPDecoder (j, _film->log()));
BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Position of this reel within the DCP as a whole */
DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
if (j->reference_video ()) {
ReferencedReelAsset (
DCPTimePeriod (from, from + DCPTime::from_frames (k->main_picture()->duration(), _film->video_frame_rate()))
if (j->reference_audio ()) {
ReferencedReelAsset (
DCPTimePeriod (from, from + DCPTime::from_frames (k->main_sound()->duration(), _film->video_frame_rate()))
if (j->reference_subtitle ()) {
DCPOMATIC_ASSERT (k->main_subtitle ());
ReferencedReelAsset (
DCPTimePeriod (from, from + DCPTime::from_frames (k->main_subtitle()->duration(), _film->video_frame_rate()))
/* Assume that main picture duration is the length of the reel */
offset += k->main_picture()->duration ();
762 list<shared_ptr<Piece> >
763 Player::overlaps (DCPTime from, DCPTime to, boost::function<bool (Content *)> valid)
765 if (!_have_valid_pieces) {
769 list<shared_ptr<Piece> > overlaps;
770 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
771 if (valid (i->content.get ()) && i->content->position() < to && i->content->end() > from) {
772 overlaps.push_back (i);