/*
    Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "audio_processor.h"
#include "compose.hpp"
#include "content_audio.h"
#include "content_video.h"
#include "dcp_content.h"
#include "dcp_decoder.h"
#include "decoder_factory.h"
#include "ffmpeg_content.h"
#include "frame_rate_change.h"
#include "image_decoder.h"
#include "player_video.h"
#include "raw_image_proxy.h"
#include "referenced_reel_asset.h"
#include "render_text.h"
#include "text_content.h"
#include "text_decoder.h"
#include "video_decoder.h"
#include <dcp/reel_closed_caption_asset.h>
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <boost/foreach.hpp>
#include <algorithm>
#include <cmath>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
100 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
101 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
102 _playlist_content_may_change_connection = _playlist->ContentMayChange.connect (bind(&Player::playlist_content_may_change, this));
103 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind(&Player::playlist_content_changed, this, _1, _2, _3));
104 _playlist_content_not_changed_connection = _playlist->ContentNotChanged.connect (bind(&Player::playlist_content_not_changed, this));
105 set_video_container_size (_film->frame_size ());
107 film_changed (Film::AUDIO_PROCESSOR);
110 seek (DCPTime (), true);
119 Player::setup_pieces ()
121 boost::mutex::scoped_lock lm (_mutex);
122 setup_pieces_unlocked ();
126 Player::setup_pieces_unlocked ()
131 _shuffler = new Shuffler();
132 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
134 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
136 if (!i->paths_valid ()) {
140 if (_ignore_video && _ignore_audio && i->text.empty()) {
141 /* We're only interested in text and this content has none */
145 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
146 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
149 /* Not something that we can decode; e.g. Atmos content */
153 if (decoder->video && _ignore_video) {
154 decoder->video->set_ignore (true);
157 if (decoder->audio && _ignore_audio) {
158 decoder->audio->set_ignore (true);
162 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
163 i->set_ignore (true);
167 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
169 dcp->set_decode_referenced (_play_referenced);
170 if (_play_referenced) {
171 dcp->set_forced_reduction (_dcp_decode_reduction);
175 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
176 _pieces.push_back (piece);
178 if (decoder->video) {
179 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
180 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
181 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
183 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
187 if (decoder->audio) {
188 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
191 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
193 while (j != decoder->text.end()) {
194 (*j)->BitmapStart.connect (
195 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
197 (*j)->PlainStart.connect (
198 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
201 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
208 _stream_states.clear ();
209 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
210 if (i->content->audio) {
211 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
212 _stream_states[j] = StreamState (i, i->content->position ());
217 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
218 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
220 _last_video_time = DCPTime ();
221 _last_video_eyes = EYES_BOTH;
222 _last_audio_time = DCPTime ();
227 Player::playlist_content_may_change ()
230 boost::mutex::scoped_lock lm (_mutex);
231 /* The player content is probably about to change, so we can't carry on
232 until that has happened and we've rebuilt our pieces. Stop pass()
233 and seek() from working until then.
242 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
244 /* A change in our content has gone through. Re-build our pieces and signal
245 it to anybody that is interested.
248 shared_ptr<Content> c = w.lock ();
256 property == ContentProperty::POSITION ||
257 property == ContentProperty::LENGTH ||
258 property == ContentProperty::TRIM_START ||
259 property == ContentProperty::TRIM_END ||
260 property == ContentProperty::PATH ||
261 property == VideoContentProperty::FRAME_TYPE ||
262 property == VideoContentProperty::COLOUR_CONVERSION ||
263 property == AudioContentProperty::STREAMS ||
264 property == DCPContentProperty::NEEDS_ASSETS ||
265 property == DCPContentProperty::NEEDS_KDM ||
266 property == DCPContentProperty::CPL ||
267 property == TextContentProperty::COLOUR ||
268 property == TextContentProperty::EFFECT ||
269 property == TextContentProperty::EFFECT_COLOUR ||
270 property == FFmpegContentProperty::SUBTITLE_STREAM ||
271 property == FFmpegContentProperty::FILTERS ||
272 property == TextContentProperty::LINE_SPACING ||
273 property == TextContentProperty::OUTLINE_WIDTH ||
274 property == TextContentProperty::Y_SCALE ||
275 property == TextContentProperty::FADE_IN ||
276 property == TextContentProperty::FADE_OUT ||
277 property == ContentProperty::VIDEO_FRAME_RATE ||
278 property == TextContentProperty::USE ||
279 property == TextContentProperty::X_OFFSET ||
280 property == TextContentProperty::Y_OFFSET ||
281 property == TextContentProperty::X_SCALE ||
282 property == TextContentProperty::FONTS ||
283 property == TextContentProperty::TYPE ||
284 property == VideoContentProperty::CROP ||
285 property == VideoContentProperty::SCALE ||
286 property == VideoContentProperty::FADE_IN ||
287 property == VideoContentProperty::FADE_OUT
290 Changed (property, frequent);
295 Player::playlist_content_not_changed ()
297 /* A possible content change did end up happening for some reason */
303 Player::set_video_container_size (dcp::Size s)
306 boost::mutex::scoped_lock lm (_mutex);
308 if (s == _video_container_size) {
312 _video_container_size = s;
314 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
315 _black_image->make_black ();
318 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
322 Player::playlist_changed ()
325 Changed (PlayerProperty::PLAYLIST, false);
329 Player::film_changed (Film::Property p)
331 /* Here we should notice Film properties that affect our output, and
332 alert listeners that our output now would be different to how it was
333 last time we were run.
336 if (p == Film::CONTAINER) {
337 Changed (PlayerProperty::FILM_CONTAINER, false);
338 } else if (p == Film::VIDEO_FRAME_RATE) {
339 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
340 so we need new pieces here.
343 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
344 } else if (p == Film::AUDIO_PROCESSOR) {
345 if (_film->audio_processor ()) {
346 boost::mutex::scoped_lock lm (_mutex);
347 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
349 } else if (p == Film::AUDIO_CHANNELS) {
350 boost::mutex::scoped_lock lm (_mutex);
351 _audio_merger.clear ();
356 Player::transform_bitmap_texts (list<BitmapText> subs) const
358 list<PositionImage> all;
360 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
365 /* We will scale the subtitle up to fit _video_container_size */
366 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
372 dcp::YUV_TO_RGB_REC601,
373 i->image->pixel_format (),
378 lrint (_video_container_size.width * i->rectangle.x),
379 lrint (_video_container_size.height * i->rectangle.y)
388 shared_ptr<PlayerVideo>
389 Player::black_player_video_frame (Eyes eyes) const
391 return shared_ptr<PlayerVideo> (
393 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
396 _video_container_size,
397 _video_container_size,
400 PresetColourConversion::all().front().conversion,
401 boost::weak_ptr<Content>(),
402 boost::optional<Frame>()
408 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
410 DCPTime s = t - piece->content->position ();
411 s = min (piece->content->length_after_trim(), s);
412 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
414 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
415 then convert that ContentTime to frames at the content's rate. However this fails for
416 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
417 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
419 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
421 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
425 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
427 /* See comment in dcp_to_content_video */
428 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
429 return d + piece->content->position();
433 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
435 DCPTime s = t - piece->content->position ();
436 s = min (piece->content->length_after_trim(), s);
437 /* See notes in dcp_to_content_video */
438 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
442 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
444 /* See comment in dcp_to_content_video */
445 return DCPTime::from_frames (f, _film->audio_frame_rate())
446 - DCPTime (piece->content->trim_start(), piece->frc)
447 + piece->content->position();
451 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
453 DCPTime s = t - piece->content->position ();
454 s = min (piece->content->length_after_trim(), s);
455 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
459 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
461 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
464 list<shared_ptr<Font> >
465 Player::get_subtitle_fonts ()
467 boost::mutex::scoped_lock lm (_mutex);
469 list<shared_ptr<Font> > fonts;
470 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
471 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
472 /* XXX: things may go wrong if there are duplicate font IDs
473 with different font files.
475 list<shared_ptr<Font> > f = j->fonts ();
476 copy (f.begin(), f.end(), back_inserter (fonts));
483 /** Set this player never to produce any video data */
485 Player::set_ignore_video ()
487 boost::mutex::scoped_lock lm (_mutex);
488 _ignore_video = true;
489 setup_pieces_unlocked ();
493 Player::set_ignore_audio ()
495 boost::mutex::scoped_lock lm (_mutex);
496 _ignore_audio = true;
497 setup_pieces_unlocked ();
501 Player::set_ignore_text ()
503 boost::mutex::scoped_lock lm (_mutex);
505 setup_pieces_unlocked ();
508 /** Set the player to always burn open texts into the image regardless of the content settings */
510 Player::set_always_burn_open_subtitles ()
512 boost::mutex::scoped_lock lm (_mutex);
513 _always_burn_open_subtitles = true;
516 /** Sets up the player to be faster, possibly at the expense of quality */
520 boost::mutex::scoped_lock lm (_mutex);
522 setup_pieces_unlocked ();
526 Player::set_play_referenced ()
528 boost::mutex::scoped_lock lm (_mutex);
529 _play_referenced = true;
530 setup_pieces_unlocked ();
533 list<ReferencedReelAsset>
534 Player::get_reel_assets ()
536 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
538 list<ReferencedReelAsset> a;
540 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
541 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
546 scoped_ptr<DCPDecoder> decoder;
548 decoder.reset (new DCPDecoder (j, _film->log(), false));
554 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
556 DCPOMATIC_ASSERT (j->video_frame_rate ());
557 double const cfr = j->video_frame_rate().get();
558 Frame const trim_start = j->trim_start().frames_round (cfr);
559 Frame const trim_end = j->trim_end().frames_round (cfr);
560 int const ffr = _film->video_frame_rate ();
562 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
563 if (j->reference_video ()) {
564 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
565 DCPOMATIC_ASSERT (ra);
566 ra->set_entry_point (ra->entry_point() + trim_start);
567 ra->set_duration (ra->duration() - trim_start - trim_end);
569 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
573 if (j->reference_audio ()) {
574 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
575 DCPOMATIC_ASSERT (ra);
576 ra->set_entry_point (ra->entry_point() + trim_start);
577 ra->set_duration (ra->duration() - trim_start - trim_end);
579 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
583 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
584 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
585 DCPOMATIC_ASSERT (ra);
586 ra->set_entry_point (ra->entry_point() + trim_start);
587 ra->set_duration (ra->duration() - trim_start - trim_end);
589 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
593 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
594 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
595 DCPOMATIC_ASSERT (ra);
596 ra->set_entry_point (ra->entry_point() + trim_start);
597 ra->set_duration (ra->duration() - trim_start - trim_end);
599 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
603 /* Assume that main picture duration is the length of the reel */
604 offset += k->main_picture()->duration ();
614 boost::mutex::scoped_lock lm (_mutex);
617 /* We can't pass in this state */
621 if (_playlist->length() == DCPTime()) {
622 /* Special case of an empty Film; just give one black frame */
623 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
627 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
629 shared_ptr<Piece> earliest_content;
630 optional<DCPTime> earliest_time;
632 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
637 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
638 if (t > i->content->end()) {
642 /* Given two choices at the same time, pick the one with texts so we see it before
645 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
647 earliest_content = i;
661 if (earliest_content) {
665 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
666 earliest_time = _black.position ();
670 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
671 earliest_time = _silent.position ();
677 earliest_content->done = earliest_content->decoder->pass ();
680 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
681 _black.set_position (_black.position() + one_video_frame());
685 DCPTimePeriod period (_silent.period_at_position());
686 if (_last_audio_time) {
687 /* Sometimes the thing that happened last finishes fractionally before
688 this silence. Bodge the start time of the silence to fix it. I'm
689 not sure if this is the right solution --- maybe the last thing should
690 be padded `forward' rather than this thing padding `back'.
692 period.from = min(period.from, *_last_audio_time);
694 if (period.duration() > one_video_frame()) {
695 period.to = period.from + one_video_frame();
698 _silent.set_position (period.to);
706 /* Emit any audio that is ready */
708 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
709 of our streams, or the position of the _silent.
711 DCPTime pull_to = _film->length ();
712 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
713 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
714 pull_to = i->second.last_push_end;
717 if (!_silent.done() && _silent.position() < pull_to) {
718 pull_to = _silent.position();
721 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
722 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
723 if (_last_audio_time && i->second < *_last_audio_time) {
724 /* This new data comes before the last we emitted (or the last seek); discard it */
725 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
730 } else if (_last_audio_time && i->second > *_last_audio_time) {
731 /* There's a gap between this data and the last we emitted; fill with silence */
732 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
735 emit_audio (i->first, i->second);
740 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
741 do_emit_video(i->first, i->second);
748 /** @return Open subtitles for the frame at the given time, converted to images */
749 optional<PositionImage>
750 Player::open_subtitles_for_frame (DCPTime time) const
752 list<PositionImage> captions;
753 int const vfr = _film->video_frame_rate();
757 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
760 /* Bitmap subtitles */
761 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
762 copy (c.begin(), c.end(), back_inserter (captions));
764 /* String subtitles (rendered to an image) */
765 if (!j.string.empty ()) {
766 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
767 copy (s.begin(), s.end(), back_inserter (captions));
771 if (captions.empty ()) {
772 return optional<PositionImage> ();
775 return merge (captions);
779 Player::video (weak_ptr<Piece> wp, ContentVideo video)
781 shared_ptr<Piece> piece = wp.lock ();
786 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
787 if (frc.skip && (video.frame % 2) == 1) {
791 /* Time of the first frame we will emit */
792 DCPTime const time = content_video_to_dcp (piece, video.frame);
794 /* Discard if it's before the content's period or the last accurate seek. We can't discard
795 if it's after the content's period here as in that case we still need to fill any gap between
796 `now' and the end of the content's period.
798 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
802 /* Fill gaps that we discover now that we have some video which needs to be emitted.
803 This is where we need to fill to.
805 DCPTime fill_to = min (time, piece->content->end());
807 if (_last_video_time) {
808 DCPTime fill_from = max (*_last_video_time, piece->content->position());
809 LastVideoMap::const_iterator last = _last_video.find (wp);
810 if (_film->three_d()) {
811 Eyes fill_to_eyes = video.eyes;
812 if (fill_to == piece->content->end()) {
813 /* Don't fill after the end of the content */
814 fill_to_eyes = EYES_LEFT;
816 DCPTime j = fill_from;
817 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
818 if (eyes == EYES_BOTH) {
821 while (j < fill_to || eyes != fill_to_eyes) {
822 if (last != _last_video.end()) {
823 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
824 copy->set_eyes (eyes);
825 emit_video (copy, j);
827 emit_video (black_player_video_frame(eyes), j);
829 if (eyes == EYES_RIGHT) {
830 j += one_video_frame();
832 eyes = increment_eyes (eyes);
835 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
836 if (last != _last_video.end()) {
837 emit_video (last->second, j);
839 emit_video (black_player_video_frame(EYES_BOTH), j);
845 _last_video[wp].reset (
848 piece->content->video->crop (),
849 piece->content->video->fade (video.frame),
850 piece->content->video->scale().size (
851 piece->content->video, _video_container_size, _film->frame_size ()
853 _video_container_size,
856 piece->content->video->colour_conversion(),
863 for (int i = 0; i < frc.repeat; ++i) {
864 if (t < piece->content->end()) {
865 emit_video (_last_video[wp], t);
867 t += one_video_frame ();
872 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
874 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
876 shared_ptr<Piece> piece = wp.lock ();
881 shared_ptr<AudioContent> content = piece->content->audio;
882 DCPOMATIC_ASSERT (content);
884 /* Compute time in the DCP */
885 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
886 /* And the end of this block in the DCP */
887 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
889 /* Remove anything that comes before the start or after the end of the content */
890 if (time < piece->content->position()) {
891 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
893 /* This audio is entirely discarded */
896 content_audio.audio = cut.first;
898 } else if (time > piece->content->end()) {
901 } else if (end > piece->content->end()) {
902 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
903 if (remaining_frames == 0) {
906 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
907 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
908 content_audio.audio = cut;
911 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
915 if (content->gain() != 0) {
916 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
917 gain->apply_gain (content->gain ());
918 content_audio.audio = gain;
923 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
927 if (_audio_processor) {
928 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
933 _audio_merger.push (content_audio.audio, time);
934 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
935 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
939 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
941 shared_ptr<Piece> piece = wp.lock ();
942 shared_ptr<const TextContent> text = wc.lock ();
943 if (!piece || !text) {
947 /* Apply content's subtitle offsets */
948 subtitle.sub.rectangle.x += text->x_offset ();
949 subtitle.sub.rectangle.y += text->y_offset ();
951 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
952 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
953 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
955 /* Apply content's subtitle scale */
956 subtitle.sub.rectangle.width *= text->x_scale ();
957 subtitle.sub.rectangle.height *= text->y_scale ();
960 ps.bitmap.push_back (subtitle.sub);
961 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
963 _active_texts[subtitle.type()].add_from (wc, ps, from);
967 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
969 shared_ptr<Piece> piece = wp.lock ();
970 shared_ptr<const TextContent> text = wc.lock ();
971 if (!piece || !text) {
976 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
978 if (from > piece->content->end()) {
982 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
983 s.set_h_position (s.h_position() + text->x_offset ());
984 s.set_v_position (s.v_position() + text->y_offset ());
985 float const xs = text->x_scale();
986 float const ys = text->y_scale();
987 float size = s.size();
989 /* Adjust size to express the common part of the scaling;
990 e.g. if xs = ys = 0.5 we scale size by 2.
992 if (xs > 1e-5 && ys > 1e-5) {
993 size *= 1 / min (1 / xs, 1 / ys);
997 /* Then express aspect ratio changes */
998 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
999 s.set_aspect_adjust (xs / ys);
1002 s.set_in (dcp::Time(from.seconds(), 1000));
1003 ps.string.push_back (StringText (s, text->outline_width()));
1004 ps.add_fonts (text->fonts ());
1007 _active_texts[subtitle.type()].add_from (wc, ps, from);
1011 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
1013 if (!_active_texts[type].have (wc)) {
1017 shared_ptr<Piece> piece = wp.lock ();
1018 shared_ptr<const TextContent> text = wc.lock ();
1019 if (!piece || !text) {
1023 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1025 if (dcp_to > piece->content->end()) {
1029 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
1031 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1032 if (text->use() && !always && !text->burn()) {
1033 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
1038 Player::seek (DCPTime time, bool accurate)
1040 boost::mutex::scoped_lock lm (_mutex);
1043 /* We can't seek in this state */
1048 _shuffler->clear ();
1053 if (_audio_processor) {
1054 _audio_processor->flush ();
1057 _audio_merger.clear ();
1058 for (int i = 0; i < TEXT_COUNT; ++i) {
1059 _active_texts[i].clear ();
1062 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1063 if (time < i->content->position()) {
1064 /* Before; seek to the start of the content */
1065 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1067 } else if (i->content->position() <= time && time < i->content->end()) {
1068 /* During; seek to position */
1069 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1072 /* After; this piece is done */
1078 _last_video_time = time;
1079 _last_video_eyes = EYES_LEFT;
1080 _last_audio_time = time;
1082 _last_video_time = optional<DCPTime>();
1083 _last_video_eyes = optional<Eyes>();
1084 _last_audio_time = optional<DCPTime>();
1087 _black.set_position (time);
1088 _silent.set_position (time);
1090 _last_video.clear ();
1094 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1096 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1097 player before the video that requires them.
1099 _delay.push_back (make_pair (pv, time));
1101 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1102 _last_video_time = time + one_video_frame();
1104 _last_video_eyes = increment_eyes (pv->eyes());
1106 if (_delay.size() < 3) {
1110 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1112 do_emit_video (to_do.first, to_do.second);
1116 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1118 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1119 for (int i = 0; i < TEXT_COUNT; ++i) {
1120 _active_texts[i].clear_before (time);
1124 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1126 pv->set_text (subtitles.get ());
1133 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1135 /* Log if the assert below is about to fail */
1136 if (_last_audio_time && time != *_last_audio_time) {
1137 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1140 /* This audio must follow on from the previous */
1141 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1143 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1147 Player::fill_audio (DCPTimePeriod period)
1149 if (period.from == period.to) {
1153 DCPOMATIC_ASSERT (period.from < period.to);
1155 DCPTime t = period.from;
1156 while (t < period.to) {
1157 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1158 Frame const samples = block.frames_round(_film->audio_frame_rate());
1160 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1161 silence->make_silent ();
1162 emit_audio (silence, t);
1169 Player::one_video_frame () const
1171 return DCPTime::from_frames (1, _film->video_frame_rate ());
1174 pair<shared_ptr<AudioBuffers>, DCPTime>
1175 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1177 DCPTime const discard_time = discard_to - time;
1178 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1179 Frame remaining_frames = audio->frames() - discard_frames;
1180 if (remaining_frames <= 0) {
1181 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1183 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1184 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1185 return make_pair(cut, time + discard_time);
1189 Player::set_dcp_decode_reduction (optional<int> reduction)
1192 boost::mutex::scoped_lock lm (_mutex);
1194 if (reduction == _dcp_decode_reduction) {
1198 _dcp_decode_reduction = reduction;
1199 setup_pieces_unlocked ();
1202 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
1206 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1208 boost::mutex::scoped_lock lm (_mutex);
1210 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1211 if (i->content == content) {
1212 return content_time_to_dcp (i, t);
1216 /* We couldn't find this content; perhaps things are being changed over */
1217 return optional<DCPTime>();