2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
73 using boost::shared_ptr;
74 using boost::weak_ptr;
75 using boost::dynamic_pointer_cast;
76 using boost::optional;
77 using boost::scoped_ptr;
/* Identifiers for the Player's own properties, broadcast through the
   Change signal so listeners can tell which aspect of the output changed.
   Values 700+ keep them distinct from Film/Content property ids. */
79 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
80 int const PlayerProperty::PLAYLIST = 701;
81 int const PlayerProperty::FILM_CONTAINER = 702;
82 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
83 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for the given film and playlist.  Subscribes to change
   signals from both, sizes the video container from the film, primes the
   audio processor state and performs an initial accurate seek to time zero.
   NOTE(review): some initialiser-list entries and braces are not visible in
   this extract; the visible members default the ignore/burn flags to off. */
85 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
87 , _playlist (playlist)
89 , _ignore_video (false)
90 , _ignore_audio (false)
91 , _ignore_text (false)
92 , _always_burn_open_subtitles (false)
94 , _play_referenced (false)
95 , _audio_merger (_film->audio_frame_rate())
98 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
99 /* The butler must hear about this first, so since we are proxying this through to the butler we must
102 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
103 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
/* Adopt the film's frame size and set up the audio processor (if any) */
104 set_video_container_size (_film->frame_size ());
106 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so the first pass() emits from time zero */
109 seek (DCPTime (), true);
/* Public, locking wrapper: rebuild the Piece list under the state mutex. */
118 Player::setup_pieces ()
120 boost::mutex::scoped_lock lm (_mutex);
121 setup_pieces_unlocked ();
/* Predicate used when building the _black Empty: true if this piece's
   decoder exists and can produce video. */
125 have_video (shared_ptr<Piece> piece)
127 return piece->decoder && piece->decoder->video;
/* Predicate used when building the _silent Empty: true if this piece's
   decoder exists and can produce audio. */
131 have_audio (shared_ptr<Piece> piece)
133 return piece->decoder && piece->decoder->audio;
/* Rebuild _pieces from the playlist: create a decoder per usable content,
   honour the ignore flags, wire decoder signals through to our handlers and
   reset playback state.  Caller must hold _mutex.
   NOTE(review): several lines (loop closers, `continue`s, else-branches) are
   missing from this extract. */
137 Player::setup_pieces_unlocked ()
/* Fresh Shuffler to re-order any out-of-sequence 3D L/R frames */
142 _shuffler = new Shuffler();
143 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
145 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files have gone missing */
147 if (!i->paths_valid ()) {
151 if (_ignore_video && _ignore_audio && i->text.empty()) {
152 /* We're only interested in text and this content has none */
156 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
157 FrameRateChange frc (_film, i);
160 /* Not something that we can decode; e.g. Atmos content */
164 if (decoder->video && _ignore_video) {
165 decoder->video->set_ignore (true);
168 if (decoder->audio && _ignore_audio) {
169 decoder->audio->set_ignore (true);
/* NOTE(review): presumably guarded by _ignore_text upstream — guard line not visible here */
173 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
174 i->set_ignore (true);
/* DCP content may play "referenced" assets straight from the source DCP */
178 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
180 dcp->set_decode_referenced (_play_referenced);
181 if (_play_referenced) {
182 dcp->set_forced_reduction (_dcp_decode_reduction);
186 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
187 _pieces.push_back (piece);
/* Route video either via the Shuffler (3D L/R streams) or directly */
189 if (decoder->video) {
190 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
191 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
192 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
194 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
198 if (decoder->audio) {
199 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect start/stop signals for every text (subtitle/caption) decoder */
202 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
204 while (j != decoder->text.end()) {
205 (*j)->BitmapStart.connect (
206 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
208 (*j)->PlainStart.connect (
209 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
212 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Track per-stream push positions so pass() knows how far audio is complete */
219 _stream_states.clear ();
220 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
221 if (i->content->audio) {
222 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
223 _stream_states[j] = StreamState (i, i->content->position ());
/* Empty trackers for the gaps where no video / no audio exists */
228 _black = Empty (_film, _pieces, bind(&have_video, _1));
229 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
/* Reset emission state to the start of the DCP */
231 _last_video_time = DCPTime ();
232 _last_video_eyes = EYES_BOTH;
233 _last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist.  PENDING suspends the
   player, DONE rebuilds the pieces, CANCELLED resumes; in every case the
   change is re-broadcast to our own listeners. */
238 Player::playlist_content_change (ChangeType type, int property, bool frequent)
240 if (type == CHANGE_TYPE_PENDING) {
241 boost::mutex::scoped_lock lm (_mutex);
242 /* The player content is probably about to change, so we can't carry on
243 until that has happened and we've rebuilt our pieces. Stop pass()
244 and seek() from working until then.
247 } else if (type == CHANGE_TYPE_DONE) {
248 /* A change in our content has gone through. Re-build our pieces. */
250 } else if (type == CHANGE_TYPE_CANCELLED) {
251 boost::mutex::scoped_lock lm (_mutex);
/* Proxy the change on to anybody listening to the Player */
255 Change (type, property, frequent);
/* Set the size of the "container" into which output video is placed,
   rebuilding the cached black frame to match.  Emits PENDING before the
   change and DONE after (CANCELLED if the size is unchanged). */
259 Player::set_video_container_size (dcp::Size s)
261 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
264 boost::mutex::scoped_lock lm (_mutex);
/* No-op if unchanged; signal cancellation so listeners can unwind */
266 if (s == _video_container_size) {
268 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
272 _video_container_size = s;
/* Re-make the cached black frame at the new size */
274 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
275 _black_image->make_black ();
278 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* A whole-playlist change: rebuild pieces when it completes, and pass the
   change on to our listeners as a PLAYLIST property change. */
282 Player::playlist_change (ChangeType type)
284 if (type == CHANGE_TYPE_DONE) {
287 Change (type, PlayerProperty::PLAYLIST, false);
/* React to a Film property change that affects our output, re-broadcasting
   it under the corresponding PlayerProperty and updating internal state. */
291 Player::film_change (ChangeType type, Film::Property p)
293 /* Here we should notice Film properties that affect our output, and
294 alert listeners that our output now would be different to how it was
295 last time we were run.
298 if (p == Film::CONTAINER) {
299 Change (type, PlayerProperty::FILM_CONTAINER, false);
300 } else if (p == Film::VIDEO_FRAME_RATE) {
301 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
302 so we need new pieces here.
304 if (type == CHANGE_TYPE_DONE) {
307 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
308 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone a fresh processor instance at the film's audio rate */
309 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
310 boost::mutex::scoped_lock lm (_mutex);
311 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
313 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: any merged-but-unemitted audio is now invalid */
314 if (type == CHANGE_TYPE_DONE) {
315 boost::mutex::scoped_lock lm (_mutex);
316 _audio_merger.clear ();
/* Build a PlayerVideo wrapping the cached black image, used to fill gaps
   where no content video exists.  `eyes' selects L/R/both for 3D output.
   NOTE(review): some constructor arguments are not visible in this extract. */
321 shared_ptr<PlayerVideo>
322 Player::black_player_video_frame (Eyes eyes) const
324 return shared_ptr<PlayerVideo> (
326 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
329 _video_container_size,
330 _video_container_size,
333 PresetColourConversion::all().front().conversion,
334 boost::weak_ptr<Content>(),
335 boost::optional<Frame>()
/* Convert a DCP time to a frame index within this piece's content video,
   clamping to the trimmed content period. */
341 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
343 DCPTime s = t - piece->content->position ();
344 s = min (piece->content->length_after_trim(_film), s);
345 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
347 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
348 then convert that ContentTime to frames at the content's rate. However this fails for
349 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
350 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
352 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
354 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: map a content video frame index to the
   DCP time at which it will be shown. */
358 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
360 /* See comment in dcp_to_content_video */
361 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
362 return d + piece->content->position();
/* Convert a DCP time to a frame count in this piece's resampled audio
   (i.e. at the film's audio frame rate), clamped to the trimmed period. */
366 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
368 DCPTime s = t - piece->content->position ();
369 s = min (piece->content->length_after_trim(_film), s);
370 /* See notes in dcp_to_content_video */
371 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: map a resampled audio frame index to
   its DCP time. */
375 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
377 /* See comment in dcp_to_content_video */
378 return DCPTime::from_frames (f, _film->audio_frame_rate())
379 - DCPTime (piece->content->trim_start(), piece->frc)
380 + piece->content->position();
/* Convert a DCP time to a ContentTime within this piece, accounting for
   position and start trim, clamped to the trimmed length. */
384 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
386 DCPTime s = t - piece->content->position ();
387 s = min (piece->content->length_after_trim(_film), s);
388 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_time: map a ContentTime in this piece to DCP
   time, never before time zero. */
392 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
394 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every text (subtitle/caption) content in the
   playlist.  Duplicates are not removed here. */
397 list<shared_ptr<Font> >
398 Player::get_subtitle_fonts ()
400 boost::mutex::scoped_lock lm (_mutex);
402 list<shared_ptr<Font> > fonts;
403 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
404 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
405 /* XXX: things may go wrong if there are duplicate font IDs
406 with different font files.
408 list<shared_ptr<Font> > f = j->fonts ();
409 copy (f.begin(), f.end(), back_inserter (fonts));
416 /** Set this player never to produce any video data */
418 Player::set_ignore_video ()
420 boost::mutex::scoped_lock lm (_mutex);
421 _ignore_video = true;
/* Rebuild pieces so decoders pick up the new ignore flag */
422 setup_pieces_unlocked ();
/* Set this player never to produce any audio data; pieces are rebuilt so
   decoders pick up the flag. */
426 Player::set_ignore_audio ()
428 boost::mutex::scoped_lock lm (_mutex);
429 _ignore_audio = true;
430 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): the line assigning _ignore_text is not visible in this
   extract, but the rebuild that applies it is. */
434 Player::set_ignore_text ()
436 boost::mutex::scoped_lock lm (_mutex);
438 setup_pieces_unlocked ();
441 /** Set the player to always burn open texts into the image regardless of the content settings */
443 Player::set_always_burn_open_subtitles ()
445 boost::mutex::scoped_lock lm (_mutex);
446 _always_burn_open_subtitles = true;
449 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature line and the `_fast = true` assignment are not
   visible in this extract; presumably this is Player::set_fast() — confirm. */
453 boost::mutex::scoped_lock lm (_mutex);
455 setup_pieces_unlocked ();
/* Make the player decode and play assets that are normally "referenced"
   straight from a source DCP, rebuilding pieces to apply the flag. */
459 Player::set_play_referenced ()
461 boost::mutex::scoped_lock lm (_mutex);
462 _play_referenced = true;
463 setup_pieces_unlocked ();
/* Gather reel assets (picture/sound/subtitle/closed-caption) that are
   referenced from source DCPs rather than re-encoded, with their entry
   points and durations adjusted for the content's trims and their DCP
   placement computed from the content position plus per-reel offset.
   NOTE(review): loop closers, the `offset` declaration and the push_back
   lines are not visible in this extract. */
466 list<ReferencedReelAsset>
467 Player::get_reel_assets ()
469 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
471 list<ReferencedReelAsset> a;
473 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference reel assets */
474 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
479 scoped_ptr<DCPDecoder> decoder;
481 decoder.reset (new DCPDecoder (_film, j, false));
487 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
489 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* cfr: content frame rate (for converting trims); ffr: film frame rate */
490 double const cfr = j->video_frame_rate().get();
491 Frame const trim_start = j->trim_start().frames_round (cfr);
492 Frame const trim_end = j->trim_end().frames_round (cfr);
493 int const ffr = _film->video_frame_rate ();
495 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
496 if (j->reference_video ()) {
497 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
498 DCPOMATIC_ASSERT (ra);
/* Narrow the asset to the un-trimmed part of the content */
499 ra->set_entry_point (ra->entry_point() + trim_start);
500 ra->set_duration (ra->duration() - trim_start - trim_end);
502 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
506 if (j->reference_audio ()) {
507 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
508 DCPOMATIC_ASSERT (ra);
509 ra->set_entry_point (ra->entry_point() + trim_start);
510 ra->set_duration (ra->duration() - trim_start - trim_end);
512 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
516 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
517 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
518 DCPOMATIC_ASSERT (ra);
519 ra->set_entry_point (ra->entry_point() + trim_start);
520 ra->set_duration (ra->duration() - trim_start - trim_end);
522 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
526 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
/* A reel can carry several closed-caption assets */
527 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
528 DCPOMATIC_ASSERT (l);
529 l->set_entry_point (l->entry_point() + trim_start);
530 l->set_duration (l->duration() - trim_start - trim_end);
532 ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
537 /* Assume that main picture duration is the length of the reel */
538 offset += k->main_picture()->duration ();
/* One step of playback: find whichever source (a piece's decoder, the black
   filler or the silence filler) is furthest behind, make it emit some data,
   then flush any audio that is known to be complete and any delayed video.
   NOTE(review): the signature line (presumably bool Player::pass()), several
   branch/return lines and loop closers are not visible in this extract. */
548 boost::mutex::scoped_lock lm (_mutex);
551 /* We can't pass in this state */
555 if (_playlist->length(_film) == DCPTime()) {
556 /* Special case of an empty Film; just give one black frame */
557 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
561 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
563 shared_ptr<Piece> earliest_content;
564 optional<DCPTime> earliest_time;
566 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, expressed as DCP time */
571 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
572 if (t > i->content->end(_film)) {
576 /* Given two choices at the same time, pick the one with texts so we see it before
579 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
581 earliest_content = i;
595 if (earliest_content) {
/* Let the black/silent fillers win if they are further behind */
599 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
600 earliest_time = _black.position ();
604 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
605 earliest_time = _silent.position ();
/* CONTENT case: advance the chosen decoder one step */
611 earliest_content->done = earliest_content->decoder->pass ();
/* BLACK case: emit one black frame and move on */
614 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
615 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: fill up to one video frame's worth of silence */
619 DCPTimePeriod period (_silent.period_at_position());
620 if (_last_audio_time) {
621 /* Sometimes the thing that happened last finishes fractionally before
622 this silence. Bodge the start time of the silence to fix it. I'm
623 not sure if this is the right solution --- maybe the last thing should
624 be padded `forward' rather than this thing padding `back'.
626 period.from = min(period.from, *_last_audio_time);
628 if (period.duration() > one_video_frame()) {
629 period.to = period.from + one_video_frame();
632 _silent.set_position (period.to);
640 /* Emit any audio that is ready */
642 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
643 of our streams, or the position of the _silent.
645 DCPTime pull_to = _film->length ();
646 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
647 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
648 pull_to = i->second.last_push_end;
651 if (!_silent.done() && _silent.position() < pull_to) {
652 pull_to = _silent.position();
655 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
656 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
657 if (_last_audio_time && i->second < *_last_audio_time) {
658 /* This new data comes before the last we emitted (or the last seek); discard it */
659 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
664 } else if (_last_audio_time && i->second > *_last_audio_time) {
665 /* There's a gap between this data and the last we emitted; fill with silence */
666 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
669 emit_audio (i->first, i->second);
/* Flush delayed video frames that are now due */
674 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
675 do_emit_video(i->first, i->second);
682 /** @return Open subtitles for the frame at the given time, converted to images */
683 optional<PositionImage>
684 Player::open_subtitles_for_frame (DCPTime time) const
686 list<PositionImage> captions;
687 int const vfr = _film->video_frame_rate();
/* Consider every open subtitle active during this one-frame period */
691 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
694 /* Bitmap subtitles */
695 BOOST_FOREACH (BitmapText i, j.bitmap) {
700 /* i.image will already have been scaled to fit _video_container_size */
/* Rectangle coordinates are fractions of the container; convert to pixels */
701 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
707 lrint (_video_container_size.width * i.rectangle.x),
708 lrint (_video_container_size.height * i.rectangle.y)
714 /* String subtitles (rendered to an image) */
715 if (!j.string.empty ()) {
716 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
717 copy (s.begin(), s.end(), back_inserter (captions));
721 if (captions.empty ()) {
722 return optional<PositionImage> ();
/* Flatten all captions into a single positioned image */
725 return merge (captions);
/* Handler for video data arriving from a piece's decoder (possibly via the
   Shuffler).  Discards out-of-range frames, fills any gap since the last
   emitted frame (repeating the previous frame or using black, per-eye in 3D),
   then wraps the new frame in a PlayerVideo and emits it, repeating as the
   FrameRateChange requires.
   NOTE(review): some lines (returns, else keywords, PlayerVideo ctor args,
   loop closers) are not visible in this extract. */
729 Player::video (weak_ptr<Piece> wp, ContentVideo video)
731 shared_ptr<Piece> piece = wp.lock ();
736 FrameRateChange frc (_film, piece->content);
/* When skipping (content faster than DCP) drop every other frame */
737 if (frc.skip && (video.frame % 2) == 1) {
741 /* Time of the first frame we will emit */
742 DCPTime const time = content_video_to_dcp (piece, video.frame);
744 /* Discard if it's before the content's period or the last accurate seek. We can't discard
745 if it's after the content's period here as in that case we still need to fill any gap between
746 `now' and the end of the content's period.
748 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
752 /* Fill gaps that we discover now that we have some video which needs to be emitted.
753 This is where we need to fill to.
755 DCPTime fill_to = min (time, piece->content->end(_film));
757 if (_last_video_time) {
758 DCPTime fill_from = max (*_last_video_time, piece->content->position());
760 /* Fill if we have more than half a frame to do */
761 if ((fill_to - fill_from) > one_video_frame() / 2) {
762 LastVideoMap::const_iterator last = _last_video.find (wp);
763 if (_film->three_d()) {
/* 3D: fill eye-by-eye up to (but not including) the target eye */
764 Eyes fill_to_eyes = video.eyes;
765 if (fill_to_eyes == EYES_BOTH) {
766 fill_to_eyes = EYES_LEFT;
768 if (fill_to == piece->content->end(_film)) {
769 /* Don't fill after the end of the content */
770 fill_to_eyes = EYES_LEFT;
772 DCPTime j = fill_from;
773 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
774 if (eyes == EYES_BOTH) {
777 while (j < fill_to || eyes != fill_to_eyes) {
778 if (last != _last_video.end()) {
/* Repeat the last-seen frame with the eye re-stamped */
779 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
780 copy->set_eyes (eyes);
781 emit_video (copy, j);
783 emit_video (black_player_video_frame(eyes), j);
/* Advance time only after the right eye has been emitted */
785 if (eyes == EYES_RIGHT) {
786 j += one_video_frame();
788 eyes = increment_eyes (eyes);
/* 2D: repeat last frame (or black) one whole frame at a time */
791 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
792 if (last != _last_video.end()) {
793 emit_video (last->second, j);
795 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Wrap the incoming frame for this piece, remembering it for gap-fill */
802 _last_video[wp].reset (
805 piece->content->video->crop (),
806 piece->content->video->fade (_film, video.frame),
807 piece->content->video->scale().size (
808 piece->content->video, _video_container_size, _film->frame_size ()
810 _video_container_size,
813 piece->content->video->colour_conversion(),
/* Emit once per repeat required by the frame-rate change */
820 for (int i = 0; i < frc.repeat; ++i) {
821 if (t < piece->content->end(_film)) {
822 emit_video (_last_video[wp], t);
824 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder.  Trims the block
   to the content's DCP period, applies gain, remap and the optional audio
   processor, then pushes it into the merger and records how far this
   stream's audio is now complete.
   NOTE(review): some lines (returns, the trimmed-start time adjustment) are
   not visible in this extract. */
829 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
831 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
833 shared_ptr<Piece> piece = wp.lock ();
838 shared_ptr<AudioContent> content = piece->content->audio;
839 DCPOMATIC_ASSERT (content);
841 /* Compute time in the DCP */
842 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
843 /* And the end of this block in the DCP */
844 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate(_film));
846 /* Remove anything that comes before the start or after the end of the content */
847 if (time < piece->content->position()) {
848 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
850 /* This audio is entirely discarded */
853 content_audio.audio = cut.first;
855 } else if (time > piece->content->end(_film)) {
858 } else if (end > piece->content->end(_film)) {
/* Block straddles the end of the content: keep only the part inside */
859 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(_film->audio_frame_rate());
860 if (remaining_frames == 0) {
863 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
864 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
865 content_audio.audio = cut;
868 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting */
872 if (content->gain() != 0) {
873 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
874 gain->apply_gain (content->gain ());
875 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
880 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
884 if (_audio_processor) {
885 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push to the merger and note how far this stream is now complete */
890 _audio_merger.push (content_audio.audio, time);
891 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
892 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a bitmap subtitle starting: apply the content's offsets and
   scales to its rectangle, scale the image to the container size and record
   it as active from its DCP start time.
   NOTE(review): the PlayerText `ps` declaration is not visible in this
   extract. */
896 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
898 shared_ptr<Piece> piece = wp.lock ();
899 shared_ptr<const TextContent> text = wc.lock ();
900 if (!piece || !text) {
904 /* Apply content's subtitle offsets */
905 subtitle.sub.rectangle.x += text->x_offset ();
906 subtitle.sub.rectangle.y += text->y_offset ();
908 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
909 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
910 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
912 /* Apply content's subtitle scale */
913 subtitle.sub.rectangle.width *= text->x_scale ();
914 subtitle.sub.rectangle.height *= text->y_scale ();
917 shared_ptr<Image> image = subtitle.sub.image;
918 /* We will scale the subtitle up to fit _video_container_size */
919 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
920 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
921 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
923 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a plain (string) subtitle starting: apply the content's
   offsets, scale and aspect adjustments to each SubtitleString, then record
   the group as active from its DCP start time.
   NOTE(review): the PlayerText `ps` declaration and some returns are not
   visible in this extract. */
927 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
929 shared_ptr<Piece> piece = wp.lock ();
930 shared_ptr<const TextContent> text = wc.lock ();
931 if (!piece || !text) {
936 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content has finished */
938 if (from > piece->content->end(_film)) {
942 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
943 s.set_h_position (s.h_position() + text->x_offset ());
944 s.set_v_position (s.v_position() + text->y_offset ());
945 float const xs = text->x_scale();
946 float const ys = text->y_scale();
947 float size = s.size();
949 /* Adjust size to express the common part of the scaling;
950 e.g. if xs = ys = 0.5 we scale size by 2.
952 if (xs > 1e-5 && ys > 1e-5) {
953 size *= 1 / min (1 / xs, 1 / ys);
957 /* Then express aspect ratio changes */
958 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
959 s.set_aspect_adjust (xs / ys);
/* dcp::Time is expressed with an editable-unit denominator of 1000 here */
962 s.set_in (dcp::Time(from.seconds(), 1000));
963 ps.string.push_back (StringText (s, text->outline_width()));
964 ps.add_fonts (text->fonts ());
967 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a subtitle stopping: close off the active text at its DCP end
   time and, if it is not being burnt in, emit it via the Text signal.
   NOTE(review): several early-return lines are not visible in this extract. */
971 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
973 shared_ptr<const TextContent> text = wc.lock ();
/* Nothing to stop if we never saw this text start */
978 if (!_active_texts[text->type()].have(wc)) {
982 shared_ptr<Piece> piece = wp.lock ();
987 DCPTime const dcp_to = content_time_to_dcp (piece, to);
989 if (dcp_to > piece->content->end(_film)) {
993 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Burnt-in subtitles are composited onto video instead of being emitted */
995 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
996 if (text->use() && !always && !text->burn()) {
997 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to `time'.  If `accurate' is true the decoders must land
   exactly on `time' and the last-emitted trackers are primed accordingly;
   otherwise they are cleared so the next emitted data defines the position.
   Flushes all buffered state (shuffler, processor, merger, active texts). */
1002 Player::seek (DCPTime time, bool accurate)
1004 boost::mutex::scoped_lock lm (_mutex);
1007 /* We can't seek in this state */
1012 _shuffler->clear ();
1017 if (_audio_processor) {
1018 _audio_processor->flush ();
1021 _audio_merger.clear ();
1022 for (int i = 0; i < TEXT_COUNT; ++i) {
1023 _active_texts[i].clear ();
/* Position each piece's decoder relative to the target time */
1026 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1027 if (time < i->content->position()) {
1028 /* Before; seek to the start of the content */
1029 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1031 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1032 /* During; seek to position */
1033 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1036 /* After; this piece is done */
/* Accurate branch: prime last-emitted trackers; otherwise clear them */
1042 _last_video_time = time;
1043 _last_video_eyes = EYES_LEFT;
1044 _last_audio_time = time;
1046 _last_video_time = optional<DCPTime>();
1047 _last_video_eyes = optional<Eyes>();
1048 _last_audio_time = optional<DCPTime>();
/* Re-position the black/silent fillers and forget cached frames */
1051 _black.set_position (time);
1052 _silent.set_position (time);
1054 _last_video.clear ();
/* Queue a video frame for emission.  Frames sit in a short _delay queue so
   that subtitles which arrive slightly later can still be composited; the
   oldest frame is emitted once the queue holds three entries. */
1058 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1060 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1061 player before the video that requires them.
1063 _delay.push_back (make_pair (pv, time));
/* Advance the video clock only after the frame's last eye */
1065 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1066 _last_video_time = time + one_video_frame();
1068 _last_video_eyes = increment_eyes (pv->eyes());
1070 if (_delay.size() < 3) {
1074 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1076 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire texts that ended before `time',
   composite any open subtitles onto the frame, then fire the Video signal.
   NOTE(review): the final Video(...) emission line is not visible in this
   extract. */
1080 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1082 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1083 for (int i = 0; i < TEXT_COUNT; ++i) {
1084 _active_texts[i].clear_before (time);
1088 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1090 pv->set_text (subtitles.get ());
/* Emit audio via the Audio signal and advance _last_audio_time.  Audio must
   be emitted contiguously; a gap or overlap of more than one sample unit is
   a programming error (logged, then asserted). */
1097 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1099 /* Log if the assert below is about to fail */
1100 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1101 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1104 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1105 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1106 Audio (data, time, _film->audio_frame_rate());
1107 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering `period', in blocks of at most half a second so no
   single allocation is too large.  A zero-length period is a no-op. */
1111 Player::fill_audio (DCPTimePeriod period)
1113 if (period.from == period.to) {
1117 DCPOMATIC_ASSERT (period.from < period.to);
1119 DCPTime t = period.from;
1120 while (t < period.to) {
1121 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1122 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* NOTE(review): presumably guarded by samples > 0 — guard line not visible here */
1124 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1125 silence->make_silent ();
1126 emit_audio (silence, t);
/* The duration of one video frame at the film's video frame rate. */
1133 Player::one_video_frame () const
1135 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of `audio' (starting at `time') that lies before
   `discard_to'.  Returns the remaining buffer and its new start time; the
   buffer is null if everything was discarded. */
1138 pair<shared_ptr<AudioBuffers>, DCPTime>
1139 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1141 DCPTime const discard_time = discard_to - time;
1142 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1143 Frame remaining_frames = audio->frames() - discard_frames;
1144 if (remaining_frames <= 0) {
1145 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1147 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1148 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1149 return make_pair(cut, time + discard_time);
/* Set a resolution reduction for decoding DCP content (none = full size),
   rebuilding pieces so decoders pick it up.  Emits PENDING/DONE around the
   change, or CANCELLED if the value is unchanged. */
1153 Player::set_dcp_decode_reduction (optional<int> reduction)
1155 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1158 boost::mutex::scoped_lock lm (_mutex);
1160 if (reduction == _dcp_decode_reduction) {
1162 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1166 _dcp_decode_reduction = reduction;
1167 setup_pieces_unlocked ();
1170 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: map a ContentTime within `content' to DCP time by finding
   the corresponding piece.  Returns none if the content is not in our piece
   list (e.g. while the playlist is being changed). */
1174 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1176 boost::mutex::scoped_lock lm (_mutex);
1178 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1179 if (i->content == content) {
1180 return content_time_to_dcp (i, t);
1184 /* We couldn't find this content; perhaps things are being changed over */
1185 return optional<DCPTime>();