2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
51 #include "dcpomatic_log.h"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
/* Identifiers passed to the Change signal so observers can tell which
   Player-level property was altered (see the Change (...) calls below). */
80 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
81 int const PlayerProperty::PLAYLIST = 701;
82 int const PlayerProperty::FILM_CONTAINER = 702;
83 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
84 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/** Construct a Player for @param film with content from @param playlist.
 *  Wires up film/playlist change signals, initialises the video container
 *  size and audio processor, then seeks to the start of the film.
 *  NOTE(review): this extraction is lossy -- at least the `: _film (film)`
 *  initializer and some brace lines are missing here; confirm against the
 *  original file before editing.
 */
86 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
88 	, _playlist (playlist)
90 , _ignore_video (false)
91 , _ignore_audio (false)
92 , _ignore_text (false)
93 , _always_burn_open_subtitles (false)
95 , _play_referenced (false)
96 , _audio_merger (_film->audio_frame_rate())
99 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
100 /* The butler must hear about this first, so since we are proxying this through to the butler we must
103 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
104 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
/* Container size comes from the Film; film_change() with AUDIO_PROCESSOR
   sets up _audio_processor as if the film had just gained one. */
105 set_video_container_size (_film->frame_size ());
107 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to time zero so decoding starts from a known state. */
110 seek (DCPTime (), true);
/** Rebuild the list of Pieces, taking _mutex first; the real work is in
 *  setup_pieces_unlocked().
 */
119 Player::setup_pieces ()
121 boost::mutex::scoped_lock lm (_mutex);
122 setup_pieces_unlocked ();
/* Predicate: true when this piece has a decoder with a video part.
   Used (via bind) to build the _black Empty below. */
126 have_video (shared_ptr<Piece> piece)
128 return piece->decoder && piece->decoder->video;
/* Predicate: true when this piece has a decoder with an audio part.
   Used (via bind) to build the _silent Empty below. */
132 have_audio (shared_ptr<Piece> piece)
134 return piece->decoder && piece->decoder->audio;
/** Rebuild _pieces from the playlist content: create a decoder per content,
 *  apply the various _ignore_* flags, connect decoder emission signals back
 *  into this Player, and reset the black/silent fillers and last-time state.
 *  Caller must hold _mutex.
 */
138 Player::setup_pieces_unlocked ()
/* A fresh Shuffler each time; it reorders 3D L/R frames before they reach
   Player::video. */
143 _shuffler = new Shuffler();
144 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
146 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing/invalid. */
148 if (!i->paths_valid ()) {
152 if (_ignore_video && _ignore_audio && i->text.empty()) {
153 /* We're only interested in text and this content has none */
157 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
158 FrameRateChange frc (_film, i);
161 /* Not something that we can decode; e.g. Atmos content */
/* Apply the ignore flags to the decoder parts that exist. */
165 if (decoder->video && _ignore_video) {
166 decoder->video->set_ignore (true);
169 if (decoder->audio && _ignore_audio) {
170 decoder->audio->set_ignore (true);
174 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
175 i->set_ignore (true);
/* DCP content may play its referenced (VF) assets directly. */
179 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
181 dcp->set_decode_referenced (_play_referenced);
182 if (_play_referenced) {
183 dcp->set_forced_reduction (_dcp_decode_reduction);
187 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
188 _pieces.push_back (piece);
190 if (decoder->video) {
191 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
192 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
193 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
195 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
199 if (decoder->audio) {
200 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals; weak_ptrs avoid keeping
   pieces/content alive from inside the bound handlers. */
203 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
205 while (j != decoder->text.end()) {
206 (*j)->BitmapStart.connect (
207 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
209 (*j)->PlainStart.connect (
210 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
213 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Record, per audio stream, which piece it belongs to and where its audio
   starts; last_push_end tracking drives the pull in pass(). */
220 _stream_states.clear ();
221 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
222 if (i->content->audio) {
223 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
224 _stream_states[j] = StreamState (i, i->content->position ());
/* _black/_silent describe the gaps with no video/audio content. */
229 _black = Empty (_film, _pieces, bind(&have_video, _1));
230 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
232 _last_video_time = DCPTime ();
233 _last_video_eyes = EYES_BOTH;
234 _last_audio_time = DCPTime ();
/** Handle a change to some content in the playlist.  PENDING suspends the
 *  player, DONE rebuilds the pieces, CANCELLED resumes; in all cases the
 *  change is re-emitted to our own observers.
 */
238 Player::playlist_content_change (ChangeType type, int property, bool frequent)
240 if (type == CHANGE_TYPE_PENDING) {
241 boost::mutex::scoped_lock lm (_mutex);
242 /* The player content is probably about to change, so we can't carry on
243 until that has happened and we've rebuilt our pieces. Stop pass()
244 and seek() from working until then.
247 } else if (type == CHANGE_TYPE_DONE) {
248 /* A change in our content has gone through. Re-build our pieces. */
251 } else if (type == CHANGE_TYPE_CANCELLED) {
252 boost::mutex::scoped_lock lm (_mutex);
/* Proxy the change through to our own listeners. */
256 Change (type, property, frequent);
/** Set the size of the container into which video will be scaled.
 *  Emits PENDING, then either CANCELLED (no-op when the size is unchanged)
 *  or DONE; also rebuilds the cached black frame at the new size.
 */
260 Player::set_video_container_size (dcp::Size s)
262 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
265 boost::mutex::scoped_lock lm (_mutex);
267 if (s == _video_container_size) {
269 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
273 _video_container_size = s;
/* Pre-render a black frame at the container size for gap filling. */
275 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
276 _black_image->make_black ();
279 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/** Handle a change to the playlist itself: rebuild pieces on DONE and
 *  forward the change as a PLAYLIST property change.
 */
283 Player::playlist_change (ChangeType type)
285 if (type == CHANGE_TYPE_DONE) {
288 Change (type, PlayerProperty::PLAYLIST, false);
/** Handle a change to a Film property that affects our output, forwarding
 *  it to observers and updating internal state where needed.
 */
292 Player::film_change (ChangeType type, Film::Property p)
294 /* Here we should notice Film properties that affect our output, and
295 alert listeners that our output now would be different to how it was
296 last time we were run.
299 if (p == Film::CONTAINER) {
300 Change (type, PlayerProperty::FILM_CONTAINER, false);
301 } else if (p == Film::VIDEO_FRAME_RATE) {
302 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
303 so we need new pieces here.
305 if (type == CHANGE_TYPE_DONE) {
308 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
309 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor at our output rate so we can run it. */
310 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
311 boost::mutex::scoped_lock lm (_mutex);
312 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
314 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: pending merged audio is no longer valid. */
315 if (type == CHANGE_TYPE_DONE) {
316 boost::mutex::scoped_lock lm (_mutex);
317 _audio_merger.clear ();
/** @return A black PlayerVideo frame (at _video_container_size) for the
 *  given @param eyes, used to fill periods with no video content.
 */
322 shared_ptr<PlayerVideo>
323 Player::black_player_video_frame (Eyes eyes) const
325 return shared_ptr<PlayerVideo> (
327 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
330 _video_container_size,
331 _video_container_size,
/* No source content: default conversion, empty content weak_ptr, no frame index. */
334 PresetColourConversion::all().front().conversion,
335 boost::weak_ptr<Content>(),
336 boost::optional<Frame>()
/** Convert a DCP time to a video frame index within @param piece's content,
 *  accounting for position, trim and the piece's frame-rate change.
 */
342 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
344 DCPTime s = t - piece->content->position ();
345 s = min (piece->content->length_after_trim(_film), s);
346 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
348 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
349 then convert that ContentTime to frames at the content's rate. However this fails for
350 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
351 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
353 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
355 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/** Inverse of dcp_to_content_video(): convert a content video frame index
 *  to the DCP time at which it will appear.
 */
359 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
361 /* See comment in dcp_to_content_video */
362 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
363 return d + piece->content->position();
/** Convert a DCP time to an audio frame index (at the film's audio rate)
 *  within @param piece's resampled audio.
 */
367 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
369 DCPTime s = t - piece->content->position ();
370 s = min (piece->content->length_after_trim(_film), s);
371 /* See notes in dcp_to_content_video */
372 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/** Inverse of dcp_to_resampled_audio(): convert a resampled audio frame
 *  index to the DCP time at which it will be heard.
 */
376 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
378 /* See comment in dcp_to_content_video */
379 return DCPTime::from_frames (f, _film->audio_frame_rate())
380 - DCPTime (piece->content->trim_start(), piece->frc)
381 + piece->content->position();
/** Convert a DCP time to a ContentTime within @param piece, clamped to the
 *  content's trimmed extent.
 */
385 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
387 DCPTime s = t - piece->content->position ();
388 s = min (piece->content->length_after_trim(_film), s);
389 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/** Convert a ContentTime within @param piece to the corresponding DCP time,
 *  never returning a negative time.
 */
393 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
395 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** @return All fonts used by the text (subtitle/caption) content in the
 *  playlist, concatenated; duplicates are not removed.
 */
398 list<shared_ptr<Font> >
399 Player::get_subtitle_fonts ()
401 boost::mutex::scoped_lock lm (_mutex);
403 list<shared_ptr<Font> > fonts;
404 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
405 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
406 /* XXX: things may go wrong if there are duplicate font IDs
407 with different font files.
409 list<shared_ptr<Font> > f = j->fonts ();
410 copy (f.begin(), f.end(), back_inserter (fonts));
417 /** Set this player never to produce any video data */
419 Player::set_ignore_video ()
421 boost::mutex::scoped_lock lm (_mutex);
422 _ignore_video = true;
/* Pieces must be rebuilt so decoders pick up the new flag. */
423 setup_pieces_unlocked ();
/** Set this player never to produce any audio data; pieces are rebuilt so
 *  decoders pick up the flag.
 */
427 Player::set_ignore_audio ()
429 boost::mutex::scoped_lock lm (_mutex);
430 _ignore_audio = true;
431 setup_pieces_unlocked ();
/** Set this player never to produce any text (subtitle/caption) data.
 *  NOTE(review): the `_ignore_text = true;` line appears to be missing from
 *  this extraction -- confirm against the original file.
 */
435 Player::set_ignore_text ()
437 boost::mutex::scoped_lock lm (_mutex);
439 setup_pieces_unlocked ();
442 /** Set the player to always burn open texts into the image regardless of the content settings */
444 Player::set_always_burn_open_subtitles ()
446 boost::mutex::scoped_lock lm (_mutex);
/* No piece rebuild needed: the flag is consulted at emission time. */
447 _always_burn_open_subtitles = true;
450 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function header line (presumably Player::set_fast) and
   the `_fast = true;` assignment appear to be missing from this extraction. */
454 boost::mutex::scoped_lock lm (_mutex);
456 setup_pieces_unlocked ();
/** Ask the player to decode and play assets that DCP content references
 *  (rather than skipping them); pieces are rebuilt to apply the flag.
 */
460 Player::set_play_referenced ()
462 boost::mutex::scoped_lock lm (_mutex);
463 _play_referenced = true;
464 setup_pieces_unlocked ();
/* Apply the per-reel trims to @param r's entry point/duration and, if any
   duration remains, append it to @param a with its DCP period (at the film
   frame rate @param ffr).  Note: mutates the asset in place. */
468 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
470 DCPOMATIC_ASSERT (r);
471 r->set_entry_point (r->entry_point() + reel_trim_start);
472 r->set_duration (r->duration() - reel_trim_start - reel_trim_end);
473 if (r->duration() > 0) {
475 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->duration(), ffr)))
/** @return The reel assets (picture/sound/subtitle/closed-caption) from DCP
 *  content that is marked to be referenced rather than re-encoded, trimmed
 *  per reel and positioned in DCP time.
 */
480 list<ReferencedReelAsset>
481 Player::get_reel_assets ()
483 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
485 list<ReferencedReelAsset> a;
487 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced. */
488 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
493 scoped_ptr<DCPDecoder> decoder;
495 decoder.reset (new DCPDecoder (_film, j, false));
500 DCPOMATIC_ASSERT (j->video_frame_rate ());
501 double const cfr = j->video_frame_rate().get();
502 Frame const trim_start = j->trim_start().frames_round (cfr);
503 Frame const trim_end = j->trim_end().frames_round (cfr);
504 int const ffr = _film->video_frame_rate ();
506 /* position in the asset from the start */
507 int64_t offset_from_start = 0;
508 /* position in the asset from the end */
509 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down. */
510 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
511 /* Assume that main picture duration is the length of the reel */
512 offset_from_end += k->main_picture()->duration();
515 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
517 /* Assume that main picture duration is the length of the reel */
518 int64_t const reel_duration = k->main_picture()->duration();
520 /* See doc/design/trim_reels.svg */
521 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
522 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
524 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
525 if (j->reference_video ()) {
526 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
529 if (j->reference_audio ()) {
530 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
533 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
534 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
537 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
538 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
539 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
543 offset_from_start += reel_duration;
544 offset_from_end -= reel_duration;
/* Player::pass(): make one decoder (or the black/silent filler) that is
   farthest behind emit some data, then flush any audio that is known to be
   complete and any delayed video.
   NOTE(review): the function signature line and several branch/brace lines
   are missing from this extraction. */
554 boost::mutex::scoped_lock lm (_mutex);
557 /* We can't pass in this state */
561 if (_playlist->length(_film) == DCPTime()) {
562 /* Special case of an empty Film; just give one black frame */
563 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
567 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
569 shared_ptr<Piece> earliest_content;
570 optional<DCPTime> earliest_time;
572 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
577 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
578 if (t > i->content->end(_film)) {
582 /* Given two choices at the same time, pick the one with texts so we see it before
585 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
587 earliest_content = i;
601 if (earliest_content) {
/* The black/silent fillers compete with content for "earliest". */
605 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
606 earliest_time = _black.position ();
610 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
611 earliest_time = _silent.position ();
618 earliest_content->done = earliest_content->decoder->pass ();
619 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
620 if (dcp && !_play_referenced && dcp->reference_audio()) {
621 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
622 to `hide' the fact that no audio was emitted during the referenced DCP (though
623 we need to behave as though it was).
625 _last_audio_time = dcp->end (_film);
/* BLACK case: emit one black frame and advance the filler. */
630 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
631 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: emit up to one frame's worth of silence. */
635 DCPTimePeriod period (_silent.period_at_position());
636 if (_last_audio_time) {
637 /* Sometimes the thing that happened last finishes fractionally before
638 or after this silence. Bodge the start time of the silence to fix it.
639 I think this is nothing to worry about since we will just add or
640 remove a little silence at the end of some content.
642 int64_t const error = labs(period.from.get() - _last_audio_time->get());
643 /* Let's not worry about less than a frame at 24fps */
644 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
645 if (error >= too_much_error) {
646 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
648 DCPOMATIC_ASSERT (error < too_much_error);
649 period.from = *_last_audio_time;
651 if (period.duration() > one_video_frame()) {
652 period.to = period.from + one_video_frame();
655 _silent.set_position (period.to);
663 /* Emit any audio that is ready */
665 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
666 of our streams, or the position of the _silent.
668 DCPTime pull_to = _film->length ();
669 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
670 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
671 pull_to = i->second.last_push_end;
674 if (!_silent.done() && _silent.position() < pull_to) {
675 pull_to = _silent.position();
678 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
679 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
680 if (_last_audio_time && i->second < *_last_audio_time) {
681 /* This new data comes before the last we emitted (or the last seek); discard it */
682 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
687 } else if (_last_audio_time && i->second > *_last_audio_time) {
688 /* There's a gap between this data and the last we emitted; fill with silence */
689 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
692 emit_audio (i->first, i->second);
/* Flush delayed video that is due (see emit_video's _delay queue). */
697 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
698 do_emit_video(i->first, i->second);
705 /** @return Open subtitles for the frame at the given time, converted to images */
706 optional<PositionImage>
707 Player::open_subtitles_for_frame (DCPTime time) const
709 list<PositionImage> captions;
710 int const vfr = _film->video_frame_rate();
/* Collect active open subtitles overlapping this one-frame period; the
   _always_burn_open_subtitles flag forces burn-in regardless of content. */
714 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
717 /* Bitmap subtitles */
718 BOOST_FOREACH (BitmapText i, j.bitmap) {
723 /* i.image will already have been scaled to fit _video_container_size */
724 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Rectangle coordinates are proportional; convert to pixel positions. */
730 lrint (_video_container_size.width * i.rectangle.x),
731 lrint (_video_container_size.height * i.rectangle.y)
737 /* String subtitles (rendered to an image) */
738 if (!j.string.empty ()) {
739 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
740 copy (s.begin(), s.end(), back_inserter (captions));
744 if (captions.empty ()) {
745 return optional<PositionImage> ();
748 return merge (captions);
/** Handler for video data emitted by a piece's decoder.  Converts the
 *  content frame to DCP time, discards early/seek-stale frames, fills any
 *  gap since the last emitted video (2D and 3D cases), then emits the frame
 *  (repeated as the FrameRateChange requires).
 */
752 Player::video (weak_ptr<Piece> wp, ContentVideo video)
754 shared_ptr<Piece> piece = wp.lock ();
759 FrameRateChange frc (_film, piece->content);
/* When skipping (content faster than DCP) drop every other frame. */
760 if (frc.skip && (video.frame % 2) == 1) {
764 /* Time of the first frame we will emit */
765 DCPTime const time = content_video_to_dcp (piece, video.frame);
767 /* Discard if it's before the content's period or the last accurate seek. We can't discard
768 if it's after the content's period here as in that case we still need to fill any gap between
769 `now' and the end of the content's period.
771 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
775 /* Fill gaps that we discover now that we have some video which needs to be emitted.
776 This is where we need to fill to.
778 DCPTime fill_to = min (time, piece->content->end(_film));
780 if (_last_video_time) {
781 DCPTime fill_from = max (*_last_video_time, piece->content->position());
783 /* Fill if we have more than half a frame to do */
784 if ((fill_to - fill_from) > one_video_frame() / 2) {
785 LastVideoMap::const_iterator last = _last_video.find (wp);
786 if (_film->three_d()) {
/* 3D: fill eye-by-eye, advancing time only after the RIGHT eye. */
787 Eyes fill_to_eyes = video.eyes;
788 if (fill_to_eyes == EYES_BOTH) {
789 fill_to_eyes = EYES_LEFT;
791 if (fill_to == piece->content->end(_film)) {
792 /* Don't fill after the end of the content */
793 fill_to_eyes = EYES_LEFT;
795 DCPTime j = fill_from;
796 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
797 if (eyes == EYES_BOTH) {
800 while (j < fill_to || eyes != fill_to_eyes) {
/* Repeat the last-seen frame if we have one, else black. */
801 if (last != _last_video.end()) {
802 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
803 copy->set_eyes (eyes);
804 emit_video (copy, j);
806 emit_video (black_player_video_frame(eyes), j);
808 if (eyes == EYES_RIGHT) {
809 j += one_video_frame();
811 eyes = increment_eyes (eyes);
/* 2D: fill frame-by-frame with last frame or black. */
814 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
815 if (last != _last_video.end()) {
816 emit_video (last->second, j);
818 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame, remembered per-piece so gaps can
   be filled with it later. */
825 _last_video[wp].reset (
828 piece->content->video->crop (),
829 piece->content->video->fade (_film, video.frame),
830 piece->content->video->scale().size (
831 piece->content->video, _video_container_size, _film->frame_size ()
833 _video_container_size,
836 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame-rate change, but never
   past the end of the content. */
843 for (int i = 0; i < frc.repeat; ++i) {
844 if (t < piece->content->end(_film)) {
845 emit_video (_last_video[wp], t);
847 t += one_video_frame ();
/** Handler for audio data emitted by a piece's decoder.  Positions the
 *  block in DCP time, trims it to the content's period, applies gain,
 *  channel mapping and the audio processor, then pushes it to the merger.
 */
852 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
854 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
856 shared_ptr<Piece> piece = wp.lock ();
861 shared_ptr<AudioContent> content = piece->content->audio;
862 DCPOMATIC_ASSERT (content);
864 /* Compute time in the DCP */
865 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
866 /* And the end of this block in the DCP */
867 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate(_film));
869 /* Remove anything that comes before the start or after the end of the content */
870 if (time < piece->content->position()) {
871 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
873 /* This audio is entirely discarded */
876 content_audio.audio = cut.first;
878 } else if (time > piece->content->end(_film)) {
/* Starts after the content has finished: drop it entirely. */
881 } else if (end > piece->content->end(_film)) {
/* Overlaps the end of the content: keep only the part that fits. */
882 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(_film->audio_frame_rate());
883 if (remaining_frames == 0) {
886 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
887 LOG_GENERAL_NC("copy_from #8");
888 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
889 content_audio.audio = cut;
892 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain (content gain is in dB -- applied by AudioBuffers). */
896 if (content->gain() != 0) {
897 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
898 gain->apply_gain (content->gain ());
899 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
904 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
908 if (_audio_processor) {
909 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push to the merger and record how far this stream has been pushed, which
   pass() uses to decide how much audio is safe to pull. */
914 _audio_merger.push (content_audio.audio, time);
915 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
916 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/** Handler for the start of a bitmap subtitle/caption.  Applies the text
 *  content's offsets and scales, scales the image to the container size and
 *  registers it as an active text.
 */
920 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
922 shared_ptr<Piece> piece = wp.lock ();
923 shared_ptr<const TextContent> text = wc.lock ();
924 if (!piece || !text) {
928 /* Apply content's subtitle offsets */
929 subtitle.sub.rectangle.x += text->x_offset ();
930 subtitle.sub.rectangle.y += text->y_offset ();
932 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
933 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
934 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
936 /* Apply content's subtitle scale */
937 subtitle.sub.rectangle.width *= text->x_scale ();
938 subtitle.sub.rectangle.height *= text->y_scale ();
941 shared_ptr<Image> image = subtitle.sub.image;
942 /* We will scale the subtitle up to fit _video_container_size */
943 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
944 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
945 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
/* Track the text as active from this DCP time until its matching stop. */
947 _active_texts[text->type()].add_from (wc, ps, from);
/** Handler for the start of a string (plain-text) subtitle/caption.
 *  Applies offsets, size scaling and aspect adjustment per the text content
 *  settings, then registers the strings as an active text.
 */
951 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
953 shared_ptr<Piece> piece = wp.lock ();
954 shared_ptr<const TextContent> text = wc.lock ();
955 if (!piece || !text) {
960 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Starts after the content is over: ignore. */
962 if (from > piece->content->end(_film)) {
966 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
967 s.set_h_position (s.h_position() + text->x_offset ());
968 s.set_v_position (s.v_position() + text->y_offset ());
969 float const xs = text->x_scale();
970 float const ys = text->y_scale();
971 float size = s.size();
973 /* Adjust size to express the common part of the scaling;
974 e.g. if xs = ys = 0.5 we scale size by 2.
976 if (xs > 1e-5 && ys > 1e-5) {
977 size *= 1 / min (1 / xs, 1 / ys);
981 /* Then express aspect ratio changes */
982 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
983 s.set_aspect_adjust (xs / ys);
/* dcp::Time here uses an editable-units denominator of 1000. */
986 s.set_in (dcp::Time(from.seconds(), 1000));
987 ps.string.push_back (StringText (s, text->outline_width()));
988 ps.add_fonts (text->fonts ());
991 _active_texts[text->type()].add_from (wc, ps, from);
/** Handler for the end of a subtitle/caption.  Closes the matching active
 *  text and, when the text is not being burnt in, emits it via the Text
 *  signal with its full DCP period.
 */
995 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
997 shared_ptr<const TextContent> text = wc.lock ();
/* Nothing to stop if we never saw the corresponding start. */
1002 if (!_active_texts[text->type()].have(wc)) {
1006 shared_ptr<Piece> piece = wp.lock ();
1011 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1013 if (dcp_to > piece->content->end(_film)) {
1017 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Burnt-in texts are composited onto video instead of being emitted here. */
1019 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1020 if (text->use() && !always && !text->burn()) {
1021 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/** Seek the player to @param time.  @param accurate true to get the first
 *  frame at or after the seek point exactly; inaccurate seeks leave the
 *  last-time trackers unset so early frames are not discarded.
 */
1026 Player::seek (DCPTime time, bool accurate)
1028 boost::mutex::scoped_lock lm (_mutex);
1031 /* We can't seek in this state */
/* Drop any part-assembled 3D frames. */
1036 _shuffler->clear ();
1041 if (_audio_processor) {
1042 _audio_processor->flush ();
/* Discard pending merged audio and all active texts. */
1045 _audio_merger.clear ();
1046 for (int i = 0; i < TEXT_COUNT; ++i) {
1047 _active_texts[i].clear ();
1050 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1051 if (time < i->content->position()) {
1052 /* Before; seek to the start of the content */
1053 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1055 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1056 /* During; seek to position */
1057 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1060 /* After; this piece is done */
/* Accurate: pin the last-emitted trackers to the seek point so anything
   earlier is discarded; inaccurate: leave them unset. */
1066 _last_video_time = time;
1067 _last_video_eyes = EYES_LEFT;
1068 _last_audio_time = time;
1070 _last_video_time = optional<DCPTime>();
1071 _last_video_eyes = optional<Eyes>();
1072 _last_audio_time = optional<DCPTime>();
1075 _black.set_position (time);
1076 _silent.set_position (time);
1078 _last_video.clear ();
/** Queue a video frame for emission, delaying it slightly so that subtitles
 *  for the same time can arrive first; advances the last-video trackers.
 */
1082 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1084 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1085 player before the video that requires them.
1087 _delay.push_back (make_pair (pv, time));
/* Only advance the clock once both eyes of a frame have been queued. */
1089 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1090 _last_video_time = time + one_video_frame();
1092 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to two frames queued; emit the oldest once a third arrives. */
1094 if (_delay.size() < 3) {
1098 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1100 do_emit_video (to_do.first, to_do.second);
/** Actually emit a video frame: expire stale active texts, composite any
 *  open subtitles onto the frame, then (presumably) fire the Video signal.
 *  NOTE(review): the trailing emission line is missing from this extraction.
 */
1104 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1106 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1107 for (int i = 0; i < TEXT_COUNT; ++i) {
1108 _active_texts[i].clear_before (time);
1112 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1114 pv->set_text (subtitles.get ());
/** Emit an audio block via the Audio signal, asserting that it follows on
 *  contiguously (within half a 48kHz sample) from the previous block.
 */
1121 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1123 /* Log if the assert below is about to fail */
1124 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1125 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1128 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1129 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1130 Audio (data, time, _film->audio_frame_rate());
1131 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/** Emit silence covering @param period, in blocks of at most half a second
 *  so individual buffers stay small.
 */
1135 Player::fill_audio (DCPTimePeriod period)
1137 if (period.from == period.to) {
1141 DCPOMATIC_ASSERT (period.from < period.to);
1143 DCPTime t = period.from;
1144 while (t < period.to) {
1145 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1146 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* samples can round to 0 for very short blocks; guard presumably follows. */
1148 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1149 silence->make_silent ();
1150 emit_audio (silence, t);
/** @return The DCPTime duration of one video frame at the film's rate. */
1157 Player::one_video_frame () const
1159 return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Discard the part of @param audio before @param discard_to.
 *  @return The remaining audio (null if nothing is left) and its new start
 *  time.
 */
1162 pair<shared_ptr<AudioBuffers>, DCPTime>
1163 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1165 DCPTime const discard_time = discard_to - time;
1166 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1167 Frame remaining_frames = audio->frames() - discard_frames;
1168 if (remaining_frames <= 0) {
1169 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1171 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1172 LOG_GENERAL_NC("copy_from #9");
1173 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1174 return make_pair(cut, time + discard_time);
/** Set the resolution reduction applied when decoding DCP content.
 *  Emits PENDING, then CANCELLED when unchanged, otherwise rebuilds the
 *  pieces and emits DONE.
 */
1178 Player::set_dcp_decode_reduction (optional<int> reduction)
1180 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1183 boost::mutex::scoped_lock lm (_mutex);
1185 if (reduction == _dcp_decode_reduction) {
1187 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1191 _dcp_decode_reduction = reduction;
1192 setup_pieces_unlocked ();
1195 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/** Public overload: convert a ContentTime for @param content to DCP time by
 *  finding the matching piece.  @return empty if the content is not (or no
 *  longer) part of this player.
 */
1199 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1201 boost::mutex::scoped_lock lm (_mutex);
1203 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1204 if (i->content == content) {
1205 return content_time_to_dcp (i, t);
1209 /* We couldn't find this content; perhaps things are being changed over */
1210 return optional<DCPTime>();