2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
/* Identifiers for the properties that the Player reports via its Change
   signal; values are arbitrary but unique so listeners can distinguish them. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
/** Construct a Player for @a playlist within @a film, playing back a period
 *  of length @a playback_length.  Connects to Film/Playlist change signals,
 *  sizes the video container from the film and performs an initial accurate
 *  seek to time zero.
 *  NOTE(review): some member initializers (e.g. _film) are not visible in
 *  this view of the file — confirm against the full source.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist, DCPTime playback_length)
	, _playlist (playlist)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _playback_length (playback_length)
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   ask for the playlist's signal to be delivered to us before other observers (at_front).
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());
	/* Pick up any audio processor the film already has */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
	/* Accurate seek to the start so decoders are positioned consistently */
	seek (DCPTime (), true);
/** Rebuild the Piece list (content/decoder pairs), taking the state mutex first. */
Player::setup_pieces ()
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
/** Change the length of the playback period, signalling PLAYBACK_LENGTH
 *  as PENDING before and DONE after the change.
 */
Player::set_playback_length (DCPTime len)
	Change (CHANGE_TYPE_PENDING, PlayerProperty::PLAYBACK_LENGTH, false);
	_playback_length = len;
	Change (CHANGE_TYPE_DONE, PlayerProperty::PLAYBACK_LENGTH, false);
/** @return true if @a content has a video part; used as an Empty predicate below. */
have_video (shared_ptr<const Content> content)
	return static_cast<bool>(content->video);
/** @return true if @a content has an audio part; used as an Empty predicate below. */
have_audio (shared_ptr<const Content> content)
	return static_cast<bool>(content->audio);
/** Rebuild _pieces from the playlist: create (or re-use) a decoder for each
 *  content item, wire decoder signals to our handlers, rebuild the per-stream
 *  audio state and reset the black/silence fillers and last-emitted times.
 *  Caller must hold _mutex.
 */
Player::setup_pieces_unlocked ()
	/* Keep the old pieces so we can re-use their decoders where the content matches */
	list<shared_ptr<Piece> > old_pieces = _pieces;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		/* Skip content whose files are missing */
		if (!i->paths_valid ()) {
		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
		/* Re-use an existing decoder for this content if we have one */
		shared_ptr<Decoder> old_decoder;
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		FrameRateChange frc (_film, i);
		/* Not something that we can decode; e.g. Atmos content */
		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
			i->set_ignore (true);
		/* DCP content may need referenced-asset handling and forced decode reduction */
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);
		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		/* Wire each text decoder's start/stop signals through to us */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
	/* Rebuild the per-audio-stream push state */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
	/* _black and _silent track the periods with no video / no audio content */
	_black = Empty (_film, _playlist, bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, _playlist, bind(&have_audio, _1), _playback_length);
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
/** Handle a change to some content in the playlist; rebuild pieces when the
 *  change completes and forward the notification to our own observers.
 */
Player::playlist_content_change (ChangeType type, int property, bool frequent)
	if (type == CHANGE_TYPE_PENDING) {
		/* The player content is probably about to change, so we can't carry on
		   until that has happened and we've rebuilt our pieces. Stop pass()
		   and seek() from working until then.
		*/
	} else if (type == CHANGE_TYPE_DONE) {
		/* A change in our content has gone through. Re-build our pieces. */
	} else if (type == CHANGE_TYPE_CANCELLED) {
	/* Proxy the signal through to our own observers */
	Change (type, property, frequent);
/** Set the size of the video container (the "screen" into which video is
 *  placed) and rebuild the all-black frame used to fill gaps.  Emits
 *  CANCELLED if the size is unchanged, otherwise DONE.
 */
Player::set_video_container_size (dcp::Size s)
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
	boost::mutex::scoped_lock lm (_mutex);
	if (s == _video_container_size) {
		Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
	_video_container_size = s;
	/* Pre-build a black frame at the new size for gap filling */
	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
	_black_image->make_black ();
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/** Handle a change to the playlist itself; forward as a PLAYLIST property change. */
Player::playlist_change (ChangeType type)
	if (type == CHANGE_TYPE_DONE) {
	Change (type, PlayerProperty::PLAYLIST, false);
/** Handle a change to a Film property that affects our output. */
Player::film_change (ChangeType type, Film::Property p)
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/
	if (p == Film::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == CHANGE_TYPE_DONE) {
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::AUDIO_PROCESSOR) {
		if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Clone the film's processor at our audio rate */
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
	} else if (p == Film::AUDIO_CHANNELS) {
		if (type == CHANGE_TYPE_DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Channel count changed: pending merged audio is no longer valid */
			_audio_merger.clear ();
/** @return A new PlayerVideo wrapping our pre-made black image, sized to the
 *  video container, for the given @a eyes.
 *  NOTE(review): several constructor arguments are not visible in this view
 *  of the file.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
	return shared_ptr<PlayerVideo> (
		shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
		_video_container_size,
		_video_container_size,
		PresetColourConversion::all().front().conversion,
		boost::weak_ptr<Content>(),
		boost::optional<Frame>(),
/** Convert a DCP time @a t to a video frame index within @a piece's content. */
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate. However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/** Convert a video frame index @a f within @a piece's content to a DCP time. */
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
	/* See comment in dcp_to_content_video */
	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
	return d + piece->content->position();
/** Convert a DCP time @a t to a resampled-audio frame index within @a piece's content. */
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/** Convert a resampled-audio frame index @a f within @a piece's content to a DCP time. */
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
		+ piece->content->position();
/** Convert a DCP time @a t to a ContentTime within @a piece's content,
 *  clamping to the content's trimmed extent.
 */
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/** Convert a ContentTime @a t within @a piece's content to a DCP time (clamped at zero). */
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** @return All fonts used by the text content of our pieces, concatenated. */
list<shared_ptr<Font> >
Player::get_subtitle_fonts ()
	boost::mutex::scoped_lock lm (_mutex);
	list<shared_ptr<Font> > fonts;
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
			/* XXX: things may go wrong if there are duplicate font IDs
			   with different font files.
			*/
			list<shared_ptr<Font> > f = j->fonts ();
			copy (f.begin(), f.end(), back_inserter (fonts));
/** Set this player never to produce any video data */
Player::set_ignore_video ()
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Rebuild pieces so the decoders pick up the new ignore flag */
	setup_pieces_unlocked ();
/** Set this player never to produce any audio data. */
Player::set_ignore_audio ()
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Rebuild pieces so the decoders pick up the new ignore flag */
	setup_pieces_unlocked ();
/** Set this player never to produce any text (subtitle/caption) data. */
Player::set_ignore_text ()
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
/** Set the player to always burn open texts into the image regardless of the content settings */
Player::set_always_burn_open_subtitles ()
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
/** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature line is not visible in this view of
   the file; presumably this is Player::set_fast() setting a _fast flag —
   confirm against the full source. */
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
/** Set the player to decode and play assets that would otherwise be
 *  referenced from an existing DCP.
 */
Player::set_play_referenced ()
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
/** Apply per-reel trims to @a r and, if anything of it remains, add it to the
 *  list @a a with its DCP period starting at @a from (at DCP frame rate @a ffr).
 */
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
	DCPOMATIC_ASSERT (r);
	/* Trim by moving the entry point forward and shortening the duration */
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/** @return The reel assets of any DCP content which is marked to be referenced
 *  (rather than re-encoded), with the content's trims applied per reel.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */
	list<ReferencedReelAsset> a;
	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		/* Only DCP content can be referenced */
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		scoped_ptr<DCPDecoder> decoder;
		decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims expressed in frames at the content's rate */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();
		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();
			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
			/* Advance our running offsets by this reel's length */
			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
/* NOTE(review): the function header is not visible in this view of the file.
   From the body (picking the furthest-behind decoder/filler, emitting black,
   silence and merged audio, flushing _delay) this looks like Player::pass(),
   the main "produce a bit more output" loop — confirm against the full source. */
	boost::mutex::scoped_lock lm (_mutex);
	/* We can't pass in this state */
	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */
	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
		/* Given two choices at the same time, pick the one with texts so we see it before
		*/
		if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
			earliest_content = i;
	if (earliest_content) {
	/* Prefer the black filler if it is further behind than the earliest content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		/* Emit a black frame to fill a video-less period */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		/* Emit silence to fill an audio-less period */
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence. Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		/* Emit at most one video frame's worth of silence at a time */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		_silent.set_position (period.to);
	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _playback_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		emit_audio (i->first, i->second);
	/* Flush any delayed video that is now due */
	for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
		do_emit_video(i->first, i->second);
/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();
	/* Collect the active open subtitles that overlap this one-frame period */
		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
	/* Bitmap subtitles */
	BOOST_FOREACH (BitmapText i, j.bitmap) {
		/* i.image will already have been scaled to fit _video_container_size */
		dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
			lrint (_video_container_size.width * i.rectangle.x),
			lrint (_video_container_size.height * i.rectangle.y)
	/* String subtitles (rendered to an image) */
	if (!j.string.empty ()) {
		list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
		copy (s.begin(), s.end(), back_inserter (captions));
	if (captions.empty ()) {
		return optional<PositionImage> ();
	/* Merge all captions into one image */
	return merge (captions);
/** Handle some video arriving from a decoder: discard out-of-range frames,
 *  fill any gap since the last emitted video (repeating the previous frame
 *  or emitting black, handling 3D eye ordering), then emit this frame.
 */
Player::video (weak_ptr<Piece> wp, ContentVideo video)
	shared_ptr<Piece> piece = wp.lock ();
	FrameRateChange frc (_film, piece->content);
	/* When skipping (content faster than DCP) drop every other frame */
	if (frc.skip && (video.frame % 2) == 1) {
	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	/* Discard if it's before the content's period or the last accurate seek. We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));
	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* 3D: fill eye-by-eye, taking care at the end of the content */
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the last frame we have for this piece */
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
						emit_video (black_player_video_frame(eyes), j);
					/* Only advance time after the right eye */
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					eyes = increment_eyes (eyes);
				/* 2D: one fill frame per video frame period */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
						emit_video (black_player_video_frame(EYES_BOTH), j);
	/* Remember this frame (per piece) so gaps can be filled by repeating it */
	_last_video[wp].reset (
		piece->content->video->crop (),
		piece->content->video->fade (_film, video.frame),
		piece->content->video->scale().size (
			piece->content->video, _video_container_size, _film->frame_size ()
		_video_container_size,
		piece->content->video->colour_conversion(),
		piece->content->video->range(),
	/* Emit the frame, repeated as required by the frame rate change */
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		t += one_video_frame ();
/** Handle some audio arriving from a decoder: trim it to the content's
 *  period, apply gain, remapping and any audio processor, then push it into
 *  the merger and record how far this stream has been pushed.
 */
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);
	int const rfr = content->resampled_frame_rate (_film);
	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		/* This audio is entirely discarded */
		content_audio.audio = cut.first;
	} else if (time > piece->content->end(_film)) {
	} else if (end > piece->content->end(_film)) {
		/* Truncate the block to end at the content's end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
	/* Apply the content's gain, on a copy so the decoder's buffer is untouched */
	if (content->gain() != 0) {
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	/* Remap the stream's channels into the film's channel layout */
	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record how far this stream has now been pushed, for pull_to in pass() */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/** Handle a bitmap subtitle starting: apply the content's offsets and scale,
 *  scale the image to the video container, and register it as an active text.
 */
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();
	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();
	shared_ptr<Image> image = subtitle.sub.image;
	/* We will scale the subtitle up to fit _video_container_size */
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	if (width == 0 || height == 0) {
	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));
	_active_texts[text->type()].add_from (wc, ps, from);
/** Handle a string (plain-text) subtitle starting: apply the content's
 *  offsets/scale to each SubtitleString and register it as an active text.
 */
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
	/* Ignore anything that starts after the content's end */
	if (from > piece->content->end(_film)) {
	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();
		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	_active_texts[text->type()].add_from (wc, ps, from);
/** Handle a subtitle finishing at ContentTime @a to: close the active text
 *  and, if it is not being burnt in, emit it via the Text signal.
 */
Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
	shared_ptr<const TextContent> text = wc.lock ();
	/* Nothing to do if we have no record of this text starting */
	if (!_active_texts[text->type()].have(wc)) {
	shared_ptr<Piece> piece = wp.lock ();
	DCPTime const dcp_to = content_time_to_dcp (piece, to);
	/* Ignore stops that fall after the content's end */
	if (dcp_to > piece->content->end(_film)) {
	pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
	/* Burnt-in subtitles are composited onto video instead of being emitted as text */
	bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (text->use() && !always && !text->burn()) {
		Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/** Seek the player to DCP time @a time.
 *  @param accurate true to seek exactly to @a time; false to allow decoders
 *  to start from a convenient nearby point (e.g. a keyframe).
 */
Player::seek (DCPTime time, bool accurate)
	boost::mutex::scoped_lock lm (_mutex);
	/* We can't seek in this state */
	_shuffler->clear ();
	if (_audio_processor) {
		_audio_processor->flush ();
	/* Drop any pending merged audio and active texts */
	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content. Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			/* After; this piece is done */
	/* For an accurate seek we know exactly where output resumes; otherwise we don't */
	_last_video_time = time;
	_last_video_eyes = EYES_LEFT;
	_last_audio_time = time;
	_last_video_time = optional<DCPTime>();
	_last_video_eyes = optional<Eyes>();
	_last_audio_time = optional<DCPTime>();
	/* Re-position the black/silence fillers and forget remembered frames */
	_black.set_position (time);
	_silent.set_position (time);
	_last_video.clear ();
/** Queue a video frame for emission, updating the last-video bookkeeping.
 *  Frames are held in a small delay queue so that subtitles for a frame can
 *  arrive before the frame itself is emitted.
 */
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
	/* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
	   player before the video that requires them.
	*/
	_delay.push_back (make_pair (pv, time));
	/* Only advance the clock once both eyes (or a 2D frame) have been queued */
	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
		_last_video_time = time + one_video_frame();
	_last_video_eyes = increment_eyes (pv->eyes());
	/* Keep the queue short; emit the oldest entry once it is long enough */
	if (_delay.size() < 3) {
	pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
	do_emit_video (to_do.first, to_do.second);
/** Actually emit a video frame: expire old active texts, attach any open
 *  subtitles for this time to the frame, then signal it.
 */
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
		/* This frame completes a time period, so drop texts that have ended */
		for (int i = 0; i < TEXT_COUNT; ++i) {
			_active_texts[i].clear_before (time);
	optional<PositionImage> subtitles = open_subtitles_for_frame (time);
	pv->set_text (subtitles.get ());
/** Emit some audio via the Audio signal and advance _last_audio_time.
 *  The data must follow directly on from the previously emitted audio.
 */
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
	/* Log if the assert below is about to fail */
	if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
	DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
	Audio (data, time, _film->audio_frame_rate());
	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/** Emit silence to cover the DCP time period @a period, in blocks of at most
 *  half a second.
 */
Player::fill_audio (DCPTimePeriod period)
	/* Nothing to fill for an empty period */
	if (period.from == period.to) {
	DCPOMATIC_ASSERT (period.from < period.to);
	DCPTime t = period.from;
	while (t < period.to) {
		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
		Frame const samples = block.frames_round(_film->audio_frame_rate());
		shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
		silence->make_silent ();
		emit_audio (silence, t);
/** @return The length of one video frame at the film's video frame rate. */
Player::one_video_frame () const
	return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Discard the part of @a audio (starting at @a time) that falls before
 *  @a discard_to.
 *  @return The remaining audio (null if it was all discarded) and its new
 *  start time.
 */
pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
	DCPTime const discard_time = discard_to - time;
	Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
	Frame remaining_frames = audio->frames() - discard_frames;
	if (remaining_frames <= 0) {
		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
	shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
	return make_pair(cut, time + discard_time);
/** Set (or clear, with an empty optional) the resolution reduction applied
 *  when decoding DCP content, rebuilding pieces if it changed.
 */
Player::set_dcp_decode_reduction (optional<int> reduction)
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
	boost::mutex::scoped_lock lm (_mutex);
	/* No change: cancel the pending notification */
	if (reduction == _dcp_decode_reduction) {
		Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
	_dcp_decode_reduction = reduction;
	setup_pieces_unlocked ();
	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/** Convert ContentTime @a t within @a content to a DCP time, if @a content
 *  is currently one of our pieces; empty otherwise.
 */
Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
	boost::mutex::scoped_lock lm (_mutex);
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content == content) {
			return content_time_to_dcp (i, t);
	/* We couldn't find this content; perhaps things are being changed over */
	return optional<DCPTime>();