2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
/* Convenience macro: write a TYPE_GENERAL entry to the Film's log.
   NOTE(review): expands to a call through _film, so it is only usable
   inside Player members where _film is in scope and non-null. */
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Identifiers passed with the Player's Change signal so that observers can
   tell which aspect of the player's output changed (or is about to change). */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/** Construct a Player for @param film rendered from @param playlist.
 *  Subscribes to change signals from the film and playlist, sets up the
 *  output container size and audio processor, and seeks to the start.
 */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
100 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
101 /* The butler must hear about this first, so since we are proxying this through to the butler we must
104 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
105 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
/* Start with the film's own frame size as our output container */
106 set_video_container_size (_film->frame_size ());
/* Act as if the audio processor property just changed, so that the
   processor clone is created (see film_change) */
108 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so decoders and the last-time state are primed */
111 seek (DCPTime (), true);
/** Rebuild our Pieces from the playlist, taking the state mutex first.
 *  Thin locking wrapper around setup_pieces_unlocked().
 */
120 Player::setup_pieces ()
122 boost::mutex::scoped_lock lm (_mutex);
123 setup_pieces_unlocked ();
/** Rebuild the list of Pieces (content + decoder + frame-rate-change) from the
 *  playlist, wire up decoder signals, and reset playback state.
 *  Caller must hold _mutex.
 */
127 Player::setup_pieces_unlocked ()
/* Fresh Shuffler to re-order any 3D L/R video that arrives out of sequence */
132 _shuffler = new Shuffler();
133 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
135 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing/invalid */
137 if (!i->paths_valid ()) {
141 if (_ignore_video && _ignore_audio && i->text.empty()) {
142 /* We're only interested in text and this content has none */
146 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
147 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
150 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags by silencing the corresponding sub-decoders */
154 if (decoder->video && _ignore_video) {
155 decoder->video->set_ignore (true);
158 if (decoder->audio && _ignore_audio) {
159 decoder->audio->set_ignore (true);
163 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
164 i->set_ignore (true);
/* DCP content gets extra configuration for playing referenced (VF) assets */
168 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
170 dcp->set_decode_referenced (_play_referenced);
171 if (_play_referenced) {
172 dcp->set_forced_reduction (_dcp_decode_reduction);
176 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
177 _pieces.push_back (piece);
179 if (decoder->video) {
180 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
181 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
182 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
184 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
188 if (decoder->audio) {
189 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals to our subtitle handlers */
192 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
194 while (j != decoder->text.end()) {
195 (*j)->BitmapStart.connect (
196 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
198 (*j)->PlainStart.connect (
199 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
202 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Record, per audio stream, which piece it belongs to and where its
   audio pushing starts from */
209 _stream_states.clear ();
210 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
211 if (i->content->audio) {
212 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
213 _stream_states[j] = StreamState (i, i->content->position ());
/* Track the gaps in video (_black) and audio (_silent) that we must fill */
218 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
219 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
/* Reset emission positions to the start */
221 _last_video_time = DCPTime ();
222 _last_video_eyes = EYES_BOTH;
223 _last_audio_time = DCPTime ();
/** Handle a change to some content in the playlist.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param property Property that changed.
 *  @param frequent true if this change is high-frequency (e.g. during a drag).
 *  Re-emits the change to our own observers via the Change signal.
 */
228 Player::playlist_content_change (ChangeType type, int property, bool frequent)
230 if (type == CHANGE_TYPE_PENDING) {
231 boost::mutex::scoped_lock lm (_mutex);
232 /* The player content is probably about to change, so we can't carry on
233 until that has happened and we've rebuilt our pieces. Stop pass()
234 and seek() from working until then.
237 } else if (type == CHANGE_TYPE_DONE) {
238 /* A change in our content has gone through. Re-build our pieces. */
240 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Proxy the change on to whoever is watching the Player */
244 Change (type, property, frequent);
/** Set the size of the 'container' (the output frame) that video is placed into.
 *  Re-creates the cached black frame at the new size.  Emits PENDING, then
 *  either CANCELLED (no-op: size unchanged) or DONE.
 */
248 Player::set_video_container_size (dcp::Size s)
250 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
253 boost::mutex::scoped_lock lm (_mutex);
255 if (s == _video_container_size) {
257 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
261 _video_container_size = s;
/* Cache a black frame at the container size for filling gaps */
263 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
264 _black_image->make_black ();
267 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/** Handle a change to the playlist as a whole; re-emit it as a
 *  PlayerProperty::PLAYLIST change to our own observers.
 */
271 Player::playlist_change (ChangeType type)
273 if (type == CHANGE_TYPE_DONE) {
276 Change (type, PlayerProperty::PLAYLIST, false);
/** Handle a change to a Film property that affects our output, translating
 *  it into the corresponding PlayerProperty change signal and updating any
 *  dependent internal state (audio processor clone, merger).
 */
280 Player::film_change (ChangeType type, Film::Property p)
282 /* Here we should notice Film properties that affect our output, and
283 alert listeners that our output now would be different to how it was
284 last time we were run.
287 if (p == Film::CONTAINER) {
288 Change (type, PlayerProperty::FILM_CONTAINER, false);
289 } else if (p == Film::VIDEO_FRAME_RATE) {
290 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
291 so we need new pieces here.
293 if (type == CHANGE_TYPE_DONE) {
296 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
297 } else if (p == Film::AUDIO_PROCESSOR) {
298 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
/* Clone the film's processor at our working audio rate */
299 boost::mutex::scoped_lock lm (_mutex);
300 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
302 } else if (p == Film::AUDIO_CHANNELS) {
303 if (type == CHANGE_TYPE_DONE) {
/* Channel count changed, so any merged-but-unemitted audio is stale */
304 boost::mutex::scoped_lock lm (_mutex);
305 _audio_merger.clear ();
/** Scale and position a list of bitmap subtitles so they fit the current
 *  _video_container_size, returning them as PositionImages.
 *  Rectangle coordinates are fractions of the container, hence the
 *  multiplications below.
 */
311 Player::transform_bitmap_texts (list<BitmapText> subs) const
313 list<PositionImage> all;
315 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
320 /* We will scale the subtitle up to fit _video_container_size */
321 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
327 dcp::YUV_TO_RGB_REC601,
328 i->image->pixel_format (),
/* Convert fractional rectangle position to pixels in the container */
333 lrint (_video_container_size.width * i->rectangle.x),
334 lrint (_video_container_size.height * i->rectangle.y)
/** @return A black PlayerVideo at the container size, for the given eyes,
 *  used to fill periods where there is no video content.
 */
343 shared_ptr<PlayerVideo>
344 Player::black_player_video_frame (Eyes eyes) const
346 return shared_ptr<PlayerVideo> (
/* Wrap the cached black image; no source content or frame index applies */
348 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
351 _video_container_size,
352 _video_container_size,
355 PresetColourConversion::all().front().conversion,
356 boost::weak_ptr<Content>(),
357 boost::optional<Frame>()
/** Convert a DCP time to a video frame index within a piece of content,
 *  accounting for the piece's position, trim and frame-rate change.
 */
363 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
365 DCPTime s = t - piece->content->position ();
/* Clamp into the content's period, then add back the trimmed-off start */
366 s = min (piece->content->length_after_trim(), s);
367 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
369 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
370 then convert that ContentTime to frames at the content's rate. However this fails for
371 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
372 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
374 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
376 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/** Inverse of dcp_to_content_video: convert a content video frame index
 *  to the DCP time at which it will appear.
 */
380 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
382 /* See comment in dcp_to_content_video */
383 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
384 return d + piece->content->position();
/** Convert a DCP time to an audio frame index (at the film's audio rate)
 *  within a piece of content, accounting for position and trim.
 */
388 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
390 DCPTime s = t - piece->content->position ();
391 s = min (piece->content->length_after_trim(), s);
392 /* See notes in dcp_to_content_video */
393 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/** Inverse of dcp_to_resampled_audio: convert a resampled audio frame index
 *  within a piece of content to DCP time.
 */
397 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
399 /* See comment in dcp_to_content_video */
400 return DCPTime::from_frames (f, _film->audio_frame_rate())
401 - DCPTime (piece->content->trim_start(), piece->frc)
402 + piece->content->position();
/** Convert a DCP time to a ContentTime within a piece of content,
 *  clamping into the content's period and adding back the start trim.
 */
406 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
408 DCPTime s = t - piece->content->position ();
409 s = min (piece->content->length_after_trim(), s);
410 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/** Convert a ContentTime within a piece of content to DCP time,
 *  removing the start trim and adding the piece's position.
 */
414 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
416 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** @return All fonts used by all text (subtitle/caption) content in our pieces.
 *  Duplicates are not removed here (see XXX below).
 */
419 list<shared_ptr<Font> >
420 Player::get_subtitle_fonts ()
422 boost::mutex::scoped_lock lm (_mutex);
424 list<shared_ptr<Font> > fonts;
425 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
426 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
427 /* XXX: things may go wrong if there are duplicate font IDs
428 with different font files.
430 list<shared_ptr<Font> > f = j->fonts ();
431 copy (f.begin(), f.end(), back_inserter (fonts));
438 /** Set this player never to produce any video data */
440 Player::set_ignore_video ()
442 boost::mutex::scoped_lock lm (_mutex);
443 _ignore_video = true;
/* Rebuild pieces so decoders pick up the new ignore flag */
444 setup_pieces_unlocked ();
/** Set this player never to produce any audio data.
 *  Rebuilds pieces so decoders pick up the new ignore flag.
 */
448 Player::set_ignore_audio ()
450 boost::mutex::scoped_lock lm (_mutex);
451 _ignore_audio = true;
452 setup_pieces_unlocked ();
/** Set this player never to produce any text (subtitle/caption) data.
 *  Rebuilds pieces so decoders pick up the new ignore flag.
 */
456 Player::set_ignore_text ()
458 boost::mutex::scoped_lock lm (_mutex);
460 setup_pieces_unlocked ();
463 /** Set the player to always burn open texts into the image regardless of the content settings */
465 Player::set_always_burn_open_subtitles ()
467 boost::mutex::scoped_lock lm (_mutex);
/* No piece rebuild needed: this flag is consulted at emission time */
468 _always_burn_open_subtitles = true;
471 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature line is not visible here; presumably this is
   Player::set_fast(), setting _fast (used when creating decoders) before
   rebuilding pieces — confirm against the full source. */
475 boost::mutex::scoped_lock lm (_mutex);
477 setup_pieces_unlocked ();
/** Set the player to include referenced (VF) DCP assets in its output,
 *  rather than leaving them out for direct reel reference.
 */
481 Player::set_play_referenced ()
483 boost::mutex::scoped_lock lm (_mutex);
484 _play_referenced = true;
485 setup_pieces_unlocked ();
/** @return Reel assets (picture/sound/subtitle/closed-caption) from any DCP
 *  content which is marked to refer to its assets rather than re-encode them,
 *  together with the DCP time period each occupies in the output.
 */
488 list<ReferencedReelAsset>
489 Player::get_reel_assets ()
491 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
493 list<ReferencedReelAsset> a;
495 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
496 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
501 scoped_ptr<DCPDecoder> decoder;
503 decoder.reset (new DCPDecoder (j, _film->log(), false));
509 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
511 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* Trims are expressed in frames at the content's rate (cfr);
   output periods use the film's rate (ffr) */
512 double const cfr = j->video_frame_rate().get();
513 Frame const trim_start = j->trim_start().frames_round (cfr);
514 Frame const trim_end = j->trim_end().frames_round (cfr);
515 int const ffr = _film->video_frame_rate ();
/* Position of this reel in the DCP: content position plus the reels so far */
517 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
518 if (j->reference_video ()) {
519 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
520 DCPOMATIC_ASSERT (ra);
/* Apply our trims to the asset's own entry point / duration */
521 ra->set_entry_point (ra->entry_point() + trim_start);
522 ra->set_duration (ra->duration() - trim_start - trim_end);
524 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
528 if (j->reference_audio ()) {
529 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
530 DCPOMATIC_ASSERT (ra);
531 ra->set_entry_point (ra->entry_point() + trim_start);
532 ra->set_duration (ra->duration() - trim_start - trim_end);
534 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
538 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
539 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
540 DCPOMATIC_ASSERT (ra);
541 ra->set_entry_point (ra->entry_point() + trim_start);
542 ra->set_duration (ra->duration() - trim_start - trim_end);
544 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
548 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
/* A reel may carry several closed-caption assets */
549 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
550 DCPOMATIC_ASSERT (l);
551 l->set_entry_point (l->entry_point() + trim_start);
552 l->set_duration (l->duration() - trim_start - trim_end);
554 ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
559 /* Assume that main picture duration is the length of the reel */
560 offset += k->main_picture()->duration ();
/* NOTE(review): the signature line is not visible here; presumably this is
   Player::pass() — make one decoder (or gap-filler) emit some data, choosing
   whichever is farthest behind, then flush any ready audio and delayed video. */
570 boost::mutex::scoped_lock lm (_mutex);
573 /* We can't pass in this state */
577 if (_playlist->length() == DCPTime()) {
578 /* Special case of an empty Film; just give one black frame */
579 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
583 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
585 shared_ptr<Piece> earliest_content;
586 optional<DCPTime> earliest_time;
588 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, expressed in DCP time */
593 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
594 if (t > i->content->end()) {
598 /* Given two choices at the same time, pick the one with texts so we see it before
601 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
603 earliest_content = i;
617 if (earliest_content) {
/* A pending black (video gap) or silence (audio gap) may be earlier still */
621 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
622 earliest_time = _black.position ();
626 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
627 earliest_time = _silent.position ();
/* Drive whichever source was chosen */
633 earliest_content->done = earliest_content->decoder->pass ();
636 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
637 _black.set_position (_black.position() + one_video_frame());
641 DCPTimePeriod period (_silent.period_at_position());
642 if (_last_audio_time) {
643 /* Sometimes the thing that happened last finishes fractionally before
644 this silence. Bodge the start time of the silence to fix it. I'm
645 not sure if this is the right solution --- maybe the last thing should
646 be padded `forward' rather than this thing padding `back'.
648 period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass */
650 if (period.duration() > one_video_frame()) {
651 period.to = period.from + one_video_frame();
654 _silent.set_position (period.to);
662 /* Emit any audio that is ready */
664 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
665 of our streams, or the position of the _silent.
667 DCPTime pull_to = _film->length ();
668 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
669 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
670 pull_to = i->second.last_push_end;
673 if (!_silent.done() && _silent.position() < pull_to) {
674 pull_to = _silent.position();
677 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
678 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
679 if (_last_audio_time && i->second < *_last_audio_time) {
680 /* This new data comes before the last we emitted (or the last seek); discard it */
681 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
686 } else if (_last_audio_time && i->second > *_last_audio_time) {
687 /* There's a gap between this data and the last we emitted; fill with silence */
688 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
691 emit_audio (i->first, i->second);
/* Flush the video delay queue (see emit_video for why it exists) */
696 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
697 do_emit_video(i->first, i->second);
704 /** @return Open subtitles for the frame at the given time, converted to images */
705 optional<PositionImage>
706 Player::open_subtitles_for_frame (DCPTime time) const
708 list<PositionImage> captions;
709 int const vfr = _film->video_frame_rate();
/* Collect open subtitles active during this one-frame period which
   should be burnt in (or all of them, if _always_burn_open_subtitles) */
713 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
716 /* Bitmap subtitles */
717 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
718 copy (c.begin(), c.end(), back_inserter (captions));
720 /* String subtitles (rendered to an image) */
721 if (!j.string.empty ()) {
722 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
723 copy (s.begin(), s.end(), back_inserter (captions));
727 if (captions.empty ()) {
728 return optional<PositionImage> ();
/* Combine everything into a single overlay image */
731 return merge (captions);
/** Handle a frame of video arriving from a piece's decoder: convert it to
 *  DCP time, discard if out of range, fill any gap since the last emitted
 *  frame (repeating the previous frame or using black), then emit it.
 *  @param wp Weak pointer to the originating Piece.
 *  @param video The decoded frame and its metadata.
 */
735 Player::video (weak_ptr<Piece> wp, ContentVideo video)
737 shared_ptr<Piece> piece = wp.lock ();
742 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping (content faster than DCP) drop every other frame */
743 if (frc.skip && (video.frame % 2) == 1) {
747 /* Time of the first frame we will emit */
748 DCPTime const time = content_video_to_dcp (piece, video.frame);
750 /* Discard if it's before the content's period or the last accurate seek. We can't discard
751 if it's after the content's period here as in that case we still need to fill any gap between
752 `now' and the end of the content's period.
754 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
758 /* Fill gaps that we discover now that we have some video which needs to be emitted.
759 This is where we need to fill to.
761 DCPTime fill_to = min (time, piece->content->end());
763 if (_last_video_time) {
764 DCPTime fill_from = max (*_last_video_time, piece->content->position());
/* Last frame we emitted for this piece, if any, to repeat into the gap */
765 LastVideoMap::const_iterator last = _last_video.find (wp);
766 if (_film->three_d()) {
/* In 3D we fill eye-by-eye, stopping at the eye of the incoming frame */
767 Eyes fill_to_eyes = video.eyes;
768 if (fill_to_eyes == EYES_BOTH) {
769 fill_to_eyes = EYES_LEFT;
771 if (fill_to == piece->content->end()) {
772 /* Don't fill after the end of the content */
773 fill_to_eyes = EYES_LEFT;
775 DCPTime j = fill_from;
776 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
777 if (eyes == EYES_BOTH) {
780 while (j < fill_to || eyes != fill_to_eyes) {
781 if (last != _last_video.end()) {
/* Repeat the previous frame with the required eye */
782 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
783 copy->set_eyes (eyes);
784 emit_video (copy, j);
786 emit_video (black_player_video_frame(eyes), j);
/* Advance time only after the right eye; alternate L/R */
788 if (eyes == EYES_RIGHT) {
789 j += one_video_frame();
791 eyes = increment_eyes (eyes);
794 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
795 if (last != _last_video.end()) {
796 emit_video (last->second, j);
798 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame, applying crop/fade/scale/colour */
804 _last_video[wp].reset (
807 piece->content->video->crop (),
808 piece->content->video->fade (video.frame),
809 piece->content->video->scale().size (
810 piece->content->video, _video_container_size, _film->frame_size ()
812 _video_container_size,
815 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame-rate change */
822 for (int i = 0; i < frc.repeat; ++i) {
823 if (t < piece->content->end()) {
824 emit_video (_last_video[wp], t);
826 t += one_video_frame ();
/** Handle a block of audio arriving from a piece's decoder: trim it to the
 *  content's period, apply gain/remap/processing, then push it into the
 *  audio merger and record how far this stream has got.
 *  @param wp Weak pointer to the originating Piece.
 *  @param stream The audio stream the data belongs to.
 *  @param content_audio The decoded audio and its frame position.
 */
831 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
833 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
835 shared_ptr<Piece> piece = wp.lock ();
840 shared_ptr<AudioContent> content = piece->content->audio;
841 DCPOMATIC_ASSERT (content);
843 /* Compute time in the DCP */
844 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
845 /* And the end of this block in the DCP */
846 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
848 /* Remove anything that comes before the start or after the end of the content */
849 if (time < piece->content->position()) {
850 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
852 /* This audio is entirely discarded */
855 content_audio.audio = cut.first;
857 } else if (time > piece->content->end()) {
/* Starts after the content ends: drop it all */
860 } else if (end > piece->content->end()) {
/* Overlaps the end of the content: keep only the part inside */
861 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
862 if (remaining_frames == 0) {
865 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
866 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
867 content_audio.audio = cut;
870 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
874 if (content->gain() != 0) {
875 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
876 gain->apply_gain (content->gain ());
877 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
882 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
/* Run through the film's audio processor (e.g. upmixer), if configured */
886 if (_audio_processor) {
887 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Hand to the merger and note how far this stream has now been pushed */
892 _audio_merger.push (content_audio.audio, time);
893 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
894 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/** Handle the start of a bitmap subtitle from a text decoder: apply the
 *  content's offset/scale settings, scale the image to the container size
 *  and register it with the appropriate active-texts list.
 */
898 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
900 shared_ptr<Piece> piece = wp.lock ();
901 shared_ptr<const TextContent> text = wc.lock ();
902 if (!piece || !text) {
906 /* Apply content's subtitle offsets */
907 subtitle.sub.rectangle.x += text->x_offset ();
908 subtitle.sub.rectangle.y += text->y_offset ();
910 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
911 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
912 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
914 /* Apply content's subtitle scale */
915 subtitle.sub.rectangle.width *= text->x_scale ();
916 subtitle.sub.rectangle.height *= text->y_scale ();
919 shared_ptr<Image> image = subtitle.sub.image;
920 /* We will scale the subtitle up to fit _video_container_size */
921 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
922 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
/* Record when (in DCP time) this subtitle starts */
923 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
925 _active_texts[text->type()].add_from (wc, ps, from);
/** Handle the start of a plain (string) subtitle from a text decoder:
 *  apply the content's offsets and scaling to each SubtitleString and
 *  register the result with the appropriate active-texts list.
 */
929 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
931 shared_ptr<Piece> piece = wp.lock ();
932 shared_ptr<const TextContent> text = wc.lock ();
933 if (!piece || !text) {
938 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles starting after the content has ended */
940 if (from > piece->content->end()) {
944 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
945 s.set_h_position (s.h_position() + text->x_offset ());
946 s.set_v_position (s.v_position() + text->y_offset ());
947 float const xs = text->x_scale();
948 float const ys = text->y_scale();
949 float size = s.size();
951 /* Adjust size to express the common part of the scaling;
952 e.g. if xs = ys = 0.5 we scale size by 2.
954 if (xs > 1e-5 && ys > 1e-5) {
955 size *= 1 / min (1 / xs, 1 / ys);
959 /* Then express aspect ratio changes */
960 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
961 s.set_aspect_adjust (xs / ys);
/* Timecode uses an editable-units denominator of 1000 (milliseconds) */
964 s.set_in (dcp::Time(from.seconds(), 1000));
965 ps.string.push_back (StringText (s, text->outline_width()));
966 ps.add_fonts (text->fonts ());
969 _active_texts[text->type()].add_from (wc, ps, from);
/** Handle the end of a subtitle from a text decoder: close the matching
 *  active text and, if it is not to be burnt in, emit it via the Text signal.
 */
973 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
975 shared_ptr<const TextContent> text = wc.lock ();
/* Nothing to stop if we never saw this text start */
980 if (!_active_texts[text->type()].have(wc)) {
984 shared_ptr<Piece> piece = wp.lock ();
989 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops beyond the end of the content's period */
991 if (dcp_to > piece->content->end()) {
995 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Burnt-in subtitles are composited onto video instead of being emitted here */
997 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
998 if (text->use() && !always && !text->burn()) {
999 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/** Seek the player to a DCP time.
 *  @param time Time to seek to.
 *  @param accurate true to seek precisely to @param time; false to allow the
 *  decoders to settle somewhere at-or-before it.  After an accurate seek the
 *  last-emitted time state is primed so output resumes exactly at @param time.
 */
1004 Player::seek (DCPTime time, bool accurate)
1006 boost::mutex::scoped_lock lm (_mutex);
1009 /* We can't seek in this state */
/* Throw away any part-assembled 3D frames */
1014 _shuffler->clear ();
/* Flush and reset audio state so stale data isn't emitted after the seek */
1019 if (_audio_processor) {
1020 _audio_processor->flush ();
1023 _audio_merger.clear ();
1024 for (int i = 0; i < TEXT_COUNT; ++i) {
1025 _active_texts[i].clear ();
/* Seek each piece's decoder relative to where the seek point falls */
1028 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1029 if (time < i->content->position()) {
1030 /* Before; seek to the start of the content */
1031 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1033 } else if (i->content->position() <= time && time < i->content->end()) {
1034 /* During; seek to position */
1035 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1038 /* After; this piece is done */
/* Accurate seeks pin the next emission times exactly; otherwise leave
   them unset so the next data establishes them */
1044 _last_video_time = time;
1045 _last_video_eyes = EYES_LEFT;
1046 _last_audio_time = time;
1048 _last_video_time = optional<DCPTime>();
1049 _last_video_eyes = optional<Eyes>();
1050 _last_audio_time = optional<DCPTime>();
/* Move the gap-fillers to the seek point and drop cached last frames */
1053 _black.set_position (time);
1054 _silent.set_position (time);
1056 _last_video.clear ();
/** Queue a video frame for emission, updating the last-video time/eyes state.
 *  Frames are held in a small delay queue (see comment below) and released
 *  via do_emit_video() once the queue exceeds its depth.
 */
1060 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1062 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1063 player before the video that requires them.
1065 _delay.push_back (make_pair (pv, time));
/* Only advance the clock once both eyes (or a 2D frame) have been queued */
1067 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1068 _last_video_time = time + one_video_frame();
1070 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to two frames buffered before releasing the oldest */
1072 if (_delay.size() < 3) {
1076 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1078 do_emit_video (to_do.first, to_do.second);
/** Actually emit a video frame: expire finished subtitles, attach any open
 *  subtitles that should appear on this frame, then fire the Video signal.
 */
1082 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Expire once per frame, not per eye */
1084 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1085 for (int i = 0; i < TEXT_COUNT; ++i) {
1086 _active_texts[i].clear_before (time);
1090 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1092 pv->set_text (subtitles.get ());
/** Emit a block of audio at the given DCP time and advance _last_audio_time.
 *  Audio must be emitted contiguously; the assert below enforces that.
 */
1099 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1101 /* Log if the assert below is about to fail */
1102 if (_last_audio_time && time != *_last_audio_time) {
1103 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1106 /* This audio must follow on from the previous */
1107 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1109 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/** Emit silence to cover the given period, in blocks of at most half a second. */
1113 Player::fill_audio (DCPTimePeriod period)
1115 if (period.from == period.to) {
1119 DCPOMATIC_ASSERT (period.from < period.to);
1121 DCPTime t = period.from;
1122 while (t < period.to) {
/* Cap each silent block at 0.5s to bound the buffer size */
1123 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1124 Frame const samples = block.frames_round(_film->audio_frame_rate());
1126 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1127 silence->make_silent ();
1128 emit_audio (silence, t);
/** @return The duration of one video frame at the film's video frame rate. */
1135 Player::one_video_frame () const
1137 return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Discard the part of some audio that falls before @param discard_to.
 *  @param audio The audio data, starting at @param time.
 *  @return The remaining audio (null if nothing remains) and its new start time.
 */
1140 pair<shared_ptr<AudioBuffers>, DCPTime>
1141 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1143 DCPTime const discard_time = discard_to - time;
1144 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1145 Frame remaining_frames = audio->frames() - discard_frames;
1146 if (remaining_frames <= 0) {
1147 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
/* Copy the surviving tail into a fresh buffer */
1149 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1150 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1151 return make_pair(cut, time + discard_time);
/** Set a JPEG2000 decode resolution reduction to apply to DCP content.
 *  Emits PENDING, then CANCELLED (no change) or DONE; rebuilding pieces so
 *  decoders pick up the new reduction.
 */
1155 Player::set_dcp_decode_reduction (optional<int> reduction)
1157 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1160 boost::mutex::scoped_lock lm (_mutex);
1162 if (reduction == _dcp_decode_reduction) {
1164 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1168 _dcp_decode_reduction = reduction;
1169 setup_pieces_unlocked ();
1172 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/** Public overload: convert a ContentTime within the given Content to DCP time.
 *  @return The DCP time, or unset if the content is not in our pieces
 *  (e.g. while the playlist is being rebuilt).
 */
1176 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1178 boost::mutex::scoped_lock lm (_mutex);
1180 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1181 if (i->content == content) {
1182 return content_time_to_dcp (i, t);
1186 /* We couldn't find this content; perhaps things are being changed over */
1187 return optional<DCPTime>();