2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
/* Convenience macro: write a general-category entry to the Film's log. */
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Identifiers for the properties this Player reports via its Change signal
   (see playlist_change / film_change / set_video_container_size below).
*/
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for @param film, playing @param playlist.
   Wires up change signals from the Film and Playlist, sets the initial
   container size from the film, applies any existing audio processor, and
   performs an initial accurate seek to time zero.
   NOTE(review): some member initializers are not visible in this view.
*/
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
100 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
101 /* The butler must hear about this first, so since we are proxying this through to the butler we must
104 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
105 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
106 set_video_container_size (_film->frame_size ());
108 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Initial accurate seek to the start so decoders are positioned correctly */
111 seek (DCPTime (), true);
/* Thread-safe wrapper: take _mutex then rebuild the Piece list. */
120 Player::setup_pieces ()
122 boost::mutex::scoped_lock lm (_mutex);
123 setup_pieces_unlocked ();
/* Rebuild _pieces from the playlist's content: create a decoder per content
   item, apply the ignore flags, connect decoder signals to this Player's
   handlers, and reset playback bookkeeping (_stream_states, _black, _silent,
   last-emitted times).  Caller must hold _mutex.
*/
127 Player::setup_pieces_unlocked ()
/* Fresh Shuffler each time; used for out-of-order 3D L/R frames (see below) */
132 _shuffler = new Shuffler();
133 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
135 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing */
137 if (!i->paths_valid ()) {
141 if (_ignore_video && _ignore_audio && i->text.empty()) {
142 /* We're only interested in text and this content has none */
146 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
147 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
150 /* Not something that we can decode; e.g. Atmos content */
154 if (decoder->video && _ignore_video) {
155 decoder->video->set_ignore (true);
158 if (decoder->audio && _ignore_audio) {
159 decoder->audio->set_ignore (true);
163 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
164 i->set_ignore (true);
/* DCP content may reference assets from another DCP rather than decoding them */
168 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
170 dcp->set_decode_referenced (_play_referenced);
171 if (_play_referenced) {
172 dcp->set_forced_reduction (_dcp_decode_reduction);
176 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
177 _pieces.push_back (piece);
179 if (decoder->video) {
180 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
181 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
182 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
184 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
188 if (decoder->audio) {
189 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Hook up every text decoder's start/stop signals */
192 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
194 while (j != decoder->text.end()) {
195 (*j)->BitmapStart.connect (
196 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
198 (*j)->PlainStart.connect (
199 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
202 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
/* Record each audio stream's owning piece and starting position */
209 _stream_states.clear ();
210 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
211 if (i->content->audio) {
212 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
213 _stream_states[j] = StreamState (i, i->content->position ());
/* Periods with no video / no audio, which pass() will fill with black / silence */
218 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
219 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
221 _last_video_time = DCPTime ();
222 _last_video_eyes = EYES_BOTH;
223 _last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist.  On PENDING we must stop
   pass()/seek() until the change completes; on DONE we rebuild our pieces.
   Either way the change is forwarded to our own Change signal.
*/
228 Player::playlist_content_change (ChangeType type, int property, bool frequent)
230 if (type == CHANGE_TYPE_PENDING) {
231 boost::mutex::scoped_lock lm (_mutex);
232 /* The player content is probably about to change, so we can't carry on
233 until that has happened and we've rebuilt our pieces. Stop pass()
234 and seek() from working until then.
237 } else if (type == CHANGE_TYPE_DONE) {
238 /* A change in our content has gone through. Re-build our pieces. */
242 Change (type, property, frequent);
/* Set the size of the container that output video will be fitted into,
   rebuilding the cached black frame to match.  Emits PENDING then either
   CANCELLED (no change) or DONE around the update.
*/
246 Player::set_video_container_size (dcp::Size s)
248 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
251 boost::mutex::scoped_lock lm (_mutex);
253 if (s == _video_container_size) {
255 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
259 _video_container_size = s;
/* Re-make the black frame at the new size */
261 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
262 _black_image->make_black ();
265 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* The playlist itself changed (content added/removed); rebuild on DONE and
   forward the change to listeners as a PLAYLIST property change.
*/
269 Player::playlist_change (ChangeType type)
271 if (type == CHANGE_TYPE_DONE) {
274 Change (type, PlayerProperty::PLAYLIST, false);
/* React to a change of a Film property that affects our output, translating
   it to the corresponding PlayerProperty and doing any required internal
   rebuild (pieces for frame-rate, processor clone, merger reset).
*/
278 Player::film_change (ChangeType type, Film::Property p)
280 /* Here we should notice Film properties that affect our output, and
281 alert listeners that our output now would be different to how it was
282 last time we were run.
285 if (p == Film::CONTAINER) {
286 Change (type, PlayerProperty::FILM_CONTAINER, false);
287 } else if (p == Film::VIDEO_FRAME_RATE) {
288 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
289 so we need new pieces here.
291 if (type == CHANGE_TYPE_DONE) {
294 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
295 } else if (p == Film::AUDIO_PROCESSOR) {
296 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
/* Clone the processor so we have our own instance at the film's sample rate */
297 boost::mutex::scoped_lock lm (_mutex);
298 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
300 } else if (p == Film::AUDIO_CHANNELS) {
301 if (type == CHANGE_TYPE_DONE) {
302 boost::mutex::scoped_lock lm (_mutex);
303 _audio_merger.clear ();
/* Convert a list of bitmap subtitles into PositionImages scaled and
   positioned for _video_container_size (rectangle coordinates are treated
   as fractions of the container).
*/
309 Player::transform_bitmap_texts (list<BitmapText> subs) const
311 list<PositionImage> all;
313 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
318 /* We will scale the subtitle up to fit _video_container_size */
319 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
325 dcp::YUV_TO_RGB_REC601,
326 i->image->pixel_format (),
/* Position is the fractional rectangle origin scaled to container pixels */
331 lrint (_video_container_size.width * i->rectangle.x),
332 lrint (_video_container_size.height * i->rectangle.y)
/* @return a PlayerVideo wrapping the cached black image, sized to the
   container, with the given @param eyes (for 3D) and no source Content.
*/
341 shared_ptr<PlayerVideo>
342 Player::black_player_video_frame (Eyes eyes) const
344 return shared_ptr<PlayerVideo> (
346 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
349 _video_container_size,
350 _video_container_size,
353 PresetColourConversion::all().front().conversion,
354 boost::weak_ptr<Content>(),
355 boost::optional<Frame>()
/* Convert a DCP time @param t to a video frame index within @param piece,
   accounting for position, trim and the piece's frame-rate change.
*/
361 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
363 DCPTime s = t - piece->content->position ();
364 s = min (piece->content->length_after_trim(), s);
365 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
367 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
368 then convert that ContentTime to frames at the content's rate. However this fails for
369 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
370 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
372 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
374 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: content video frame @param f -> DCP time. */
378 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
380 /* See comment in dcp_to_content_video */
381 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
382 return d + piece->content->position();
/* Convert a DCP time to a frame index in @param piece's resampled audio
   (at the film's audio frame rate).
*/
386 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
388 DCPTime s = t - piece->content->position ();
389 s = min (piece->content->length_after_trim(), s);
390 /* See notes in dcp_to_content_video */
391 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: resampled audio frame @param f -> DCP time. */
395 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
397 /* See comment in dcp_to_content_video */
398 return DCPTime::from_frames (f, _film->audio_frame_rate())
399 - DCPTime (piece->content->trim_start(), piece->frc)
400 + piece->content->position();
/* Convert a DCP time to a ContentTime within @param piece (clamped to the
   piece's trimmed length, never negative).
*/
404 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
406 DCPTime s = t - piece->content->position ();
407 s = min (piece->content->length_after_trim(), s);
408 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within @param piece to DCP time (never negative). */
412 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
414 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every text content in every piece.
   @return the concatenated list (duplicates are not removed; see XXX below).
*/
417 list<shared_ptr<Font> >
418 Player::get_subtitle_fonts ()
420 boost::mutex::scoped_lock lm (_mutex);
422 list<shared_ptr<Font> > fonts;
423 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
424 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
425 /* XXX: things may go wrong if there are duplicate font IDs
426 with different font files.
428 list<shared_ptr<Font> > f = j->fonts ();
429 copy (f.begin(), f.end(), back_inserter (fonts));
436 /** Set this player never to produce any video data */
438 Player::set_ignore_video ()
440 boost::mutex::scoped_lock lm (_mutex);
441 _ignore_video = true;
/* Rebuild pieces so decoders pick up the new ignore flag */
442 setup_pieces_unlocked ();
/* Set this player never to produce any audio data; rebuilds pieces. */
446 Player::set_ignore_audio ()
448 boost::mutex::scoped_lock lm (_mutex);
449 _ignore_audio = true;
450 setup_pieces_unlocked ();
/* Set this player never to produce any text data; rebuilds pieces. */
454 Player::set_ignore_text ()
456 boost::mutex::scoped_lock lm (_mutex);
458 setup_pieces_unlocked ();
461 /** Set the player to always burn open texts into the image regardless of the content settings */
463 Player::set_always_burn_open_subtitles ()
465 boost::mutex::scoped_lock lm (_mutex);
/* No piece rebuild needed: the flag is consulted at emit time */
466 _always_burn_open_subtitles = true;
469 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature line is not visible here; presumably this sets
   _fast (used by decoder_factory above) before rebuilding — confirm. */
473 boost::mutex::scoped_lock lm (_mutex);
475 setup_pieces_unlocked ();
/* Make the player decode content that would otherwise be referenced from an
   existing DCP (see setup_pieces_unlocked); rebuilds pieces.
*/
479 Player::set_play_referenced ()
481 boost::mutex::scoped_lock lm (_mutex);
482 _play_referenced = true;
483 setup_pieces_unlocked ();
486 list<ReferencedReelAsset>
/* Gather the reel assets (picture/sound/subtitle/closed-caption) of any DCP
   content that is marked to be referenced rather than re-encoded, with their
   entry points and durations adjusted for the content's trim, and each paired
   with the DCPTimePeriod it occupies in the output film.
*/
487 Player::get_reel_assets ()
489 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
491 list<ReferencedReelAsset> a;
493 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
494 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
/* A decoder is made just to enumerate the DCP's reels */
499 scoped_ptr<DCPDecoder> decoder;
501 decoder.reset (new DCPDecoder (j, _film->log(), false));
507 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
509 DCPOMATIC_ASSERT (j->video_frame_rate ());
510 double const cfr = j->video_frame_rate().get();
/* Trim expressed in content frames (cfr) ... */
511 Frame const trim_start = j->trim_start().frames_round (cfr);
512 Frame const trim_end = j->trim_end().frames_round (cfr);
/* ... but output periods use the film frame rate (ffr) */
513 int const ffr = _film->video_frame_rate ();
515 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
516 if (j->reference_video ()) {
517 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
518 DCPOMATIC_ASSERT (ra);
519 ra->set_entry_point (ra->entry_point() + trim_start);
520 ra->set_duration (ra->duration() - trim_start - trim_end);
522 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
526 if (j->reference_audio ()) {
527 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
528 DCPOMATIC_ASSERT (ra);
529 ra->set_entry_point (ra->entry_point() + trim_start);
530 ra->set_duration (ra->duration() - trim_start - trim_end);
532 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
536 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
537 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
538 DCPOMATIC_ASSERT (ra);
539 ra->set_entry_point (ra->entry_point() + trim_start);
540 ra->set_duration (ra->duration() - trim_start - trim_end);
542 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
546 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
547 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
548 DCPOMATIC_ASSERT (l);
549 l->set_entry_point (l->entry_point() + trim_start);
550 l->set_duration (l->duration() - trim_start - trim_end);
552 ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
557 /* Assume that main picture duration is the length of the reel */
558 offset += k->main_picture()->duration ();
/* NOTE(review): this is the body of the player's pass() method — its
   signature line is not visible in this view.  One pass emits the next chunk
   of output: it finds the decoder (or black/silent gap) that is furthest
   behind, advances it, then pulls any audio that is definitely complete from
   the merger and flushes delayed video.
*/
568 boost::mutex::scoped_lock lm (_mutex);
571 /* We can't pass in this state */
575 if (_playlist->length() == DCPTime()) {
576 /* Special case of an empty Film; just give one black frame */
577 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
581 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
583 shared_ptr<Piece> earliest_content;
584 optional<DCPTime> earliest_time;
586 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
591 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
592 if (t > i->content->end()) {
596 /* Given two choices at the same time, pick the one with texts so we see it before
599 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
601 earliest_content = i;
/* The black/silent fillers compete with real content for "earliest" */
615 if (earliest_content) {
619 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
620 earliest_time = _black.position ();
624 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
625 earliest_time = _silent.position ();
631 earliest_content->done = earliest_content->decoder->pass ();
634 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
635 _black.set_position (_black.position() + one_video_frame());
639 DCPTimePeriod period (_silent.period_at_position());
640 if (_last_audio_time) {
641 /* Sometimes the thing that happened last finishes fractionally before
642 this silence. Bodge the start time of the silence to fix it. I'm
643 not sure if this is the right solution --- maybe the last thing should
644 be padded `forward' rather than this thing padding `back'.
646 period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass */
648 if (period.duration() > one_video_frame()) {
649 period.to = period.from + one_video_frame();
652 _silent.set_position (period.to);
660 /* Emit any audio that is ready */
662 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
663 of our streams, or the position of the _silent.
665 DCPTime pull_to = _film->length ();
666 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
667 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
668 pull_to = i->second.last_push_end;
671 if (!_silent.done() && _silent.position() < pull_to) {
672 pull_to = _silent.position();
675 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
676 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
677 if (_last_audio_time && i->second < *_last_audio_time) {
678 /* This new data comes before the last we emitted (or the last seek); discard it */
679 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
684 } else if (_last_audio_time && i->second > *_last_audio_time) {
685 /* There's a gap between this data and the last we emitted; fill with silence */
686 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
689 emit_audio (i->first, i->second);
/* Flush any video frames whose delay window has passed */
694 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
695 do_emit_video(i->first, i->second);
702 /** @return Open subtitles for the frame at the given time, converted to images */
703 optional<PositionImage>
704 Player::open_subtitles_for_frame (DCPTime time) const
706 list<PositionImage> captions;
707 int const vfr = _film->video_frame_rate();
/* Texts active during this one-frame period, honouring the always-burn flag */
711 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
714 /* Bitmap subtitles */
715 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
716 copy (c.begin(), c.end(), back_inserter (captions));
718 /* String subtitles (rendered to an image) */
719 if (!j.string.empty ()) {
720 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
721 copy (s.begin(), s.end(), back_inserter (captions));
725 if (captions.empty ()) {
726 return optional<PositionImage> ();
/* Combine all caption images into one */
729 return merge (captions);
/* Handle a ContentVideo frame arriving from @param wp's video decoder:
   discard frames outside the valid window, fill any gap since the last
   emitted frame (repeating the previous frame or using black, with special
   handling of eye sequencing in 3D), then build and emit the PlayerVideo,
   repeating it as the frame-rate change requires.
*/
733 Player::video (weak_ptr<Piece> wp, ContentVideo video)
735 shared_ptr<Piece> piece = wp.lock ();
740 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping, drop every other source frame */
741 if (frc.skip && (video.frame % 2) == 1) {
745 /* Time of the first frame we will emit */
746 DCPTime const time = content_video_to_dcp (piece, video.frame);
748 /* Discard if it's before the content's period or the last accurate seek. We can't discard
749 if it's after the content's period here as in that case we still need to fill any gap between
750 `now' and the end of the content's period.
752 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
756 /* Fill gaps that we discover now that we have some video which needs to be emitted.
757 This is where we need to fill to.
759 DCPTime fill_to = min (time, piece->content->end());
761 if (_last_video_time) {
762 DCPTime fill_from = max (*_last_video_time, piece->content->position());
763 LastVideoMap::const_iterator last = _last_video.find (wp);
764 if (_film->three_d()) {
765 Eyes fill_to_eyes = video.eyes;
766 if (fill_to_eyes == EYES_BOTH) {
767 fill_to_eyes = EYES_LEFT;
769 if (fill_to == piece->content->end()) {
770 /* Don't fill after the end of the content */
771 fill_to_eyes = EYES_LEFT;
773 DCPTime j = fill_from;
774 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
775 if (eyes == EYES_BOTH) {
/* Alternate L/R frames until we reach the target time and eye */
778 while (j < fill_to || eyes != fill_to_eyes) {
779 if (last != _last_video.end()) {
780 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
781 copy->set_eyes (eyes);
782 emit_video (copy, j);
784 emit_video (black_player_video_frame(eyes), j);
786 if (eyes == EYES_RIGHT) {
787 j += one_video_frame();
789 eyes = increment_eyes (eyes);
/* 2D: repeat the last frame (or black) once per video frame of gap */
792 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
793 if (last != _last_video.end()) {
794 emit_video (last->second, j);
796 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this source frame, cached for gap-filling */
802 _last_video[wp].reset (
805 piece->content->video->crop (),
806 piece->content->video->fade (video.frame),
807 piece->content->video->scale().size (
808 piece->content->video, _video_container_size, _film->frame_size ()
810 _video_container_size,
813 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame-rate change */
820 for (int i = 0; i < frc.repeat; ++i) {
821 if (t < piece->content->end()) {
822 emit_video (_last_video[wp], t);
824 t += one_video_frame ();
/* Handle a block of ContentAudio from @param stream of @param wp's decoder:
   trim it to the content's period, apply gain, remap to film channels, run
   the audio processor if any, then push it into the merger and record how
   far this stream has got.
*/
829 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
831 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
833 shared_ptr<Piece> piece = wp.lock ();
838 shared_ptr<AudioContent> content = piece->content->audio;
839 DCPOMATIC_ASSERT (content);
841 /* Compute time in the DCP */
842 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
843 /* And the end of this block in the DCP */
844 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
846 /* Remove anything that comes before the start or after the end of the content */
847 if (time < piece->content->position()) {
848 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
850 /* This audio is entirely discarded */
853 content_audio.audio = cut.first;
855 } else if (time > piece->content->end()) {
858 } else if (end > piece->content->end()) {
/* Block straddles the end of the content; keep only the part inside */
859 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
860 if (remaining_frames == 0) {
863 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
864 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
865 content_audio.audio = cut;
868 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
872 if (content->gain() != 0) {
873 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
874 gain->apply_gain (content->gain ());
875 content_audio.audio = gain;
/* Remap stream channels onto the film's channel layout */
880 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
884 if (_audio_processor) {
885 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
890 _audio_merger.push (content_audio.audio, time);
891 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
/* Record the furthest point this stream has pushed to (used by pass()) */
892 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of a bitmap subtitle from a text decoder: apply the
   content's offset/scale settings to the subtitle rectangle and register it
   as active from the corresponding DCP time.
*/
896 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
898 shared_ptr<Piece> piece = wp.lock ();
899 shared_ptr<const TextContent> text = wc.lock ();
900 if (!piece || !text) {
904 /* Apply content's subtitle offsets */
905 subtitle.sub.rectangle.x += text->x_offset ();
906 subtitle.sub.rectangle.y += text->y_offset ();
908 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
909 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
910 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
912 /* Apply content's subtitle scale */
913 subtitle.sub.rectangle.width *= text->x_scale ();
914 subtitle.sub.rectangle.height *= text->y_scale ();
917 ps.bitmap.push_back (subtitle.sub);
918 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
920 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handle the start of string (plain) subtitles: apply the content's
   position/scale settings to each SubtitleString and register the result as
   active from the corresponding DCP time.
*/
924 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
926 shared_ptr<Piece> piece = wp.lock ();
927 shared_ptr<const TextContent> text = wc.lock ();
928 if (!piece || !text) {
933 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content has ended */
935 if (from > piece->content->end()) {
939 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
940 s.set_h_position (s.h_position() + text->x_offset ());
941 s.set_v_position (s.v_position() + text->y_offset ());
942 float const xs = text->x_scale();
943 float const ys = text->y_scale();
944 float size = s.size();
946 /* Adjust size to express the common part of the scaling;
947 e.g. if xs = ys = 0.5 we scale size by 2.
949 if (xs > 1e-5 && ys > 1e-5) {
950 size *= 1 / min (1 / xs, 1 / ys);
954 /* Then express aspect ratio changes */
955 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
956 s.set_aspect_adjust (xs / ys);
/* in-time is expressed with editable-rate 1000 */
959 s.set_in (dcp::Time(from.seconds(), 1000));
960 ps.string.push_back (StringText (s, text->outline_width()));
961 ps.add_fonts (text->fonts ());
964 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handle the end of a subtitle: close the active text for @param wc at the
   corresponding DCP time and, if it is not being burnt in, emit it via the
   Text signal as a separate (non-burnt) subtitle.
*/
968 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
970 if (!_active_texts[type].have (wc)) {
974 shared_ptr<Piece> piece = wp.lock ();
975 shared_ptr<const TextContent> text = wc.lock ();
976 if (!piece || !text) {
980 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops that land after the content has ended */
982 if (dcp_to > piece->content->end()) {
986 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
988 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
/* Burnt-in texts are composited onto video instead (see do_emit_video) */
989 if (text->use() && !always && !text->burn()) {
990 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
995 Player::seek (DCPTime time, bool accurate)
997 boost::mutex::scoped_lock lm (_mutex);
1000 /* We can't seek in this state */
1005 _shuffler->clear ();
1010 if (_audio_processor) {
1011 _audio_processor->flush ();
1014 _audio_merger.clear ();
1015 for (int i = 0; i < TEXT_COUNT; ++i) {
1016 _active_texts[i].clear ();
1019 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1020 if (time < i->content->position()) {
1021 /* Before; seek to the start of the content */
1022 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1024 } else if (i->content->position() <= time && time < i->content->end()) {
1025 /* During; seek to position */
1026 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1029 /* After; this piece is done */
1035 _last_video_time = time;
1036 _last_video_eyes = EYES_LEFT;
1037 _last_audio_time = time;
1039 _last_video_time = optional<DCPTime>();
1040 _last_video_eyes = optional<Eyes>();
1041 _last_audio_time = optional<DCPTime>();
1044 _black.set_position (time);
1045 _silent.set_position (time);
1047 _last_video.clear ();
/* Queue a video frame for emission, keeping a small delay buffer so that
   subtitles for a frame can arrive before the frame itself is emitted.
   Updates the last-video time/eyes bookkeeping.
*/
1051 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1053 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1054 player before the video that requires them.
1056 _delay.push_back (make_pair (pv, time));
1058 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1059 _last_video_time = time + one_video_frame();
1061 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to 3 frames buffered before actually emitting */
1063 if (_delay.size() < 3) {
1067 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1069 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire active texts that have passed, attach
   any open subtitles to be burnt into the frame, then emit it.
*/
1073 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1075 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1076 for (int i = 0; i < TEXT_COUNT; ++i) {
1077 _active_texts[i].clear_before (time);
1081 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1083 pv->set_text (subtitles.get ());
/* Emit an audio block at @param time, which must follow on exactly from the
   previously-emitted audio; advances _last_audio_time past this block.
*/
1090 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1092 /* Log if the assert below is about to fail */
1093 if (_last_audio_time && time != *_last_audio_time) {
1094 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1097 /* This audio must follow on from the previous */
1098 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1100 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @param period, in blocks of at most half a second. */
1104 Player::fill_audio (DCPTimePeriod period)
1106 if (period.from == period.to) {
1110 DCPOMATIC_ASSERT (period.from < period.to);
1112 DCPTime t = period.from;
1113 while (t < period.to) {
1114 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1115 Frame const samples = block.frames_round(_film->audio_frame_rate());
1117 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1118 silence->make_silent ();
1119 emit_audio (silence, t);
/* @return the duration of one video frame at the film's frame rate. */
1126 Player::one_video_frame () const
1128 return DCPTime::from_frames (1, _film->video_frame_rate ());
1131 pair<shared_ptr<AudioBuffers>, DCPTime>
/* Drop the part of @param audio (starting at @param time) that lies before
   @param discard_to.  @return the remaining buffer and its new start time;
   a null buffer if everything was discarded.
*/
1132 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1134 DCPTime const discard_time = discard_to - time;
1135 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1136 Frame remaining_frames = audio->frames() - discard_frames;
1137 if (remaining_frames <= 0) {
1138 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1140 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1141 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1142 return make_pair(cut, time + discard_time);
/* Set (or clear, with none) the resolution reduction used when decoding DCP
   content; rebuilds pieces.  Emits PENDING then CANCELLED (no change) or DONE.
*/
1146 Player::set_dcp_decode_reduction (optional<int> reduction)
1148 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1151 boost::mutex::scoped_lock lm (_mutex);
1153 if (reduction == _dcp_decode_reduction) {
1155 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1159 _dcp_decode_reduction = reduction;
1160 setup_pieces_unlocked ();
1163 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert ContentTime @param t within @param content to DCP
   time, by looking up the piece that wraps that content.  @return empty if
   the content is not (currently) in our piece list.
*/
1167 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1169 boost::mutex::scoped_lock lm (_mutex);
1171 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1172 if (i->content == content) {
1173 return content_time_to_dcp (i, t);
1177 /* We couldn't find this content; perhaps things are being changed over */
1178 return optional<DCPTime>();