2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
/* Identifiers for the properties that Player reports via its Change signal.
   Values 700+ keep them distinct from Film/Content property identifiers used
   elsewhere in the codebase (assumption — confirm against the other property
   enums in the project). */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for the given film and playlist.
   Wires up change notifications from the Film and the Playlist, sizes the
   video container from the film's frame size, applies the film's audio
   processor setting, and performs an initial accurate seek to time zero.
   NOTE(review): this excerpt is missing several original lines (some
   initialiser-list entries and statements) — do not treat it as complete. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
100 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
101 /* The butler must hear about this first, so since we are proxying this through to the butler we must
104 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
105 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
106 set_video_container_size (_film->frame_size ());
/* Apply the film's audio-processor configuration as if it had just changed */
108 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so the player is ready to emit from time zero */
111 seek (DCPTime (), true);
/* Thread-safe wrapper: take the state mutex and rebuild the pieces. */
120 Player::setup_pieces ()
122 boost::mutex::scoped_lock lm (_mutex);
123 setup_pieces_unlocked ();
/* Predicate: true if this piece has a decoder that produces video.
   Used when building the _black Empty tracker in setup_pieces_unlocked(). */
127 have_video (shared_ptr<Piece> piece)
129 return piece->decoder && piece->decoder->video;
/* Predicate: true if this piece has a decoder that produces audio.
   Used when building the _silent Empty tracker in setup_pieces_unlocked(). */
133 have_audio (shared_ptr<Piece> piece)
135 return piece->decoder && piece->decoder->audio;
/* Rebuild the list of Pieces (content + decoder + frame-rate change) from the
   playlist.  Caller must hold _mutex.  Also rebuilds per-stream audio state,
   the black/silent gap trackers and resets the emission positions. */
139 Player::setup_pieces_unlocked ()
/* A fresh Shuffler re-orders 3D left/right eye frames that arrive out of sequence */
144 _shuffler = new Shuffler();
145 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
147 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing/invalid */
149 if (!i->paths_valid ()) {
153 if (_ignore_video && _ignore_audio && i->text.empty()) {
154 /* We're only interested in text and this content has none */
158 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
159 FrameRateChange frc (_film, i);
162 /* Not something that we can decode; e.g. Atmos content */
/* Propagate the ignore flags into the decoders we just made */
166 if (decoder->video && _ignore_video) {
167 decoder->video->set_ignore (true);
170 if (decoder->audio && _ignore_audio) {
171 decoder->audio->set_ignore (true);
175 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
176 i->set_ignore (true);
/* DCP content gets extra configuration for referenced-reel playback */
180 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
182 dcp->set_decode_referenced (_play_referenced);
183 if (_play_referenced) {
184 dcp->set_forced_reduction (_dcp_decode_reduction);
188 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
189 _pieces.push_back (piece);
/* Route decoder output into the Player, via the Shuffler for 3D L/R content */
191 if (decoder->video) {
192 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
193 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
194 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
196 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
200 if (decoder->audio) {
201 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals to the subtitle handlers */
204 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
206 while (j != decoder->text.end()) {
207 (*j)->BitmapStart.connect (
208 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
210 (*j)->PlainStart.connect (
211 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
214 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Track each audio stream's piece and last-pushed position, for pull scheduling */
221 _stream_states.clear ();
222 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
223 if (i->content->audio) {
224 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
225 _stream_states[j] = StreamState (i, i->content->position ());
/* Trackers for periods with no video (black) and no audio (silence) */
230 _black = Empty (_film, _pieces, bind(&have_video, _1));
231 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
/* Reset emission positions back to the start */
233 _last_video_time = DCPTime ();
234 _last_video_eyes = EYES_BOTH;
235 _last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist.
   @param type     PENDING / DONE / CANCELLED phase of the change.
   @param property Content property that changed.
   @param frequent True if this change happens often (e.g. during a drag).
   The change is forwarded to our own Change signal after pieces are rebuilt. */
239 Player::playlist_content_change (ChangeType type, int property, bool frequent)
241 if (type == CHANGE_TYPE_PENDING) {
242 boost::mutex::scoped_lock lm (_mutex);
243 /* The player content is probably about to change, so we can't carry on
244 until that has happened and we've rebuilt our pieces. Stop pass()
245 and seek() from working until then.
248 } else if (type == CHANGE_TYPE_DONE) {
249 /* A change in our content has gone through. Re-build our pieces. */
252 } else if (type == CHANGE_TYPE_CANCELLED) {
253 boost::mutex::scoped_lock lm (_mutex);
/* Forward the notification to our own listeners */
257 Change (type, property, frequent);
/* Set the size of the video container that frames are fitted into.
   Emits PENDING first; if the size is unchanged emits CANCELLED and returns,
   otherwise rebuilds the cached black frame at the new size and emits DONE. */
261 Player::set_video_container_size (dcp::Size s)
263 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
266 boost::mutex::scoped_lock lm (_mutex);
268 if (s == _video_container_size) {
/* No-op: tell listeners the pending change was cancelled */
270 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
274 _video_container_size = s;
/* Re-create the black filler frame at the new container size */
276 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
277 _black_image->make_black ();
280 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a change to the playlist itself (content added/removed/reordered);
   forward it to our listeners as a PLAYLIST property change. */
284 Player::playlist_change (ChangeType type)
286 if (type == CHANGE_TYPE_DONE) {
289 Change (type, PlayerProperty::PLAYLIST, false);
/* Handle a change to a Film property that affects this player's output,
   translating it into the corresponding PlayerProperty notification and
   updating internal state (audio processor clone, merger reset) as needed. */
293 Player::film_change (ChangeType type, Film::Property p)
295 /* Here we should notice Film properties that affect our output, and
296 alert listeners that our output now would be different to how it was
297 last time we were run.
300 if (p == Film::CONTAINER) {
301 Change (type, PlayerProperty::FILM_CONTAINER, false);
302 } else if (p == Film::VIDEO_FRAME_RATE) {
303 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
304 so we need new pieces here.
306 if (type == CHANGE_TYPE_DONE) {
309 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
310 } else if (p == Film::AUDIO_PROCESSOR) {
311 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
312 boost::mutex::scoped_lock lm (_mutex);
/* Clone the processor at the film's audio rate for our own use */
313 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
315 } else if (p == Film::AUDIO_CHANNELS) {
316 if (type == CHANGE_TYPE_DONE) {
317 boost::mutex::scoped_lock lm (_mutex);
/* Channel count changed, so buffered audio is no longer valid */
318 _audio_merger.clear ();
/* @return a PlayerVideo wrapping the cached black image, used to fill
   periods where no content video is available.
   @param eyes Which eye(s) this black frame is for (3D support). */
323 shared_ptr<PlayerVideo>
324 Player::black_player_video_frame (Eyes eyes) const
326 return shared_ptr<PlayerVideo> (
328 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
331 _video_container_size,
332 _video_container_size,
/* Colour conversion does not matter for an all-black frame; use any preset */
335 PresetColourConversion::all().front().conversion,
/* No originating content and no content frame number for synthetic black */
337 boost::weak_ptr<Content>(),
338 boost::optional<Frame>()
/* Convert a DCP timeline time to a video frame index within a piece's content,
   clamping to the content's trimmed length and accounting for trim-start. */
344 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
346 DCPTime s = t - piece->content->position ();
347 s = min (piece->content->length_after_trim(_film), s);
348 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
350 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
351 then convert that ContentTime to frames at the content's rate. However this fails for
352 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
353 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
355 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
357 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content video frame index
   to a time on the DCP timeline. */
361 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
363 /* See comment in dcp_to_content_video */
364 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
365 return d + piece->content->position();
/* Convert a DCP timeline time to an audio frame count at the film's
   (resampled) audio frame rate, within the given piece. */
369 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
371 DCPTime s = t - piece->content->position ();
372 s = min (piece->content->length_after_trim(_film), s);
373 /* See notes in dcp_to_content_video */
374 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert a resampled audio frame count
   back to a time on the DCP timeline. */
378 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
380 /* See comment in dcp_to_content_video */
381 return DCPTime::from_frames (f, _film->audio_frame_rate())
382 - DCPTime (piece->content->trim_start(), piece->frc)
383 + piece->content->position();
/* Convert a DCP timeline time to a ContentTime within the given piece,
   clamped to the content's trimmed extent. */
387 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
389 DCPTime s = t - piece->content->position ();
390 s = min (piece->content->length_after_trim(_film), s);
391 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within the given piece to a DCP timeline time,
   clamped so it is never negative. */
395 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
397 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all text (subtitle/caption) content in the
   playlist.  Duplicates are not removed here. */
400 list<shared_ptr<Font> >
401 Player::get_subtitle_fonts ()
403 boost::mutex::scoped_lock lm (_mutex);
405 list<shared_ptr<Font> > fonts;
406 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
407 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
408 /* XXX: things may go wrong if there are duplicate font IDs
409 with different font files.
411 list<shared_ptr<Font> > f = j->fonts ();
412 copy (f.begin(), f.end(), back_inserter (fonts));
419 /** Set this player never to produce any video data */
421 Player::set_ignore_video ()
423 boost::mutex::scoped_lock lm (_mutex);
424 _ignore_video = true;
/* Rebuild pieces so the flag is applied to the decoders */
425 setup_pieces_unlocked ();
/* Set this player never to produce any audio data; pieces are rebuilt so
   the flag reaches the decoders. */
429 Player::set_ignore_audio ()
431 boost::mutex::scoped_lock lm (_mutex);
432 _ignore_audio = true;
433 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): the line assigning the ignore flag is missing from this
   excerpt; only the lock and the rebuild are visible. */
437 Player::set_ignore_text ()
439 boost::mutex::scoped_lock lm (_mutex);
441 setup_pieces_unlocked ();
444 /** Set the player to always burn open texts into the image regardless of the content settings */
446 Player::set_always_burn_open_subtitles ()
448 boost::mutex::scoped_lock lm (_mutex);
449 _always_burn_open_subtitles = true;
452 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature and flag-setting lines are missing from this
   excerpt; presumably this is set_fast() setting _fast — confirm in the
   full source. */
456 boost::mutex::scoped_lock lm (_mutex);
458 setup_pieces_unlocked ();
/* Make the player decode and play content that would otherwise be referenced
   directly from an existing DCP; pieces are rebuilt to apply the flag. */
462 Player::set_play_referenced ()
464 boost::mutex::scoped_lock lm (_mutex);
465 _play_referenced = true;
466 setup_pieces_unlocked ();
/* Helper for get_reel_assets(): apply reel-level trims to a reel asset and,
   if anything of it remains, append it to the list with its period on the
   DCP timeline.
   @param a               list to append to.
   @param r               the reel asset (must be non-null; asserted).
   @param reel_trim_start frames to trim from the start of this reel.
   @param reel_trim_end   frames to trim from the end of this reel.
   @param from            position of this reel on the DCP timeline.
   @param ffr             film video frame rate.
   Note: mutates r's entry point and duration in place. */
470 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
472 DCPOMATIC_ASSERT (r);
473 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
474 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
/* Only keep the asset if trimming left something to play */
475 if (r->actual_duration() > 0) {
477 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Build the list of reel assets that are referenced (rather than re-encoded)
   from DCP content in the playlist, with trims mapped onto individual reels.
   See doc/design/trim_reels.svg in the project for the trim arithmetic. */
482 list<ReferencedReelAsset>
483 Player::get_reel_assets ()
485 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
487 list<ReferencedReelAsset> a;
489 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference reel assets */
490 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
495 scoped_ptr<DCPDecoder> decoder;
497 decoder.reset (new DCPDecoder (_film, j, false));
502 DCPOMATIC_ASSERT (j->video_frame_rate ());
503 double const cfr = j->video_frame_rate().get();
504 Frame const trim_start = j->trim_start().frames_round (cfr);
505 Frame const trim_end = j->trim_end().frames_round (cfr);
506 int const ffr = _film->video_frame_rate ();
508 /* position in the asset from the start */
509 int64_t offset_from_start = 0;
510 /* position in the asset from the end */
511 int64_t offset_from_end = 0;
/* First pass: total duration, so we can work backwards from the end */
512 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
513 /* Assume that main picture duration is the length of the reel */
514 offset_from_end += k->main_picture()->actual_duration();
/* Second pass: compute per-reel trims and collect referenced assets */
517 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
519 /* Assume that main picture duration is the length of the reel */
520 int64_t const reel_duration = k->main_picture()->actual_duration();
522 /* See doc/design/trim_reels.svg */
523 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
524 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
526 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
527 if (j->reference_video ()) {
528 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
531 if (j->reference_audio ()) {
532 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
535 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
536 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
539 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
540 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
541 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
545 offset_from_start += reel_duration;
546 offset_from_end -= reel_duration;
/* NOTE(review): the signature line is missing from this excerpt; from the
   logic and comments this is Player::pass(), which makes the decoder (or gap
   filler) that is furthest behind emit some data, then flushes any audio that
   is definitely complete up to the earliest outstanding position. */
556 boost::mutex::scoped_lock lm (_mutex);
559 /* We can't pass in this state */
563 if (_playlist->length(_film) == DCPTime()) {
564 /* Special case of an empty Film; just give one black frame */
565 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
569 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
571 shared_ptr<Piece> earliest_content;
572 optional<DCPTime> earliest_time;
574 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, on the DCP timeline */
579 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
580 if (t > i->content->end(_film)) {
584 /* Given two choices at the same time, pick the one with texts so we see it before
587 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
589 earliest_content = i;
603 if (earliest_content) {
/* A black or silent gap may be even further behind than any decoder */
607 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
608 earliest_time = _black.position ();
612 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
613 earliest_time = _silent.position ();
/* Case: advance the most-behind decoder by one pass */
620 earliest_content->done = earliest_content->decoder->pass ();
621 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
622 if (dcp && !_play_referenced && dcp->reference_audio()) {
623 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
624 to `hide' the fact that no audio was emitted during the referenced DCP (though
625 we need to behave as though it was).
627 _last_audio_time = dcp->end (_film);
/* Case: emit one black frame to fill a video gap */
632 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
633 _black.set_position (_black.position() + one_video_frame());
/* Case: emit up to one video frame's worth of silence to fill an audio gap */
637 DCPTimePeriod period (_silent.period_at_position());
638 if (_last_audio_time) {
639 /* Sometimes the thing that happened last finishes fractionally before
640 or after this silence. Bodge the start time of the silence to fix it.
642 DCPOMATIC_ASSERT (labs(period.from.get() - _last_audio_time->get()) < 2);
643 period.from = *_last_audio_time;
645 if (period.duration() > one_video_frame()) {
646 period.to = period.from + one_video_frame();
649 _silent.set_position (period.to);
657 /* Emit any audio that is ready */
659 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
660 of our streams, or the position of the _silent.
662 DCPTime pull_to = _film->length ();
663 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
664 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
665 pull_to = i->second.last_push_end;
668 if (!_silent.done() && _silent.position() < pull_to) {
669 pull_to = _silent.position();
672 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
673 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
674 if (_last_audio_time && i->second < *_last_audio_time) {
675 /* This new data comes before the last we emitted (or the last seek); discard it */
676 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
681 } else if (_last_audio_time && i->second > *_last_audio_time) {
682 /* There's a gap between this data and the last we emitted; fill with silence */
683 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
686 emit_audio (i->first, i->second);
/* Flush the video delay queue (see emit_video for why it exists) */
691 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
692 do_emit_video(i->first, i->second);
699 /** @return Open subtitles for the frame at the given time, converted to images */
700 optional<PositionImage>
701 Player::open_subtitles_for_frame (DCPTime time) const
703 list<PositionImage> captions;
704 int const vfr = _film->video_frame_rate();
/* Gather all open subtitles that should be burnt in during this one frame */
708 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
711 /* Bitmap subtitles */
712 BOOST_FOREACH (BitmapText i, j.bitmap) {
717 /* i.image will already have been scaled to fit _video_container_size */
718 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Convert the fractional rectangle position to pixels in the container */
724 lrint (_video_container_size.width * i.rectangle.x),
725 lrint (_video_container_size.height * i.rectangle.y)
731 /* String subtitles (rendered to an image) */
732 if (!j.string.empty ()) {
733 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
734 copy (s.begin(), s.end(), back_inserter (captions));
738 if (captions.empty ()) {
739 return optional<PositionImage> ();
/* Combine all caption images into a single positioned image */
742 return merge (captions);
/* Handler for video data arriving from a piece's decoder (possibly via the
   Shuffler).  Discards out-of-range frames, fills any gap since the last
   emitted video (with repeats of the last frame or black, handling 3D eye
   sequencing), then wraps the frame in a PlayerVideo and emits it, repeating
   as required by the frame-rate change. */
746 Player::video (weak_ptr<Piece> wp, ContentVideo video)
748 shared_ptr<Piece> piece = wp.lock ();
753 FrameRateChange frc (_film, piece->content);
/* When skipping (content faster than DCP), drop every other frame */
754 if (frc.skip && (video.frame % 2) == 1) {
758 /* Time of the first frame we will emit */
759 DCPTime const time = content_video_to_dcp (piece, video.frame);
761 /* Discard if it's before the content's period or the last accurate seek. We can't discard
762 if it's after the content's period here as in that case we still need to fill any gap between
763 `now' and the end of the content's period.
765 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
769 /* Fill gaps that we discover now that we have some video which needs to be emitted.
770 This is where we need to fill to.
772 DCPTime fill_to = min (time, piece->content->end(_film));
774 if (_last_video_time) {
775 DCPTime fill_from = max (*_last_video_time, piece->content->position());
777 /* Fill if we have more than half a frame to do */
778 if ((fill_to - fill_from) > one_video_frame() / 2) {
779 LastVideoMap::const_iterator last = _last_video.find (wp);
780 if (_film->three_d()) {
/* 3D: fill eye-by-eye so left/right stay in sequence */
781 Eyes fill_to_eyes = video.eyes;
782 if (fill_to_eyes == EYES_BOTH) {
783 fill_to_eyes = EYES_LEFT;
785 if (fill_to == piece->content->end(_film)) {
786 /* Don't fill after the end of the content */
787 fill_to_eyes = EYES_LEFT;
789 DCPTime j = fill_from;
790 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
791 if (eyes == EYES_BOTH) {
794 while (j < fill_to || eyes != fill_to_eyes) {
795 if (last != _last_video.end()) {
/* Repeat the last frame we saw, re-labelled for this eye */
796 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
797 copy->set_eyes (eyes);
798 emit_video (copy, j);
800 emit_video (black_player_video_frame(eyes), j);
/* Advance time only after the right eye; then cycle eyes */
802 if (eyes == EYES_RIGHT) {
803 j += one_video_frame();
805 eyes = increment_eyes (eyes);
/* 2D: fill one whole frame at a time */
808 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
809 if (last != _last_video.end()) {
810 emit_video (last->second, j);
812 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame and remember it for gap-filling */
819 _last_video[wp].reset (
822 piece->content->video->crop (),
823 piece->content->video->fade (_film, video.frame),
824 piece->content->video->scale().size (
825 piece->content->video, _video_container_size, _film->frame_size ()
827 _video_container_size,
830 piece->content->video->colour_conversion(),
831 piece->content->video->range(),
/* Emit the frame, repeated if the frame-rate change requires it */
838 for (int i = 0; i < frc.repeat; ++i) {
839 if (t < piece->content->end(_film)) {
840 emit_video (_last_video[wp], t);
842 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder.  Places the block
   on the DCP timeline, trims anything outside the content's period, applies
   gain, remapping and any audio processor, then pushes it into the merger
   and records the stream's new last-push-end position. */
847 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
849 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
851 shared_ptr<Piece> piece = wp.lock ();
856 shared_ptr<AudioContent> content = piece->content->audio;
857 DCPOMATIC_ASSERT (content);
859 int const rfr = content->resampled_frame_rate (_film);
861 /* Compute time in the DCP */
862 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
863 /* And the end of this block in the DCP */
864 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
866 /* Remove anything that comes before the start or after the end of the content */
867 if (time < piece->content->position()) {
868 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
870 /* This audio is entirely discarded */
873 content_audio.audio = cut.first;
875 } else if (time > piece->content->end(_film)) {
/* Entirely after the content: ignore */
878 } else if (end > piece->content->end(_film)) {
/* Straddles the end of the content: keep only the in-range frames */
879 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
880 if (remaining_frames == 0) {
883 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
884 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
885 content_audio.audio = cut;
888 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
892 if (content->gain() != 0) {
893 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
894 gain->apply_gain (content->gain ());
895 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
900 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
904 if (_audio_processor) {
905 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Queue for merging with other streams and note how far this stream has got */
910 _audio_merger.push (content_audio.audio, time);
911 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
912 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a bitmap subtitle starting: apply the content's offset/scale
   settings to the subtitle rectangle, scale the image to the container size,
   and register the subtitle as active from its DCP start time. */
916 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
918 shared_ptr<Piece> piece = wp.lock ();
919 shared_ptr<const TextContent> text = wc.lock ();
920 if (!piece || !text) {
924 /* Apply content's subtitle offsets */
925 subtitle.sub.rectangle.x += text->x_offset ();
926 subtitle.sub.rectangle.y += text->y_offset ();
928 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
929 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
930 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
932 /* Apply content's subtitle scale */
933 subtitle.sub.rectangle.width *= text->x_scale ();
934 subtitle.sub.rectangle.height *= text->y_scale ();
937 shared_ptr<Image> image = subtitle.sub.image;
938 /* We will scale the subtitle up to fit _video_container_size */
939 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
940 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
941 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
/* Record this subtitle as active from `from' until its matching stop */
943 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a plain (string) subtitle starting: apply the content's
   position offsets and scaling to each SubtitleString, set its start time,
   and register it as active from its DCP start time. */
947 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
949 shared_ptr<Piece> piece = wp.lock ();
950 shared_ptr<const TextContent> text = wc.lock ();
951 if (!piece || !text) {
956 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content's period */
958 if (from > piece->content->end(_film)) {
962 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
963 s.set_h_position (s.h_position() + text->x_offset ());
964 s.set_v_position (s.v_position() + text->y_offset ());
965 float const xs = text->x_scale();
966 float const ys = text->y_scale();
967 float size = s.size();
969 /* Adjust size to express the common part of the scaling;
970 e.g. if xs = ys = 0.5 we scale size by 2.
972 if (xs > 1e-5 && ys > 1e-5) {
973 size *= 1 / min (1 / xs, 1 / ys);
977 /* Then express aspect ratio changes */
978 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
979 s.set_aspect_adjust (xs / ys);
/* Start time in SMPTE-style editable units (1000 per second) */
982 s.set_in (dcp::Time(from.seconds(), 1000));
983 ps.string.push_back (StringText (s, text->outline_width()));
984 ps.add_fonts (text->fonts ());
/* Record these subtitles as active from `from' until their matching stop */
987 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a subtitle ending at content time `to': close the active
   subtitle and, if it is not being burnt into the image, emit it via the
   Text signal with its full DCP time period. */
991 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
993 shared_ptr<const TextContent> text = wc.lock ();
/* Nothing to stop if we never saw this subtitle start */
998 if (!_active_texts[text->type()].have(wc)) {
1002 shared_ptr<Piece> piece = wp.lock ();
1007 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops that fall after the content's period */
1009 if (dcp_to > piece->content->end(_film)) {
1013 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1015 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
/* Burnt-in subtitles are composited onto video instead of emitted here */
1016 if (text->use() && !always && !text->burn()) {
1017 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to `time' on the DCP timeline.
   @param accurate true to get frames exactly at `time' (slower); false to
   accept the nearest convenient earlier position.
   Flushes all buffered state (shuffler, processor, merger, active texts),
   seeks every piece, and resets the last-emitted positions. */
1022 Player::seek (DCPTime time, bool accurate)
1024 boost::mutex::scoped_lock lm (_mutex);
1027 /* We can't seek in this state */
1032 _shuffler->clear ();
1037 if (_audio_processor) {
1038 _audio_processor->flush ();
1041 _audio_merger.clear ();
1042 for (int i = 0; i < TEXT_COUNT; ++i) {
1043 _active_texts[i].clear ();
1046 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1047 if (time < i->content->position()) {
1048 /* Before; seek to the start of the content */
1049 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1051 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1052 /* During; seek to position */
1053 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1056 /* After; this piece is done */
/* For an accurate seek we know exactly where emission resumes; otherwise
   leave the positions unset until data arrives */
1062 _last_video_time = time;
1063 _last_video_eyes = EYES_LEFT;
1064 _last_audio_time = time;
1066 _last_video_time = optional<DCPTime>();
1067 _last_video_eyes = optional<Eyes>();
1068 _last_audio_time = optional<DCPTime>();
1071 _black.set_position (time);
1072 _silent.set_position (time);
1074 _last_video.clear ();
/* Queue a video frame for emission.  Frames pass through a small delay queue
   so that subtitles which arrive slightly later than their video can still be
   composited before the frame is finally emitted by do_emit_video(). */
1078 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1080 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1081 player before the video that requires them.
1083 _delay.push_back (make_pair (pv, time));
/* Advance the clock once the right eye (or a 2D frame) has been queued */
1085 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1086 _last_video_time = time + one_video_frame();
1088 _last_video_eyes = increment_eyes (pv->eyes());
/* Hold back until the queue has a few frames of slack */
1090 if (_delay.size() < 3) {
1094 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1096 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire stale active texts, burn in any open
   subtitles for this time, and fire the Video signal (the signal-firing line
   is not visible in this excerpt). */
1100 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1102 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1103 for (int i = 0; i < TEXT_COUNT; ++i) {
1104 _active_texts[i].clear_before (time);
1108 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1110 pv->set_text (subtitles.get ());
/* Emit an audio block via the Audio signal and advance _last_audio_time.
   Audio must be contiguous: a gap or overlap of more than one 48kHz-ish
   sample relative to the previous emission is a programming error. */
1117 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1119 /* Log if the assert below is about to fail */
1120 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1121 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1124 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1125 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1126 Audio (data, time, _film->audio_frame_rate());
1127 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Fill the given period with silence, emitted in blocks of at most half a
   second so no single buffer is too large. */
1131 Player::fill_audio (DCPTimePeriod period)
1133 if (period.from == period.to) {
1137 DCPOMATIC_ASSERT (period.from < period.to);
1139 DCPTime t = period.from;
1140 while (t < period.to) {
1141 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1142 Frame const samples = block.frames_round(_film->audio_frame_rate());
1144 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1145 silence->make_silent ();
1146 emit_audio (silence, t);
/* @return the duration of one video frame at the film's video frame rate */
1153 Player::one_video_frame () const
1155 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of `audio' (which starts at `time') that falls before
   `discard_to'.
   @return the remaining audio and its new start time; a null buffer if
   nothing remains after the discard. */
1158 pair<shared_ptr<AudioBuffers>, DCPTime>
1159 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1161 DCPTime const discard_time = discard_to - time;
1162 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1163 Frame remaining_frames = audio->frames() - discard_frames;
1164 if (remaining_frames <= 0) {
1165 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1167 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1168 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1169 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode resolution reduction applied to DCP content.
   Follows the PENDING / CANCELLED-or-DONE change-signal protocol and rebuilds
   pieces when the value actually changes. */
1173 Player::set_dcp_decode_reduction (optional<int> reduction)
1175 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1178 boost::mutex::scoped_lock lm (_mutex);
1180 if (reduction == _dcp_decode_reduction) {
/* No-op: cancel the pending change */
1182 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1186 _dcp_decode_reduction = reduction;
1187 setup_pieces_unlocked ();
1190 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime within the given Content to a DCP
   timeline time, by finding the piece that wraps that content.
   Returns no value if the content is not (or no longer) in our pieces. */
1194 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1196 boost::mutex::scoped_lock lm (_mutex);
1198 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1199 if (i->content == content) {
1200 return content_time_to_dcp (i, t);
1204 /* We couldn't find this content; perhaps things are being changed over */
1205 return optional<DCPTime>();