2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
/* Bring boost smart-pointer/optional names and the dcpomatic namespace into scope
   for this translation unit (this codebase predates a switch to std:: equivalents). */
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 using namespace dcpomatic;
/* Property identifiers emitted with the Change signal so listeners can tell
   which aspect of the Player's output has changed.  The 7xx range presumably
   keeps them distinct from other property ID ranges in the project — TODO confirm. */
83 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
84 int const PlayerProperty::PLAYLIST = 701;
85 int const PlayerProperty::FILM_CONTAINER = 702;
86 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
87 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
88 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player for the whole of @film's own playlist.
   NOTE(review): this extract elides some lines (e.g. the _film initializer and the
   constructor body); flags default to "emit everything, not referenced, not always-burnt". */
90 Player::Player (shared_ptr<const Film> film)
93 , _ignore_video (false)
94 , _ignore_audio (false)
95 , _ignore_text (false)
96 , _always_burn_open_subtitles (false)
98 , _tolerant (film->tolerant())
99 , _play_referenced (false)
100 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player for an explicit playlist (rather than the film's own one).
   The trailing statements here appear to come from a shared construct() step
   (some lines elided in this extract): wire up change signals, size the video
   container, set up the audio processor and seek to the start. */
106 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
108 , _playlist (playlist_)
110 , _ignore_video (false)
111 , _ignore_audio (false)
112 , _ignore_text (false)
113 , _always_burn_open_subtitles (false)
115 , _tolerant (film->tolerant())
116 , _play_referenced (false)
117 , _audio_merger (_film->audio_frame_rate())
126 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
127 /* The butler must hear about this first, so since we are proxying this through to the butler we must
130 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
131 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
132 set_video_container_size (_film->frame_size ());
/* Seed the audio processor as if Film::AUDIO_PROCESSOR had just changed */
134 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to time zero so decoders start in a known state */
137 seek (DCPTime (), true);
/* Public locking wrapper: take _mutex then rebuild the piece list. */
146 Player::setup_pieces ()
148 boost::mutex::scoped_lock lm (_mutex);
149 setup_pieces_unlocked ();
/* Predicate for Empty: true if this content has video and it is enabled for use. */
154 have_video (shared_ptr<const Content> content)
156 return static_cast<bool>(content->video) && content->video->use();
/* Predicate for Empty: true if this content carries any audio. */
160 have_audio (shared_ptr<const Content> content)
162 return static_cast<bool>(content->audio);
/* Rebuild _pieces from the playlist: create (or re-use) a decoder per content,
   wire decoder signals to the Player's handlers, and reset playback state.
   Caller must hold _mutex.  NOTE(review): some lines (closing braces, a few
   statements) are elided in this extract. */
166 Player::setup_pieces_unlocked ()
168 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so their decoders can be re-used below */
170 list<shared_ptr<Piece> > old_pieces = _pieces;
174 _shuffler = new Shuffler();
175 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
177 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
179 if (!i->paths_valid ()) {
183 if (_ignore_video && _ignore_audio && i->text.empty()) {
184 /* We're only interested in text and this content has none */
/* Re-use any old decoder for the same content to preserve its state */
188 shared_ptr<Decoder> old_decoder;
190 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
191 if (j->content == i) {
192 old_decoder = j->decoder;
198 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
199 DCPOMATIC_ASSERT (decoder);
201 FrameRateChange frc (_film, i);
203 if (decoder->video && _ignore_video) {
204 decoder->video->set_ignore (true);
207 if (decoder->audio && _ignore_audio) {
208 decoder->audio->set_ignore (true);
212 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
213 i->set_ignore (true);
/* DCP content gets extra configuration for referenced-reel playback */
217 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
219 dcp->set_decode_referenced (_play_referenced);
220 if (_play_referenced) {
221 dcp->set_forced_reduction (_dcp_decode_reduction);
225 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
226 _pieces.push_back (piece);
228 if (decoder->video) {
229 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
230 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
231 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
233 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
237 if (decoder->audio) {
238 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Hook up each text decoder's start/stop signals (weak_ptrs avoid keeping pieces alive) */
241 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
243 while (j != decoder->text.end()) {
244 (*j)->BitmapStart.connect (
245 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
247 (*j)->PlainStart.connect (
248 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
251 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
257 if (decoder->atmos) {
258 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Track per-audio-stream push positions so pass() knows how far it may pull merged audio */
262 _stream_states.clear ();
263 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
264 BOOST_FOREACH (AudioStreamPtr j, i->audio_streams()) {
265 _stream_states[j] = StreamState (i, i->content->position ());
/* Gap trackers: periods with no video (black) and no audio (silence) */
269 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
270 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
272 _last_video_time = DCPTime ();
273 _last_video_eyes = EYES_BOTH;
274 _last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist, then proxy the
   notification on to our own listeners via the Change signal. */
278 Player::playlist_content_change (ChangeType type, int property, bool frequent)
280 if (type == CHANGE_TYPE_PENDING) {
281 /* The player content is probably about to change, so we can't carry on
282 until that has happened and we've rebuilt our pieces. Stop pass()
283 and seek() from working until then.
286 } else if (type == CHANGE_TYPE_DONE) {
287 /* A change in our content has gone through. Re-build our pieces. */
290 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Forward the change (whatever its type) to our listeners */
294 Change (type, property, frequent);
/* Set the size into which video is letterboxed/placed, rebuilding the cached
   black frame to match.  Emits PENDING then DONE (or CANCELLED if unchanged). */
298 Player::set_video_container_size (dcp::Size s)
300 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
303 boost::mutex::scoped_lock lm (_mutex);
305 if (s == _video_container_size) {
307 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
311 _video_container_size = s;
/* Pre-render a black frame at the new size for gap filling */
313 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
314 _black_image->make_black ();
317 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* A whole-playlist change: rebuild pieces on DONE, then tell listeners. */
321 Player::playlist_change (ChangeType type)
323 if (type == CHANGE_TYPE_DONE) {
326 Change (type, PlayerProperty::PLAYLIST, false);
/* React to Film property changes that affect our output, translating them
   into PlayerProperty change notifications and internal state updates. */
330 Player::film_change (ChangeType type, Film::Property p)
332 /* Here we should notice Film properties that affect our output, and
333 alert listeners that our output now would be different to how it was
334 last time we were run.
337 if (p == Film::CONTAINER) {
338 Change (type, PlayerProperty::FILM_CONTAINER, false);
339 } else if (p == Film::VIDEO_FRAME_RATE) {
340 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
341 so we need new pieces here.
343 if (type == CHANGE_TYPE_DONE) {
346 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
347 } else if (p == Film::AUDIO_PROCESSOR) {
/* Re-clone the film's audio processor at our output rate */
348 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
349 boost::mutex::scoped_lock lm (_mutex);
350 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
352 } else if (p == Film::AUDIO_CHANNELS) {
353 if (type == CHANGE_TYPE_DONE) {
354 boost::mutex::scoped_lock lm (_mutex);
/* Channel count changed: any merged-but-unemitted audio is now invalid */
355 _audio_merger.clear ();
/* Build a PlayerVideo wrapping the cached black image, used to fill gaps
   where no content supplies video.  @param eyes which eye(s) the frame is for.
   NOTE(review): several constructor-argument lines are elided in this extract. */
360 shared_ptr<PlayerVideo>
361 Player::black_player_video_frame (Eyes eyes) const
363 return shared_ptr<PlayerVideo> (
365 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
368 _video_container_size,
369 _video_container_size,
372 PresetColourConversion::all().front().conversion,
374 boost::weak_ptr<Piece>(),
375 boost::optional<Frame>(),
/* Collect the fonts used by every piece's subtitles into one flat list. */
382 list<shared_ptr<Font> >
383 Player::get_subtitle_fonts ()
385 boost::mutex::scoped_lock lm (_mutex);
387 list<shared_ptr<Font> > fonts;
388 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
389 /* XXX: things may go wrong if there are duplicate font IDs
390 with different font files.
392 list<shared_ptr<Font> > f = i->fonts ();
393 copy (f.begin(), f.end(), back_inserter(fonts));
399 /** Set this player never to produce any video data */
401 Player::set_ignore_video ()
403 boost::mutex::scoped_lock lm (_mutex);
404 _ignore_video = true;
/* Rebuild pieces so decoders pick up the new ignore flag */
405 setup_pieces_unlocked ();
/* Set this player never to produce any audio data; rebuilds pieces. */
409 Player::set_ignore_audio ()
411 boost::mutex::scoped_lock lm (_mutex);
412 _ignore_audio = true;
413 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): the line setting _ignore_text appears elided in this extract. */
417 Player::set_ignore_text ()
419 boost::mutex::scoped_lock lm (_mutex);
421 setup_pieces_unlocked ();
424 /** Set the player to always burn open texts into the image regardless of the content settings */
426 Player::set_always_burn_open_subtitles ()
428 boost::mutex::scoped_lock lm (_mutex);
/* No setup_pieces needed: the flag is consulted at render time */
429 _always_burn_open_subtitles = true;
432 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and the line setting the fast flag
   appear elided in this extract; presumably this is Player::set_fast(). */
436 boost::mutex::scoped_lock lm (_mutex);
438 setup_pieces_unlocked ();
/* Make the player decode and play content that would otherwise be referenced
   directly from an existing DCP; rebuilds pieces. */
442 Player::set_play_referenced ()
444 boost::mutex::scoped_lock lm (_mutex);
445 _play_referenced = true;
446 setup_pieces_unlocked ();
/* Trim reel asset @r by the given start/end frame counts and, if anything
   remains, append it to @a as a ReferencedReelAsset positioned at @from.
   @param ffr the film's video frame rate, used to convert durations to DCPTime. */
450 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
452 DCPOMATIC_ASSERT (r);
/* entry_point may be unset; treat that as 0 before applying the trim */
453 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
454 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
455 if (r->actual_duration() > 0) {
457 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Return the reel assets of any DCP content which is set to be referenced
   (rather than re-encoded), trimmed to the content's trim settings.
   See doc/design/trim_reels.svg in the project for the trim arithmetic. */
462 list<ReferencedReelAsset>
463 Player::get_reel_assets ()
465 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
467 list<ReferencedReelAsset> a;
469 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Only DCP content can be referenced */
470 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
475 scoped_ptr<DCPDecoder> decoder;
477 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
482 DCPOMATIC_ASSERT (j->video_frame_rate ());
483 double const cfr = j->video_frame_rate().get();
/* Convert the content's trims from time to frames at the content rate */
484 Frame const trim_start = j->trim_start().frames_round (cfr);
485 Frame const trim_end = j->trim_end().frames_round (cfr);
486 int const ffr = _film->video_frame_rate ();
488 /* position in the asset from the start */
489 int64_t offset_from_start = 0;
490 /* position in the asset from the end */
491 int64_t offset_from_end = 0;
492 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
493 /* Assume that main picture duration is the length of the reel */
494 offset_from_end += k->main_picture()->actual_duration();
497 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
499 /* Assume that main picture duration is the length of the reel */
500 int64_t const reel_duration = k->main_picture()->actual_duration();
502 /* See doc/design/trim_reels.svg */
503 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
504 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
506 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
507 if (j->reference_video ()) {
508 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
511 if (j->reference_audio ()) {
512 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
515 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
516 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
519 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
520 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
521 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
/* Advance the running offsets for the next reel */
525 offset_from_start += reel_duration;
526 offset_from_end -= reel_duration;
/* NOTE(review): the signature line is elided in this extract; presumably this
   is Player::pass(), which advances whichever decoder (or black/silence gap)
   is furthest behind, then flushes any merged audio and delayed video. */
536 boost::mutex::scoped_lock lm (_mutex);
539 /* We can't pass in this state */
543 if (_playback_length == DCPTime()) {
544 /* Special; just give one black frame */
545 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
549 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
551 shared_ptr<Piece> earliest_content;
552 optional<DCPTime> earliest_time;
554 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
559 DCPTime const t = i->content_time_to_dcp (max(i->decoder->position(), i->trim_start()));
560 if (t > i->end(_film)) {
564 /* Given two choices at the same time, pick the one with texts so we see it before
567 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
569 earliest_content = i;
583 if (earliest_content) {
/* Black/silence gap fillers compete with content for "earliest" status */
587 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
588 earliest_time = _black.position ();
592 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
593 earliest_time = _silent.position ();
600 earliest_content->done = earliest_content->decoder->pass ();
601 if (!_play_referenced && earliest_content->referenced_audio()) {
602 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
603 to `hide' the fact that no audio was emitted during the referenced DCP (though
604 we need to behave as though it was).
606 _last_audio_time = earliest_content->end (_film);
611 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
612 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
613 _black.set_position (_black.position() + one_video_frame());
617 DCPTimePeriod period (_silent.period_at_position());
618 if (_last_audio_time) {
619 /* Sometimes the thing that happened last finishes fractionally before
620 or after this silence. Bodge the start time of the silence to fix it.
621 I think this is nothing to worry about since we will just add or
622 remove a little silence at the end of some content.
624 int64_t const error = labs(period.from.get() - _last_audio_time->get());
625 /* Let's not worry about less than a frame at 24fps */
626 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
627 if (error >= too_much_error) {
628 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
630 DCPOMATIC_ASSERT (error < too_much_error);
631 period.from = *_last_audio_time;
/* Emit at most one video frame's worth of silence per pass */
633 if (period.duration() > one_video_frame()) {
634 period.to = period.from + one_video_frame();
637 _silent.set_position (period.to);
645 /* Emit any audio that is ready */
647 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
648 of our streams, or the position of the _silent.
650 DCPTime pull_to = _playback_length;
651 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
652 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
653 pull_to = i->second.last_push_end;
656 if (!_silent.done() && _silent.position() < pull_to) {
657 pull_to = _silent.position();
660 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
661 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
662 if (_last_audio_time && i->second < *_last_audio_time) {
663 /* This new data comes before the last we emitted (or the last seek); discard it */
664 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
669 } else if (_last_audio_time && i->second > *_last_audio_time) {
670 /* There's a gap between this data and the last we emitted; fill with silence */
671 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
674 emit_audio (i->first, i->second);
/* Flush the video-delay queue (see emit_video for why it exists) */
679 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
680 do_emit_video(i->first, i->second);
687 /** @return Open subtitles for the frame at the given time, converted to images */
688 optional<PositionImage>
689 Player::open_subtitles_for_frame (DCPTime time) const
691 list<PositionImage> captions;
692 int const vfr = _film->video_frame_rate();
/* Iterate burnt-in open subtitles active during this one-frame period */
696 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
699 /* Bitmap subtitles */
700 BOOST_FOREACH (BitmapText i, j.bitmap) {
705 /* i.image will already have been scaled to fit _video_container_size */
706 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Convert the proportional rectangle position to pixels */
712 lrint (_video_container_size.width * i.rectangle.x),
713 lrint (_video_container_size.height * i.rectangle.y)
719 /* String subtitles (rendered to an image) */
720 if (!j.string.empty ()) {
721 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
722 copy (s.begin(), s.end(), back_inserter (captions));
726 if (captions.empty ()) {
727 return optional<PositionImage> ();
/* Composite all caption images into a single positioned image */
730 return merge (captions);
/* Handler for video data arriving from a piece's decoder: convert the content
   frame to DCP time, fill any gap since the last emitted frame (with the
   previous frame or black, handling 3D eye alternation), then emit the frame
   (repeated per FrameRateChange).  NOTE(review): some lines elided in extract. */
734 Player::video (weak_ptr<Piece> wp, ContentVideo video)
736 shared_ptr<Piece> piece = wp.lock ();
741 if (!piece->video_use()) {
/* Frame-rate conversion may require dropping every other frame */
745 if (piece->frc.skip && (video.frame % 2) == 1) {
749 /* Time of the first frame we will emit */
750 DCPTime const time = piece->content_video_to_dcp (video.frame);
752 /* Discard if it's before the content's period or the last accurate seek. We can't discard
753 if it's after the content's period here as in that case we still need to fill any gap between
754 `now' and the end of the content's period.
756 if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
760 /* Fill gaps that we discover now that we have some video which needs to be emitted.
761 This is where we need to fill to.
763 DCPTime fill_to = min (time, piece->end(_film));
765 if (_last_video_time) {
766 DCPTime fill_from = max (*_last_video_time, piece->position());
768 /* Fill if we have more than half a frame to do */
769 if ((fill_to - fill_from) > one_video_frame() / 2) {
770 LastVideoMap::const_iterator last = _last_video.find (wp);
771 if (_film->three_d()) {
772 Eyes fill_to_eyes = video.eyes;
773 if (fill_to_eyes == EYES_BOTH) {
774 fill_to_eyes = EYES_LEFT;
776 if (fill_to == piece->end(_film)) {
777 /* Don't fill after the end of the content */
778 fill_to_eyes = EYES_LEFT;
780 DCPTime j = fill_from;
781 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
782 if (eyes == EYES_BOTH) {
/* Alternate L/R eyes, advancing time only after each RIGHT frame */
785 while (j < fill_to || eyes != fill_to_eyes) {
786 if (last != _last_video.end()) {
787 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
788 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
789 copy->set_eyes (eyes);
790 emit_video (copy, j);
792 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
793 emit_video (black_player_video_frame(eyes), j);
795 if (eyes == EYES_RIGHT) {
796 j += one_video_frame();
798 eyes = increment_eyes (eyes);
/* 2D: repeat the last frame (or black) once per missing frame */
801 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
802 if (last != _last_video.end()) {
803 emit_video (last->second, j);
805 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Cache the new frame as a PlayerVideo so it can be re-emitted for fills/repeats */
812 _last_video[wp].reset (
816 piece->video_fade(_film, video.frame),
817 scale_for_display(piece->video_scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
818 _video_container_size,
821 piece->video_colour_conversion(),
822 piece->video_range(),
/* Emit the frame, repeated as the frame-rate change requires, within the content period */
830 for (int i = 0; i < piece->frc.repeat; ++i) {
831 if (t < piece->end(_film)) {
832 emit_video (_last_video[wp], t);
834 t += one_video_frame ();
/* Handler for audio data from a piece's decoder: trim it to the content's
   DCP period, apply gain/remap/processing, then push it into the merger. */
839 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
841 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
843 shared_ptr<Piece> piece = wp.lock ();
848 int const rfr = piece->audio_resampled_frame_rate (_film);
850 /* Compute time in the DCP */
851 DCPTime time = piece->resampled_audio_to_dcp (_film, content_audio.frame);
852 /* And the end of this block in the DCP */
853 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
855 /* Remove anything that comes before the start or after the end of the content */
856 if (time < piece->position()) {
857 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->position());
859 /* This audio is entirely discarded */
862 content_audio.audio = cut.first;
864 } else if (time > piece->end(_film)) {
867 } else if (end > piece->end(_film)) {
/* Truncate the block so it does not overrun the content's end */
868 Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
869 if (remaining_frames == 0) {
872 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
875 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain, working on a copy so the decoder's buffer is untouched */
879 if (piece->audio_gain() != 0) {
880 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
881 gain->apply_gain (piece->audio_gain());
882 content_audio.audio = gain;
/* Map the stream's channels onto the film's output channels */
887 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
891 if (_audio_processor) {
892 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and record how far this stream has been pushed */
897 _audio_merger.push (content_audio.audio, time);
898 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
899 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of a bitmap subtitle: apply the content's offsets and
   scales, scale the image to the video container, and register it as active. */
903 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
905 shared_ptr<Piece> piece = wp.lock ();
906 shared_ptr<const TextContent> text = wc.lock ();
907 if (!piece || !text) {
911 /* Apply content's subtitle offsets */
912 subtitle.sub.rectangle.x += text->x_offset ();
913 subtitle.sub.rectangle.y += text->y_offset ();
915 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
916 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
917 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
919 /* Apply content's subtitle scale */
920 subtitle.sub.rectangle.width *= text->x_scale ();
921 subtitle.sub.rectangle.height *= text->y_scale ();
924 shared_ptr<Image> image = subtitle.sub.image;
926 /* We will scale the subtitle up to fit _video_container_size */
927 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
928 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* A zero-area subtitle cannot be rendered; drop it */
929 if (width == 0 || height == 0) {
933 dcp::Size scaled_size (width, height);
934 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
935 DCPTime from (piece->content_time_to_dcp(subtitle.from()));
937 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for the start of string (plain-text) subtitles: apply the content's
   position offsets and express its x/y scaling via font size and aspect
   adjustment, then register the subtitles as active. */
941 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
943 shared_ptr<Piece> piece = wp.lock ();
944 shared_ptr<const TextContent> text = wc.lock ();
945 if (!piece || !text) {
950 DCPTime const from (piece->content_time_to_dcp( subtitle.from()));
/* Subtitles starting after the content's end are ignored */
952 if (from > piece->end(_film)) {
956 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
957 s.set_h_position (s.h_position() + text->x_offset ());
958 s.set_v_position (s.v_position() + text->y_offset ());
959 float const xs = text->x_scale();
960 float const ys = text->y_scale();
961 float size = s.size();
963 /* Adjust size to express the common part of the scaling;
964 e.g. if xs = ys = 0.5 we scale size by 2.
966 if (xs > 1e-5 && ys > 1e-5) {
967 size *= 1 / min (1 / xs, 1 / ys);
971 /* Then express aspect ratio changes */
972 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
973 s.set_aspect_adjust (xs / ys);
/* Subtitle's in-time, with 1000-units-per-second editable rate — TODO confirm units */
976 s.set_in (dcp::Time(from.seconds(), 1000));
977 ps.string.push_back (StringText (s, text->outline_width()));
978 ps.add_fonts (text->fonts ());
981 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for the end of a subtitle: close the active text's period and, if
   the subtitle is not to be burnt in, emit it via the Text signal. */
985 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
987 shared_ptr<const TextContent> text = wc.lock ();
/* If we never saw the corresponding start, there is nothing to stop */
992 if (!_active_texts[text->type()].have(wc)) {
996 shared_ptr<Piece> piece = wp.lock ();
1001 DCPTime const dcp_to = piece->content_time_to_dcp(to);
1003 if (dcp_to > piece->end(_film)) {
1007 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1009 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1010 if (text->use() && !always && !text->burn()) {
1011 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek all decoders to @time.  @accurate true to land exactly on @time,
   false to allow landing at a nearby convenient point (e.g. a keyframe).
   Clears all buffered/merged state so playback restarts cleanly. */
1016 Player::seek (DCPTime time, bool accurate)
1018 boost::mutex::scoped_lock lm (_mutex);
1021 /* We can't seek in this state */
1026 _shuffler->clear ();
1031 if (_audio_processor) {
1032 _audio_processor->flush ();
1035 _audio_merger.clear ();
1036 for (int i = 0; i < TEXT_COUNT; ++i) {
1037 _active_texts[i].clear ();
1040 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1041 if (time < i->position()) {
1042 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1043 we must seek this (following) content accurately, otherwise when we come to the end of the current
1044 content we may not start right at the beginning of the next, causing a gap (if the next content has
1045 been trimmed to a point between keyframes, or something).
1047 i->decoder->seek (i->dcp_to_content_time(_film, i->position()), true);
1049 } else if (i->position() <= time && time < i->end(_film)) {
1050 /* During; seek to position */
1051 i->decoder->seek (i->dcp_to_content_time(_film, time), accurate);
1054 /* After; this piece is done */
/* Accurate seeks pin the emission clocks to the seek time; inaccurate ones
   leave them unset until the first data arrives */
1060 _last_video_time = time;
1061 _last_video_eyes = EYES_LEFT;
1062 _last_audio_time = time;
1064 _last_video_time = optional<DCPTime>();
1065 _last_video_eyes = optional<Eyes>();
1066 _last_audio_time = optional<DCPTime>();
1069 _black.set_position (time);
1070 _silent.set_position (time);
1072 _last_video.clear ();
/* Queue a video frame for emission, advancing the video clock/eye state.
   Frames are held briefly in _delay so subtitles have time to arrive first. */
1076 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1078 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1079 player before the video that requires them.
1081 _delay.push_back (make_pair (pv, time));
/* Only advance the time once both eyes of a frame have been emitted */
1083 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1084 _last_video_time = time + one_video_frame();
1086 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to two frames delayed; emit the oldest once the queue is full */
1088 if (_delay.size() < 3) {
1092 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1094 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire stale active texts, attach any open
   subtitles for this frame, then (presumably, via an elided line) fire Video. */
1098 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1100 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1101 for (int i = 0; i < TEXT_COUNT; ++i) {
1102 _active_texts[i].clear_before (time);
1106 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1108 pv->set_text (subtitles.get ());
/* Emit an audio block via the Audio signal and advance the audio clock.
   Asserts that blocks are contiguous (within half a sample at 48kHz). */
1115 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1117 /* Log if the assert below is about to fail */
1118 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1119 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1122 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1123 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1124 Audio (data, time, _film->audio_frame_rate());
1125 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @period, in blocks of at most half a second. */
1129 Player::fill_audio (DCPTimePeriod period)
1131 if (period.from == period.to) {
1135 DCPOMATIC_ASSERT (period.from < period.to);
1137 DCPTime t = period.from;
1138 while (t < period.to) {
1139 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1140 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* NOTE(review): a guard for samples == 0 is presumably on an elided line — confirm */
1142 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1143 silence->make_silent ();
1144 emit_audio (silence, t);
/* @return the duration of one video frame at the film's DCP frame rate. */
1151 Player::one_video_frame () const
1153 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of @audio (starting at @time) which lies before @discard_to.
   @return the remaining audio and its new start time; a null buffer if all of
   it was discarded. */
1156 pair<shared_ptr<AudioBuffers>, DCPTime>
1157 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1159 DCPTime const discard_time = discard_to - time;
1160 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1161 Frame remaining_frames = audio->frames() - discard_frames;
1162 if (remaining_frames <= 0) {
1163 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1165 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1166 return make_pair(cut, time + discard_time);
/* Set the resolution-reduction factor for DCP decoding (none = full size),
   rebuilding pieces; emits PENDING then DONE (or CANCELLED if unchanged). */
1170 Player::set_dcp_decode_reduction (optional<int> reduction)
1172 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1175 boost::mutex::scoped_lock lm (_mutex);
1177 if (reduction == _dcp_decode_reduction) {
1179 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1183 _dcp_decode_reduction = reduction;
1184 setup_pieces_unlocked ();
1187 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Convert @t within @content to DCP time, by asking each piece in turn.
   Returns none if the content is not (currently) in our piece list. */
1191 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1193 boost::mutex::scoped_lock lm (_mutex);
1195 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1196 optional<DCPTime> d = i->content_time_to_dcp (t);
1202 /* We couldn't find this content; perhaps things are being changed over */
1203 return optional<DCPTime>();
/* @return the explicit playlist if one was supplied, else the film's own. */
1207 shared_ptr<const Playlist>
1208 Player::playlist () const
1210 return _playlist ? _playlist : _film->playlist()
/* Handler for Atmos data from a decoder: forward it via the Atmos signal,
   converting the frame index to DCP time at the film's video frame rate. */
1215 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1217 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);