2 Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
29 #include "dcp_content.h"
30 #include "dcp_decoder.h"
31 #include "dcpomatic_log.h"
33 #include "decoder_factory.h"
34 #include "ffmpeg_content.h"
36 #include "frame_rate_change.h"
38 #include "image_decoder.h"
41 #include "piece_video.h"
43 #include "player_video.h"
46 #include "raw_image_proxy.h"
47 #include "referenced_reel_asset.h"
48 #include "render_text.h"
50 #include "text_content.h"
51 #include "text_decoder.h"
53 #include "video_decoder.h"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
68 using std::dynamic_pointer_cast;
71 using std::make_shared;
72 using std::make_shared;
78 using std::shared_ptr;
81 using std::unique_ptr;
82 using boost::optional;
83 #if BOOST_VERSION >= 106100
84 using namespace boost::placeholders;
86 using namespace dcpomatic;
/* Identifiers passed with the Change signal so observers can tell which
 * aspect of the Player changed.
 */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;

/* Tolerance used when comparing audio gain values for equality */
#define AUDIO_GAIN_EPSILON 0.001
/** Construct a Player for the given Film, using the Film's own playlist.
 *  NOTE(review): lines appear to be missing from this excerpt — the member
 *  initializer list below is incomplete and no body is visible; confirm
 *  against the full source.
 */
Player::Player (shared_ptr<const Film> film)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
/** Construct a Player for the given Film with an explicit Playlist, rather
 *  than the Film's own playlist.
 *  NOTE(review): lines appear to be missing from this excerpt; the signal
 *  connections and setup calls below may belong to a shared construction
 *  helper in the full source — confirm.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	, _playlist (playlist_)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())

	/* Listen for Film changes so that we can tell observers our output may differ */
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   connect at the front of the signal's slot list.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());
	/* Pick up any audio processor configured on the Film */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
	/* Start from the beginning, accurately, so we are ready to run */
	seek (DCPTime (), true);
/** Rebuild our Piece list from the playlist, taking the state lock first. */
Player::setup_pieces ()
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
/** @return true if the given content has video which is marked for use.
 *  NOTE(review): the return type / `static` line is not visible in this excerpt.
 */
have_video (shared_ptr<const Content> content)
	return static_cast<bool>(content->video) && content->video->use();
/** @return true if the given content has any audio.
 *  NOTE(review): the return type / `static` line is not visible in this excerpt.
 */
have_audio (shared_ptr<const Content> content)
	return static_cast<bool>(content->audio);
/** Group audio-only content which runs back-to-back with identical audio
 *  settings; all other content ends up in its own single-entry group.
 *  @param film Film that the content is part of.
 *  @param content Content to group.
 *  NOTE(review): several lines are missing from this excerpt (early returns,
 *  closing braces and the final return are not visible).
 */
vector<vector<shared_ptr<Content>>>
collect (shared_ptr<const Film> film, ContentList content)
	vector<shared_ptr<Content>> ungrouped;
	vector<vector<shared_ptr<Content>>> grouped;

	/* true if a and b have matching audio settings, comparing gains to within AUDIO_GAIN_EPSILON */
	auto same_settings = [](shared_ptr<const Film> film, shared_ptr<const AudioContent> a, shared_ptr<const AudioContent> b) {

		auto a_streams = a->streams();
		auto b_streams = b->streams();

		if (a_streams.size() != b_streams.size()) {

		/* Compare each stream pairwise: mapping, frame rate and channel count must agree */
		for (size_t i = 0; i < a_streams.size(); ++i) {
			auto a_stream = a_streams[i];
			auto b_stream = b_streams[i];
				!a_stream->mapping().equals(b_stream->mapping(), AUDIO_GAIN_EPSILON) ||
				a_stream->frame_rate() != b_stream->frame_rate() ||
				a_stream->channels() != b_stream->channels()) {

			/* Content-level settings must also agree */
			fabs(a->gain() - b->gain()) < AUDIO_GAIN_EPSILON &&
			a->delay() == b->delay() &&
			a->language() == b->language() &&
			a->resampled_frame_rate(film) == b->resampled_frame_rate(film) &&
			a->channel_names() == b->channel_names()

	for (auto i: content) {
		/* Anything with video, without audio, or with text cannot be grouped */
		if (i->video || !i->audio || !i->text.empty()) {
			ungrouped.push_back (i);

		/* Try to append to an existing group: settings must match and this
		   content must start exactly where the group ends */
		for (auto& g: grouped) {
			if (same_settings(film, g.front()->audio, i->audio) && i->position() == g.back()->end(film)) {

			grouped.push_back ({i});

	/* Each ungrouped item becomes its own group */
	for (auto i: ungrouped) {
		grouped.push_back({i});
/** Rebuild the Piece list from the playlist.  Caller must hold _mutex.
 *  NOTE(review): many lines are missing from this excerpt (`continue`s,
 *  closing braces, the clearing of _pieces, etc.) — confirm details against
 *  the full source.
 */
Player::setup_pieces_unlocked ()
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so we can reuse their decoders where possible */
	auto old_pieces = _pieces;

	/* Fresh Shuffler to re-order any out-of-sequence 3D video */
	_shuffler.reset (new Shuffler());
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		/* Skip content whose files have gone missing */
		if (!i->paths_valid ()) {

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */

		/* Reuse an existing decoder for this content if one of the old pieces had it */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			auto decoder = j->decoder_for(i);
				old_decoder = decoder;

		auto decoder = decoder_factory (_film, i, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);

			for (auto i: decoder->text) {
				i->set_ignore (true);

		/* DCP decoders need to know whether to decode referenced assets */
		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);

		auto piece = make_shared<Piece>(_film, i, decoder, frc, _fast);
		_pieces.push_back (piece);

		if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
			/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
			piece->Video.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			piece->Video.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));

			piece->Audio.connect (bind(&Player::audio, this, weak_ptr<Piece>(piece), _1));

		piece->BitmapTextStart.connect (bind(&Player::bitmap_text_start, this, piece, _1));
		piece->StringTextStart.connect (bind(&Player::string_text_start, this, piece, _1));
		piece->TextStop.connect (bind(&Player::subtitle_stop, this, piece, _1));
		piece->Atmos.connect (bind(&Player::atmos, this, piece, _1));

	/* Ignore video from any piece whose period is overlapped by later in-use video */
	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
			/* Look for content later in the content list with in-use video that overlaps this */
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->use_video()) {
					(*i)->set_ignore_video ((*j)->period().overlap((*i)->period()));

	/* Track the gaps where we must supply black video / silent audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset the emission bookkeeping */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
/** Convert a ContentTime in the given content to DCP time, by asking each
 *  Piece in turn.  NOTE(review): the return statements and return type are
 *  not visible in this excerpt.
 */
Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
	boost::mutex::scoped_lock lm (_mutex);

	for (auto i: _pieces) {
		auto dcp = i->content_time_to_dcp(content, t);

	/* We couldn't find this content; perhaps things are being changed over */
/** Respond to a change in some piece of content in the playlist.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param property Identifier of the property that changed.
 *  @param frequent true if this change is likely to happen often.
 *  NOTE(review): some lines are missing from this excerpt (suspend/resume
 *  and rebuild calls are not visible).
 */
Player::playlist_content_change (ChangeType type, int property, bool frequent)
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			/* Update metadata on any queued (delayed) frames to reflect the new crop */
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);

		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces. Stop pass()
			   and seek() from working until then.
			*/
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through. Re-build our pieces. */

		} else if (type == ChangeType::CANCELLED) {

	/* Forward the change to our own observers */
	Change (type, property, frequent);
/** Set the size of the container that output video frames will be scaled into,
 *  surrounding the update with PENDING / CANCELLED / DONE Change signals.
 *  @param s New container size.
 */
Player::set_video_container_size (dcp::Size s)
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	boost::mutex::scoped_lock lm (_mutex);

	/* No change: withdraw the pending notification */
	if (s == _video_container_size) {
		Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	_video_container_size = s;

	/* Re-make the cached black frame at the new size */
	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
	_black_image->make_black ();

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/** Respond to a change in the playlist itself; rebuild pieces when a change
 *  completes and forward the notification to our observers.
 */
Player::playlist_change (ChangeType type)
	if (type == ChangeType::DONE) {
	Change (type, PlayerProperty::PLAYLIST, false);
/** Respond to a change in a property of the Film.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param p Property that changed.
 */
Player::film_change (ChangeType type, Film::Property p)
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == ChangeType::DONE) {
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && _film->audio_processor ()) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Take a fresh clone of the processor at the Film's audio frame rate */
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Channel count changed, so any partially-merged audio is stale */
			_audio_merger.clear ();
/** @return A black PlayerVideo frame at the current container size for the
 *  given eyes.  NOTE(review): some constructor-argument lines are missing
 *  from this excerpt.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		_video_container_size,
		_video_container_size,
		PresetColourConversion::all().front().conversion,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
/** Collect the fonts used for subtitles by all of our pieces.
 *  NOTE(review): the return type and return statement are not visible in
 *  this excerpt.
 */
Player::get_subtitle_fonts ()
	boost::mutex::scoped_lock lm (_mutex);

	vector<FontData> fonts;
	for (auto i: _pieces) {
		/* XXX: things may go wrong if there are duplicate font IDs
		   with different font files.
		*/
		auto f = i->fonts ();
		copy (f.begin(), f.end(), back_inserter(fonts));
/** Set this player never to produce any video data */
Player::set_ignore_video ()
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* The flag affects decoder setup, so rebuild our pieces */
	setup_pieces_unlocked ();
/** Set this player never to produce any audio data */
Player::set_ignore_audio ()
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* The flag affects decoder setup, so rebuild our pieces */
	setup_pieces_unlocked ();
/** Set this player never to produce any text data.
 *  NOTE(review): the line setting the corresponding flag is not visible in
 *  this excerpt — confirm against the full source.
 */
Player::set_ignore_text ()
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
/** Set the player to always burn open texts into the image regardless of the content settings */
Player::set_always_burn_open_subtitles ()
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
/** Sets up the player to be faster, possibly at the expense of quality */
	boost::mutex::scoped_lock lm (_mutex);
	/* NOTE(review): the function signature and the line setting the speed
	   flag are not visible in this excerpt — presumably Player::set_fast();
	   confirm against the full source. */
	setup_pieces_unlocked ();
/** Set the player to decode referenced DCP content rather than skipping it */
Player::set_play_referenced ()
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* The flag affects decoder setup, so rebuild our pieces */
	setup_pieces_unlocked ();
/** Apply start/end trims to a reel asset and, if any duration remains, add it
 *  to the given list.
 *  @param a List to add to.
 *  @param r Asset to trim; its entry point and duration are modified in place.
 *  @param reel_trim_start Frames to trim from the start of the reel.
 *  @param reel_trim_end Frames to trim from the end of the reel.
 *  @param from Time of the asset within the output DCP.
 *  @param ffr Film video frame rate.
 */
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	/* Only include the asset if something is left after trimming */
	if (r->actual_duration() > 0) {
		ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/** @return The reel assets from DCP content which is marked to be referenced
 *  (rather than re-encoded), with per-reel trims applied.
 *  NOTE(review): some lines are missing from this excerpt (null checks,
 *  exception handling and closing braces are not visible).
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);

		unique_ptr<DCPDecoder> decoder;
			decoder.reset (new DCPDecoder(_film, j, false, shared_ptr<DCPDecoder>()));

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims expressed in frames at the content's rate */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);

			/* Advance our running offsets by this reel */
			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
	/* NOTE(review): the signature of this function — presumably
	   bool Player::pass() — and many interior lines (returns, closing
	   braces, the switch over which source is earliest) are not visible
	   in this excerpt.  It drives one step of playback: make the
	   farthest-behind source emit some data, then flush ready audio and
	   delayed video. */
	boost::mutex::scoped_lock lm (_mutex);

		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		auto time = i->decoder_before(earliest_time);
			earliest_time = *time;
			earliest_content = i;

	if (earliest_content) {

	/* Black gaps take precedence if they come earlier than any content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();

		earliest_content->pass();
		if (!_play_referenced && earliest_content->reference_dcp_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = earliest_content->end ();

		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());

		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence. Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;

		/* Emit at most one video frame's worth of silence at a time */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		_silent.set_position (period.to);

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);

		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));

		emit_audio (i->first, i->second);

	/* Flush any video that has been waiting in the delay queue */
	for (auto const& i: _delay) {
		do_emit_video(i.first, i.second);
/** @return Open subtitles for the frame at the given time, converted to images.
 *  NOTE(review): some lines are missing from this excerpt (the loop header
 *  over active texts and parts of the bitmap-positioning code are not visible).
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

		/* Texts active during the one-frame period starting at `time' */
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

					/* Convert the fractional rectangle position to pixels */
					lrint(_video_container_size.width * i.rectangle.x),
					lrint(_video_container_size.height * i.rectangle.y)

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));

	if (captions.empty()) {

	return merge (captions);
/** Handle a video frame emitted by one of our pieces: discard if it is too
 *  early, fill any gap since the last emitted frame (with repeats of the
 *  last frame or black, handling 3D eye interleaving), then emit it.
 *  @param wp Piece which emitted the frame.
 *  @param video The frame and its metadata.
 *  NOTE(review): some lines are missing from this excerpt (returns, closing
 *  braces and the initialisation of the 3D fill-loop time variable).
 */
Player::video (weak_ptr<Piece> wp, PieceVideo video)
	auto piece = wp.lock ();

	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(video.time));

	/* Discard if it's before the content's period or the last accurate seek. We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (video.time < piece->position() || (_last_video_time && video.time < *_last_video_time)) {

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (video.time, piece->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			auto last = _last_video.find (wp);
			if (_film->three_d()) {
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				if (fill_to == piece->end()) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;

				auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
				/* Fill eye-by-eye, repeating the last frame if we have one, else black */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					/* Advance the clock only after the right eye has been emitted */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					eyes = increment_eyes (eyes);
				/* 2D: one frame per step, last frame or black */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
						emit_video (black_player_video_frame(Eyes::BOTH), j);

	_last_video[wp] = piece->player_video (video, _video_container_size);

	/* Emit the frame, repeating it as required by the frame rate change */
	DCPTime t = video.time;
	auto const frc = piece->frame_rate_change();
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->end()) {
			emit_video (_last_video[wp], t);
		t += one_video_frame ();
/** Handle audio emitted by one of our pieces: clip it to the content's
 *  period, apply gain, remapping and any audio processor, then push it into
 *  the merger.
 *  @param wp Piece which emitted the audio.
 *  @param audio The audio data and its time.
 *  NOTE(review): some lines are missing from this excerpt (returns and
 *  closing braces are not visible).
 */
Player::audio (weak_ptr<Piece> wp, PieceAudio audio)
	DCPOMATIC_ASSERT (audio.audio->frames() > 0);

	auto piece = wp.lock ();

	LOG_DEBUG_PLAYER("Received audio at %1", to_string(audio.time));

	/* The end of this block in the DCP */
	int const rfr = piece->resampled_audio_frame_rate ();
	auto end = audio.time + DCPTime::from_frames(audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (audio.time < piece->position()) {
		auto cut = discard_audio (audio.audio, audio.time, piece->position());
			/* This audio is entirely discarded */
		audio.audio = cut.first;
		audio.time = cut.second;
	} else if (audio.time > piece->end()) {
	} else if (end > piece->end()) {
		/* Trim the tail that runs past the end of the content */
		Frame const remaining_frames = DCPTime(piece->end() - audio.time).frames_round(rfr);
		if (remaining_frames == 0) {
		audio.audio = make_shared<AudioBuffers>(audio.audio, remaining_frames, 0);

	DCPOMATIC_ASSERT (audio.audio->frames() > 0);

	/* Apply content gain, if any */
	if (piece->audio_gain() != 0) {
		auto gain = make_shared<AudioBuffers>(audio.audio);
		gain->apply_gain (piece->audio_gain());

	/* Remap to the film's channel layout */
	audio.audio = remap (audio.audio, _film->audio_channels(), audio.mapping);

	if (_audio_processor) {
		audio.audio = _audio_processor->run (audio.audio, _film->audio_channels());

	/* Push into the merger and record how far this stream has got */
	_audio_merger.push (audio.audio, audio.time);
	piece->set_last_push_end (audio.stream, audio.time + DCPTime::from_frames(audio.audio->frames(), _film->audio_frame_rate()));
/** Handle the start of a bitmap text (subtitle) from one of our pieces:
 *  apply the content's offsets and scales, scale the image to the container
 *  and add it to the active texts.
 *  @param wp Piece which emitted the text.
 *  @param subtitle The bitmap text and its start time.
 */
Player::bitmap_text_start (weak_ptr<Piece> wp, PieceBitmapTextStart subtitle)
	auto piece = wp.lock ();
	auto content = subtitle.content().lock();
	auto text = subtitle.text().lock();
	if (!piece || !content || !text) {

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	auto image = subtitle.sub.image;

	/* We will scale the subtitle up to fit _video_container_size */
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	if (width == 0 || height == 0) {

	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	auto from = piece->content_time_to_dcp(content, subtitle.time());
	DCPOMATIC_ASSERT (from);

	_active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
/** Handle the start of a string text (subtitle) from one of our pieces:
 *  apply the content's offsets and scales to each string and add it to the
 *  active texts.
 *  @param wp Piece which emitted the text.
 *  @param subtitle The string text and its start time.
 */
Player::string_text_start (weak_ptr<Piece> wp, PieceStringTextStart subtitle)
	auto piece = wp.lock ();
	auto content = subtitle.content().lock();
	auto text = subtitle.text().lock();
	if (!piece || !content || !text) {

	auto const from = piece->content_time_to_dcp(content, subtitle.time());
	DCPOMATIC_ASSERT (from);

	/* Ignore anything which starts after the content finishes */
	if (from > piece->end()) {

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset());
		s.set_v_position (s.v_position() + text->y_offset());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);

		s.set_in (dcp::Time(from->seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());

	_active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
/** Handle the end of a text (subtitle) from one of our pieces: close the
 *  active text and, if it is not to be burnt in, emit it via the Text signal.
 *  @param wp Piece which emitted the stop.
 *  @param stop The stop event and its time.
 */
Player::subtitle_stop (weak_ptr<Piece> wp, PieceTextStop stop)
	auto content = stop.content().lock();
	auto text = stop.text().lock();

	/* Nothing to do if we never saw the corresponding start */
	if (!_active_texts[static_cast<int>(text->type())].have(stop.text())) {

	shared_ptr<Piece> piece = wp.lock ();

	auto const dcp_to = piece->content_time_to_dcp(content, stop.time());
	DCPOMATIC_ASSERT (dcp_to);

	/* Ignore stops which fall after the content finishes */
	if (*dcp_to > piece->end()) {

	auto from = _active_texts[static_cast<int>(text->type())].add_to(stop.text(), *dcp_to);

	/* Emit as discrete text unless it is to be burnt into the image */
	bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (text->use() && !always && !text->burn()) {
		Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
/** Seek the player to a new position.
 *  @param time Time to seek to.
 *  @param accurate true to get frame-accurate output from the new position;
 *  false to accept output starting somewhat before it.
 *  NOTE(review): some lines are missing from this excerpt (the suspended
 *  check's condition and some closing braces are not visible).
 */
Player::seek (DCPTime time, bool accurate)
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

		/* We can't seek in this state */

	/* Discard any pending out-of-order 3D video */
	_shuffler->clear ();

	if (_audio_processor) {
		_audio_processor->flush ();

	/* Throw away any queued audio and active texts */
	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();

	for (auto i: _pieces) {
		i->seek (time, accurate);

		/* Accurate seek: we know exactly where output resumes */
		_last_video_time = time;
		_last_video_eyes = Eyes::LEFT;
		_last_audio_time = time;

		/* Inaccurate seek: we don't know where the next output will come from */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();

	_black.set_position (time);
	_silent.set_position (time);

	_last_video.clear ();
/** Queue a video frame for emission, delaying it slightly so that subtitles
 *  have a chance to arrive first; update the last-emitted bookkeeping.
 *  @param pv Frame to emit.
 *  @param time Time of the frame.
 *  NOTE(review): some lines are missing from this excerpt (the discard
 *  return and some closing braces are not visible).
 */
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
	if (!_film->three_d()) {
		if (pv->eyes() == Eyes::LEFT) {
			/* Use left-eye images for both eyes... */
			pv->set_eyes (Eyes::BOTH);
		} else if (pv->eyes() == Eyes::RIGHT) {
			/* ...and discard the right */

	/* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
	   player before the video that requires them.
	*/
	_delay.push_back (make_pair (pv, time));

	/* Only advance the clock once a full frame (or the right eye) has gone in */
	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		_last_video_time = time + one_video_frame();
	_last_video_eyes = increment_eyes (pv->eyes());

	/* Keep a short queue; emit the oldest frame once it is long enough */
	if (_delay.size() < 3) {

	auto to_do = _delay.front();
	do_emit_video (to_do.first, to_do.second);
/** Actually emit a video frame: expire old active texts, attach any open
 *  subtitles for this time and (presumably, in lines not visible here) emit
 *  via the Video signal.
 */
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
	/* Once a full frame has gone out, texts that ended before it are done with */
	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
			_active_texts[i].clear_before (time);

	auto subtitles = open_subtitles_for_frame (time);
		pv->set_text (subtitles.get ());
/** Emit some audio via the Audio signal and advance _last_audio_time.
 *  @param data Audio data.
 *  @param time Time of the first frame of data.
 */
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
	/* Log if the assert below is about to fail */
	if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);

	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
	DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
	Audio (data, time, _film->audio_frame_rate());
	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/** Emit silence to fill the given period, in blocks of at most half a second. */
Player::fill_audio (DCPTimePeriod period)
	/* Nothing to do for an empty period */
	if (period.from == period.to) {

	DCPOMATIC_ASSERT (period.from < period.to);

	DCPTime t = period.from;
	while (t < period.to) {
		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
		Frame const samples = block.frames_round(_film->audio_frame_rate());
			auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
			silence->make_silent ();
			emit_audio (silence, t);
/** @return The duration of one video frame at the Film's video frame rate. */
Player::one_video_frame () const
	return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Discard the part of some audio that comes before a given time.
 *  @param audio Audio data.
 *  @param time Time of the first frame of audio.
 *  @param discard_to Time up to which to discard.
 *  @return Remaining audio (null if everything was discarded) and its new time.
 */
pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
	auto const discard_time = discard_to - time;
	auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
	auto remaining_frames = audio->frames() - discard_frames;
	if (remaining_frames <= 0) {
		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
	auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
	return make_pair(cut, time + discard_time);
/** Set the resolution reduction to use when decoding DCP content, rebuilding
 *  pieces and signalling PENDING / CANCELLED / DONE around the change.
 *  @param reduction New reduction, or none for full resolution.
 */
Player::set_dcp_decode_reduction (optional<int> reduction)
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	boost::mutex::scoped_lock lm (_mutex);

	/* No change: withdraw the pending notification */
	if (reduction == _dcp_decode_reduction) {
		Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);

	_dcp_decode_reduction = reduction;
	setup_pieces_unlocked ();

	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/** @return The playlist we were given explicitly, or failing that the Film's own. */
shared_ptr<const Playlist>
Player::playlist () const
	return _playlist ? _playlist : _film->playlist();
/** Forward Atmos data from a piece via our Atmos signal, converting its frame
 *  number to DCP time at the Film's video frame rate.
 */
Player::atmos (weak_ptr<Piece>, PieceAtmos data)
	Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);