2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Identifiers passed to the Change signal so observers can tell which
   Player property changed.  NOTE(review): this extraction is mangled —
   each line carries an embedded original line number; code is left
   byte-identical, comments only are added. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for the given film and playlist: connect to the Film
   and Playlist change signals, set the video container size from the film
   and perform an initial accurate seek to time zero.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (e.g. the `_film (film)` initialiser and braces); code left byte-identical. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
100 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
101 /* The butler must hear about this first, so since we are proxying this through to the butler we must connect at the front (boost::signals2::at_front below) */
104 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
105 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
106 set_video_container_size (_film->frame_size ());
108 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so the first pass() emits from time zero */
111 seek (DCPTime (), true);
120 Player::setup_pieces ()
122 boost::mutex::scoped_lock lm (_mutex);
123 setup_pieces_unlocked ();
/* Rebuild _pieces from the playlist: create a decoder for each content item,
   honour the ignore-video/audio/text flags, wire each decoder's Data /
   BitmapStart / PlainStart / Stop signals to this Player, rebuild the audio
   stream state map, and reset the black/silence fillers and last-emitted
   times.  Caller must hold _mutex.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, continue statements); code left byte-identical. */
127 Player::setup_pieces_unlocked ()
132 _shuffler = new Shuffler();
133 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
135 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
137 if (!i->paths_valid ()) {
141 if (_ignore_video && _ignore_audio && i->text.empty()) {
142 /* We're only interested in text and this content has none */
146 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
147 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
150 /* Not something that we can decode; e.g. Atmos content */
154 if (decoder->video && _ignore_video) {
155 decoder->video->set_ignore (true);
158 if (decoder->audio && _ignore_audio) {
159 decoder->audio->set_ignore (true);
163 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
164 i->set_ignore (true);
/* DCP content may refer back to assets in other DCPs rather than decoding them */
168 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
170 dcp->set_decode_referenced (_play_referenced);
171 if (_play_referenced) {
172 dcp->set_forced_reduction (_dcp_decode_reduction);
176 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
177 _pieces.push_back (piece);
179 if (decoder->video) {
180 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
181 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
182 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
184 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
188 if (decoder->audio) {
189 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
192 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
194 while (j != decoder->text.end()) {
195 (*j)->BitmapStart.connect (
196 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
198 (*j)->PlainStart.connect (
199 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
202 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Record, per audio stream, which piece it belongs to and where its audio has been pushed up to */
209 _stream_states.clear ();
210 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
211 if (i->content->audio) {
212 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
213 _stream_states[j] = StreamState (i, i->content->position ());
/* Fillers that produce black video / silent audio where the playlist has gaps */
218 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
219 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
221 _last_video_time = DCPTime ();
222 _last_video_eyes = EYES_BOTH;
223 _last_audio_time = DCPTime ();
/* Handle a change to some piece of playlist content: on PENDING, suspend;
   on DONE, rebuild pieces; on CANCELLED, resume.  Always re-emit the change
   to our own observers.
   NOTE(review): mangled extraction — embedded line numbers and elided lines;
   code left byte-identical. */
228 Player::playlist_content_change (ChangeType type, int property, bool frequent)
230 if (type == CHANGE_TYPE_PENDING) {
231 boost::mutex::scoped_lock lm (_mutex);
232 /* The player content is probably about to change, so we can't carry on
233 until that has happened and we've rebuilt our pieces. Stop pass()
234 and seek() from working until then. */
237 } else if (type == CHANGE_TYPE_DONE) {
238 /* A change in our content has gone through. Re-build our pieces. */
240 } else if (type == CHANGE_TYPE_CANCELLED) {
244 Change (type, property, frequent);
/* Set the size of the video container (the "screen" the player emits into)
   and rebuild the cached black frame to match.  Emits PENDING / CANCELLED /
   DONE Change notifications around the update.
   NOTE(review): mangled extraction — embedded line numbers and elided lines;
   code left byte-identical. */
248 Player::set_video_container_size (dcp::Size s)
250 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
253 boost::mutex::scoped_lock lm (_mutex);
/* No-op if the size is unchanged; tell observers the pending change was cancelled */
255 if (s == _video_container_size) {
257 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
261 _video_container_size = s;
263 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
264 _black_image->make_black ();
267 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a whole-playlist change: rebuild pieces when the change completes,
   then re-emit as a PLAYLIST property change.
   NOTE(review): mangled extraction; code left byte-identical. */
271 Player::playlist_change (ChangeType type)
273 if (type == CHANGE_TYPE_DONE) {
276 Change (type, PlayerProperty::PLAYLIST, false);
/* React to Film property changes that affect our output, re-emitting them
   as Player property changes and updating internal state where needed.
   NOTE(review): mangled extraction — embedded line numbers and elided lines;
   code left byte-identical. */
280 Player::film_change (ChangeType type, Film::Property p)
282 /* Here we should notice Film properties that affect our output, and
283 alert listeners that our output now would be different to how it was
284 last time we were run. */
287 if (p == Film::CONTAINER) {
288 Change (type, PlayerProperty::FILM_CONTAINER, false);
289 } else if (p == Film::VIDEO_FRAME_RATE) {
290 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
291 so we need new pieces here. */
293 if (type == CHANGE_TYPE_DONE) {
296 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
297 } else if (p == Film::AUDIO_PROCESSOR) {
298 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
299 boost::mutex::scoped_lock lm (_mutex);
300 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
302 } else if (p == Film::AUDIO_CHANNELS) {
303 if (type == CHANGE_TYPE_DONE) {
304 boost::mutex::scoped_lock lm (_mutex);
/* Channel count changed, so any merged-but-unemitted audio is stale */
305 _audio_merger.clear ();
/* Build a PlayerVideo containing the cached black image at the current
   container size, used to fill gaps in the playlist.
   NOTE(review): mangled extraction — several PlayerVideo constructor
   arguments (presumably including the `eyes` parameter) are on elided
   lines; code left byte-identical. */
310 shared_ptr<PlayerVideo>
311 Player::black_player_video_frame (Eyes eyes) const
313 return shared_ptr<PlayerVideo> (
315 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
318 _video_container_size,
319 _video_container_size,
322 PresetColourConversion::all().front().conversion,
323 boost::weak_ptr<Content>(),
324 boost::optional<Frame>()
330 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
332 DCPTime s = t - piece->content->position ();
333 s = min (piece->content->length_after_trim(), s);
334 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
336 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
337 then convert that ContentTime to frames at the content's rate. However this fails for
338 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
339 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
341 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
343 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
347 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
349 /* See comment in dcp_to_content_video */
350 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
351 return d + piece->content->position();
355 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
357 DCPTime s = t - piece->content->position ();
358 s = min (piece->content->length_after_trim(), s);
359 /* See notes in dcp_to_content_video */
360 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
364 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
366 /* See comment in dcp_to_content_video */
367 return DCPTime::from_frames (f, _film->audio_frame_rate())
368 - DCPTime (piece->content->trim_start(), piece->frc)
369 + piece->content->position();
373 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
375 DCPTime s = t - piece->content->position ();
376 s = min (piece->content->length_after_trim(), s);
377 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
381 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
383 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
386 list<shared_ptr<Font> >
387 Player::get_subtitle_fonts ()
389 boost::mutex::scoped_lock lm (_mutex);
391 list<shared_ptr<Font> > fonts;
392 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
393 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
394 /* XXX: things may go wrong if there are duplicate font IDs
395 with different font files.
397 list<shared_ptr<Font> > f = j->fonts ();
398 copy (f.begin(), f.end(), back_inserter (fonts));
405 /** Set this player never to produce any video data */
407 Player::set_ignore_video ()
409 boost::mutex::scoped_lock lm (_mutex);
410 _ignore_video = true;
411 setup_pieces_unlocked ();
415 Player::set_ignore_audio ()
417 boost::mutex::scoped_lock lm (_mutex);
418 _ignore_audio = true;
419 setup_pieces_unlocked ();
423 Player::set_ignore_text ()
425 boost::mutex::scoped_lock lm (_mutex);
427 setup_pieces_unlocked ();
430 /** Set the player to always burn open texts into the image regardless of the content settings */
432 Player::set_always_burn_open_subtitles ()
434 boost::mutex::scoped_lock lm (_mutex);
435 _always_burn_open_subtitles = true;
438 /** Sets up the player to be faster, possibly at the expense of quality */
442 boost::mutex::scoped_lock lm (_mutex);
444 setup_pieces_unlocked ();
448 Player::set_play_referenced ()
450 boost::mutex::scoped_lock lm (_mutex);
451 _play_referenced = true;
452 setup_pieces_unlocked ();
/* Gather the reel assets (picture/sound/subtitle/closed-caption) that our
   DCP content refers to rather than re-encodes, with their entry points and
   durations adjusted for trim and their DCP-timeline periods computed.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, `int64_t offset = 0;`, a.push_back calls, final return); code
   left byte-identical. */
455 list<ReferencedReelAsset>
456 Player::get_reel_assets ()
458 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
460 list<ReferencedReelAsset> a;
462 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
463 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
468 scoped_ptr<DCPDecoder> decoder;
470 decoder.reset (new DCPDecoder (j, _film->log(), false));
476 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* cfr: the content (DCP) frame rate; ffr: the film's frame rate */
478 DCPOMATIC_ASSERT (j->video_frame_rate ());
479 double const cfr = j->video_frame_rate().get();
480 Frame const trim_start = j->trim_start().frames_round (cfr);
481 Frame const trim_end = j->trim_end().frames_round (cfr);
482 int const ffr = _film->video_frame_rate ();
484 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
485 if (j->reference_video ()) {
486 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
487 DCPOMATIC_ASSERT (ra);
488 ra->set_entry_point (ra->entry_point() + trim_start);
489 ra->set_duration (ra->duration() - trim_start - trim_end);
491 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
495 if (j->reference_audio ()) {
496 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
497 DCPOMATIC_ASSERT (ra);
498 ra->set_entry_point (ra->entry_point() + trim_start);
499 ra->set_duration (ra->duration() - trim_start - trim_end);
501 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
505 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
506 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
507 DCPOMATIC_ASSERT (ra);
508 ra->set_entry_point (ra->entry_point() + trim_start);
509 ra->set_duration (ra->duration() - trim_start - trim_end);
511 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
515 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
516 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
517 DCPOMATIC_ASSERT (l);
518 l->set_entry_point (l->entry_point() + trim_start);
519 l->set_duration (l->duration() - trim_start - trim_end);
521 ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
526 /* Assume that main picture duration is the length of the reel */
527 offset += k->main_picture()->duration ();
/* One step of playback: find whichever of (pieces, black filler, silence
   filler) is farthest behind, make it emit some data, then flush any audio
   that is definitely complete and any delayed video.
   NOTE(review): mangled extraction — the signature line (presumably
   `bool Player::pass ()`) and many lines (braces, returns, switch on what
   to emit) are elided; code left byte-identical. */
537 boost::mutex::scoped_lock lm (_mutex);
540 /* We can't pass in this state */
544 if (_playlist->length() == DCPTime()) {
545 /* Special case of an empty Film; just give one black frame */
546 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
550 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
552 shared_ptr<Piece> earliest_content;
553 optional<DCPTime> earliest_time;
555 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
560 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
561 if (t > i->content->end()) {
565 /* Given two choices at the same time, pick the one with texts so we see it before the video */
568 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
570 earliest_content = i;
584 if (earliest_content) {
588 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
589 earliest_time = _black.position ();
593 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
594 earliest_time = _silent.position ();
600 earliest_content->done = earliest_content->decoder->pass ();
603 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
604 _black.set_position (_black.position() + one_video_frame());
608 DCPTimePeriod period (_silent.period_at_position());
609 if (_last_audio_time) {
610 /* Sometimes the thing that happened last finishes fractionally before
611 this silence. Bodge the start time of the silence to fix it. I'm
612 not sure if this is the right solution --- maybe the last thing should
613 be padded `forward' rather than this thing padding `back'. */
615 period.from = min(period.from, *_last_audio_time);
617 if (period.duration() > one_video_frame()) {
618 period.to = period.from + one_video_frame();
621 _silent.set_position (period.to);
629 /* Emit any audio that is ready */
631 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
632 of our streams, or the position of the _silent. */
634 DCPTime pull_to = _film->length ();
635 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
636 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
637 pull_to = i->second.last_push_end;
640 if (!_silent.done() && _silent.position() < pull_to) {
641 pull_to = _silent.position();
644 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
645 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
646 if (_last_audio_time && i->second < *_last_audio_time) {
647 /* This new data comes before the last we emitted (or the last seek); discard it */
648 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
653 } else if (_last_audio_time && i->second > *_last_audio_time) {
654 /* There's a gap between this data and the last we emitted; fill with silence */
655 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
658 emit_audio (i->first, i->second);
/* Flush delayed video that is now safe to emit */
663 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
664 do_emit_video(i->first, i->second);
/* NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, the BOOST_FOREACH over active texts, push_back of scaled bitmap
   images); code left byte-identical. */
671 /** @return Open subtitles for the frame at the given time, converted to images */
672 optional<PositionImage>
673 Player::open_subtitles_for_frame (DCPTime time) const
675 list<PositionImage> captions;
676 int const vfr = _film->video_frame_rate();
/* Collect texts burnt-in (or forced burnt-in) that overlap this one-frame period */
680 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
683 /* Bitmap subtitles */
684 BOOST_FOREACH (BitmapText i, j.bitmap) {
689 /* i.image will already have been scaled to fit _video_container_size */
690 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
696 lrint (_video_container_size.width * i.rectangle.x),
697 lrint (_video_container_size.height * i.rectangle.y)
703 /* String subtitles (rendered to an image) */
704 if (!j.string.empty ()) {
705 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
706 copy (s.begin(), s.end(), back_inserter (captions));
710 if (captions.empty ()) {
711 return optional<PositionImage> ();
714 return merge (captions);
/* Receive one decoded video frame from a piece's decoder: discard frames
   outside the interesting range, fill any gap since the last emitted frame
   (handling 3D eye sequencing), then build a PlayerVideo and emit it
   (repeating as required by the frame-rate change).
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, early returns, PlayerVideo constructor arguments); code left
   byte-identical. */
718 Player::video (weak_ptr<Piece> wp, ContentVideo video)
720 shared_ptr<Piece> piece = wp.lock ();
/* Skip every other frame when the content rate is double the DCP rate */
725 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
726 if (frc.skip && (video.frame % 2) == 1) {
730 /* Time of the first frame we will emit */
731 DCPTime const time = content_video_to_dcp (piece, video.frame);
733 /* Discard if it's before the content's period or the last accurate seek. We can't discard
734 if it's after the content's period here as in that case we still need to fill any gap between
735 `now' and the end of the content's period. */
737 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
741 /* Fill gaps that we discover now that we have some video which needs to be emitted.
742 This is where we need to fill to. */
744 DCPTime fill_to = min (time, piece->content->end());
746 if (_last_video_time) {
747 DCPTime fill_from = max (*_last_video_time, piece->content->position());
748 LastVideoMap::const_iterator last = _last_video.find (wp);
749 if (_film->three_d()) {
750 Eyes fill_to_eyes = video.eyes;
751 if (fill_to_eyes == EYES_BOTH) {
752 fill_to_eyes = EYES_LEFT;
754 if (fill_to == piece->content->end()) {
755 /* Don't fill after the end of the content */
756 fill_to_eyes = EYES_LEFT;
758 DCPTime j = fill_from;
759 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
760 if (eyes == EYES_BOTH) {
/* Fill missing eyes/frames with a copy of the last frame, or black if none */
763 while (j < fill_to || eyes != fill_to_eyes) {
764 if (last != _last_video.end()) {
765 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
766 copy->set_eyes (eyes);
767 emit_video (copy, j);
769 emit_video (black_player_video_frame(eyes), j);
771 if (eyes == EYES_RIGHT) {
772 j += one_video_frame();
774 eyes = increment_eyes (eyes);
777 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
778 if (last != _last_video.end()) {
779 emit_video (last->second, j);
781 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame, remembering it for future gap-filling */
787 _last_video[wp].reset (
790 piece->content->video->crop (),
791 piece->content->video->fade (video.frame),
792 piece->content->video->scale().size (
793 piece->content->video, _video_container_size, _film->frame_size ()
795 _video_container_size,
798 piece->content->video->colour_conversion(),
/* Emit the frame, repeated if the frame-rate change requires it */
805 for (int i = 0; i < frc.repeat; ++i) {
806 if (t < piece->content->end()) {
807 emit_video (_last_video[wp], t);
809 t += one_video_frame ();
/* Receive one block of decoded audio from a piece's decoder: trim it to the
   content's period, apply gain, remap to the film's channel layout, run the
   audio processor if any, then push into the merger and record how far this
   stream has been pushed.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, early returns, remap/processor guards); code left byte-identical. */
814 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
816 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
818 shared_ptr<Piece> piece = wp.lock ();
823 shared_ptr<AudioContent> content = piece->content->audio;
824 DCPOMATIC_ASSERT (content);
826 /* Compute time in the DCP */
827 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
828 /* And the end of this block in the DCP */
829 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
831 /* Remove anything that comes before the start or after the end of the content */
832 if (time < piece->content->position()) {
833 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
835 /* This audio is entirely discarded */
838 content_audio.audio = cut.first;
840 } else if (time > piece->content->end()) {
843 } else if (end > piece->content->end()) {
844 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
845 if (remaining_frames == 0) {
848 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
849 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
850 content_audio.audio = cut;
853 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
857 if (content->gain() != 0) {
858 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
859 gain->apply_gain (content->gain ());
860 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
865 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
869 if (_audio_processor) {
870 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
875 _audio_merger.push (content_audio.audio, time);
876 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
877 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Receive the start of a bitmap subtitle from a text decoder: apply the
   content's offsets and scale to the subtitle rectangle, scale the image to
   the container size, and record it as active from the computed DCP time.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, early return, PlayerText `ps` declaration); code left
   byte-identical. */
881 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
883 shared_ptr<Piece> piece = wp.lock ();
884 shared_ptr<const TextContent> text = wc.lock ();
885 if (!piece || !text) {
889 /* Apply content's subtitle offsets */
890 subtitle.sub.rectangle.x += text->x_offset ();
891 subtitle.sub.rectangle.y += text->y_offset ();
893 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
894 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
895 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
897 /* Apply content's subtitle scale */
898 subtitle.sub.rectangle.width *= text->x_scale ();
899 subtitle.sub.rectangle.height *= text->y_scale ();
902 shared_ptr<Image> image = subtitle.sub.image;
903 /* We will scale the subtitle up to fit _video_container_size */
904 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
905 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
906 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
908 _active_texts[text->type()].add_from (wc, ps, from);
/* Receive the start of a plain (string) subtitle from a text decoder: apply
   offsets, express the x/y scales as a size change plus an aspect
   adjustment, then record the subtitle as active from the computed DCP time.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, early returns, PlayerText `ps` declaration); code left
   byte-identical. */
912 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
914 shared_ptr<Piece> piece = wp.lock ();
915 shared_ptr<const TextContent> text = wc.lock ();
916 if (!piece || !text) {
921 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content's period */
923 if (from > piece->content->end()) {
927 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
928 s.set_h_position (s.h_position() + text->x_offset ());
929 s.set_v_position (s.v_position() + text->y_offset ());
930 float const xs = text->x_scale();
931 float const ys = text->y_scale();
932 float size = s.size();
934 /* Adjust size to express the common part of the scaling;
935 e.g. if xs = ys = 0.5 we scale size by 2. */
937 if (xs > 1e-5 && ys > 1e-5) {
938 size *= 1 / min (1 / xs, 1 / ys);
942 /* Then express aspect ratio changes */
943 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
944 s.set_aspect_adjust (xs / ys);
947 s.set_in (dcp::Time(from.seconds(), 1000));
948 ps.string.push_back (StringText (s, text->outline_width()));
949 ps.add_fonts (text->fonts ());
952 _active_texts[text->type()].add_from (wc, ps, from);
/* Receive the end time of a subtitle: close the matching active text and,
   if it is not being burnt into the image, emit it via the Text signal.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, early returns); code left byte-identical. */
956 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
958 shared_ptr<const TextContent> text = wc.lock ();
/* If we never saw the corresponding start, there is nothing to stop */
963 if (!_active_texts[text->type()].have(wc)) {
967 shared_ptr<Piece> piece = wp.lock ();
972 DCPTime const dcp_to = content_time_to_dcp (piece, to);
974 if (dcp_to > piece->content->end()) {
978 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
980 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
981 if (text->use() && !always && !text->burn()) {
982 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to a DCP time.  Flushes/clears pending audio and active
   texts, seeks each piece's decoder (or marks it done if the time is past
   it), then resets the emission bookkeeping: exact times for an accurate
   seek, unset for an inaccurate one.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, `if (accurate)`/`else`, done flags); code left byte-identical. */
987 Player::seek (DCPTime time, bool accurate)
989 boost::mutex::scoped_lock lm (_mutex);
992 /* We can't seek in this state */
1002 if (_audio_processor) {
1003 _audio_processor->flush ();
1006 _audio_merger.clear ();
1007 for (int i = 0; i < TEXT_COUNT; ++i) {
1008 _active_texts[i].clear ();
1011 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1012 if (time < i->content->position()) {
1013 /* Before; seek to the start of the content */
1014 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1016 } else if (i->content->position() <= time && time < i->content->end()) {
1017 /* During; seek to position */
1018 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1021 /* After; this piece is done */
1027 _last_video_time = time;
1028 _last_video_eyes = EYES_LEFT;
1029 _last_audio_time = time;
1031 _last_video_time = optional<DCPTime>();
1032 _last_video_eyes = optional<Eyes>();
1033 _last_audio_time = optional<DCPTime>();
1036 _black.set_position (time);
1037 _silent.set_position (time);
1039 _last_video.clear ();
/* Queue a video frame for emission via a short delay buffer, updating the
   last-emitted time/eyes bookkeeping, and actually emit the oldest delayed
   frame once the buffer is long enough.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, pop_front of _delay); code left byte-identical. */
1043 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1045 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1046 player before the video that requires them. */
1048 _delay.push_back (make_pair (pv, time));
/* Only advance the clock after the right eye (or a 2D frame) */
1050 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1051 _last_video_time = time + one_video_frame();
1053 _last_video_eyes = increment_eyes (pv->eyes());
1055 if (_delay.size() < 3) {
1059 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1061 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire active texts up to its time (once per
   full frame), burn in any open subtitles for that time, then signal it.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, the Video signal emission); code left byte-identical. */
1065 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1067 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1068 for (int i = 0; i < TEXT_COUNT; ++i) {
1069 _active_texts[i].clear_before (time);
1073 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1075 pv->set_text (subtitles.get ());
/* Emit a block of audio, asserting that it follows on exactly from the
   previous block, and advance the last-audio-time accordingly.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, the Audio signal emission); code left byte-identical. */
1082 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1084 /* Log if the assert below is about to fail */
1085 if (_last_audio_time && time != *_last_audio_time) {
1086 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1089 /* This audio must follow on from the previous */
1090 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1092 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Fill a period with silence, emitting it in blocks of at most half a
   second so no single buffer is too large.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, the `t += block;` advance); code left byte-identical. */
1096 Player::fill_audio (DCPTimePeriod period)
1098 if (period.from == period.to) {
1102 DCPOMATIC_ASSERT (period.from < period.to);
1104 DCPTime t = period.from;
1105 while (t < period.to) {
1106 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1107 Frame const samples = block.frames_round(_film->audio_frame_rate());
1109 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1110 silence->make_silent ();
1111 emit_audio (silence, t);
1118 Player::one_video_frame () const
1120 return DCPTime::from_frames (1, _film->video_frame_rate ());
1123 pair<shared_ptr<AudioBuffers>, DCPTime>
1124 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1126 DCPTime const discard_time = discard_to - time;
1127 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1128 Frame remaining_frames = audio->frames() - discard_frames;
1129 if (remaining_frames <= 0) {
1130 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1132 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1133 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1134 return make_pair(cut, time + discard_time);
/* Set (or clear) the forced JPEG2000 decode reduction for DCP content,
   rebuilding pieces and emitting PENDING / CANCELLED / DONE notifications.
   NOTE(review): mangled extraction — embedded line numbers and elided lines
   (braces, early return); code left byte-identical. */
1138 Player::set_dcp_decode_reduction (optional<int> reduction)
1140 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1143 boost::mutex::scoped_lock lm (_mutex);
/* No change: tell observers the pending change was cancelled */
1145 if (reduction == _dcp_decode_reduction) {
1147 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1151 _dcp_decode_reduction = reduction;
1152 setup_pieces_unlocked ();
1155 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1159 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1161 boost::mutex::scoped_lock lm (_mutex);
1163 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1164 if (i->content == content) {
1165 return content_time_to_dcp (i, t);
1169 /* We couldn't find this content; perhaps things are being changed over */
1170 return optional<DCPTime>();