/*
    Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
73 using boost::shared_ptr;
74 using boost::weak_ptr;
75 using boost::dynamic_pointer_cast;
76 using boost::optional;
77 using boost::scoped_ptr;
78 using namespace dcpomatic;
/* Identifiers for the aspects of the player's state that are reported
   through the Change signal, so observers can tell what changed. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/** Construct a Player for @param playlist within @param film.
 *  Wires up change signals, sets the video container size from the film,
 *  picks up any audio processor and does an initial accurate seek to zero.
 *  NOTE(review): some initialiser-list/body lines are not visible in this view.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
    , _playlist (playlist)
    , _ignore_video (false)
    , _ignore_audio (false)
    , _ignore_text (false)
    , _always_burn_open_subtitles (false)
    , _play_referenced (false)
    , _audio_merger (_film->audio_frame_rate())
    _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
    /* The butler must hear about this first, so since we are proxying this through to the butler we must
    _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
    _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
    set_video_container_size (_film->frame_size ());
    /* Route through the normal change path so any audio processor is cloned */
    film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
    /* Accurate seek to the start so we begin from a well-defined state */
    seek (DCPTime (), true);
/** Public entry point: take the state mutex and rebuild our pieces */
Player::setup_pieces ()
    boost::mutex::scoped_lock lm (_mutex);
    setup_pieces_unlocked ();
/* Predicate: does this piece have a decoder with a video part?
   Used below when building the _black Empty. */
have_video (shared_ptr<Piece> piece)
    return piece->decoder && piece->decoder->video;
/* Predicate: does this piece have a decoder with an audio part?
   Used below when building the _silent Empty. */
have_audio (shared_ptr<Piece> piece)
    return piece->decoder && piece->decoder->audio;
/** Rebuild _pieces (decoder + content pairs) from the playlist.
 *  Caller must hold _mutex.  Also rebuilds _stream_states, _black and
 *  _silent, and resets the last-emitted-time bookkeeping.
 *  NOTE(review): some lines of this function are not visible in this view.
 */
Player::setup_pieces_unlocked ()
    /* Fresh Shuffler; it re-orders 3D L/R frames before passing them to video() */
    _shuffler = new Shuffler();
    _shuffler->Video.connect(bind(&Player::video, this, _1, _2));

    BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

        /* Skip content whose files are missing */
        if (!i->paths_valid ()) {

        if (_ignore_video && _ignore_audio && i->text.empty()) {
            /* We're only interested in text and this content has none */

        shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
        FrameRateChange frc (_film, i);

            /* Not something that we can decode; e.g. Atmos content */

        /* Honour the ignore flags by silencing the relevant decoder parts */
        if (decoder->video && _ignore_video) {
            decoder->video->set_ignore (true);

        if (decoder->audio && _ignore_audio) {
            decoder->audio->set_ignore (true);

            BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
                i->set_ignore (true);

        /* DCP content may play its referenced (VF) assets directly */
        shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
            dcp->set_decode_referenced (_play_referenced);
            if (_play_referenced) {
                dcp->set_forced_reduction (_dcp_decode_reduction);

        shared_ptr<Piece> piece (new Piece (i, decoder, frc));
        _pieces.push_back (piece);

        if (decoder->video) {
            if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
                decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
                /* 2D (or 3D in one stream): deliver straight to video() */
                decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));

        if (decoder->audio) {
            decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));

        /* Hook up each text decoder's start/stop signals */
        list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

        while (j != decoder->text.end()) {
            (*j)->BitmapStart.connect (
                bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
            (*j)->PlainStart.connect (
                bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)

    /* Record, per audio stream, the piece it belongs to and its start position */
    _stream_states.clear ();
    BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
        if (i->content->audio) {
            BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
                _stream_states[j] = StreamState (i, i->content->position ());

    /* Periods with no video get black; periods with no audio get silence */
    _black = Empty (_film, _pieces, bind(&have_video, _1));
    _silent = Empty (_film, _pieces, bind(&have_audio, _1));

    _last_video_time = DCPTime ();
    _last_video_eyes = EYES_BOTH;
    _last_audio_time = DCPTime ();
/** Handle a change to some content in the playlist, rebuilding pieces
 *  when the change has gone through, then proxying the change on to
 *  our own observers.
 */
Player::playlist_content_change (ChangeType type, int property, bool frequent)
    if (type == CHANGE_TYPE_PENDING) {
        boost::mutex::scoped_lock lm (_mutex);
        /* The player content is probably about to change, so we can't carry on
           until that has happened and we've rebuilt our pieces.  Stop pass()
           and seek() from working until then.
    } else if (type == CHANGE_TYPE_DONE) {
        /* A change in our content has gone through.  Re-build our pieces. */
    } else if (type == CHANGE_TYPE_CANCELLED) {
        boost::mutex::scoped_lock lm (_mutex);

    /* Forward the change notification to our own observers */
    Change (type, property, frequent);
/** Set the size of the "container" into which output video is placed,
 *  re-making the cached black frame to match.  Emits PENDING, then
 *  either CANCELLED (no-op) or DONE.
 */
Player::set_video_container_size (dcp::Size s)
    Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

        boost::mutex::scoped_lock lm (_mutex);

        /* No change: cancel rather than re-announce */
        if (s == _video_container_size) {
            Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

        _video_container_size = s;

        /* Cached black frame used whenever there is no video to show */
        _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
        _black_image->make_black ();

    Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/** Handle a change to the playlist itself; rebuild pieces when done
 *  and proxy the notification to our observers.
 */
Player::playlist_change (ChangeType type)
    if (type == CHANGE_TYPE_DONE) {
    Change (type, PlayerProperty::PLAYLIST, false);
/** React to a change in a Film property that affects our output */
Player::film_change (ChangeType type, Film::Property p)
    /* Here we should notice Film properties that affect our output, and
       alert listeners that our output now would be different to how it was
       last time we were run.

    if (p == Film::CONTAINER) {
        Change (type, PlayerProperty::FILM_CONTAINER, false);
    } else if (p == Film::VIDEO_FRAME_RATE) {
        /* Pieces contain a FrameRateChange which contains the DCP frame rate,
           so we need new pieces here.
        if (type == CHANGE_TYPE_DONE) {
        Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
    } else if (p == Film::AUDIO_PROCESSOR) {
        if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
            boost::mutex::scoped_lock lm (_mutex);
            /* Clone the film's processor at our output sample rate */
            _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
    } else if (p == Film::AUDIO_CHANNELS) {
        if (type == CHANGE_TYPE_DONE) {
            boost::mutex::scoped_lock lm (_mutex);
            /* Pending mixed audio is no longer valid with a new channel count */
            _audio_merger.clear ();
/** @return A new PlayerVideo wrapping the cached black image, sized to
 *  the video container, for the given @param eyes.
 *  NOTE(review): some constructor-argument lines are not visible in this view.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
    return shared_ptr<PlayerVideo> (
            shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
            _video_container_size,
            _video_container_size,
            PresetColourConversion::all().front().conversion,
            /* No originating content and no frame number for synthetic black */
            boost::weak_ptr<Content>(),
            boost::optional<Frame>()
/** Convert a DCP time to a video frame index within @param piece's content */
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
    DCPTime s = t - piece->content->position ();
    s = min (piece->content->length_after_trim(_film), s);
    s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

    /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
       then convert that ContentTime to frames at the content's rate.  However this fails for
       situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
       enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

       Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
    return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/** Convert a content video frame index in @param piece to a DCP time */
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
    /* See comment in dcp_to_content_video */
    DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
    return d + piece->content->position();
/** Convert a DCP time to a resampled audio frame index within @param piece */
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
    DCPTime s = t - piece->content->position ();
    s = min (piece->content->length_after_trim(_film), s);
    /* See notes in dcp_to_content_video */
    return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/** Convert a resampled audio frame index in @param piece to a DCP time */
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
    /* See comment in dcp_to_content_video */
    return DCPTime::from_frames (f, _film->audio_frame_rate())
        - DCPTime (piece->content->trim_start(), piece->frc)
        + piece->content->position();
/** Convert a DCP time to a ContentTime within @param piece's content */
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
    DCPTime s = t - piece->content->position ();
    s = min (piece->content->length_after_trim(_film), s);
    return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/** Convert a ContentTime within @param piece's content to a DCP time */
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
    return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** @return All fonts used by the text content of all pieces */
list<shared_ptr<Font> >
Player::get_subtitle_fonts ()
    boost::mutex::scoped_lock lm (_mutex);

    list<shared_ptr<Font> > fonts;
    BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
        BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
            /* XXX: things may go wrong if there are duplicate font IDs
               with different font files.
            list<shared_ptr<Font> > f = j->fonts ();
            copy (f.begin(), f.end(), back_inserter (fonts));
/** Set this player never to produce any video data */
Player::set_ignore_video ()
    boost::mutex::scoped_lock lm (_mutex);
    _ignore_video = true;
    /* Rebuild so decoders are created with video ignored */
    setup_pieces_unlocked ();
/** Set this player never to produce any audio data */
Player::set_ignore_audio ()
    boost::mutex::scoped_lock lm (_mutex);
    _ignore_audio = true;
    /* Rebuild so decoders are created with audio ignored */
    setup_pieces_unlocked ();
/** Set this player never to produce any text (subtitle/caption) data */
Player::set_ignore_text ()
    boost::mutex::scoped_lock lm (_mutex);
    setup_pieces_unlocked ();
/** Set the player to always burn open texts into the image regardless of the content settings */
Player::set_always_burn_open_subtitles ()
    boost::mutex::scoped_lock lm (_mutex);
    _always_burn_open_subtitles = true;
/** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature line of this function is not visible in this view. */
    boost::mutex::scoped_lock lm (_mutex);
    /* Rebuild pieces so decoders pick up the fast flag */
    setup_pieces_unlocked ();
/** Make the player decode and play DCP content that would otherwise be
 *  referenced (passed through) rather than decoded.
 */
Player::set_play_referenced ()
    boost::mutex::scoped_lock lm (_mutex);
    _play_referenced = true;
    setup_pieces_unlocked ();
/** Trim a reel asset by the given start/end frame counts and, if anything
 *  remains, add it to @param a with its DCP period starting at @param from.
 *  @param ffr Film video frame rate, used to compute the asset's duration.
 */
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
    DCPOMATIC_ASSERT (r);
    /* Apply the trims to the asset's entry point and duration */
    r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
    r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
    if (r->actual_duration() > 0) {
            ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/** @return Reel assets from DCP content that is marked to be referenced
 *  (rather than re-encoded), with trims applied and positioned in DCP time.
 *  NOTE(review): some lines of this function are not visible in this view.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
    /* Does not require a lock on _mutex as it's only called from DCPEncoder */

    list<ReferencedReelAsset> a;

    BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
        /* Only DCP content can be referenced */
        shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);

        scoped_ptr<DCPDecoder> decoder;
            decoder.reset (new DCPDecoder (_film, j, false));

        DCPOMATIC_ASSERT (j->video_frame_rate ());
        double const cfr = j->video_frame_rate().get();
        /* Trims expressed as frame counts at the content rate */
        Frame const trim_start = j->trim_start().frames_round (cfr);
        Frame const trim_end = j->trim_end().frames_round (cfr);
        int const ffr = _film->video_frame_rate ();

        /* position in the asset from the start */
        int64_t offset_from_start = 0;
        /* position in the asset from the end */
        int64_t offset_from_end = 0;
        /* First pass: total up the length so offset_from_end starts at the full duration */
        BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
            /* Assume that main picture duration is the length of the reel */
            offset_from_end += k->main_picture()->actual_duration();

        BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

            /* Assume that main picture duration is the length of the reel */
            int64_t const reel_duration = k->main_picture()->actual_duration();

            /* See doc/design/trim_reels.svg */
            Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
            Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

            /* Where this reel starts in the DCP */
            DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
            if (j->reference_video ()) {
                maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);

            if (j->reference_audio ()) {
                maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);

            if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
                maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);

            if (j->reference_text (TEXT_CLOSED_CAPTION)) {
                BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
                    maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);

            offset_from_start += reel_duration;
            offset_from_end -= reel_duration;
/* Body of Player::pass(): make whichever of the decoders / black / silence
   generators is farthest behind emit some data, then flush any audio that
   is definitely complete and any delayed video.
   NOTE(review): the signature and some lines are not visible in this view. */
    boost::mutex::scoped_lock lm (_mutex);

    /* We can't pass in this state */

    if (_playlist->length(_film) == DCPTime()) {
        /* Special case of an empty Film; just give one black frame */
        emit_video (black_player_video_frame(EYES_BOTH), DCPTime());

    /* Find the decoder or empty which is farthest behind where we are and make it emit some data */

    shared_ptr<Piece> earliest_content;
    optional<DCPTime> earliest_time;

    BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {

        /* Where this piece's decoder currently is, in DCP time */
        DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
        if (t > i->content->end(_film)) {

            /* Given two choices at the same time, pick the one with texts so we see it before

            if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
                earliest_content = i;

    if (earliest_content) {

    /* Black or silence may be even further behind than the earliest decoder */
    if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
        earliest_time = _black.position ();

    if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
        earliest_time = _silent.position ();

        earliest_content->done = earliest_content->decoder->pass ();
        shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
        if (dcp && !_play_referenced && dcp->reference_audio()) {
            /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
               to `hide' the fact that no audio was emitted during the referenced DCP (though
               we need to behave as though it was).
            _last_audio_time = dcp->end (_film);

        /* Emit a black frame and advance the black generator */
        emit_video (black_player_video_frame(EYES_BOTH), _black.position());
        _black.set_position (_black.position() + one_video_frame());

        DCPTimePeriod period (_silent.period_at_position());
        if (_last_audio_time) {
            /* Sometimes the thing that happened last finishes fractionally before
               or after this silence.  Bodge the start time of the silence to fix it.
            DCPOMATIC_ASSERT (labs(period.from.get() - _last_audio_time->get()) < 2);
            period.from = *_last_audio_time;
        /* Emit at most one video frame's worth of silence per pass */
        if (period.duration() > one_video_frame()) {
            period.to = period.from + one_video_frame();
        _silent.set_position (period.to);

    /* Emit any audio that is ready */

    /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
       of our streams, or the position of the _silent.
    DCPTime pull_to = _film->length ();
    for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
        if (!i->second.piece->done && i->second.last_push_end < pull_to) {
            pull_to = i->second.last_push_end;
    if (!_silent.done() && _silent.position() < pull_to) {
        pull_to = _silent.position();

    list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
    for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
        if (_last_audio_time && i->second < *_last_audio_time) {
            /* This new data comes before the last we emitted (or the last seek); discard it */
            pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
        } else if (_last_audio_time && i->second > *_last_audio_time) {
            /* There's a gap between this data and the last we emitted; fill with silence */
            fill_audio (DCPTimePeriod (*_last_audio_time, i->second));

        emit_audio (i->first, i->second);

    /* Flush the video delay queue */
    for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
        do_emit_video(i->first, i->second);
/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
    list<PositionImage> captions;
    int const vfr = _film->video_frame_rate();

        /* Get texts active during this one video frame's period */
        _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)

        /* Bitmap subtitles */
        BOOST_FOREACH (BitmapText i, j.bitmap) {

            /* i.image will already have been scaled to fit _video_container_size */
            dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

                    /* Rectangle coordinates are fractions of the container; convert to pixels */
                    lrint (_video_container_size.width * i.rectangle.x),
                    lrint (_video_container_size.height * i.rectangle.y)

        /* String subtitles (rendered to an image) */
        if (!j.string.empty ()) {
            list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
            copy (s.begin(), s.end(), back_inserter (captions));

    if (captions.empty ()) {
        return optional<PositionImage> ();

    /* Combine all caption images into one */
    return merge (captions);
/** Handle some video data arriving from a piece's decoder: work out its DCP
 *  time, fill any gap since the last emitted video (repeating the previous
 *  frame or emitting black, handling 3D eye sequencing), then build and
 *  emit the PlayerVideo (repeated as required by the frame-rate change).
 *  NOTE(review): some lines of this function are not visible in this view.
 */
Player::video (weak_ptr<Piece> wp, ContentVideo video)
    shared_ptr<Piece> piece = wp.lock ();

    FrameRateChange frc (_film, piece->content);
    /* When skipping (content rate higher than DCP rate) drop every other frame */
    if (frc.skip && (video.frame % 2) == 1) {

    /* Time of the first frame we will emit */
    DCPTime const time = content_video_to_dcp (piece, video.frame);

    /* Discard if it's before the content's period or the last accurate seek.  We can't discard
       if it's after the content's period here as in that case we still need to fill any gap between
       `now' and the end of the content's period.
    if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {

    /* Fill gaps that we discover now that we have some video which needs to be emitted.
       This is where we need to fill to.
    DCPTime fill_to = min (time, piece->content->end(_film));

    if (_last_video_time) {
        DCPTime fill_from = max (*_last_video_time, piece->content->position());

        /* Fill if we have more than half a frame to do */
        if ((fill_to - fill_from) > one_video_frame() / 2) {
            LastVideoMap::const_iterator last = _last_video.find (wp);
            if (_film->three_d()) {
                /* 3D: fill frame-by-frame per eye, tracking which eye is next */
                Eyes fill_to_eyes = video.eyes;
                if (fill_to_eyes == EYES_BOTH) {
                    fill_to_eyes = EYES_LEFT;
                if (fill_to == piece->content->end(_film)) {
                    /* Don't fill after the end of the content */
                    fill_to_eyes = EYES_LEFT;
                DCPTime j = fill_from;
                Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
                if (eyes == EYES_BOTH) {
                while (j < fill_to || eyes != fill_to_eyes) {
                    if (last != _last_video.end()) {
                        /* Repeat the last frame we have, re-tagged for this eye */
                        shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
                        copy->set_eyes (eyes);
                        emit_video (copy, j);
                        emit_video (black_player_video_frame(eyes), j);
                    /* Advance time only after the right eye has been emitted */
                    if (eyes == EYES_RIGHT) {
                        j += one_video_frame();
                    eyes = increment_eyes (eyes);
                /* 2D: repeat the last frame (or black) for each missing frame */
                for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
                    if (last != _last_video.end()) {
                        emit_video (last->second, j);
                        emit_video (black_player_video_frame(EYES_BOTH), j);

    /* Build the PlayerVideo for this frame and remember it for gap-filling */
    _last_video[wp].reset (
            piece->content->video->crop (),
            piece->content->video->fade (_film, video.frame),
            piece->content->video->scale().size (
                piece->content->video, _video_container_size, _film->frame_size ()
            _video_container_size,
            piece->content->video->colour_conversion(),
            piece->content->video->range(),

    /* Emit the frame, repeated if the frame-rate change requires it */
    for (int i = 0; i < frc.repeat; ++i) {
        if (t < piece->content->end(_film)) {
            emit_video (_last_video[wp], t);
        t += one_video_frame ();
/** Handle some audio data arriving from a piece's decoder: place it in DCP
 *  time, trim anything outside the content's period, apply gain, remapping
 *  and any audio processor, then push it into the merger.
 *  NOTE(review): some lines of this function are not visible in this view.
 */
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
    DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

    shared_ptr<Piece> piece = wp.lock ();

    shared_ptr<AudioContent> content = piece->content->audio;
    DCPOMATIC_ASSERT (content);

    /* Rate of this content's audio after resampling to the film's rate */
    int const rfr = content->resampled_frame_rate (_film);

    /* Compute time in the DCP */
    DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
    /* And the end of this block in the DCP */
    DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

    /* Remove anything that comes before the start or after the end of the content */
    if (time < piece->content->position()) {
        pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
            /* This audio is entirely discarded */
        content_audio.audio = cut.first;
    } else if (time > piece->content->end(_film)) {
    } else if (end > piece->content->end(_film)) {
        /* Block overlaps the end of the content; keep only the part inside */
        Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
        if (remaining_frames == 0) {
        shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
        cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
        content_audio.audio = cut;

    DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

    /* Apply the content's gain, if any */
    if (content->gain() != 0) {
        shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
        gain->apply_gain (content->gain ());
        content_audio.audio = gain;

    /* Remap to the film's channel layout */
    content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

    if (_audio_processor) {
        content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());

    /* Queue for mixing and record how far this stream has got */
    _audio_merger.push (content_audio.audio, time);
    DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
    _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/** Handle the start of a bitmap text (subtitle) from a decoder: apply the
 *  content's offsets and scaling, scale the image to the container and
 *  record the text as active from its DCP start time.
 */
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
    shared_ptr<Piece> piece = wp.lock ();
    shared_ptr<const TextContent> text = wc.lock ();
    if (!piece || !text) {

    /* Apply content's subtitle offsets */
    subtitle.sub.rectangle.x += text->x_offset ();
    subtitle.sub.rectangle.y += text->y_offset ();

    /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
    subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
    subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

    /* Apply content's subtitle scale */
    subtitle.sub.rectangle.width *= text->x_scale ();
    subtitle.sub.rectangle.height *= text->y_scale ();

    shared_ptr<Image> image = subtitle.sub.image;

    /* We will scale the subtitle up to fit _video_container_size */
    dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
    ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
    DCPTime from (content_time_to_dcp (piece, subtitle.from()));
    _active_texts[text->type()].add_from (wc, ps, from);
/** Handle the start of a plain (string) text from a decoder: apply the
 *  content's position offsets and scaling to each subtitle string and
 *  record the text as active from its DCP start time.
 */
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
    shared_ptr<Piece> piece = wp.lock ();
    shared_ptr<const TextContent> text = wc.lock ();
    if (!piece || !text) {

    DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

    /* Ignore text starting after the content has finished */
    if (from > piece->content->end(_film)) {

    BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
        s.set_h_position (s.h_position() + text->x_offset ());
        s.set_v_position (s.v_position() + text->y_offset ());
        float const xs = text->x_scale();
        float const ys = text->y_scale();
        float size = s.size();

        /* Adjust size to express the common part of the scaling;
           e.g. if xs = ys = 0.5 we scale size by 2.
        if (xs > 1e-5 && ys > 1e-5) {
            size *= 1 / min (1 / xs, 1 / ys);

        /* Then express aspect ratio changes */
        if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
            s.set_aspect_adjust (xs / ys);

        /* dcp::Time with editable-units denominator of 1000 (milliseconds) */
        s.set_in (dcp::Time(from.seconds(), 1000));
        ps.string.push_back (StringText (s, text->outline_width()));
        ps.add_fonts (text->fonts ());

    _active_texts[text->type()].add_from (wc, ps, from);
/** Handle the end of a text: close the active text and, if it is not being
 *  burnt in, emit it via the Text signal with its full DCP period.
 */
Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
    shared_ptr<const TextContent> text = wc.lock ();

    /* Nothing to stop if we never saw this text start */
    if (!_active_texts[text->type()].have(wc)) {

    shared_ptr<Piece> piece = wp.lock ();

    DCPTime const dcp_to = content_time_to_dcp (piece, to);

    /* Ignore stops after the content has finished */
    if (dcp_to > piece->content->end(_film)) {

    pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);

    /* Burnt-in texts are composited onto video instead of being emitted here */
    bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
    if (text->use() && !always && !text->burn()) {
        Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/** Seek the player to @param time.
 *  @param accurate true to seek exactly to @param time; if false decoders
 *  may start from a nearby convenient point.
 *  Clears all pending state (shuffler, merger, active texts, delay queue)
 *  and seeks every piece's decoder.
 *  NOTE(review): some lines of this function are not visible in this view.
 */
Player::seek (DCPTime time, bool accurate)
    boost::mutex::scoped_lock lm (_mutex);

    /* We can't seek in this state */

    _shuffler->clear ();

    if (_audio_processor) {
        _audio_processor->flush ();

    _audio_merger.clear ();
    for (int i = 0; i < TEXT_COUNT; ++i) {
        _active_texts[i].clear ();

    BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
        if (time < i->content->position()) {
            /* Before; seek to the start of the content */
            i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
        } else if (i->content->position() <= time && time < i->content->end(_film)) {
            /* During; seek to position */
            i->decoder->seek (dcp_to_content_time (i, time), accurate);
            /* After; this piece is done */

        /* Accurate seek: we will emit from exactly this time */
        _last_video_time = time;
        _last_video_eyes = EYES_LEFT;
        _last_audio_time = time;
        /* Inaccurate seek: we don't know what will be emitted first */
        _last_video_time = optional<DCPTime>();
        _last_video_eyes = optional<Eyes>();
        _last_audio_time = optional<DCPTime>();

    _black.set_position (time);
    _silent.set_position (time);

    _last_video.clear ();
/** Queue a video frame for emission, updating the last-video bookkeeping.
 *  Frames are held in a short delay queue so subtitles can catch up.
 */
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
    /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
       player before the video that requires them.
    _delay.push_back (make_pair (pv, time));

    /* Advance the clock once the (right eye of the) frame is complete */
    if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
        _last_video_time = time + one_video_frame();

    _last_video_eyes = increment_eyes (pv->eyes());

    /* Hold up to two frames in the queue before emitting the oldest */
    if (_delay.size() < 3) {

    pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
    do_emit_video (to_do.first, to_do.second);
/** Actually emit a video frame: expire old active texts, burn in any
 *  open subtitles for this time, then signal the frame out.
 */
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
    if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
        for (int i = 0; i < TEXT_COUNT; ++i) {
            _active_texts[i].clear_before (time);

    optional<PositionImage> subtitles = open_subtitles_for_frame (time);
        pv->set_text (subtitles.get ());
/** Emit some audio via the Audio signal and advance _last_audio_time.
 *  The data must follow directly on from the previously-emitted audio.
 */
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
    /* Log if the assert below is about to fail */
    if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
        _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);

    /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
    DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
    Audio (data, time, _film->audio_frame_rate());
    _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/** Emit silence to cover @param period, in blocks of at most half a second */
Player::fill_audio (DCPTimePeriod period)
    /* Nothing to do for an empty period */
    if (period.from == period.to) {

    DCPOMATIC_ASSERT (period.from < period.to);

    DCPTime t = period.from;
    while (t < period.to) {
        DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
        Frame const samples = block.frames_round(_film->audio_frame_rate());
            shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
            silence->make_silent ();
            emit_audio (silence, t);
/** @return The DCP duration of one video frame at the film's frame rate */
Player::one_video_frame () const
    return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Discard the part of @param audio (starting at @param time) that falls
 *  before @param discard_to.
 *  @return The remaining audio and its new start time; a null buffer if
 *  everything was discarded.
 */
pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
    DCPTime const discard_time = discard_to - time;
    Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
    Frame remaining_frames = audio->frames() - discard_frames;
    if (remaining_frames <= 0) {
        /* Everything discarded */
        return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
    shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
    cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
    return make_pair(cut, time + discard_time);
/** Set (or clear, with none) the resolution reduction used when decoding
 *  DCP content.  Rebuilds pieces and notifies observers.
 */
Player::set_dcp_decode_reduction (optional<int> reduction)
    Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

        boost::mutex::scoped_lock lm (_mutex);

        /* No change: cancel rather than rebuild */
        if (reduction == _dcp_decode_reduction) {
            Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);

        _dcp_decode_reduction = reduction;
        setup_pieces_unlocked ();

    Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/** Convert a ContentTime in @param content to DCP time, if that content
 *  is currently among our pieces; empty otherwise.
 */
Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
    boost::mutex::scoped_lock lm (_mutex);

    BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
        if (i->content == content) {
            /* Delegate to the per-piece overload */
            return content_time_to_dcp (i, t);

    /* We couldn't find this content; perhaps things are being changed over */
    return optional<DCPTime>();