2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
// Property identifiers passed through the Player's Change signal so that
// listeners can tell which aspect of the player's output has changed.
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
// NOTE(review): this extract is partial -- each line carries a fused original
// line number and some lines (member-init list start, braces) are missing.
// Comments below describe only what is visible.
//
// Construct a Player for @param film using @param playlist.  Connects to the
// film's and playlist's change signals, sets the video container to the
// film's frame size, picks up any configured audio processor and seeks to
// the start of the programme.
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
100 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
101 /* The butler must hear about this first, so since we are proxying this through to the butler we must
// at_front ensures this Player's handler runs before other slots on the
// playlist's Change signal.
104 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
105 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
106 set_video_container_size (_film->frame_size ());
// Re-use the film_change handler to set up _audio_processor from the film.
108 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
// Accurate seek to time zero so decoding starts from a known state.
111 seek (DCPTime (), true);
// Public entry point: take the state mutex, then rebuild the Piece list.
120 Player::setup_pieces ()
122 boost::mutex::scoped_lock lm (_mutex);
123 setup_pieces_unlocked ();
// Predicate used when building the _black Empty: true if this piece has a
// decoder with a video component.
127 have_video (shared_ptr<Piece> piece)
129 return piece->decoder && piece->decoder->video;
// Predicate used when building the _silent Empty: true if this piece has a
// decoder with an audio component.
133 have_audio (shared_ptr<Piece> piece)
135 return piece->decoder && piece->decoder->audio;
// Rebuild _pieces from the playlist's content: create a decoder per content
// item, apply the ignore flags, wire decoder signals back into this Player,
// rebuild the per-stream state map and reset playback positions.
// Caller must hold _mutex.
139 Player::setup_pieces_unlocked ()
// A fresh Shuffler is created on every rebuild (presumably the old one is
// deleted elsewhere in lines missing from this extract -- confirm).
144 _shuffler = new Shuffler();
145 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
147 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
// Skip content whose files are missing/invalid.
149 if (!i->paths_valid ()) {
153 if (_ignore_video && _ignore_audio && i->text.empty()) {
154 /* We're only interested in text and this content has none */
158 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
159 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
162 /* Not something that we can decode; e.g. Atmos content */
// Propagate the ignore flags into the individual decoder parts.
166 if (decoder->video && _ignore_video) {
167 decoder->video->set_ignore (true);
170 if (decoder->audio && _ignore_audio) {
171 decoder->audio->set_ignore (true);
175 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
176 i->set_ignore (true);
// DCP content gets extra configuration for playing referenced assets.
180 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
182 dcp->set_decode_referenced (_play_referenced);
183 if (_play_referenced) {
184 dcp->set_forced_reduction (_dcp_decode_reduction);
188 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
189 _pieces.push_back (piece);
191 if (decoder->video) {
192 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
193 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
194 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
196 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
200 if (decoder->audio) {
201 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
// Connect the text decoders' start/stop signals; weak_ptrs avoid keeping
// pieces/content alive from within the signal bindings.
204 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
206 while (j != decoder->text.end()) {
207 (*j)->BitmapStart.connect (
208 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
210 (*j)->PlainStart.connect (
211 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
214 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
// Rebuild the per-audio-stream state, keyed by stream, starting at each
// content's position.
221 _stream_states.clear ();
222 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
223 if (i->content->audio) {
224 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
225 _stream_states[j] = StreamState (i, i->content->position ());
// _black/_silent track the periods with no video/audio so pass() can fill them.
230 _black = Empty (_pieces, _film->length(), bind(&have_video, _1));
231 _silent = Empty (_pieces, _film->length(), bind(&have_audio, _1));
// Reset emission positions to the start.
233 _last_video_time = DCPTime ();
234 _last_video_eyes = EYES_BOTH;
235 _last_audio_time = DCPTime ();
// Handle a change to some content in the playlist, proxying the three-phase
// (PENDING / DONE / CANCELLED) change through to our own Change signal.
240 Player::playlist_content_change (ChangeType type, int property, bool frequent)
242 if (type == CHANGE_TYPE_PENDING) {
243 boost::mutex::scoped_lock lm (_mutex);
244 /* The player content is probably about to change, so we can't carry on
245 until that has happened and we've rebuilt our pieces. Stop pass()
246 and seek() from working until then.
249 } else if (type == CHANGE_TYPE_DONE) {
250 /* A change in our content has gone through. Re-build our pieces. */
252 } else if (type == CHANGE_TYPE_CANCELLED) {
253 boost::mutex::scoped_lock lm (_mutex);
// Forward the change notification to our own listeners.
257 Change (type, property, frequent);
// Set the size of the "container" into which output video frames are placed,
// recreating the cached black frame to match.  Emits PENDING, then either
// CANCELLED (no-op change) or DONE.
261 Player::set_video_container_size (dcp::Size s)
263 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
266 boost::mutex::scoped_lock lm (_mutex);
// Unchanged size: cancel the pending change and do nothing.
268 if (s == _video_container_size) {
270 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
274 _video_container_size = s;
// Re-make the black frame used to fill video gaps at the new size.
276 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
277 _black_image->make_black ();
280 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
// Handle a change to the playlist itself; forward it as a PLAYLIST change.
284 Player::playlist_change (ChangeType type)
286 if (type == CHANGE_TYPE_DONE) {
289 Change (type, PlayerProperty::PLAYLIST, false);
// React to Film property changes that affect our output, mapping each Film
// property to the corresponding PlayerProperty notification and updating any
// dependent state.
293 Player::film_change (ChangeType type, Film::Property p)
295 /* Here we should notice Film properties that affect our output, and
296 alert listeners that our output now would be different to how it was
297 last time we were run.
300 if (p == Film::CONTAINER) {
301 Change (type, PlayerProperty::FILM_CONTAINER, false);
302 } else if (p == Film::VIDEO_FRAME_RATE) {
303 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
304 so we need new pieces here.
306 if (type == CHANGE_TYPE_DONE) {
309 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
310 } else if (p == Film::AUDIO_PROCESSOR) {
// Clone the film's processor at our output sample rate.
311 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
312 boost::mutex::scoped_lock lm (_mutex);
313 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
315 } else if (p == Film::AUDIO_CHANNELS) {
// Channel count changed: pending merged audio is no longer valid.
316 if (type == CHANGE_TYPE_DONE) {
317 boost::mutex::scoped_lock lm (_mutex);
318 _audio_merger.clear ();
// Build a PlayerVideo wrapping the cached black image, used to fill periods
// where no content supplies video.  @param eyes which eye(s) the frame is for.
323 shared_ptr<PlayerVideo>
324 Player::black_player_video_frame (Eyes eyes) const
326 return shared_ptr<PlayerVideo> (
328 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
331 _video_container_size,
332 _video_container_size,
335 PresetColourConversion::all().front().conversion,
// No originating content or frame number for synthetic black.
336 boost::weak_ptr<Content>(),
337 boost::optional<Frame>()
// Convert a DCP time to a video frame index within @param piece's content,
// clamping to the content's trimmed extent.
343 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
345 DCPTime s = t - piece->content->position ();
346 s = min (piece->content->length_after_trim(), s);
347 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
349 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
350 then convert that ContentTime to frames at the content's rate. However this fails for
351 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
352 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
354 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
356 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
// Inverse of dcp_to_content_video: convert content video frame @param f to a
// DCP time, accounting for skip/repeat, trim and the content's position.
360 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
362 /* See comment in dcp_to_content_video */
363 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
364 return d + piece->content->position();
// Convert a DCP time to a frame count at the film's audio sample rate for
// @param piece, clamped to the content's trimmed extent.
368 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
370 DCPTime s = t - piece->content->position ();
371 s = min (piece->content->length_after_trim(), s);
372 /* See notes in dcp_to_content_video */
373 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
// Inverse of dcp_to_resampled_audio: convert resampled audio frame @param f
// back to a DCP time for @param piece.
377 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
379 /* See comment in dcp_to_content_video */
380 return DCPTime::from_frames (f, _film->audio_frame_rate())
381 - DCPTime (piece->content->trim_start(), piece->frc)
382 + piece->content->position();
// Convert a DCP time to a ContentTime within @param piece, clamped to the
// content's trimmed extent.
386 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
388 DCPTime s = t - piece->content->position ();
389 s = min (piece->content->length_after_trim(), s);
390 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
// Convert a ContentTime within @param piece to a DCP time (clamped to >= 0).
394 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
396 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
// Collect the fonts used by all text content across all pieces.
// Duplicate font IDs with different files are not resolved (see XXX below).
399 list<shared_ptr<Font> >
400 Player::get_subtitle_fonts ()
402 boost::mutex::scoped_lock lm (_mutex);
404 list<shared_ptr<Font> > fonts;
405 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
406 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
407 /* XXX: things may go wrong if there are duplicate font IDs
408 with different font files.
410 list<shared_ptr<Font> > f = j->fonts ();
411 copy (f.begin(), f.end(), back_inserter (fonts));
418 /** Set this player never to produce any video data */
420 Player::set_ignore_video ()
422 boost::mutex::scoped_lock lm (_mutex);
423 _ignore_video = true;
// Pieces must be rebuilt so the flag reaches the decoders.
424 setup_pieces_unlocked ();
// Set this player never to produce any audio data; rebuilds pieces so the
// flag reaches the decoders.
428 Player::set_ignore_audio ()
430 boost::mutex::scoped_lock lm (_mutex);
431 _ignore_audio = true;
432 setup_pieces_unlocked ();
// Set this player never to produce any text data.  NOTE(review): the line
// that sets the corresponding flag (between original lines 438 and 440) is
// missing from this extract.
436 Player::set_ignore_text ()
438 boost::mutex::scoped_lock lm (_mutex);
440 setup_pieces_unlocked ();
443 /** Set the player to always burn open texts into the image regardless of the content settings */
445 Player::set_always_burn_open_subtitles ()
447 boost::mutex::scoped_lock lm (_mutex);
448 _always_burn_open_subtitles = true;
// NOTE(review): the function signature line for this block (between original
// lines 451 and 455) is missing from this extract; from the doc comment it
// is presumably the fast-mode setter, and the flag assignment (line 456) is
// also missing.
451 /** Sets up the player to be faster, possibly at the expense of quality */
455 boost::mutex::scoped_lock lm (_mutex);
457 setup_pieces_unlocked ();
// Make the player decode and play assets that DCP content references (rather
// than skipping them); rebuilds pieces so DCP decoders pick up the flag.
461 Player::set_play_referenced ()
463 boost::mutex::scoped_lock lm (_mutex);
464 _play_referenced = true;
465 setup_pieces_unlocked ();
// Build the list of reel assets (picture / sound / subtitle / closed caption)
// that referenced DCP content contributes directly to the output, with entry
// points and durations adjusted for the content's trims and each asset placed
// at its DCP time period.
468 list<ReferencedReelAsset>
469 Player::get_reel_assets ()
471 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
473 list<ReferencedReelAsset> a;
475 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
// Only DCP content can reference reel assets.
476 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
481 scoped_ptr<DCPDecoder> decoder;
483 decoder.reset (new DCPDecoder (j, _film->log(), false));
489 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
// Trims are expressed in content frames (cfr); output times in film
// frames (ffr).
491 DCPOMATIC_ASSERT (j->video_frame_rate ());
492 double const cfr = j->video_frame_rate().get();
493 Frame const trim_start = j->trim_start().frames_round (cfr);
494 Frame const trim_end = j->trim_end().frames_round (cfr);
495 int const ffr = _film->video_frame_rate ();
497 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
498 if (j->reference_video ()) {
499 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
500 DCPOMATIC_ASSERT (ra);
// Apply the trims by moving the entry point and shortening the duration.
501 ra->set_entry_point (ra->entry_point() + trim_start);
502 ra->set_duration (ra->duration() - trim_start - trim_end);
504 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
508 if (j->reference_audio ()) {
509 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
510 DCPOMATIC_ASSERT (ra);
511 ra->set_entry_point (ra->entry_point() + trim_start);
512 ra->set_duration (ra->duration() - trim_start - trim_end);
514 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
518 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
519 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
520 DCPOMATIC_ASSERT (ra);
521 ra->set_entry_point (ra->entry_point() + trim_start);
522 ra->set_duration (ra->duration() - trim_start - trim_end);
524 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
528 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
// A reel may carry several closed-caption assets.
529 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
530 DCPOMATIC_ASSERT (l);
531 l->set_entry_point (l->entry_point() + trim_start);
532 l->set_duration (l->duration() - trim_start - trim_end);
534 ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
539 /* Assume that main picture duration is the length of the reel */
540 offset += k->main_picture()->duration ();
// NOTE(review): the function header for this block (between original lines
// 547 and 550) is missing from this extract; from its behaviour this is the
// main pass() step -- advance whichever decoder (or black/silent filler) is
// furthest behind, then emit any audio that is ready.
550 boost::mutex::scoped_lock lm (_mutex);
553 /* We can't pass in this state */
557 if (_playlist->length() == DCPTime()) {
558 /* Special case of an empty Film; just give one black frame */
559 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
563 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
565 shared_ptr<Piece> earliest_content;
566 optional<DCPTime> earliest_time;
568 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
573 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
574 if (t > i->content->end()) {
578 /* Given two choices at the same time, pick the one with texts so we see it before
581 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
583 earliest_content = i;
// Decide what to do: pass the earliest decoder, or emit black/silence if
// the corresponding Empty period is further behind.
597 if (earliest_content) {
601 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
602 earliest_time = _black.position ();
606 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
607 earliest_time = _silent.position ();
613 earliest_content->done = earliest_content->decoder->pass ();
// Fill a video gap with one black frame.
616 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
617 _black.set_position (_black.position() + one_video_frame());
// Fill an audio gap with up to one video frame's worth of silence.
621 DCPTimePeriod period (_silent.period_at_position());
622 if (_last_audio_time) {
623 /* Sometimes the thing that happened last finishes fractionally before
624 this silence. Bodge the start time of the silence to fix it. I'm
625 not sure if this is the right solution --- maybe the last thing should
626 be padded `forward' rather than this thing padding `back'.
628 period.from = min(period.from, *_last_audio_time);
630 if (period.duration() > one_video_frame()) {
631 period.to = period.from + one_video_frame();
634 _silent.set_position (period.to);
642 /* Emit any audio that is ready */
644 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
645 of our streams, or the position of the _silent.
647 DCPTime pull_to = _film->length ();
648 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
649 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
650 pull_to = i->second.last_push_end;
653 if (!_silent.done() && _silent.position() < pull_to) {
654 pull_to = _silent.position();
657 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
658 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
659 if (_last_audio_time && i->second < *_last_audio_time) {
660 /* This new data comes before the last we emitted (or the last seek); discard it */
661 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
666 } else if (_last_audio_time && i->second > *_last_audio_time) {
667 /* There's a gap between this data and the last we emitted; fill with silence */
668 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
671 emit_audio (i->first, i->second);
// Flush any delayed video that is ready to go out.
676 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
677 do_emit_video(i->first, i->second);
684 /** @return Open subtitles for the frame at the given time, converted to images */
685 optional<PositionImage>
686 Player::open_subtitles_for_frame (DCPTime time) const
688 list<PositionImage> captions;
689 int const vfr = _film->video_frame_rate();
// Gather active open subtitles overlapping the one-frame period at `time';
// _always_burn_open_subtitles forces burn-in regardless of content settings.
693 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
696 /* Bitmap subtitles */
697 BOOST_FOREACH (BitmapText i, j.bitmap) {
702 /* i.image will already have been scaled to fit _video_container_size */
703 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
// Rectangle coordinates are proportions of the container; convert to pixels.
709 lrint (_video_container_size.width * i.rectangle.x),
710 lrint (_video_container_size.height * i.rectangle.y)
716 /* String subtitles (rendered to an image) */
717 if (!j.string.empty ()) {
718 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
719 copy (s.begin(), s.end(), back_inserter (captions));
723 if (captions.empty ()) {
724 return optional<PositionImage> ();
727 return merge (captions);
// Handle a video frame arriving from a decoder: work out its DCP time, fill
// any gap since the last emitted video (with a repeat of the previous frame
// or black, handling 3D eye sequencing), then emit the frame (repeated as
// the frame-rate change requires).
731 Player::video (weak_ptr<Piece> wp, ContentVideo video)
733 shared_ptr<Piece> piece = wp.lock ();
738 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
// When skipping (content rate higher than DCP rate) drop odd frames.
739 if (frc.skip && (video.frame % 2) == 1) {
743 /* Time of the first frame we will emit */
744 DCPTime const time = content_video_to_dcp (piece, video.frame);
746 /* Discard if it's before the content's period or the last accurate seek. We can't discard
747 if it's after the content's period here as in that case we still need to fill any gap between
748 `now' and the end of the content's period.
750 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
754 /* Fill gaps that we discover now that we have some video which needs to be emitted.
755 This is where we need to fill to.
757 DCPTime fill_to = min (time, piece->content->end());
759 if (_last_video_time) {
760 DCPTime fill_from = max (*_last_video_time, piece->content->position());
761 LastVideoMap::const_iterator last = _last_video.find (wp);
// 3D: fill frame-by-frame AND eye-by-eye.
762 if (_film->three_d()) {
763 Eyes fill_to_eyes = video.eyes;
764 if (fill_to_eyes == EYES_BOTH) {
765 fill_to_eyes = EYES_LEFT;
767 if (fill_to == piece->content->end()) {
768 /* Don't fill after the end of the content */
769 fill_to_eyes = EYES_LEFT;
771 DCPTime j = fill_from;
772 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
773 if (eyes == EYES_BOTH) {
776 while (j < fill_to || eyes != fill_to_eyes) {
// Prefer repeating the piece's last frame; fall back to black.
777 if (last != _last_video.end()) {
778 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
779 copy->set_eyes (eyes);
780 emit_video (copy, j);
782 emit_video (black_player_video_frame(eyes), j);
// Time only advances after the right eye has been emitted.
784 if (eyes == EYES_RIGHT) {
785 j += one_video_frame();
787 eyes = increment_eyes (eyes);
// 2D: fill frame-by-frame with the last frame or black.
790 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
791 if (last != _last_video.end()) {
792 emit_video (last->second, j);
794 emit_video (black_player_video_frame(EYES_BOTH), j);
// Remember this frame (per piece) so it can be repeated into future gaps.
800 _last_video[wp].reset (
803 piece->content->video->crop (),
804 piece->content->video->fade (video.frame),
805 piece->content->video->scale().size (
806 piece->content->video, _video_container_size, _film->frame_size ()
808 _video_container_size,
811 piece->content->video->colour_conversion(),
// Emit the frame, repeated as required by the frame-rate change, but never
// beyond the end of the content.
818 for (int i = 0; i < frc.repeat; ++i) {
819 if (t < piece->content->end()) {
820 emit_video (_last_video[wp], t);
822 t += one_video_frame ();
// Handle audio arriving from a decoder: compute its DCP time, trim anything
// outside the content's period, apply gain / remap / audio processing and
// push the result into the merger, recording the stream's push-end time.
827 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
829 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
831 shared_ptr<Piece> piece = wp.lock ();
836 shared_ptr<AudioContent> content = piece->content->audio;
837 DCPOMATIC_ASSERT (content);
839 /* Compute time in the DCP */
840 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
841 /* And the end of this block in the DCP */
842 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
844 /* Remove anything that comes before the start or after the end of the content */
845 if (time < piece->content->position()) {
846 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
848 /* This audio is entirely discarded */
851 content_audio.audio = cut.first;
853 } else if (time > piece->content->end()) {
856 } else if (end > piece->content->end()) {
// Block straddles the end of the content: keep only the frames inside.
857 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
858 if (remaining_frames == 0) {
861 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
862 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
863 content_audio.audio = cut;
866 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
// Apply the content's gain setting, if any.
870 if (content->gain() != 0) {
871 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
872 gain->apply_gain (content->gain ());
873 content_audio.audio = gain;
// Remap the stream's channels into the film's channel layout.
878 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
882 if (_audio_processor) {
883 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
888 _audio_merger.push (content_audio.audio, time);
889 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
890 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
// Handle the start of a bitmap subtitle from a text decoder: apply the
// content's offsets and scale to the subtitle rectangle, scale the image to
// the video container, and register the subtitle as active from its DCP time.
894 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
896 shared_ptr<Piece> piece = wp.lock ();
897 shared_ptr<const TextContent> text = wc.lock ();
898 if (!piece || !text) {
902 /* Apply content's subtitle offsets */
903 subtitle.sub.rectangle.x += text->x_offset ();
904 subtitle.sub.rectangle.y += text->y_offset ();
906 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
907 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
908 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
910 /* Apply content's subtitle scale */
911 subtitle.sub.rectangle.width *= text->x_scale ();
912 subtitle.sub.rectangle.height *= text->y_scale ();
915 shared_ptr<Image> image = subtitle.sub.image;
916 /* We will scale the subtitle up to fit _video_container_size */
917 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
918 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
919 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
921 _active_texts[text->type()].add_from (wc, ps, from);
// Handle the start of a string (plain-text) subtitle: apply the content's
// position offsets, scale and aspect adjustments to each SubtitleString,
// then register the subtitle as active from its DCP time.
925 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
927 shared_ptr<Piece> piece = wp.lock ();
928 shared_ptr<const TextContent> text = wc.lock ();
929 if (!piece || !text) {
934 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
// Ignore subtitles that start after the content's period.
936 if (from > piece->content->end()) {
940 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
941 s.set_h_position (s.h_position() + text->x_offset ());
942 s.set_v_position (s.v_position() + text->y_offset ());
943 float const xs = text->x_scale();
944 float const ys = text->y_scale();
945 float size = s.size();
947 /* Adjust size to express the common part of the scaling;
948 e.g. if xs = ys = 0.5 we scale size by 2.
950 if (xs > 1e-5 && ys > 1e-5) {
951 size *= 1 / min (1 / xs, 1 / ys);
955 /* Then express aspect ratio changes */
956 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
957 s.set_aspect_adjust (xs / ys);
// dcp::Time here uses a 1000-per-second editable rate.
960 s.set_in (dcp::Time(from.seconds(), 1000));
961 ps.string.push_back (StringText (s, text->outline_width()));
962 ps.add_fonts (text->fonts ());
965 _active_texts[text->type()].add_from (wc, ps, from);
// Handle the end of a subtitle: close the active text at the corresponding
// DCP time and, if the subtitle is not being burnt in, emit it via the Text
// signal for separate handling.
969 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
971 shared_ptr<const TextContent> text = wc.lock ();
// Nothing to stop if we never saw this subtitle start.
976 if (!_active_texts[text->type()].have(wc)) {
980 shared_ptr<Piece> piece = wp.lock ();
985 DCPTime const dcp_to = content_time_to_dcp (piece, to);
987 if (dcp_to > piece->content->end()) {
991 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
// Burnt-in subtitles are composited onto video instead of being emitted.
993 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
994 if (text->use() && !always && !text->burn()) {
995 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
// Seek the player to @param time.  @param accurate true to seek precisely to
// the requested time; on an accurate seek the last-emitted positions are
// pinned to `time', otherwise they are cleared.  Flushes all pending state
// (shuffler, audio processor, merger, active texts) and seeks each piece.
1000 Player::seek (DCPTime time, bool accurate)
1002 boost::mutex::scoped_lock lm (_mutex);
1005 /* We can't seek in this state */
1010 _shuffler->clear ();
1015 if (_audio_processor) {
1016 _audio_processor->flush ();
1019 _audio_merger.clear ();
1020 for (int i = 0; i < TEXT_COUNT; ++i) {
1021 _active_texts[i].clear ();
1024 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1025 if (time < i->content->position()) {
1026 /* Before; seek to the start of the content */
1027 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1029 } else if (i->content->position() <= time && time < i->content->end()) {
1030 /* During; seek to position */
1031 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1034 /* After; this piece is done */
// On an accurate seek the next emissions must start exactly at `time';
// otherwise we don't know where emission will resume.
1040 _last_video_time = time;
1041 _last_video_eyes = EYES_LEFT;
1042 _last_audio_time = time;
1044 _last_video_time = optional<DCPTime>();
1045 _last_video_eyes = optional<Eyes>();
1046 _last_audio_time = optional<DCPTime>();
1049 _black.set_position (time);
1050 _silent.set_position (time);
// Forget remembered frames so gaps after the seek are filled with black.
1052 _last_video.clear ();
// Queue a video frame for emission.  Frames are delayed through _delay so
// that subtitles covering them can arrive first; actual output happens in
// do_emit_video().  Also advances the last-video time/eyes bookkeeping.
1056 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1058 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1059 player before the video that requires them.
1061 _delay.push_back (make_pair (pv, time));
// Time advances once both eyes (or a single 2D frame) have been queued.
1063 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1064 _last_video_time = time + one_video_frame();
1066 _last_video_eyes = increment_eyes (pv->eyes());
// Keep up to two frames queued before emitting the oldest.
1068 if (_delay.size() < 3) {
1072 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1074 do_emit_video (to_do.first, to_do.second);
// Actually emit a video frame: expire active texts that ended before `time',
// burn any open subtitles for this frame into the PlayerVideo, then emit.
1078 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1080 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1081 for (int i = 0; i < TEXT_COUNT; ++i) {
1082 _active_texts[i].clear_before (time);
1086 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1088 pv->set_text (subtitles.get ());
// Emit a block of audio at `time'.  Audio must be emitted contiguously; the
// assert enforces that this block follows straight on from the previous one.
1095 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1097 /* Log if the assert below is about to fail */
1098 if (_last_audio_time && time != *_last_audio_time) {
1099 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1102 /* This audio must follow on from the previous */
1103 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1105 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
// Fill @param period with silence, emitted in blocks of at most half a second.
1109 Player::fill_audio (DCPTimePeriod period)
1111 if (period.from == period.to) {
1115 DCPOMATIC_ASSERT (period.from < period.to);
1117 DCPTime t = period.from;
1118 while (t < period.to) {
1119 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1120 Frame const samples = block.frames_round(_film->audio_frame_rate());
1122 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1123 silence->make_silent ();
1124 emit_audio (silence, t);
// The duration of one video frame at the film's video frame rate.
1131 Player::one_video_frame () const
1133 return DCPTime::from_frames (1, _film->video_frame_rate ());
// Discard the part of @param audio (which starts at @param time) that falls
// before @param discard_to.  Returns the remaining buffer and its new start
// time; returns a null buffer if everything is discarded.
1136 pair<shared_ptr<AudioBuffers>, DCPTime>
1137 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1139 DCPTime const discard_time = discard_to - time;
1140 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1141 Frame remaining_frames = audio->frames() - discard_frames;
1142 if (remaining_frames <= 0) {
1143 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1145 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1146 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1147 return make_pair(cut, time + discard_time);
// Set the resolution reduction used when decoding DCP content (none to decode
// at full resolution).  Emits PENDING, then CANCELLED on a no-op change or
// DONE after rebuilding pieces.
1151 Player::set_dcp_decode_reduction (optional<int> reduction)
1153 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1156 boost::mutex::scoped_lock lm (_mutex);
1158 if (reduction == _dcp_decode_reduction) {
1160 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1164 _dcp_decode_reduction = reduction;
// Rebuild so DCP decoders pick up the new forced reduction.
1165 setup_pieces_unlocked ();
1168 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
// Public overload: convert a ContentTime within @param content to a DCP time,
// or nothing if the content is not (currently) in our piece list.
1172 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1174 boost::mutex::scoped_lock lm (_mutex);
1176 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1177 if (i->content == content) {
1178 return content_time_to_dcp (i, t);
1182 /* We couldn't find this content; perhaps things are being changed over */
1183 return optional<DCPTime>();