wip.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "atmos_decoder.h"
22 #include "player.h"
23 #include "film.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
28 #include "job.h"
29 #include "image.h"
30 #include "raw_image_proxy.h"
31 #include "ratio.h"
32 #include "log.h"
33 #include "render_text.h"
34 #include "config.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
39 #include "playlist.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
42 #include "decoder.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
52 #include "shuffler.h"
53 #include "timer.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66 using std::list;
67 using std::cout;
68 using std::min;
69 using std::max;
70 using std::min;
71 using std::vector;
72 using std::pair;
73 using std::map;
74 using std::make_pair;
75 using std::copy;
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 using namespace dcpomatic;
82
/* Property codes passed to the Change signal so that observers can tell
   which aspect of the Player's output has (or is about to be) altered.
   Values are arbitrary but must be distinct from other property codes
   used on the same signal. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
89
/** Construct a Player with no explicit playlist; playlist() presumably
 *  falls back to the film's own content in this case — see
 *  setup_pieces_unlocked(), which uses _film->length() when _playlist
 *  is not set.
 *  @param film Film whose content will be played.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
105
/** Construct a Player which plays a specific playlist rather than the
 *  film's own content.
 *  @param film Film supplying global parameters (frame rate, size, etc.).
 *  @param playlist_ Playlist whose content should be played.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
122
/** Shared body of both constructors: wire up change signals, size the
 *  output, build the pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Force creation of the audio processor, if the film has one */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Accurate seek to time zero so playback starts from a clean state */
	seek (DCPTime (), true);
}
139
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by this Player; it is created in
	   setup_pieces_unlocked() and must be freed here. */
	delete _shuffler;
}
144
/** Thread-safe wrapper: take _mutex and rebuild the pieces. */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
151
152
153 bool
154 have_video (shared_ptr<const Content> content)
155 {
156         return static_cast<bool>(content->video) && content->video->use();
157 }
158
159 bool
160 have_audio (shared_ptr<const Content> content)
161 {
162         return static_cast<bool>(content->audio);
163 }
164
/** Rebuild the list of Pieces (content + decoder pairs) from the current
 *  playlist, wiring each decoder's output signals through to this Player.
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	/* Playback length comes from the explicit playlist if there is one,
	   otherwise from the film */
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces alive until we have finished rebuilding, so that
	   old decoders could in principle be re-used (see the XXX below) */
	list<shared_ptr<Piece> > old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

		if (!i->paths_valid ()) {
			/* Content files are missing; skip this content */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> old_decoder;
		/* XXX
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}
		*/

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			/* NOTE: this inner `i' shadows the content `i' above */
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Track, per audio stream, the piece it belongs to and how far it has
	   been pushed (initially the content's start position) */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		BOOST_FOREACH (AudioStreamPtr j, i->audio_streams()) {
			_stream_states[j] = StreamState (i, i->content->position ());
		}
	}

	/* Periods with no video get black; periods with no audio get silence */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission bookkeeping back to the start */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
}
276
277 void
278 Player::playlist_content_change (ChangeType type, int property, bool frequent)
279 {
280         if (type == CHANGE_TYPE_PENDING) {
281                 /* The player content is probably about to change, so we can't carry on
282                    until that has happened and we've rebuilt our pieces.  Stop pass()
283                    and seek() from working until then.
284                 */
285                 ++_suspended;
286         } else if (type == CHANGE_TYPE_DONE) {
287                 /* A change in our content has gone through.  Re-build our pieces. */
288                 setup_pieces ();
289                 --_suspended;
290         } else if (type == CHANGE_TYPE_CANCELLED) {
291                 --_suspended;
292         }
293
294         Change (type, property, frequent);
295 }
296
/** Set the size of the video container (the "screen" the image is placed
 *  in), rebuilding the cached black frame to match.  Emits the usual
 *  PENDING / DONE (or CANCELLED, if the size is unchanged) sequence on
 *  the Change signal.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change: unlock before emitting so handlers can call back
			   into us without deadlocking */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* Emitted outside the lock, as above */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
319
320 void
321 Player::playlist_change (ChangeType type)
322 {
323         if (type == CHANGE_TYPE_DONE) {
324                 setup_pieces ();
325         }
326         Change (type, PlayerProperty::PLAYLIST, false);
327 }
328
329 void
330 Player::film_change (ChangeType type, Film::Property p)
331 {
332         /* Here we should notice Film properties that affect our output, and
333            alert listeners that our output now would be different to how it was
334            last time we were run.
335         */
336
337         if (p == Film::CONTAINER) {
338                 Change (type, PlayerProperty::FILM_CONTAINER, false);
339         } else if (p == Film::VIDEO_FRAME_RATE) {
340                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
341                    so we need new pieces here.
342                 */
343                 if (type == CHANGE_TYPE_DONE) {
344                         setup_pieces ();
345                 }
346                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
347         } else if (p == Film::AUDIO_PROCESSOR) {
348                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
349                         boost::mutex::scoped_lock lm (_mutex);
350                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
351                 }
352         } else if (p == Film::AUDIO_CHANNELS) {
353                 if (type == CHANGE_TYPE_DONE) {
354                         boost::mutex::scoped_lock lm (_mutex);
355                         _audio_merger.clear ();
356                 }
357         }
358 }
359
/** @return A black frame, at the container size, for the given eyes. */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),      /* no fade */
			_video_container_size,    /* inter size */
			_video_container_size,    /* out size */
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			VIDEO_RANGE_FULL,
			boost::weak_ptr<Piece>(), /* no originating content */
			boost::optional<Frame>(), /* no content frame */
			false
		)
	);
}
380
381
382 list<shared_ptr<Font> >
383 Player::get_subtitle_fonts ()
384 {
385         boost::mutex::scoped_lock lm (_mutex);
386
387         list<shared_ptr<Font> > fonts;
388         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
389                 /* XXX: things may go wrong if there are duplicate font IDs
390                    with different font files.
391                    */
392                 list<shared_ptr<Font> > f = i->fonts ();
393                 copy (f.begin(), f.end(), back_inserter(fonts));
394         }
395
396         return fonts;
397 }
398
399 /** Set this player never to produce any video data */
400 void
401 Player::set_ignore_video ()
402 {
403         boost::mutex::scoped_lock lm (_mutex);
404         _ignore_video = true;
405         setup_pieces_unlocked ();
406 }
407
408 void
409 Player::set_ignore_audio ()
410 {
411         boost::mutex::scoped_lock lm (_mutex);
412         _ignore_audio = true;
413         setup_pieces_unlocked ();
414 }
415
416 void
417 Player::set_ignore_text ()
418 {
419         boost::mutex::scoped_lock lm (_mutex);
420         _ignore_text = true;
421         setup_pieces_unlocked ();
422 }
423
424 /** Set the player to always burn open texts into the image regardless of the content settings */
425 void
426 Player::set_always_burn_open_subtitles ()
427 {
428         boost::mutex::scoped_lock lm (_mutex);
429         _always_burn_open_subtitles = true;
430 }
431
432 /** Sets up the player to be faster, possibly at the expense of quality */
433 void
434 Player::set_fast ()
435 {
436         boost::mutex::scoped_lock lm (_mutex);
437         _fast = true;
438         setup_pieces_unlocked ();
439 }
440
441 void
442 Player::set_play_referenced ()
443 {
444         boost::mutex::scoped_lock lm (_mutex);
445         _play_referenced = true;
446         setup_pieces_unlocked ();
447 }
448
/** Trim a reel asset and, if anything remains after trimming, add it to
 *  a list of referenced assets.
 *
 *  NOTE: this mutates @p r (entry point and duration) even when the
 *  asset ends up fully trimmed away and is not added to the list.
 *
 *  @param a List to add to.
 *  @param r Asset to consider; must not be null.
 *  @param reel_trim_start Frames to trim from the start of this reel.
 *  @param reel_trim_end Frames to trim from the end of this reel.
 *  @param from DCP time at which the (trimmed) asset starts.
 *  @param ffr Film frame rate, used to convert the duration to DCPTime.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
461
/** @return Reel assets from any DCP content in the playlist which is
 *  marked as referenced (i.e. to be used directly rather than re-encoded),
 *  each trimmed according to the content's trim settings.
 *
 *  NOTE: if constructing a DCPDecoder for some content throws, the list
 *  built so far is returned as-is (remaining content is not examined).
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims expressed in content frames */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
532
/** Run one 'pass' of the player: find the piece (or the black/silence
 *  filler) which is farthest behind, make it emit some data, and then
 *  push out any audio which is known to be complete.
 *  @return true if playback has finished (or a single black frame was
 *  emitted for a zero-length playlist); false otherwise, including when
 *  the player is suspended by a pending content change.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder is, in DCP time, clamped to the
		   start of its trimmed period */
		DCPTime const t = i->content_time_to_dcp (max(i->decoder->position(), i->trim_start()));
		if (t > i->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing should emit data on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black or silence filler takes over if it is behind the earliest content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		if (!_play_referenced && earliest_content->referenced_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = earliest_content->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _playback_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush out anything still buffered in the 3D shuffler and the
		   video delay queue */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
686
/** @return Open subtitles for the frame at the given time, converted to images,
 *  merged into a single PositionImage; or no value if there is nothing to show.
 *  @param time Time of the video frame in question.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Collect texts which should be burnt in during this one-frame period */
	BOOST_FOREACH (
		PlayerText j,
		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		BOOST_FOREACH (BitmapText i, j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* Rectangle coordinates are proportions of the container size */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint (_video_container_size.width * i.rectangle.x),
						lrint (_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty ()) {
			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty ()) {
		return optional<PositionImage> ();
	}

	return merge (captions);
}
732
733 void
734 Player::video (weak_ptr<Piece> wp, ContentVideo video)
735 {
736         shared_ptr<Piece> piece = wp.lock ();
737         if (!piece) {
738                 return;
739         }
740
741         if (!piece->video_use()) {
742                 return;
743         }
744
745         if (piece->frc.skip && (video.frame % 2) == 1) {
746                 return;
747         }
748
749         /* Time of the first frame we will emit */
750         DCPTime const time = piece->content_video_to_dcp (video.frame);
751
752         /* Discard if it's before the content's period or the last accurate seek.  We can't discard
753            if it's after the content's period here as in that case we still need to fill any gap between
754            `now' and the end of the content's period.
755         */
756         if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
757                 return;
758         }
759
760         /* Fill gaps that we discover now that we have some video which needs to be emitted.
761            This is where we need to fill to.
762         */
763         DCPTime fill_to = min (time, piece->end(_film));
764
765         if (_last_video_time) {
766                 DCPTime fill_from = max (*_last_video_time, piece->position());
767
768                 /* Fill if we have more than half a frame to do */
769                 if ((fill_to - fill_from) > one_video_frame() / 2) {
770                         LastVideoMap::const_iterator last = _last_video.find (wp);
771                         if (_film->three_d()) {
772                                 Eyes fill_to_eyes = video.eyes;
773                                 if (fill_to_eyes == EYES_BOTH) {
774                                         fill_to_eyes = EYES_LEFT;
775                                 }
776                                 if (fill_to == piece->end(_film)) {
777                                         /* Don't fill after the end of the content */
778                                         fill_to_eyes = EYES_LEFT;
779                                 }
780                                 DCPTime j = fill_from;
781                                 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
782                                 if (eyes == EYES_BOTH) {
783                                         eyes = EYES_LEFT;
784                                 }
785                                 while (j < fill_to || eyes != fill_to_eyes) {
786                                         if (last != _last_video.end()) {
787                                                 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
788                                                 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
789                                                 copy->set_eyes (eyes);
790                                                 emit_video (copy, j);
791                                         } else {
792                                                 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
793                                                 emit_video (black_player_video_frame(eyes), j);
794                                         }
795                                         if (eyes == EYES_RIGHT) {
796                                                 j += one_video_frame();
797                                         }
798                                         eyes = increment_eyes (eyes);
799                                 }
800                         } else {
801                                 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
802                                         if (last != _last_video.end()) {
803                                                 emit_video (last->second, j);
804                                         } else {
805                                                 emit_video (black_player_video_frame(EYES_BOTH), j);
806                                         }
807                                 }
808                         }
809                 }
810         }
811
812         _last_video[wp].reset (
813                 new PlayerVideo (
814                         video.image,
815                         piece->video_crop(),
816                         piece->video_fade(_film, video.frame),
817                         scale_for_display(piece->video_scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
818                         _video_container_size,
819                         video.eyes,
820                         video.part,
821                         piece->video_colour_conversion(),
822                         piece->video_range(),
823                         piece,
824                         video.frame,
825                         false
826                         )
827                 );
828
829         DCPTime t = time;
830         for (int i = 0; i < piece->frc.repeat; ++i) {
831                 if (t < piece->end(_film)) {
832                         emit_video (_last_video[wp], t);
833                 }
834                 t += one_video_frame ();
835         }
836 }
837
838 void
839 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
840 {
841         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
842
843         shared_ptr<Piece> piece = wp.lock ();
844         if (!piece) {
845                 return;
846         }
847
848         int const rfr = piece->audio_resampled_frame_rate (_film);
849
850         /* Compute time in the DCP */
851         DCPTime time = piece->resampled_audio_to_dcp (_film, content_audio.frame);
852         /* And the end of this block in the DCP */
853         DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
854
855         /* Remove anything that comes before the start or after the end of the content */
856         if (time < piece->position()) {
857                 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->position());
858                 if (!cut.first) {
859                         /* This audio is entirely discarded */
860                         return;
861                 }
862                 content_audio.audio = cut.first;
863                 time = cut.second;
864         } else if (time > piece->end(_film)) {
865                 /* Discard it all */
866                 return;
867         } else if (end > piece->end(_film)) {
868                 Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
869                 if (remaining_frames == 0) {
870                         return;
871                 }
872                 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
873         }
874
875         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
876
877         /* Gain */
878
879         if (piece->audio_gain() != 0) {
880                 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
881                 gain->apply_gain (piece->audio_gain());
882                 content_audio.audio = gain;
883         }
884
885         /* Remap */
886
887         content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
888
889         /* Process */
890
891         if (_audio_processor) {
892                 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
893         }
894
895         /* Push */
896
897         _audio_merger.push (content_audio.audio, time);
898         DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
899         _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
900 }
901
902 void
903 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
904 {
905         shared_ptr<Piece> piece = wp.lock ();
906         shared_ptr<const TextContent> text = wc.lock ();
907         if (!piece || !text) {
908                 return;
909         }
910
911         /* Apply content's subtitle offsets */
912         subtitle.sub.rectangle.x += text->x_offset ();
913         subtitle.sub.rectangle.y += text->y_offset ();
914
915         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
916         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
917         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
918
919         /* Apply content's subtitle scale */
920         subtitle.sub.rectangle.width *= text->x_scale ();
921         subtitle.sub.rectangle.height *= text->y_scale ();
922
923         PlayerText ps;
924         shared_ptr<Image> image = subtitle.sub.image;
925
926         /* We will scale the subtitle up to fit _video_container_size */
927         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
928         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
929         if (width == 0 || height == 0) {
930                 return;
931         }
932
933         dcp::Size scaled_size (width, height);
934         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
935         DCPTime from (piece->content_time_to_dcp(subtitle.from()));
936
937         _active_texts[text->type()].add_from (wc, ps, from);
938 }
939
940 void
941 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
942 {
943         shared_ptr<Piece> piece = wp.lock ();
944         shared_ptr<const TextContent> text = wc.lock ();
945         if (!piece || !text) {
946                 return;
947         }
948
949         PlayerText ps;
950         DCPTime const from (piece->content_time_to_dcp( subtitle.from()));
951
952         if (from > piece->end(_film)) {
953                 return;
954         }
955
956         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
957                 s.set_h_position (s.h_position() + text->x_offset ());
958                 s.set_v_position (s.v_position() + text->y_offset ());
959                 float const xs = text->x_scale();
960                 float const ys = text->y_scale();
961                 float size = s.size();
962
963                 /* Adjust size to express the common part of the scaling;
964                    e.g. if xs = ys = 0.5 we scale size by 2.
965                 */
966                 if (xs > 1e-5 && ys > 1e-5) {
967                         size *= 1 / min (1 / xs, 1 / ys);
968                 }
969                 s.set_size (size);
970
971                 /* Then express aspect ratio changes */
972                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
973                         s.set_aspect_adjust (xs / ys);
974                 }
975
976                 s.set_in (dcp::Time(from.seconds(), 1000));
977                 ps.string.push_back (StringText (s, text->outline_width()));
978                 ps.add_fonts (text->fonts ());
979         }
980
981         _active_texts[text->type()].add_from (wc, ps, from);
982 }
983
984 void
985 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
986 {
987         shared_ptr<const TextContent> text = wc.lock ();
988         if (!text) {
989                 return;
990         }
991
992         if (!_active_texts[text->type()].have(wc)) {
993                 return;
994         }
995
996         shared_ptr<Piece> piece = wp.lock ();
997         if (!piece) {
998                 return;
999         }
1000
1001         DCPTime const dcp_to = piece->content_time_to_dcp(to);
1002
1003         if (dcp_to > piece->end(_film)) {
1004                 return;
1005         }
1006
1007         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1008
1009         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1010         if (text->use() && !always && !text->burn()) {
1011                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1012         }
1013 }
1014
/** Seek so that the next emissions will come from `time'.
 *  @param time Seek target.
 *  @param accurate true to set things up so that the first thing emitted is exactly at `time';
 *  false to allow a faster, rougher seek (e.g. to a nearby keyframe).
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away all buffered / in-flight state from before the seek */

	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder as appropriate for where `time' falls relative to the piece */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (i->dcp_to_content_time(_film, i->position()), true);
			i->done = false;
		} else if (i->position() <= time && time < i->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (i->dcp_to_content_time(_film, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	/* For an accurate seek we know exactly what should come next, so prime the
	   continuity state; for an inaccurate one we don't, so clear it.
	*/
	if (accurate) {
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the last-seen frames used for gap filling */
	_last_video.clear ();
}
1074
/** Queue a video frame for emission, delaying it slightly so that subtitles
 *  which arrive just after it can still be attached before it goes out.
 */
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
	   player before the video that requires them.
	*/
	_delay.push_back (make_pair (pv, time));

	/* Update continuity state: time only advances once the whole frame has been
	   seen (both eyes in 3D, or the single EYES_BOTH frame in 2D).
	*/
	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
		_last_video_time = time + one_video_frame();
	}
	_last_video_eyes = increment_eyes (pv->eyes());

	/* Hold up to 3 frames back before actually emitting the oldest */
	if (_delay.size() < 3) {
		return;
	}

	pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
	_delay.pop_front();
	do_emit_video (to_do.first, to_do.second);
}
1096
1097 void
1098 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1099 {
1100         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1101                 for (int i = 0; i < TEXT_COUNT; ++i) {
1102                         _active_texts[i].clear_before (time);
1103                 }
1104         }
1105
1106         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1107         if (subtitles) {
1108                 pv->set_text (subtitles.get ());
1109         }
1110
1111         Video (pv, time);
1112 }
1113
1114 void
1115 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1116 {
1117         /* Log if the assert below is about to fail */
1118         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1119                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1120         }
1121
1122         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1123         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1124         Audio (data, time, _film->audio_frame_rate());
1125         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1126 }
1127
1128 void
1129 Player::fill_audio (DCPTimePeriod period)
1130 {
1131         if (period.from == period.to) {
1132                 return;
1133         }
1134
1135         DCPOMATIC_ASSERT (period.from < period.to);
1136
1137         DCPTime t = period.from;
1138         while (t < period.to) {
1139                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1140                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1141                 if (samples) {
1142                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1143                         silence->make_silent ();
1144                         emit_audio (silence, t);
1145                 }
1146                 t += block;
1147         }
1148 }
1149
/** @return The duration of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1155
1156 pair<shared_ptr<AudioBuffers>, DCPTime>
1157 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1158 {
1159         DCPTime const discard_time = discard_to - time;
1160         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1161         Frame remaining_frames = audio->frames() - discard_frames;
1162         if (remaining_frames <= 0) {
1163                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1164         }
1165         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1166         return make_pair(cut, time + discard_time);
1167 }
1168
/** Set the decode-resolution reduction to use for DCP content, rebuilding
 *  the pieces if the value actually changes.
 *  @param reduction New reduction, or none for full resolution.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Warn observers that a change may be coming */
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change: unlock before signalling so that handlers can safely
			   call back into this Player.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* Signal completion outside the lock */
	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1189
/** Try to convert a time within some content to the corresponding DCP time.
 *  @return The DCP time, or none if it could not be found (e.g. while the
 *  player's pieces are being rebuilt).
 */
optional<DCPTime>
Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
{
	boost::mutex::scoped_lock lm (_mutex);

	/* NOTE(review): `content' is not consulted here; the first piece which can
	   map `t' wins.  Presumably Piece::content_time_to_dcp only succeeds for
	   times belonging to its own content — confirm, otherwise this could
	   return a time from the wrong piece.
	*/
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		optional<DCPTime> d = i->content_time_to_dcp (t);
		if (d) {
			return d;
		}
	}

	/* We couldn't find this content; perhaps things are being changed over */
	return optional<DCPTime>();
}
1205
1206
1207 shared_ptr<const Playlist>
1208 Player::playlist () const
1209 {
1210         return _playlist ? _playlist : _film->playlist();
1211 }
1212
1213
/** Pass Atmos data from a decoder on via the Atmos signal, converting the
 *  frame number to a DCP time using the film's video frame rate.
 */
void
Player::atmos (weak_ptr<Piece>, ContentAtmos data)
{
	Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
}
1219