Add Piece::seek().
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "player.h"
24 #include "film.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
29 #include "job.h"
30 #include "image.h"
31 #include "raw_image_proxy.h"
32 #include "ratio.h"
33 #include "log.h"
34 #include "render_text.h"
35 #include "config.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
40 #include "playlist.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
43 #include "decoder.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
53 #include "shuffler.h"
54 #include "timer.h"
55 #include <dcp/reel.h>
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66
67 using std::copy;
68 using std::cout;
69 using std::dynamic_pointer_cast;
70 using std::list;
71 using std::make_pair;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Identifiers for the properties that Player's Change signal can report.
   Presumably numbered from 700 to keep them distinct from other property
   identifier ranges used elsewhere in the code — confirm against the
   content/film property numbering.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
/** Construct a Player which plays @a film using the film's own playlist
 *  (_playlist is left unset; see setup_pieces_unlocked()).
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
106
107
/** Construct a Player which plays @a playlist_ rather than @a film's own
 *  playlist; @a film is still used for global parameters (frame rate,
 *  audio rate, container size and so on).
 *  @param film Film supplying playback parameters.
 *  @param playlist_ Playlist to play instead of the film's.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
117
118
/** Shared second-stage construction: connect to film/playlist signals,
 *  build the initial pieces and seek accurately to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Synthesise a DONE change so that we pick up the film's audio processor now */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Accurate seek to time zero so that playback starts from a known state */
	seek (DCPTime (), true);
}
135
136
Player::~Player ()
{
	/* _shuffler is a raw owning pointer (re-created in setup_pieces_unlocked())
	   so it must be deleted by hand here.
	*/
	delete _shuffler;
}
141
142
143 void
144 Player::setup_pieces ()
145 {
146         boost::mutex::scoped_lock lm (_mutex);
147         setup_pieces_unlocked ();
148 }
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video) && content->video->use();
155 }
156
157
158 bool
159 have_audio (shared_ptr<const Content> content)
160 {
161         return static_cast<bool>(content->audio);
162 }
163
164
/** Rebuild _pieces from the playlist: make (or re-use) a decoder for each
 *  piece of content, wire the decoders' emission signals to our handlers,
 *  and reset the black/silence fillers and emission clocks.
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* If an old piece already had a decoder for this content, pass it to
		   the factory; presumably so that decoder state can be re-used rather
		   than rebuilt — confirm in decoder_factory().
		*/
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			auto decoder = j->decoder_for(i);
			if (decoder) {
				old_decoder = decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			/* NB: this inner `i' shadows the content `i' above */
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Connect every text decoder's start/stop signals to our handlers */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto period = DCPTimePeriod((*i)->position(), (*i)->end(_film));
			auto j = i;
			++j;
			for (; j != _pieces.end(); ++j) {
				if ((*j)->use_video()) {
					/* NOTE(review): each matching j overwrites ignore_video, so only
					   the last later piece with in-use video decides the ignored
					   period (an earlier overlap can be reset to none) — confirm
					   this is intended.
					*/
					(*i)->ignore_video = DCPTimePeriod((*j)->position(), (*j)->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Trackers for the gaps where we must emit black video / silent audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset the emission clocks: nothing has been emitted yet */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
282
283
284 optional<DCPTime>
285 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
286 {
287         boost::mutex::scoped_lock lm (_mutex);
288
289         for (auto i: _pieces) {
290                 auto dcp = i->content_time_to_dcp(content, t);
291                 if (dcp) {
292                         return *dcp;
293                 }
294         }
295
296         /* We couldn't find this content; perhaps things are being changed over */
297         return {};
298 }
299
300
/** Handler for ContentChange from the playlist.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param property Identifier of the property which changed.
 *  @param frequent true if this change is likely to happen often.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			/* Crop changed: refresh the metadata of video frames already
			   queued in _delay so they reflect the new crop.
			*/
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Proxy the change on to our own listeners (e.g. the butler) */
	Change (type, property, frequent);
}
330
331
/** Set the size of the container that output video will be sized into,
 *  rebuilding the cached black frame to match.  Emits PENDING then either
 *  CANCELLED (if the size is unchanged) or DONE.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* Unlock before signalling so that handlers can call back into us */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
354
355
356 void
357 Player::playlist_change (ChangeType type)
358 {
359         if (type == ChangeType::DONE) {
360                 setup_pieces ();
361         }
362         Change (type, PlayerProperty::PLAYLIST, false);
363 }
364
365
366 void
367 Player::film_change (ChangeType type, Film::Property p)
368 {
369         /* Here we should notice Film properties that affect our output, and
370            alert listeners that our output now would be different to how it was
371            last time we were run.
372         */
373
374         if (p == Film::Property::CONTAINER) {
375                 Change (type, PlayerProperty::FILM_CONTAINER, false);
376         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
377                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
378                    so we need new pieces here.
379                 */
380                 if (type == ChangeType::DONE) {
381                         setup_pieces ();
382                 }
383                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
384         } else if (p == Film::Property::AUDIO_PROCESSOR) {
385                 if (type == ChangeType::DONE && _film->audio_processor ()) {
386                         boost::mutex::scoped_lock lm (_mutex);
387                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
388                 }
389         } else if (p == Film::Property::AUDIO_CHANNELS) {
390                 if (type == ChangeType::DONE) {
391                         boost::mutex::scoped_lock lm (_mutex);
392                         _audio_merger.clear ();
393                 }
394         }
395 }
396
397
/** @return a PlayerVideo wrapping our cached black image, sized to the
 *  current container, for the given @a eyes.  The remaining constructor
 *  arguments are positional defaults (no crop, full-range, no source
 *  content/frame) — see the PlayerVideo constructor for their meanings.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
416
417
418 vector<FontData>
419 Player::get_subtitle_fonts ()
420 {
421         boost::mutex::scoped_lock lm (_mutex);
422
423         vector<FontData> fonts;
424         for (auto i: _pieces) {
425                 /* XXX: things may go wrong if there are duplicate font IDs
426                    with different font files.
427                 */
428                 auto f = i->decoder->fonts ();
429                 copy (f.begin(), f.end(), back_inserter(fonts));
430         }
431
432         return fonts;
433 }
434
435
436 /** Set this player never to produce any video data */
437 void
438 Player::set_ignore_video ()
439 {
440         boost::mutex::scoped_lock lm (_mutex);
441         _ignore_video = true;
442         setup_pieces_unlocked ();
443 }
444
445
446 void
447 Player::set_ignore_audio ()
448 {
449         boost::mutex::scoped_lock lm (_mutex);
450         _ignore_audio = true;
451         setup_pieces_unlocked ();
452 }
453
454
455 void
456 Player::set_ignore_text ()
457 {
458         boost::mutex::scoped_lock lm (_mutex);
459         _ignore_text = true;
460         setup_pieces_unlocked ();
461 }
462
463
464 /** Set the player to always burn open texts into the image regardless of the content settings */
465 void
466 Player::set_always_burn_open_subtitles ()
467 {
468         boost::mutex::scoped_lock lm (_mutex);
469         _always_burn_open_subtitles = true;
470 }
471
472
473 /** Sets up the player to be faster, possibly at the expense of quality */
474 void
475 Player::set_fast ()
476 {
477         boost::mutex::scoped_lock lm (_mutex);
478         _fast = true;
479         setup_pieces_unlocked ();
480 }
481
482
483 void
484 Player::set_play_referenced ()
485 {
486         boost::mutex::scoped_lock lm (_mutex);
487         _play_referenced = true;
488         setup_pieces_unlocked ();
489 }
490
491
492 static void
493 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
494 {
495         DCPOMATIC_ASSERT (r);
496         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
497         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
498         if (r->actual_duration() > 0) {
499                 a.push_back (
500                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
501                         );
502         }
503 }
504
505
/** @return assets from any DCP content which is marked to be referenced
 *  (rather than re-encoded), each with the DCP-time period it occupies,
 *  with the content's trims applied across reel boundaries.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): if one DCP fails to open we return whatever was
			   collected so far, silently skipping the rest — confirm this
			   best-effort behaviour is intended.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			/* Advance the running offsets to the next reel */
			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
576
577
/** Make the piece (or black/silence filler) which is furthest behind emit
 *  some data, then emit any audio which has become ready as a result.
 *  @return true when there is nothing left to emit (playback is finished),
 *  false otherwise (including while suspended).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = i->decoder_position ();
		if (t > i->end(_film)) {
			/* This piece's decoder has gone past the end of the piece */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->has_text())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing we will emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black / silence fillers take over if they are further behind than the content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->pass();
		if (!_play_referenced && earliest_content->reference_dcp_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = earliest_content->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Nothing more to decode: flush out anything still queued in the
		   shuffler and the video delay line.
		*/
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
732
733
/** @return Open subtitles for the frame at the given time, converted to images
 *  and merged into a single positioned image, or an empty optional if there
 *  are none.  Respects _always_burn_open_subtitles.
 *  @param time DCP time of the frame.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Consider every active open subtitle that should be burnt in during the
	   one-frame period starting at `time'.
	*/
	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* Convert the proportional rectangle position to pixels */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	return merge (captions);
}
779
780
/** Handler for video arriving from a decoder.
 *  Discards video outside the content's period, fills any gap since the
 *  last emitted frame (with a repeat of the previous frame, or black),
 *  then emits this frame (repeated as the frame rate change requires).
 *  @param wp Piece that the video came from.
 *  @param video Video frame and its position within the content.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	if (!piece->use_video()) {
		return;
	}

	/* If the frame rate change requires dropping frames, skip every odd-numbered one */
	auto frc = piece->frame_rate_change();
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = piece->content_video_to_dcp (video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* The last video we emitted for this piece, if any; used in preference to black */
			auto last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill up to the correct eye as well as the correct time */
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				/* Eye to start filling from, carrying on from whatever we emitted last */
				auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Emit alternating-eye frames until both the fill time and required eye are reached */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Left and right eyes share a time; only advance after the right eye */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill with repeats of the last frame for this piece, or black if there was none */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so it can be re-used to fill future gaps */
	_last_video[wp] = piece->player_video (video, _film, _video_container_size);

	/* Emit the frame, repeated as the frame rate change requires, but never past the end of the content */
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
876
877
/** Handler for audio arriving from a decoder.
 *  Trims the block to the content's period, applies gain, remaps channels,
 *  optionally runs the audio processor, and pushes the result to the merger.
 *  @param wp Piece that the audio came from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio Audio data and its frame position within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	/* Frame rate of this stream once resampled for the DCP */
	int const rfr = piece->resampled_audio_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = piece->resampled_audio_to_dcp (content_audio.frame, _film);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->end(_film)) {
		/* Trim the part of the block which overhangs the end of the content */
		Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (piece->audio_gain() != 0) {
		/* Copy the buffers so that we don't modify the decoder's data */
		auto gain = make_shared<AudioBuffers>(content_audio.audio);
		gain->apply_gain (piece->audio_gain());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	piece->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
}
942
943
/** Handler for the start of a bitmap subtitle/caption arriving from a decoder.
 *  Applies the content's offsets and scales to the subtitle's rectangle,
 *  scales the image to fit the container, and records it as an active text.
 *  @param wp Piece that the subtitle came from.
 *  @param wc Content within the piece.
 *  @param wt Text content that the subtitle belongs to.
 *  @param subtitle Bitmap subtitle with its position within the content.
 */
void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentBitmapText subtitle)
{
	auto piece = wp.lock ();
	auto content = wc.lock ();
	auto text = wt.lock ();
	if (!piece || !content || !text) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	PlayerText ps;
	auto image = subtitle.sub.image;

	/* We will scale the subtitle up to fit _video_container_size */
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	if (width == 0 || height == 0) {
		/* Degenerate rectangle; nothing to display */
		return;
	}

	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	auto from = piece->content_time_to_dcp(content, subtitle.from());
	DCPOMATIC_ASSERT (from);

	_active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
}
983
984
/** Handler for the start of a string (plain-text) subtitle/caption arriving from a decoder.
 *  Applies the content's offsets and scales to each string and records the
 *  result as an active text.
 *  @param wp Piece that the subtitle came from.
 *  @param wc Content within the piece.
 *  @param wt Text content that the subtitle belongs to.
 *  @param subtitle String subtitle with its position within the content.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentStringText subtitle)
{
	auto piece = wp.lock ();
	auto content = wc.lock ();
	auto text = wt.lock ();
	if (!piece || !content || !text) {
		return;
	}

	PlayerText ps;
	auto const from = piece->content_time_to_dcp(content, subtitle.from());
	DCPOMATIC_ASSERT (from);

	/* Subtitles starting after the end of the content are dropped entirely */
	if (from > piece->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* In time, expressed with millisecond precision */
		s.set_in (dcp::Time(from->seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
}
1030
1031
1032 void
1033 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentTime to)
1034 {
1035         auto content = wc.lock ();
1036         auto text = wt.lock ();
1037         if (!text) {
1038                 return;
1039         }
1040
1041         if (!_active_texts[static_cast<int>(text->type())].have(wt)) {
1042                 return;
1043         }
1044
1045         shared_ptr<Piece> piece = wp.lock ();
1046         if (!piece) {
1047                 return;
1048         }
1049
1050         auto const dcp_to = piece->content_time_to_dcp(content, to);
1051         DCPOMATIC_ASSERT (dcp_to);
1052
1053         if (*dcp_to > piece->end(_film)) {
1054                 return;
1055         }
1056
1057         auto from = _active_texts[static_cast<int>(text->type())].add_to(wt, *dcp_to);
1058
1059         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1060         if (text->use() && !always && !text->burn()) {
1061                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
1062         }
1063 }
1064
1065
/** Seek the player to a DCP time.
 *  Clears all buffered/merged state, seeks every piece, and resets the
 *  last-emitted-time bookkeeping.
 *  @param time Time to seek to.
 *  @param accurate true to seek to the exact frame (decoders must discard
 *  up to the seek point); false to accept the nearest convenient position.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away anything buffered from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		i->seek (_film, time, accurate);
	}

	if (accurate) {
		/* After an accurate seek, anything before `time' must be discarded on arrival */
		_last_video_time = time;
		_last_video_eyes = Eyes::LEFT;
		_last_audio_time = time;
	} else {
		/* Inaccurate seek: we don't know exactly where we are, so don't constrain emission */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Previous frames are no longer valid for gap-filling */
	_last_video.clear ();
}
1111
1112
/** Queue a video frame for emission, updating the last-video bookkeeping.
 *  Frames pass through a short delay queue so that subtitles which arrive
 *  slightly later than their video can still be attached to it.
 *  @param pv Frame to emit.
 *  @param time Time of the frame within the DCP.
 */
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	if (!_film->three_d()) {
		if (pv->eyes() == Eyes::LEFT) {
			/* Use left-eye images for both eyes... */
			pv->set_eyes (Eyes::BOTH);
		} else if (pv->eyes() == Eyes::RIGHT) {
			/* ...and discard the right */
			return;
		}
	}

	/* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
	   player before the video that requires them.
	*/
	_delay.push_back (make_pair (pv, time));

	/* The clock only advances once we have finished a frame (i.e. after the right eye in 3D) */
	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		_last_video_time = time + one_video_frame();
	}
	_last_video_eyes = increment_eyes (pv->eyes());

	/* Only emit once the delay queue is full enough to give subtitles their wiggle room */
	if (_delay.size() < 3) {
		return;
	}

	auto to_do = _delay.front();
	_delay.pop_front();
	do_emit_video (to_do.first, to_do.second);
}
1144
1145
1146 void
1147 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1148 {
1149         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1150                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1151                         _active_texts[i].clear_before (time);
1152                 }
1153         }
1154
1155         auto subtitles = open_subtitles_for_frame (time);
1156         if (subtitles) {
1157                 pv->set_text (subtitles.get ());
1158         }
1159
1160         Video (pv, time);
1161 }
1162
1163
1164 void
1165 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1166 {
1167         /* Log if the assert below is about to fail */
1168         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1169                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1170         }
1171
1172         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1173         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1174         Audio (data, time, _film->audio_frame_rate());
1175         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1176 }
1177
1178
1179 void
1180 Player::fill_audio (DCPTimePeriod period)
1181 {
1182         if (period.from == period.to) {
1183                 return;
1184         }
1185
1186         DCPOMATIC_ASSERT (period.from < period.to);
1187
1188         DCPTime t = period.from;
1189         while (t < period.to) {
1190                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1191                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1192                 if (samples) {
1193                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1194                         silence->make_silent ();
1195                         emit_audio (silence, t);
1196                 }
1197                 t += block;
1198         }
1199 }
1200
1201
1202 DCPTime
1203 Player::one_video_frame () const
1204 {
1205         return DCPTime::from_frames (1, _film->video_frame_rate ());
1206 }
1207
1208
1209 pair<shared_ptr<AudioBuffers>, DCPTime>
1210 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1211 {
1212         auto const discard_time = discard_to - time;
1213         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1214         auto remaining_frames = audio->frames() - discard_frames;
1215         if (remaining_frames <= 0) {
1216                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1217         }
1218         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1219         return make_pair(cut, time + discard_time);
1220 }
1221
1222
/** Set the decode reduction to use when decoding DCP content, rebuilding
 *  the pieces if it has changed.  Emits PENDING then DONE (or CANCELLED if
 *  the value is unchanged) Change signals for DCP_DECODE_REDUCTION.
 *  @param reduction New reduction, or none for full resolution.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before emitting so handlers can safely call back in */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* Emitted outside the lock, after the scoped_lock above has been released */
	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1243
1244
1245 shared_ptr<const Playlist>
1246 Player::playlist () const
1247 {
1248         return _playlist ? _playlist : _film->playlist();
1249 }
1250
1251
1252 void
1253 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1254 {
1255         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1256 }
1257