Add Piece::resampled_audio_frame_rate().
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "player.h"
24 #include "film.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
29 #include "job.h"
30 #include "image.h"
31 #include "raw_image_proxy.h"
32 #include "ratio.h"
33 #include "log.h"
34 #include "render_text.h"
35 #include "config.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
40 #include "playlist.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
43 #include "decoder.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
53 #include "shuffler.h"
54 #include "timer.h"
55 #include <dcp/reel.h>
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66
67 using std::copy;
68 using std::cout;
69 using std::dynamic_pointer_cast;
70 using std::list;
71 using std::make_pair;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Identifiers for properties of the player which can change; these are passed
   to the Change signal so that observers know what changed.  The values only
   need to be distinct from other property identifiers used with the signal.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
/** Construct a Player which plays the whole content of a film.
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
106
107
/** Construct a Player which plays a particular playlist rather than the
 *  film's own content.
 *  @param film Film to take settings (e.g. audio frame rate) from.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
117
118
/** Shared set-up for both constructors: connect to film and playlist change
 *  signals, build the pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Set up _audio_processor if the film has one configured */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Accurate seek to the start of the timeline */
	seek (DCPTime (), true);
}
135
136
Player::~Player ()
{
	/* _shuffler is a raw owning pointer allocated in setup_pieces_unlocked(),
	   so it must be freed manually here.  NOTE(review): a std::unique_ptr
	   member would remove the need for this delete — confirm against the
	   header before changing.
	*/
	delete _shuffler;
}
141
142
143 void
144 Player::setup_pieces ()
145 {
146         boost::mutex::scoped_lock lm (_mutex);
147         setup_pieces_unlocked ();
148 }
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video) && content->video->use();
155 }
156
157
158 bool
159 have_audio (shared_ptr<const Content> content)
160 {
161         return static_cast<bool>(content->audio);
162 }
163
164
/** Rebuild _pieces (one per usable piece of content, pairing content with a
 *  decoder and a frame rate change), re-connect decoder signals, and reset
 *  playback state.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be re-used for
	   unchanged content (see decoder_factory call below).
	*/
	auto old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			/* Content files are missing; skip this content */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Find any old decoder for this same content so it can be re-used */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Forward start/stop events from every text (subtitle/caption) decoder */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->use_video() && (*i)->content->video->frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->content->video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto period = DCPTimePeriod((*i)->position(), (*i)->end(_film));
			auto j = i;
			++j;
			for (; j != _pieces.end(); ++j) {
				if ((*j)->use_video()) {
					/* NOTE(review): this assignment overwrites any overlap found with an
					   earlier j, so only the overlap with the *last* in-use-video piece
					   is kept — confirm that is intended.
					*/
					(*i)->ignore_video = DCPTimePeriod((*j)->position(), (*j)->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Periods of the timeline with no video / no audio, to be filled with
	   black / silence by pass().
	*/
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission state */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
281
282
/** Handler for a change to some content in the playlist.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param property Identifier of the property which changed.
 *  @param frequent true if this change is likely to happen often.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			/* Crop changes can be applied to video frames which are queued
			   for emission (in _delay) without rebuilding our pieces.
			*/
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Pass the change on to our own observers */
	Change (type, property, frequent);
}
312
313
/** Set the size of the "container" into which video will be scaled, and
 *  rebuild the black filler image to match.  Emits the Change signal with
 *  PENDING then DONE (or CANCELLED if the size is unchanged).
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; unlock before emitting so observers can safely
			   call back into the player.
			*/
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* New black frame of the new size, used to fill video gaps */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
336
337
/** Handler for a change to the playlist itself: rebuild our pieces once the
 *  change is complete, then pass the change on to our own observers.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
346
347
348 void
349 Player::film_change (ChangeType type, Film::Property p)
350 {
351         /* Here we should notice Film properties that affect our output, and
352            alert listeners that our output now would be different to how it was
353            last time we were run.
354         */
355
356         if (p == Film::Property::CONTAINER) {
357                 Change (type, PlayerProperty::FILM_CONTAINER, false);
358         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
359                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
360                    so we need new pieces here.
361                 */
362                 if (type == ChangeType::DONE) {
363                         setup_pieces ();
364                 }
365                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
366         } else if (p == Film::Property::AUDIO_PROCESSOR) {
367                 if (type == ChangeType::DONE && _film->audio_processor ()) {
368                         boost::mutex::scoped_lock lm (_mutex);
369                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
370                 }
371         } else if (p == Film::Property::AUDIO_CHANNELS) {
372                 if (type == ChangeType::DONE) {
373                         boost::mutex::scoped_lock lm (_mutex);
374                         _audio_merger.clear ();
375                 }
376         }
377 }
378
379
/** @param eyes Eye(s) that the frame is for (for 3D).
 *  @return a new PlayerVideo of a black frame which fills the whole video
 *  container (using the pre-built _black_image).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
398
399
400 vector<FontData>
401 Player::get_subtitle_fonts ()
402 {
403         boost::mutex::scoped_lock lm (_mutex);
404
405         vector<FontData> fonts;
406         for (auto i: _pieces) {
407                 /* XXX: things may go wrong if there are duplicate font IDs
408                    with different font files.
409                 */
410                 auto f = i->decoder->fonts ();
411                 copy (f.begin(), f.end(), back_inserter(fonts));
412         }
413
414         return fonts;
415 }
416
417
418 /** Set this player never to produce any video data */
419 void
420 Player::set_ignore_video ()
421 {
422         boost::mutex::scoped_lock lm (_mutex);
423         _ignore_video = true;
424         setup_pieces_unlocked ();
425 }
426
427
428 void
429 Player::set_ignore_audio ()
430 {
431         boost::mutex::scoped_lock lm (_mutex);
432         _ignore_audio = true;
433         setup_pieces_unlocked ();
434 }
435
436
437 void
438 Player::set_ignore_text ()
439 {
440         boost::mutex::scoped_lock lm (_mutex);
441         _ignore_text = true;
442         setup_pieces_unlocked ();
443 }
444
445
446 /** Set the player to always burn open texts into the image regardless of the content settings */
447 void
448 Player::set_always_burn_open_subtitles ()
449 {
450         boost::mutex::scoped_lock lm (_mutex);
451         _always_burn_open_subtitles = true;
452 }
453
454
455 /** Sets up the player to be faster, possibly at the expense of quality */
456 void
457 Player::set_fast ()
458 {
459         boost::mutex::scoped_lock lm (_mutex);
460         _fast = true;
461         setup_pieces_unlocked ();
462 }
463
464
465 void
466 Player::set_play_referenced ()
467 {
468         boost::mutex::scoped_lock lm (_mutex);
469         _play_referenced = true;
470         setup_pieces_unlocked ();
471 }
472
473
474 static void
475 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
476 {
477         DCPOMATIC_ASSERT (r);
478         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
479         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
480         if (r->actual_duration() > 0) {
481                 a.push_back (
482                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
483                         );
484         }
485 }
486
487
/** @return assets from the playlist's referenced DCPs (video/audio/text that
 *  the film re-uses rather than re-encodes), each with the period it occupies
 *  on the DCP timeline.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): failing to examine one DCP abandons the whole scan and
			   returns what we have so far — confirm this is intended rather than
			   a `continue' to the next content.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		/* Frame rate of the referenced DCP's own content */
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		/* Frame rate of the film we are making */
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
558
559
/** Run one step of playback: ask whichever decoder (or black/silence filler)
 *  is farthest behind to emit some data, then flush any audio which is ready.
 *  @return true when playback is complete, false if there is more to do.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = i->content_time_to_dcp (max(i->decoder->position(), i->content->trim_start()));
		if (t > i->end(_film)) {
			/* Past this piece's end; nothing more to decode from it */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black (or silent) gap takes precedence if it starts before the earliest content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback finished: flush out-of-order 3D frames and any delayed video */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
716
717
/** @return Open subtitles for the frame at the given time, converted to images.
 *  Considers every active open subtitle overlapping the one-frame period at
 *  `time' which is marked (or forced via _always_burn_open_subtitles) to be
 *  burnt in; returns nothing if there are none.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			/* NOTE(review): scaled_size appears to be unused below — confirm it can be removed */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	/* Combine all the caption images into one image-with-position */
	return merge (captions);
}
763
764
765 void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Handler for video arriving from a piece's decoder: work out where this frame
	   belongs in DCP time, fill any gap since the last emitted frame (with black, or
	   a repeat of the last frame), then emit it, repeating as the frame rate change
	   requires.
	*/

	auto piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	if (!piece->use_video()) {
		return;
	}

	auto frc = piece->frame_rate_change();
	if (frc.skip && (video.frame % 2) == 1) {
		/* The rate change calls for dropping every other frame; discard odd ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = piece->content_video_to_dcp (video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		/* This piece has a period of video which is to be ignored, and this frame is in it */
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			auto last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill eye-by-eye, up to (but not including) the eye
				   of the frame we are about to emit.
				*/
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Only advance time once both eyes of a frame have been done */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill one whole frame at a time */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	_last_video[wp] = piece->player_video (video, _film, _video_container_size);

	/* Emit the frame, repeated as many times as the frame rate change requires,
	   but never past the end of the content.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
860
861
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handler for audio arriving from a piece's decoder: trim the block to the
	   content's period, apply gain, remap to the film's channels, run any audio
	   processor, and push the result into the merger.
	*/

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this audio once it has been resampled for the DCP */
	int const rfr = piece->resampled_audio_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = piece->resampled_audio_to_dcp (content_audio.frame, _film);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->end(_film)) {
		/* The block straddles the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Take a copy before applying gain so we don't modify the decoder's buffers */
		auto gain = make_shared<AudioBuffers>(content_audio.audio);
		gain->apply_gain (content->gain());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record how far this stream's audio now reaches */
	piece->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
}
929
930
931 void
932 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
933 {
934         auto piece = wp.lock ();
935         auto text = wc.lock ();
936         if (!piece || !text) {
937                 return;
938         }
939
940         /* Apply content's subtitle offsets */
941         subtitle.sub.rectangle.x += text->x_offset ();
942         subtitle.sub.rectangle.y += text->y_offset ();
943
944         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
945         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
946         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
947
948         /* Apply content's subtitle scale */
949         subtitle.sub.rectangle.width *= text->x_scale ();
950         subtitle.sub.rectangle.height *= text->y_scale ();
951
952         PlayerText ps;
953         auto image = subtitle.sub.image;
954
955         /* We will scale the subtitle up to fit _video_container_size */
956         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
957         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
958         if (width == 0 || height == 0) {
959                 return;
960         }
961
962         dcp::Size scaled_size (width, height);
963         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
964         DCPTime from (piece->content_time_to_dcp(subtitle.from()));
965
966         _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
967 }
968
969
970 void
971 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
972 {
973         auto piece = wp.lock ();
974         auto text = wc.lock ();
975         if (!piece || !text) {
976                 return;
977         }
978
979         PlayerText ps;
980         DCPTime const from (piece->content_time_to_dcp(subtitle.from()));
981
982         if (from > piece->end(_film)) {
983                 return;
984         }
985
986         for (auto s: subtitle.subs) {
987                 s.set_h_position (s.h_position() + text->x_offset ());
988                 s.set_v_position (s.v_position() + text->y_offset ());
989                 float const xs = text->x_scale();
990                 float const ys = text->y_scale();
991                 float size = s.size();
992
993                 /* Adjust size to express the common part of the scaling;
994                    e.g. if xs = ys = 0.5 we scale size by 2.
995                 */
996                 if (xs > 1e-5 && ys > 1e-5) {
997                         size *= 1 / min (1 / xs, 1 / ys);
998                 }
999                 s.set_size (size);
1000
1001                 /* Then express aspect ratio changes */
1002                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1003                         s.set_aspect_adjust (xs / ys);
1004                 }
1005
1006                 s.set_in (dcp::Time(from.seconds(), 1000));
1007                 ps.string.push_back (StringText (s, text->outline_width()));
1008                 ps.add_fonts (text->fonts ());
1009         }
1010
1011         _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
1012 }
1013
1014
1015 void
1016 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1017 {
1018         auto text = wc.lock ();
1019         if (!text) {
1020                 return;
1021         }
1022
1023         if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
1024                 return;
1025         }
1026
1027         shared_ptr<Piece> piece = wp.lock ();
1028         if (!piece) {
1029                 return;
1030         }
1031
1032         auto const dcp_to = piece->content_time_to_dcp(to);
1033
1034         if (dcp_to > piece->end(_film)) {
1035                 return;
1036         }
1037
1038         auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
1039
1040         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1041         if (text->use() && !always && !text->burn()) {
1042                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1043         }
1044 }
1045
1046
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the whole player to `time'.  An accurate seek promises that the next
	   emitted video/audio will be exactly at `time'; an inaccurate one may land
	   earlier (e.g. on a keyframe).
	*/

	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Discard all buffered and in-flight state from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder, or mark the piece done if it lies entirely before `time' */
	for (auto i: _pieces) {
		if (time < i->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (i->dcp_to_content_time(i->position(), _film), true);
			i->done = false;
		} else if (i->position() <= time && time < i->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (i->dcp_to_content_time(time, _film), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	/* After an accurate seek we know exactly what should come next; after an
	   inaccurate one we don't, so clear the `last' markers.
	*/
	if (accurate) {
		_last_video_time = time;
		_last_video_eyes = Eyes::LEFT;
		_last_audio_time = time;
	} else {
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	_last_video.clear ();
}
1107
1108
1109 void
1110 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1111 {
1112         if (!_film->three_d()) {
1113                 if (pv->eyes() == Eyes::LEFT) {
1114                         /* Use left-eye images for both eyes... */
1115                         pv->set_eyes (Eyes::BOTH);
1116                 } else if (pv->eyes() == Eyes::RIGHT) {
1117                         /* ...and discard the right */
1118                         return;
1119                 }
1120         }
1121
1122         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1123            player before the video that requires them.
1124         */
1125         _delay.push_back (make_pair (pv, time));
1126
1127         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1128                 _last_video_time = time + one_video_frame();
1129         }
1130         _last_video_eyes = increment_eyes (pv->eyes());
1131
1132         if (_delay.size() < 3) {
1133                 return;
1134         }
1135
1136         auto to_do = _delay.front();
1137         _delay.pop_front();
1138         do_emit_video (to_do.first, to_do.second);
1139 }
1140
1141
1142 void
1143 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1144 {
1145         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1146                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1147                         _active_texts[i].clear_before (time);
1148                 }
1149         }
1150
1151         auto subtitles = open_subtitles_for_frame (time);
1152         if (subtitles) {
1153                 pv->set_text (subtitles.get ());
1154         }
1155
1156         Video (pv, time);
1157 }
1158
1159
1160 void
1161 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1162 {
1163         /* Log if the assert below is about to fail */
1164         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1165                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1166         }
1167
1168         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1169         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1170         Audio (data, time, _film->audio_frame_rate());
1171         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1172 }
1173
1174
1175 void
1176 Player::fill_audio (DCPTimePeriod period)
1177 {
1178         if (period.from == period.to) {
1179                 return;
1180         }
1181
1182         DCPOMATIC_ASSERT (period.from < period.to);
1183
1184         DCPTime t = period.from;
1185         while (t < period.to) {
1186                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1187                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1188                 if (samples) {
1189                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1190                         silence->make_silent ();
1191                         emit_audio (silence, t);
1192                 }
1193                 t += block;
1194         }
1195 }
1196
1197
1198 DCPTime
1199 Player::one_video_frame () const
1200 {
1201         return DCPTime::from_frames (1, _film->video_frame_rate ());
1202 }
1203
1204
1205 pair<shared_ptr<AudioBuffers>, DCPTime>
1206 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1207 {
1208         auto const discard_time = discard_to - time;
1209         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1210         auto remaining_frames = audio->frames() - discard_frames;
1211         if (remaining_frames <= 0) {
1212                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1213         }
1214         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1215         return make_pair(cut, time + discard_time);
1216 }
1217
1218
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Change the decode resolution reduction used for DCP content, rebuilding
	   the pieces.  Emits PENDING then DONE Change signals, or CANCELLED if the
	   value is unchanged.
	*/

	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; release the lock before signalling the cancellation */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* Signal DONE outside the lock */
	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1239
1240
1241 optional<DCPTime>
1242 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1243 {
1244         boost::mutex::scoped_lock lm (_mutex);
1245
1246         for (auto i: _pieces) {
1247                 if (i->content == content) {
1248                         return i->content_time_to_dcp(t);
1249                 }
1250         }
1251
1252         /* We couldn't find this content; perhaps things are being changed over */
1253         return {};
1254 }
1255
1256
1257 shared_ptr<const Playlist>
1258 Player::playlist () const
1259 {
1260         return _playlist ? _playlist : _film->playlist();
1261 }
1262
1263
1264 void
1265 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1266 {
1267         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1268 }
1269