9cc7e5d25b31567c7e80cb003000e71452aaf76a
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "render_text.h"
51 #include "shuffler.h"
52 #include "text_content.h"
53 #include "text_decoder.h"
54 #include "timer.h"
55 #include "video_decoder.h"
56 #include <dcp/reel.h>
57 #include <dcp/reel_closed_caption_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_sound_asset.h>
60 #include <dcp/reel_subtitle_asset.h>
61 #include <algorithm>
62 #include <iostream>
63 #include <stdint.h>
64
65 #include "i18n.h"
66
67
68 using std::copy;
69 using std::cout;
70 using std::dynamic_pointer_cast;
71 using std::list;
72 using std::make_pair;
73 using std::make_shared;
74 using std::make_shared;
75 using std::max;
76 using std::min;
77 using std::min;
78 using std::pair;
79 using std::shared_ptr;
80 using std::vector;
81 using std::weak_ptr;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
90 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
91 int const PlayerProperty::PLAYLIST = 701;
92 int const PlayerProperty::FILM_CONTAINER = 702;
93 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
94 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
95 int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
98 Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
99         : _film (film)
100         , _suspended (0)
101         , _ignore_video(false)
102         , _ignore_audio(false)
103         , _ignore_text(false)
104         , _always_burn_open_subtitles(false)
105         , _fast(false)
106         , _tolerant (film->tolerant())
107         , _play_referenced(false)
108         , _audio_merger (_film->audio_frame_rate())
109         , _subtitle_alignment (subtitle_alignment)
110 {
111         construct ();
112 }
113
114
115 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
116         : _film (film)
117         , _playlist (playlist_)
118         , _suspended (0)
119         , _ignore_video(false)
120         , _ignore_audio(false)
121         , _ignore_text(false)
122         , _always_burn_open_subtitles(false)
123         , _fast(false)
124         , _tolerant (film->tolerant())
125         , _play_referenced(false)
126         , _audio_merger (_film->audio_frame_rate())
127 {
128         construct ();
129 }
130
131
/** Shared part of construction: wire up change signals, size the output and
 *  build the initial set of pieces.  The order of the connections below is
 *  significant (see comment), so this must not be re-ordered.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Force creation of the audio processor, if the film has one configured */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Start from the beginning with an accurate seek */
	seek (DCPTime (), true);
}
148
149
150 bool
151 have_video (shared_ptr<const Content> content)
152 {
153         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
154 }
155
156
157 bool
158 have_audio (shared_ptr<const Content> content)
159 {
160         return static_cast<bool>(content->audio) && content->can_be_played();
161 }
162
163
/** (Re-)build the list of Pieces (content/decoder pairs) from the playlist.
 *  Called on construction and whenever the playlist, its content or relevant
 *  film properties change.  Holds _mutex for the whole rebuild and resets the
 *  emission state as if after a seek.
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);

	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be re-used where possible */
	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		/* 3D L/R video frames can arrive out of order; the Shuffler re-orders them
		   before they reach Player::video */
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			/* Content files are missing; skip this content */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use an existing decoder for this content if there is one, preserving its state */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record a starting position for every audio stream; pass() uses these to
	   decide how much merged audio is safe to emit */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Video that matters for overlap purposes: in-use 2D video (3D L/R halves are
	   expected to overlap each other) */
	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					/* NOTE: later overlapping content overwrites this each time round,
					   so only the last overlap found is remembered */
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Trackers for the gaps in video/audio which must be filled with black/silence */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission state, as after a seek */
	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}
303
304
305 void
306 Player::playlist_content_change (ChangeType type, int property, bool frequent)
307 {
308         if (property == VideoContentProperty::CROP) {
309                 if (type == ChangeType::DONE) {
310                         boost::mutex::scoped_lock lm (_mutex);
311                         for (auto const& i: _delay) {
312                                 i.first->reset_metadata(_film, _video_container_size);
313                         }
314                 }
315         } else {
316                 if (type == ChangeType::PENDING) {
317                         /* The player content is probably about to change, so we can't carry on
318                            until that has happened and we've rebuilt our pieces.  Stop pass()
319                            and seek() from working until then.
320                         */
321                         ++_suspended;
322                 } else if (type == ChangeType::DONE) {
323                         /* A change in our content has gone through.  Re-build our pieces. */
324                         setup_pieces ();
325                         --_suspended;
326                 } else if (type == ChangeType::CANCELLED) {
327                         --_suspended;
328                 }
329         }
330
331         Change (type, property, frequent);
332 }
333
334
335 void
336 Player::set_video_container_size (dcp::Size s)
337 {
338         Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
339
340         if (s == _video_container_size) {
341                 Change(ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
342                 return;
343         }
344
345         _video_container_size = s;
346
347         {
348                 boost::mutex::scoped_lock lm (_mutex);
349                 _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
350                 _black_image->make_black ();
351         }
352
353         Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
354 }
355
356
/** Handle a change to the playlist itself (content added/removed/moved):
 *  rebuild our pieces once the change is complete, then re-emit the change
 *  to our own listeners.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
365
366
367 void
368 Player::film_change (ChangeType type, Film::Property p)
369 {
370         /* Here we should notice Film properties that affect our output, and
371            alert listeners that our output now would be different to how it was
372            last time we were run.
373         */
374
375         if (p == Film::Property::CONTAINER) {
376                 Change (type, PlayerProperty::FILM_CONTAINER, false);
377         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
378                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
379                    so we need new pieces here.
380                 */
381                 if (type == ChangeType::DONE) {
382                         setup_pieces ();
383                 }
384                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
385         } else if (p == Film::Property::AUDIO_PROCESSOR) {
386                 if (type == ChangeType::DONE && _film->audio_processor ()) {
387                         boost::mutex::scoped_lock lm (_mutex);
388                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
389                 }
390         } else if (p == Film::Property::AUDIO_CHANNELS) {
391                 if (type == ChangeType::DONE) {
392                         boost::mutex::scoped_lock lm (_mutex);
393                         _audio_merger.clear ();
394                 }
395         }
396 }
397
398
399 shared_ptr<PlayerVideo>
400 Player::black_player_video_frame (Eyes eyes) const
401 {
402         return std::make_shared<PlayerVideo> (
403                 std::make_shared<const RawImageProxy>(_black_image),
404                 Crop(),
405                 optional<double>(),
406                 _video_container_size,
407                 _video_container_size,
408                 eyes,
409                 Part::WHOLE,
410                 PresetColourConversion::all().front().conversion,
411                 VideoRange::FULL,
412                 std::weak_ptr<Content>(),
413                 boost::optional<Frame>(),
414                 false
415         );
416 }
417
418
419 Frame
420 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
421 {
422         auto s = t - piece->content->position ();
423         s = min (piece->content->length_after_trim(_film), s);
424         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
425
426         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
427            then convert that ContentTime to frames at the content's rate.  However this fails for
428            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
429            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
430
431            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
432         */
433         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
434 }
435
436
437 DCPTime
438 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
439 {
440         /* See comment in dcp_to_content_video */
441         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
442         return d + piece->content->position();
443 }
444
445
446 Frame
447 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
448 {
449         auto s = t - piece->content->position ();
450         s = min (piece->content->length_after_trim(_film), s);
451         /* See notes in dcp_to_content_video */
452         return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
453 }
454
455
456 DCPTime
457 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
458 {
459         /* See comment in dcp_to_content_video */
460         return DCPTime::from_frames (f, _film->audio_frame_rate())
461                 - DCPTime (piece->content->trim_start(), piece->frc)
462                 + piece->content->position();
463 }
464
465
466 ContentTime
467 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
468 {
469         auto s = t - piece->content->position ();
470         s = min (piece->content->length_after_trim(_film), s);
471         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
472 }
473
474
475 DCPTime
476 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
477 {
478         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
479 }
480
481
482 vector<shared_ptr<Font>>
483 Player::get_subtitle_fonts ()
484 {
485         boost::mutex::scoped_lock lm (_mutex);
486
487         vector<shared_ptr<Font>> fonts;
488         for (auto piece: _pieces) {
489                 for (auto text: piece->content->text) {
490                         auto text_fonts = text->fonts();
491                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
492                 }
493         }
494
495         return fonts;
496 }
497
498
/** Set this player never to produce any video data.
 *  Rebuilds the pieces so that video decoding is switched off.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
	setup_pieces();
}
506
507
/** Set this player never to produce any audio data.
 *  Rebuilds the pieces so that audio decoding is switched off.
 */
void
Player::set_ignore_audio ()
{
	_ignore_audio = true;
	setup_pieces();
}
514
515
/** Set this player never to produce any text (subtitle/caption) data.
 *  Rebuilds the pieces so that text decoding is switched off.
 */
void
Player::set_ignore_text ()
{
	_ignore_text = true;
	setup_pieces();
}
522
523
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  No rebuild is needed as this only affects how subtitles are composited at emit time.
 */
void
Player::set_always_burn_open_subtitles ()
{
	_always_burn_open_subtitles = true;
}
530
531
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Rebuilds the pieces so that decoders are created in fast mode.
 */
void
Player::set_fast ()
{
	_fast = true;
	setup_pieces();
}
539
540
/** Set the player to decode and play content from referenced DCPs, rather
 *  than skipping it.  Rebuilds the pieces so DCP decoders pick this up.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	setup_pieces();
}
547
548
/** Run one 'iteration' of playback: ask the most-behind decoder (or gap
 *  filler) to produce some data, then emit any merged audio that is known
 *  to be complete.
 *  @return true if playback is finished (or we emitted the single black
 *  frame for a zero-length playlist); false if there is more to do.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length.load() == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			/* This piece's decoder has gone past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing will produce data on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A gap needing black/silence that starts before the earliest content takes priority */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_next_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_next_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _next_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_next_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.  First, though we choose only streams that are less than
	   ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
	   behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
	   that will never come, causing bugs like #2101.
	*/
	constexpr int ignore_streams_behind = 5;

	using state_pair = std::pair<AudioStreamPtr, StreamState>;

	/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
	auto latest_last_push_end = std::max_element(
		_stream_states.begin(),
		_stream_states.end(),
		[](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
		);

	if (latest_last_push_end != _stream_states.end()) {
		LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
	}

	/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
	std::map<AudioStreamPtr, StreamState> alive_stream_states;
	for (auto const& i: _stream_states) {
		if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
			alive_stream_states.insert(i);
		} else {
			LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
		}
	}

	/* The merger can safely emit audio up to the earliest point that any live stream (or the
	   silence filler) has reached */
	auto pull_to = _playback_length.load();
	for (auto const& i: alive_stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_next_audio_time && i->second < *_next_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_next_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_next_audio_time && i->second > *_next_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush anything still held back before we report completion */
		if (_shuffler) {
			_shuffler->flush ();
		}
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}

		/* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
		 * However, if we have L and R video files, and one is shorter than the other,
		 * the fill code in ::video mostly takes care of filling in the gaps.
		 * However, since it fills at the point when it knows there is more video coming
		 * at time t (so it should fill any gap up to t) it can't do anything right at the
		 * end.  This is particularly bad news if the last frame emitted is a LEFT
		 * eye, as the MXF writer will complain about the 3D sequence being wrong.
		 * Here's a hack to workaround that particular case.
		 */
		if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
			do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
		}
	}

	return done;
}
750
/** @return Open subtitles for the frame at the given time, converted to images.
 *  Collects both bitmap and rendered string subtitles that should be burnt in,
 *  then merges them into a single positioned image; empty if there is nothing
 *  to burn for this frame.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* All active open subtitles that overlap this one-frame period and are
	   marked for burn-in (or everything, if _always_burn_open_subtitles) */
	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);

			/* Convert the proportional rectangle position to pixels in the container */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.load().width * i.rectangle.x),
						lrint(_video_container_size.load().height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text(j.string, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	/* Combine everything into one image, positioned per _subtitle_alignment */
	return merge (captions, _subtitle_alignment);
}
797
798
/** Handle some video arriving from a decoder: fill any gap since the last
 *  emitted frame (with repeats of the previous frame, or black), then emit
 *  this frame, repeating it if the frame-rate change requires.
 *  @param weak_piece Piece that the video comes from.
 *  @param video Video frame from the decoder.
 */
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	if (_suspended) {
		/* In the middle of a change; do nothing until we are resumed */
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		/* The piece has gone away (e.g. content removed); ignore this video */
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* We are halving the frame rate, so drop odd-numbered frames */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted from this piece, if any; used to fill instead of black */
			auto last = _last_video.find (weak_piece);
			if (_film->three_d()) {
				/* In 3D we must emit per-eye, so work out which eye we must fill up to... */
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				/* ...and which eye to start filling from */
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Emit one eye per iteration until we reach both fill_to and fill_to_eyes */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == Eyes::RIGHT) {
						/* Only advance time once both eyes of a frame have been emitted */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill whole frames, repeating the last one we saw or using black */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	/* Remember this frame against its piece so that it can be re-used for gap filling later */
	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade (_film, video.frame),
		scale_for_display(
			content_video->scaled_size(_film->frame_size()),
			_video_container_size,
			_film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeated as many times as the frame rate change requires,
	   but never beyond the end of the content.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}
918
919
/** Handle some audio arriving from a decoder: trim it to the content's period,
 *  apply gain/fade, remap channels, optionally run the audio processor and
 *  push the result into the merger.
 *  @param weak_piece Piece that the audio comes from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio Audio data and its frame position in the content.
 */
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	if (_suspended) {
		/* In the middle of a change; do nothing until we are resumed */
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		/* The piece has gone away (e.g. content removed); ignore this audio */
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this audio after resampling to suit the film */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content: keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Copy the buffers before modifying them, as we don't own the originals */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record how far this stream has now been pushed, so pass() can tell when it is behind */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1008
1009
1010 void
1011 Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
1012 {
1013         if (_suspended) {
1014                 return;
1015         }
1016
1017         auto piece = weak_piece.lock ();
1018         auto content = weak_content.lock ();
1019         if (!piece || !content) {
1020                 return;
1021         }
1022
1023         PlayerText ps;
1024         for (auto& sub: subtitle.subs)
1025         {
1026                 /* Apply content's subtitle offsets */
1027                 sub.rectangle.x += content->x_offset ();
1028                 sub.rectangle.y += content->y_offset ();
1029
1030                 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1031                 sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
1032                 sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
1033
1034                 /* Apply content's subtitle scale */
1035                 sub.rectangle.width *= content->x_scale ();
1036                 sub.rectangle.height *= content->y_scale ();
1037
1038                 auto image = sub.image;
1039
1040                 /* We will scale the subtitle up to fit _video_container_size */
1041                 int const width = sub.rectangle.width * _video_container_size.load().width;
1042                 int const height = sub.rectangle.height * _video_container_size.load().height;
1043                 if (width == 0 || height == 0) {
1044                         return;
1045                 }
1046
1047                 dcp::Size scaled_size (width, height);
1048                 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
1049         }
1050
1051         DCPTime from(content_time_to_dcp(piece, subtitle.from()));
1052         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1053 }
1054
1055
1056 void
1057 Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
1058 {
1059         if (_suspended) {
1060                 return;
1061         }
1062
1063         auto piece = weak_piece.lock ();
1064         auto content = weak_content.lock ();
1065         if (!piece || !content) {
1066                 return;
1067         }
1068
1069         PlayerText ps;
1070         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
1071
1072         if (from > piece->content->end(_film)) {
1073                 return;
1074         }
1075
1076         for (auto s: subtitle.subs) {
1077                 s.set_h_position (s.h_position() + content->x_offset());
1078                 s.set_v_position (s.v_position() + content->y_offset());
1079                 float const xs = content->x_scale();
1080                 float const ys = content->y_scale();
1081                 float size = s.size();
1082
1083                 /* Adjust size to express the common part of the scaling;
1084                    e.g. if xs = ys = 0.5 we scale size by 2.
1085                 */
1086                 if (xs > 1e-5 && ys > 1e-5) {
1087                         size *= 1 / min (1 / xs, 1 / ys);
1088                 }
1089                 s.set_size (size);
1090
1091                 /* Then express aspect ratio changes */
1092                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1093                         s.set_aspect_adjust (xs / ys);
1094                 }
1095
1096                 s.set_in (dcp::Time(from.seconds(), 1000));
1097                 ps.string.push_back (s);
1098         }
1099
1100         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1101 }
1102
1103
/** Handle the end of a subtitle/caption from a decoder.
 *  @param weak_piece Piece that the text comes from.
 *  @param weak_content TextContent that the text comes from.
 *  @param to Time at which the text finishes, in the content's timeline.
 */
void
Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
{
	if (_suspended) {
		/* In the middle of a change; do nothing until we are resumed */
		return;
	}

	auto content = weak_content.lock ();
	if (!content) {
		return;
	}

	if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
		/* We never saw the corresponding start for this text, so there is nothing to stop */
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	DCPTime const dcp_to = content_time_to_dcp (piece, to);

	if (dcp_to > piece->content->end(_film)) {
		/* Ends after the end of the content; ignore */
		return;
	}

	/* Mark the active text as finished at dcp_to; this gives us back the text and its start time */
	auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);

	/* Open subtitles which are always burnt in are never emitted as separate texts */
	bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (content->use() && !always && !content->burn()) {
		/* Now that we know its full period, emit the text via the Text signal */
		Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
	}
}
1138
1139
/** Seek the player to a new position.
 *  @param time Position to seek to.
 *  @param accurate true to seek exactly to @p time (slower); false to seek to
 *  somewhere at or before it (e.g. the nearest keyframe).
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Discard all pending/buffered output from before the seek */

	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder as appropriate for where the seek lands relative to it */
	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where the next video/audio should be, so anything earlier can be discarded */
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		/* We don't know exactly where we are, so accept whatever arrives next */
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	/* Reset the black/silence generators to the new position */
	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer valid for gap-filling */
	_last_video.clear ();
}
1200
1201
1202 void
1203 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1204 {
1205         if (!_film->three_d()) {
1206                 if (pv->eyes() == Eyes::LEFT) {
1207                         /* Use left-eye images for both eyes... */
1208                         pv->set_eyes (Eyes::BOTH);
1209                 } else if (pv->eyes() == Eyes::RIGHT) {
1210                         /* ...and discard the right */
1211                         return;
1212                 }
1213         }
1214
1215         /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1216            player before the video that requires them.
1217         */
1218         _delay.push_back (make_pair (pv, time));
1219
1220         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1221                 _next_video_time = time + one_video_frame();
1222         }
1223         _next_video_eyes = increment_eyes (pv->eyes());
1224
1225         if (_delay.size() < 3) {
1226                 return;
1227         }
1228
1229         auto to_do = _delay.front();
1230         _delay.pop_front();
1231         do_emit_video (to_do.first, to_do.second);
1232 }
1233
1234
1235 void
1236 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1237 {
1238         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1239                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1240                         _active_texts[i].clear_before (time);
1241                 }
1242         }
1243
1244         auto subtitles = open_subtitles_for_frame (time);
1245         if (subtitles) {
1246                 pv->set_text (subtitles.get ());
1247         }
1248
1249         Video (pv, time);
1250 }
1251
1252
1253 void
1254 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1255 {
1256         /* Log if the assert below is about to fail */
1257         if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1258                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1259         }
1260
1261         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1262         DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1263         Audio (data, time, _film->audio_frame_rate());
1264         _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1265 }
1266
1267
1268 void
1269 Player::fill_audio (DCPTimePeriod period)
1270 {
1271         if (period.from == period.to) {
1272                 return;
1273         }
1274
1275         DCPOMATIC_ASSERT (period.from < period.to);
1276
1277         DCPTime t = period.from;
1278         while (t < period.to) {
1279                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1280                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1281                 if (samples) {
1282                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1283                         silence->make_silent ();
1284                         emit_audio (silence, t);
1285                 }
1286                 t += block;
1287         }
1288 }
1289
1290
1291 DCPTime
1292 Player::one_video_frame () const
1293 {
1294         return DCPTime::from_frames (1, _film->video_frame_rate ());
1295 }
1296
1297
1298 pair<shared_ptr<AudioBuffers>, DCPTime>
1299 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1300 {
1301         auto const discard_time = discard_to - time;
1302         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1303         auto remaining_frames = audio->frames() - discard_frames;
1304         if (remaining_frames <= 0) {
1305                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1306         }
1307         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1308         return make_pair(cut, time + discard_time);
1309 }
1310
1311
1312 void
1313 Player::set_dcp_decode_reduction (optional<int> reduction)
1314 {
1315         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1316
1317         if (reduction == _dcp_decode_reduction.load()) {
1318                 Change(ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1319                 return;
1320         }
1321
1322         _dcp_decode_reduction = reduction;
1323         setup_pieces();
1324
1325         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1326 }
1327
1328
1329 optional<DCPTime>
1330 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
1331 {
1332         boost::mutex::scoped_lock lm (_mutex);
1333
1334         for (auto i: _pieces) {
1335                 if (i->content == content) {
1336                         return content_time_to_dcp (i, t);
1337                 }
1338         }
1339
1340         /* We couldn't find this content; perhaps things are being changed over */
1341         return {};
1342 }
1343
1344
1345 optional<ContentTime>
1346 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
1347 {
1348         boost::mutex::scoped_lock lm (_mutex);
1349
1350         for (auto i: _pieces) {
1351                 if (i->content == content) {
1352                         return dcp_to_content_time (i, t);
1353                 }
1354         }
1355
1356         /* We couldn't find this content; perhaps things are being changed over */
1357         return {};
1358 }
1359
1360
1361 shared_ptr<const Playlist>
1362 Player::playlist () const
1363 {
1364         return _playlist ? _playlist : _film->playlist();
1365 }
1366
1367
1368 void
1369 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1370 {
1371         if (_suspended) {
1372                 return;
1373         }
1374
1375         auto piece = weak_piece.lock ();
1376         DCPOMATIC_ASSERT (piece);
1377
1378         auto const vfr = _film->video_frame_rate();
1379
1380         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1381         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1382                 return;
1383         }
1384
1385         Atmos (data.data, dcp_time, data.metadata);
1386 }
1387