Move get_reel_assets() out of Player, as it doesn't need to be there.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "render_text.h"
51 #include "shuffler.h"
52 #include "text_content.h"
53 #include "text_decoder.h"
54 #include "timer.h"
55 #include "video_decoder.h"
56 #include <dcp/reel.h>
57 #include <dcp/reel_closed_caption_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_sound_asset.h>
60 #include <dcp/reel_subtitle_asset.h>
61 #include <algorithm>
62 #include <iostream>
63 #include <stdint.h>
64
65 #include "i18n.h"
66
67
68 using std::copy;
69 using std::cout;
70 using std::dynamic_pointer_cast;
71 using std::list;
72 using std::make_pair;
73 using std::make_shared;
74 using std::make_shared;
75 using std::max;
76 using std::min;
77 using std::min;
78 using std::pair;
79 using std::shared_ptr;
80 using std::vector;
81 using std::weak_ptr;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
90 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
91 int const PlayerProperty::PLAYLIST = 701;
92 int const PlayerProperty::FILM_CONTAINER = 702;
93 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
94 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
95 int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
/** Construct a Player to play the whole of a Film's own playlist.
 *  @param film Film to play.
 *  @param subtitle_alignment Alignment to use when rendering burnt-in subtitles.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}
113
114
/** Construct a Player to play a given playlist rather than the Film's own.
 *  @param film Film that the playlist content belongs to.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
130
131
/* Set-up shared by both constructors: connect to film/playlist change signals,
   initialise the container size, pick up the audio processor, build the pieces
   and seek to the start.
*/
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor, if it has one */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Start playback from the beginning with an accurate seek */
	seek (DCPTime (), true);
}
148
149
150 bool
151 have_video (shared_ptr<const Content> content)
152 {
153         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
154 }
155
156
157 bool
158 have_audio (shared_ptr<const Content> content)
159 {
160         return static_cast<bool>(content->audio) && content->can_be_played();
161 }
162
163
164 void
165 Player::setup_pieces ()
166 {
167         boost::mutex::scoped_lock lm (_mutex);
168
169         _playback_length = _playlist ? _playlist->length(_film) : _film->length();
170
171         auto old_pieces = _pieces;
172         _pieces.clear ();
173
174         auto playlist_content = playlist()->content();
175         bool const have_threed = std::any_of(
176                 playlist_content.begin(),
177                 playlist_content.end(),
178                 [](shared_ptr<const Content> c) {
179                         return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
180                 });
181
182
183         if (have_threed) {
184                 _shuffler.reset(new Shuffler());
185                 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
186         }
187
188         for (auto i: playlist()->content()) {
189
190                 if (!i->paths_valid ()) {
191                         continue;
192                 }
193
194                 if (_ignore_video && _ignore_audio && i->text.empty()) {
195                         /* We're only interested in text and this content has none */
196                         continue;
197                 }
198
199                 shared_ptr<Decoder> old_decoder;
200                 for (auto j: old_pieces) {
201                         if (j->content == i) {
202                                 old_decoder = j->decoder;
203                                 break;
204                         }
205                 }
206
207                 auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
208                 DCPOMATIC_ASSERT (decoder);
209
210                 FrameRateChange frc (_film, i);
211
212                 if (decoder->video && _ignore_video) {
213                         decoder->video->set_ignore (true);
214                 }
215
216                 if (decoder->audio && _ignore_audio) {
217                         decoder->audio->set_ignore (true);
218                 }
219
220                 if (_ignore_text) {
221                         for (auto i: decoder->text) {
222                                 i->set_ignore (true);
223                         }
224                 }
225
226                 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
227                 if (dcp) {
228                         dcp->set_decode_referenced (_play_referenced);
229                         if (_play_referenced) {
230                                 dcp->set_forced_reduction (_dcp_decode_reduction);
231                         }
232                 }
233
234                 auto piece = make_shared<Piece>(i, decoder, frc);
235                 _pieces.push_back (piece);
236
237                 if (decoder->video) {
238                         if (have_threed) {
239                                 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
240                                 decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
241                         } else {
242                                 decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
243                         }
244                 }
245
246                 if (decoder->audio) {
247                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
248                 }
249
250                 auto j = decoder->text.begin();
251
252                 while (j != decoder->text.end()) {
253                         (*j)->BitmapStart.connect (
254                                 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
255                                 );
256                         (*j)->PlainStart.connect (
257                                 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
258                                 );
259                         (*j)->Stop.connect (
260                                 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
261                                 );
262
263                         ++j;
264                 }
265
266                 if (decoder->atmos) {
267                         decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
268                 }
269         }
270
271         _stream_states.clear ();
272         for (auto i: _pieces) {
273                 if (i->content->audio) {
274                         for (auto j: i->content->audio->streams()) {
275                                 _stream_states[j] = StreamState (i, i->content->position ());
276                         }
277                 }
278         }
279
280         auto ignore_overlap = [](shared_ptr<VideoContent> v) {
281                 return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
282         };
283
284         for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
285                 if (ignore_overlap((*i)->content->video)) {
286                         /* Look for content later in the content list with in-use video that overlaps this */
287                         auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
288                         for (auto j = std::next(i); j != _pieces.end(); ++j) {
289                                 if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
290                                         (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
291                                 }
292                         }
293                 }
294         }
295
296         _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
297         _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
298
299         _next_video_time = boost::none;
300         _next_video_eyes = Eyes::BOTH;
301         _next_audio_time = boost::none;
302 }
303
304
305 void
306 Player::playlist_content_change (ChangeType type, int property, bool frequent)
307 {
308         if (property == VideoContentProperty::CROP) {
309                 if (type == ChangeType::DONE) {
310                         auto const vcs = video_container_size();
311                         boost::mutex::scoped_lock lm (_mutex);
312                         for (auto const& i: _delay) {
313                                 i.first->reset_metadata (_film, vcs);
314                         }
315                 }
316         } else {
317                 if (type == ChangeType::PENDING) {
318                         /* The player content is probably about to change, so we can't carry on
319                            until that has happened and we've rebuilt our pieces.  Stop pass()
320                            and seek() from working until then.
321                         */
322                         ++_suspended;
323                 } else if (type == ChangeType::DONE) {
324                         /* A change in our content has gone through.  Re-build our pieces. */
325                         setup_pieces ();
326                         --_suspended;
327                 } else if (type == ChangeType::CANCELLED) {
328                         --_suspended;
329                 }
330         }
331
332         Change (type, property, frequent);
333 }
334
335
336 void
337 Player::set_video_container_size (dcp::Size s)
338 {
339         Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
340
341         if (s == _video_container_size) {
342                 Change(ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
343                 return;
344         }
345
346         _video_container_size = s;
347
348         {
349                 boost::mutex::scoped_lock lm (_mutex);
350                 _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
351                 _black_image->make_black ();
352         }
353
354         Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
355 }
356
357
358 void
359 Player::playlist_change (ChangeType type)
360 {
361         if (type == ChangeType::DONE) {
362                 setup_pieces ();
363         }
364         Change (type, PlayerProperty::PLAYLIST, false);
365 }
366
367
368 void
369 Player::film_change (ChangeType type, Film::Property p)
370 {
371         /* Here we should notice Film properties that affect our output, and
372            alert listeners that our output now would be different to how it was
373            last time we were run.
374         */
375
376         if (p == Film::Property::CONTAINER) {
377                 Change (type, PlayerProperty::FILM_CONTAINER, false);
378         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
379                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
380                    so we need new pieces here.
381                 */
382                 if (type == ChangeType::DONE) {
383                         setup_pieces ();
384                 }
385                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
386         } else if (p == Film::Property::AUDIO_PROCESSOR) {
387                 if (type == ChangeType::DONE && _film->audio_processor ()) {
388                         boost::mutex::scoped_lock lm (_mutex);
389                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
390                 }
391         } else if (p == Film::Property::AUDIO_CHANNELS) {
392                 if (type == ChangeType::DONE) {
393                         boost::mutex::scoped_lock lm (_mutex);
394                         _audio_merger.clear ();
395                 }
396         }
397 }
398
399
/** @return a completely black frame at the current video container size,
 *  for the given eye(s), built from the cached _black_image.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
418
419
/** Convert a DCP time to a frame index within a piece's video content,
 *  accounting for the content's position, trim and frame-rate change.
 *  @param piece Piece containing the content.
 *  @param t Time relative to the start of the DCP.
 *  @return Video frame index within the content.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
436
437
438 DCPTime
439 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
440 {
441         /* See comment in dcp_to_content_video */
442         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
443         return d + piece->content->position();
444 }
445
446
447 Frame
448 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
449 {
450         auto s = t - piece->content->position ();
451         s = min (piece->content->length_after_trim(_film), s);
452         /* See notes in dcp_to_content_video */
453         return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
454 }
455
456
457 DCPTime
458 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
459 {
460         /* See comment in dcp_to_content_video */
461         return DCPTime::from_frames (f, _film->audio_frame_rate())
462                 - DCPTime (piece->content->trim_start(), piece->frc)
463                 + piece->content->position();
464 }
465
466
467 ContentTime
468 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
469 {
470         auto s = t - piece->content->position ();
471         s = min (piece->content->length_after_trim(_film), s);
472         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
473 }
474
475
476 DCPTime
477 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
478 {
479         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
480 }
481
482
483 vector<shared_ptr<Font>>
484 Player::get_subtitle_fonts ()
485 {
486         boost::mutex::scoped_lock lm (_mutex);
487
488         vector<shared_ptr<Font>> fonts;
489         for (auto piece: _pieces) {
490                 for (auto text: piece->content->text) {
491                         auto text_fonts = text->fonts();
492                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
493                 }
494         }
495
496         return fonts;
497 }
498
499
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
	/* Rebuild pieces so that the decoders are told to ignore video */
	setup_pieces();
}
507
508
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	_ignore_audio = true;
	/* Rebuild pieces so that the decoders are told to ignore audio */
	setup_pieces();
}
515
516
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	_ignore_text = true;
	/* Rebuild pieces so that the text decoders are told to ignore their data */
	setup_pieces();
}
523
524
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  No piece rebuild is needed: the flag is consulted when subtitles are rendered.
 */
void
Player::set_always_burn_open_subtitles ()
{
	_always_burn_open_subtitles = true;
}
531
532
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	_fast = true;
	/* Rebuild pieces since the flag is passed to decoder_factory */
	setup_pieces();
}
540
541
/** Set the player to decode content from referenced DCPs rather than skipping it */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	/* Rebuild pieces so DCP decoders pick up the referenced-decode setting */
	setup_pieces();
}
548
549
550 bool
551 Player::pass ()
552 {
553         boost::mutex::scoped_lock lm (_mutex);
554
555         if (_suspended) {
556                 /* We can't pass in this state */
557                 LOG_DEBUG_PLAYER_NC ("Player is suspended");
558                 return false;
559         }
560
561         if (_playback_length == DCPTime()) {
562                 /* Special; just give one black frame */
563                 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
564                 return true;
565         }
566
567         /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
568
569         shared_ptr<Piece> earliest_content;
570         optional<DCPTime> earliest_time;
571
572         for (auto i: _pieces) {
573                 if (i->done) {
574                         continue;
575                 }
576
577                 auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
578                 if (t > i->content->end(_film)) {
579                         i->done = true;
580                 } else {
581
582                         /* Given two choices at the same time, pick the one with texts so we see it before
583                            the video.
584                         */
585                         if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
586                                 earliest_time = t;
587                                 earliest_content = i;
588                         }
589                 }
590         }
591
592         bool done = false;
593
594         enum {
595                 NONE,
596                 CONTENT,
597                 BLACK,
598                 SILENT
599         } which = NONE;
600
601         if (earliest_content) {
602                 which = CONTENT;
603         }
604
605         if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
606                 earliest_time = _black.position ();
607                 which = BLACK;
608         }
609
610         if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
611                 earliest_time = _silent.position ();
612                 which = SILENT;
613         }
614
615         switch (which) {
616         case CONTENT:
617         {
618                 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
619                 earliest_content->done = earliest_content->decoder->pass ();
620                 auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
621                 if (dcp && !_play_referenced && dcp->reference_audio()) {
622                         /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
623                            to `hide' the fact that no audio was emitted during the referenced DCP (though
624                            we need to behave as though it was).
625                         */
626                         _next_audio_time = dcp->end (_film);
627                 }
628                 break;
629         }
630         case BLACK:
631                 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
632                 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
633                 _black.set_position (_black.position() + one_video_frame());
634                 break;
635         case SILENT:
636         {
637                 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
638                 DCPTimePeriod period (_silent.period_at_position());
639                 if (_next_audio_time) {
640                         /* Sometimes the thing that happened last finishes fractionally before
641                            or after this silence.  Bodge the start time of the silence to fix it.
642                            I think this is nothing to worry about since we will just add or
643                            remove a little silence at the end of some content.
644                         */
645                         int64_t const error = labs(period.from.get() - _next_audio_time->get());
646                         /* Let's not worry about less than a frame at 24fps */
647                         int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
648                         if (error >= too_much_error) {
649                                 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
650                         }
651                         DCPOMATIC_ASSERT (error < too_much_error);
652                         period.from = *_next_audio_time;
653                 }
654                 if (period.duration() > one_video_frame()) {
655                         period.to = period.from + one_video_frame();
656                 }
657                 fill_audio (period);
658                 _silent.set_position (period.to);
659                 break;
660         }
661         case NONE:
662                 done = true;
663                 break;
664         }
665
666         /* Emit any audio that is ready */
667
668         /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
669            of our streams, or the position of the _silent.  First, though we choose only streams that are less than
670            ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
671            behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
672            that will never come, causing bugs like #2101.
673         */
674         constexpr int ignore_streams_behind = 5;
675
676         using state_pair = std::pair<AudioStreamPtr, StreamState>;
677
678         /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
679         auto latest_last_push_end = std::max_element(
680                 _stream_states.begin(),
681                 _stream_states.end(),
682                 [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
683                 );
684
685         if (latest_last_push_end != _stream_states.end()) {
686                 LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
687         }
688
689         /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
690         std::map<AudioStreamPtr, StreamState> alive_stream_states;
691         for (auto const& i: _stream_states) {
692                 if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
693                         alive_stream_states.insert(i);
694                 } else {
695                         LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
696                 }
697         }
698
699         auto pull_to = _playback_length;
700         for (auto const& i: alive_stream_states) {
701                 if (!i.second.piece->done && i.second.last_push_end < pull_to) {
702                         pull_to = i.second.last_push_end;
703                 }
704         }
705         if (!_silent.done() && _silent.position() < pull_to) {
706                 pull_to = _silent.position();
707         }
708
709         LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
710         auto audio = _audio_merger.pull (pull_to);
711         for (auto i = audio.begin(); i != audio.end(); ++i) {
712                 if (_next_audio_time && i->second < *_next_audio_time) {
713                         /* This new data comes before the last we emitted (or the last seek); discard it */
714                         auto cut = discard_audio (i->first, i->second, *_next_audio_time);
715                         if (!cut.first) {
716                                 continue;
717                         }
718                         *i = cut;
719                 } else if (_next_audio_time && i->second > *_next_audio_time) {
720                         /* There's a gap between this data and the last we emitted; fill with silence */
721                         fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
722                 }
723
724                 emit_audio (i->first, i->second);
725         }
726
727         if (done) {
728                 if (_shuffler) {
729                         _shuffler->flush ();
730                 }
731                 for (auto const& i: _delay) {
732                         do_emit_video(i.first, i.second);
733                 }
734
735                 /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
736                  * However, if we have L and R video files, and one is shorter than the other,
737                  * the fill code in ::video mostly takes care of filling in the gaps.
738                  * However, since it fills at the point when it knows there is more video coming
739                  * at time t (so it should fill any gap up to t) it can't do anything right at the
740                  * end.  This is particularly bad news if the last frame emitted is a LEFT
741                  * eye, as the MXF writer will complain about the 3D sequence being wrong.
742                  * Here's a hack to workaround that particular case.
743                  */
744                 if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
745                         do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
746                 }
747         }
748
749         return done;
750 }
751
752
/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Collect every active open subtitle that should be burnt in during this one-frame period */
	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);

			/* Rectangle coordinates are fractions of the container; convert to pixels */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.load().width * i.rectangle.x),
						lrint(_video_container_size.load().height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text(j.string, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	/* Combine all the images into one, positioned per _subtitle_alignment */
	return merge (captions, _subtitle_alignment);
}
798
799
/** Handle a video frame arriving from a decoder.  The frame is timestamped in
 *  DCP time, discarded if it falls before the content's period or the last
 *  accurate seek, used to fill any gap since the previously-emitted frame
 *  (with a repeat of the last frame, or black), and finally emitted —
 *  repeated as many times as the frame rate change requires.
 */
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	/* Ignore everything while we are suspended (e.g. during a state change) */
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	/* frc.skip is set when alternate content frames should be dropped
	   (content rate higher than the DCP rate); drop the odd ones.
	*/
	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any; used in preference to black */
			auto last = _last_video.find (weak_piece);
			if (_film->three_d()) {
				/* In 3D we must fill whole left/right pairs, so track which eye
				   we are filling up to as well as which time.
				*/
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Emit LEFT/RIGHT alternately, advancing time after each RIGHT,
				   until both the time and the eye position are reached.
				*/
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: one frame (both eyes) per video-frame step */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	/* Remember this frame so future gaps for this piece can be filled with it */
	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade (_film, video.frame),
		scale_for_display(
			content_video->scaled_size(_film->frame_size()),
			_video_container_size,
			_film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeating it as the frame rate change requires
	   (e.g. 25 -> 50 repeats each frame twice), but never past the
	   end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}
919
920
/** Handle a block of audio arriving from a decoder: trim it to the content's
 *  period, apply gain and any fade, remap it to the DCP channel layout, run
 *  it through the audio processor (if any) and push it into the merger.
 */
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Ignore everything while we are suspended (e.g. during a state change) */
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this audio after resampling for the DCP */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Work on a copy so we don't modify the decoder's buffers */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			/* content->gain() is in dB; convert to a linear multiplier once */
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record how far this stream has got, for pass() to decide what to do next */
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1009
1010
/** Handle bitmap text (e.g. an image subtitle) arriving from a decoder.
 *  Each sub-image is offset and scaled according to the content's text
 *  settings, scaled to suit _video_container_size and then stored in
 *  _active_texts until the matching stop arrives.
 */
void
Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
{
	/* Ignore everything while we are suspended (e.g. during a state change) */
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		/* The piece or its text content has gone away; nothing to do */
		return;
	}

	PlayerText ps;
	for (auto& sub: subtitle.subs)
	{
		/* Apply content's subtitle offsets */
		sub.rectangle.x += content->x_offset ();
		sub.rectangle.y += content->y_offset ();

		/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
		sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
		sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);

		/* Apply content's subtitle scale */
		sub.rectangle.width *= content->x_scale ();
		sub.rectangle.height *= content->y_scale ();

		auto image = sub.image;

		/* We will scale the subtitle up to fit _video_container_size */
		int const width = sub.rectangle.width * _video_container_size.load().width;
		int const height = sub.rectangle.height * _video_container_size.load().height;
		if (width == 0 || height == 0) {
			/* NOTE(review): this `return' abandons the whole ContentBitmapText (including
			   subs already collected in ps) when one sub scales to nothing; `continue'
			   (skipping just this sub) may be what's intended — confirm before changing.
			*/
			return;
		}

		dcp::Size scaled_size (width, height);
		ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
	}

	DCPTime from(content_time_to_dcp(piece, subtitle.from()));
	_active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}
1055
1056
1057 void
1058 Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
1059 {
1060         if (_suspended) {
1061                 return;
1062         }
1063
1064         auto piece = weak_piece.lock ();
1065         auto content = weak_content.lock ();
1066         if (!piece || !content) {
1067                 return;
1068         }
1069
1070         PlayerText ps;
1071         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
1072
1073         if (from > piece->content->end(_film)) {
1074                 return;
1075         }
1076
1077         for (auto s: subtitle.subs) {
1078                 s.set_h_position (s.h_position() + content->x_offset());
1079                 s.set_v_position (s.v_position() + content->y_offset());
1080                 float const xs = content->x_scale();
1081                 float const ys = content->y_scale();
1082                 float size = s.size();
1083
1084                 /* Adjust size to express the common part of the scaling;
1085                    e.g. if xs = ys = 0.5 we scale size by 2.
1086                 */
1087                 if (xs > 1e-5 && ys > 1e-5) {
1088                         size *= 1 / min (1 / xs, 1 / ys);
1089                 }
1090                 s.set_size (size);
1091
1092                 /* Then express aspect ratio changes */
1093                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1094                         s.set_aspect_adjust (xs / ys);
1095                 }
1096
1097                 s.set_in (dcp::Time(from.seconds(), 1000));
1098                 ps.string.push_back (s);
1099         }
1100
1101         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1102 }
1103
1104
1105 void
1106 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1107 {
1108         if (_suspended) {
1109                 return;
1110         }
1111
1112         auto content = weak_content.lock ();
1113         if (!content) {
1114                 return;
1115         }
1116
1117         if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
1118                 return;
1119         }
1120
1121         auto piece = weak_piece.lock ();
1122         if (!piece) {
1123                 return;
1124         }
1125
1126         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1127
1128         if (dcp_to > piece->content->end(_film)) {
1129                 return;
1130         }
1131
1132         auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
1133
1134         bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1135         if (content->use() && !always && !content->burn()) {
1136                 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
1137         }
1138 }
1139
1140
/** Seek the player to @param time.  If @param accurate is true the next
 *  emitted video/audio will be exactly at @param time; otherwise the seek
 *  may land earlier and the next-emission times are left unset.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away any partially-processed state from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder as appropriate for where `time' falls in its period */
	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly when the next video/audio should be emitted */
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		/* Inaccurate seek: we don't know where the decoders will resume */
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Frames cached for gap-filling are no longer valid after a seek */
	_last_video.clear ();
}
1201
1202
1203 void
1204 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1205 {
1206         if (!_film->three_d()) {
1207                 if (pv->eyes() == Eyes::LEFT) {
1208                         /* Use left-eye images for both eyes... */
1209                         pv->set_eyes (Eyes::BOTH);
1210                 } else if (pv->eyes() == Eyes::RIGHT) {
1211                         /* ...and discard the right */
1212                         return;
1213                 }
1214         }
1215
1216         /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1217            player before the video that requires them.
1218         */
1219         _delay.push_back (make_pair (pv, time));
1220
1221         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1222                 _next_video_time = time + one_video_frame();
1223         }
1224         _next_video_eyes = increment_eyes (pv->eyes());
1225
1226         if (_delay.size() < 3) {
1227                 return;
1228         }
1229
1230         auto to_do = _delay.front();
1231         _delay.pop_front();
1232         do_emit_video (to_do.first, to_do.second);
1233 }
1234
1235
1236 void
1237 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1238 {
1239         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1240                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1241                         _active_texts[i].clear_before (time);
1242                 }
1243         }
1244
1245         auto subtitles = open_subtitles_for_frame (time);
1246         if (subtitles) {
1247                 pv->set_text (subtitles.get ());
1248         }
1249
1250         Video (pv, time);
1251 }
1252
1253
1254 void
1255 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1256 {
1257         /* Log if the assert below is about to fail */
1258         if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1259                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1260         }
1261
1262         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1263         DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1264         Audio (data, time, _film->audio_frame_rate());
1265         _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1266 }
1267
1268
1269 void
1270 Player::fill_audio (DCPTimePeriod period)
1271 {
1272         if (period.from == period.to) {
1273                 return;
1274         }
1275
1276         DCPOMATIC_ASSERT (period.from < period.to);
1277
1278         DCPTime t = period.from;
1279         while (t < period.to) {
1280                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1281                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1282                 if (samples) {
1283                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1284                         silence->make_silent ();
1285                         emit_audio (silence, t);
1286                 }
1287                 t += block;
1288         }
1289 }
1290
1291
1292 DCPTime
1293 Player::one_video_frame () const
1294 {
1295         return DCPTime::from_frames (1, _film->video_frame_rate ());
1296 }
1297
1298
1299 pair<shared_ptr<AudioBuffers>, DCPTime>
1300 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1301 {
1302         auto const discard_time = discard_to - time;
1303         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1304         auto remaining_frames = audio->frames() - discard_frames;
1305         if (remaining_frames <= 0) {
1306                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1307         }
1308         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1309         return make_pair(cut, time + discard_time);
1310 }
1311
1312
1313 void
1314 Player::set_dcp_decode_reduction (optional<int> reduction)
1315 {
1316         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1317
1318         if (reduction == _dcp_decode_reduction.load()) {
1319                 Change(ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1320                 return;
1321         }
1322
1323         _dcp_decode_reduction = reduction;
1324         setup_pieces();
1325
1326         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1327 }
1328
1329
1330 optional<DCPTime>
1331 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
1332 {
1333         boost::mutex::scoped_lock lm (_mutex);
1334
1335         for (auto i: _pieces) {
1336                 if (i->content == content) {
1337                         return content_time_to_dcp (i, t);
1338                 }
1339         }
1340
1341         /* We couldn't find this content; perhaps things are being changed over */
1342         return {};
1343 }
1344
1345
1346 optional<ContentTime>
1347 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
1348 {
1349         boost::mutex::scoped_lock lm (_mutex);
1350
1351         for (auto i: _pieces) {
1352                 if (i->content == content) {
1353                         return dcp_to_content_time (i, t);
1354                 }
1355         }
1356
1357         /* We couldn't find this content; perhaps things are being changed over */
1358         return {};
1359 }
1360
1361
1362 shared_ptr<const Playlist>
1363 Player::playlist () const
1364 {
1365         return _playlist ? _playlist : _film->playlist();
1366 }
1367
1368
1369 void
1370 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1371 {
1372         if (_suspended) {
1373                 return;
1374         }
1375
1376         auto piece = weak_piece.lock ();
1377         DCPOMATIC_ASSERT (piece);
1378
1379         auto const vfr = _film->video_frame_rate();
1380
1381         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1382         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1383                 return;
1384         }
1385
1386         Atmos (data.data, dcp_time, data.metadata);
1387 }
1388