// Commit subject: Rename piece_audio -> audio.
// File: src/lib/player.cc (dcpomatic.git)
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "dcp_content.h"
30 #include "dcp_decoder.h"
31 #include "dcpomatic_log.h"
32 #include "decoder.h"
33 #include "decoder_factory.h"
34 #include "ffmpeg_content.h"
35 #include "film.h"
36 #include "frame_rate_change.h"
37 #include "image.h"
38 #include "image_decoder.h"
39 #include "job.h"
40 #include "log.h"
41 #include "piece_video.h"
42 #include "player.h"
43 #include "player_video.h"
44 #include "playlist.h"
45 #include "ratio.h"
46 #include "raw_image_proxy.h"
47 #include "referenced_reel_asset.h"
48 #include "render_text.h"
49 #include "shuffler.h"
50 #include "text_content.h"
51 #include "text_decoder.h"
52 #include "timer.h"
53 #include "video_decoder.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <stdint.h>
60 #include <algorithm>
61 #include <iostream>
62
63 #include "i18n.h"
64
65
66 using std::copy;
67 using std::cout;
68 using std::dynamic_pointer_cast;
69 using std::list;
70 using std::make_pair;
71 using std::make_shared;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::unique_ptr;
82 using boost::optional;
83 #if BOOST_VERSION >= 106100
84 using namespace boost::placeholders;
85 #endif
86 using namespace dcpomatic;
87
88
89 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
90 int const PlayerProperty::PLAYLIST = 701;
91 int const PlayerProperty::FILM_CONTAINER = 702;
92 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
93 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
94 int const PlayerProperty::PLAYBACK_LENGTH = 705;
95
96
97 /** About 0.01dB */
98 #define AUDIO_GAIN_EPSILON 0.001
99
100
/** Construct a Player which will play the whole of @p film's own playlist.
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	/* The merger must mix at the film's audio sample rate */
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
109
110
/** Construct a Player which will play a specific playlist rather than the
 *  film's own one.
 *  @param film Film whose settings (frame rate, audio rate, etc.) to use.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	/* The merger must mix at the film's audio sample rate */
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
120
121
/** Shared second-stage construction: wire up change signals, pick up the
 *  film's current state and build our pieces, then seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) as if it had just changed */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Accurate seek to the start so that the first pass() emits from time zero */
	seek (DCPTime (), true);
}
138
139
/** Rebuild our list of Pieces from the playlist, taking the state mutex first.
 *  See setup_pieces_unlocked() for the actual work.
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
146
147
148 bool
149 have_video (shared_ptr<const Content> content)
150 {
151         return static_cast<bool>(content->video) && content->video->use();
152 }
153
154
155 bool
156 have_audio (shared_ptr<const Content> content)
157 {
158         return static_cast<bool>(content->audio);
159 }
160
161
162 vector<vector<shared_ptr<Content>>>
163 collect (shared_ptr<const Film> film, ContentList content)
164 {
165         vector<shared_ptr<Content>> ungrouped;
166         vector<vector<shared_ptr<Content>>> grouped;
167
168         auto same_settings = [](shared_ptr<const Film> film, shared_ptr<const AudioContent> a, shared_ptr<const AudioContent> b) {
169
170                 auto a_streams = a->streams();
171                 auto b_streams = b->streams();
172
173                 if (a_streams.size() != b_streams.size()) {
174                         return false;
175                 }
176
177                 for (size_t i = 0; i < a_streams.size(); ++i) {
178                         auto a_stream = a_streams[i];
179                         auto b_stream = b_streams[i];
180                         if (
181                                 !a_stream->mapping().equals(b_stream->mapping(), AUDIO_GAIN_EPSILON) ||
182                                 a_stream->frame_rate() != b_stream->frame_rate() ||
183                                 a_stream->channels() != b_stream->channels()) {
184                                 return false;
185                         }
186                 }
187
188                 return (
189                         fabs(a->gain() - b->gain()) < AUDIO_GAIN_EPSILON &&
190                         a->delay() == b->delay() &&
191                         a->language() == b->language() &&
192                         a->resampled_frame_rate(film) == b->resampled_frame_rate(film) &&
193                         a->channel_names() == b->channel_names()
194                        );
195         };
196
197         for (auto i: content) {
198                 if (i->video || !i->audio || !i->text.empty()) {
199                         ungrouped.push_back (i);
200                 } else {
201                         bool done = false;
202                         for (auto& g: grouped) {
203                                 if (same_settings(film, g.front()->audio, i->audio) && i->position() == g.back()->end(film)) {
204                                         g.push_back (i);
205                                         done = true;
206                                 }
207                         }
208                         if (!done) {
209                                 grouped.push_back ({i});
210                         }
211                 }
212         }
213
214         for (auto i: ungrouped) {
215                 grouped.push_back({i});
216         }
217
218         return grouped;
219 }
220
221
/** Rebuild _pieces from the playlist: make a decoder for each usable piece of
 *  content, wire its signals through to us, and reset playback state.
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	_shuffler.reset (new Shuffler());
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use a decoder from the old pieces for this content, if there is one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			auto decoder = j->decoder_for(i);
			if (decoder) {
				old_decoder = decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		/* Propagate our ignore flags into the decoder */
		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(_film, i, decoder, frc, _fast);
		_pieces.push_back (piece);

		if (i->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				piece->Video.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				piece->Video.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (i->audio) {
			piece->Audio.connect (bind(&Player::audio, this, weak_ptr<Piece>(piece), _1));
		}

		piece->BitmapTextStart.connect (bind(&Player::bitmap_text_start, this, piece, _1));
		piece->StringTextStart.connect (bind(&Player::string_text_start, this, piece, _1));
		piece->TextStop.connect (bind(&Player::subtitle_stop, this, piece, _1));
		piece->Atmos.connect (bind(&Player::atmos, this, piece, _1));
	}

	/* Ignore video from any piece whose period is overlapped by a later piece
	   with in-use video (except for 3D L/R pieces, which legitimately overlap).
	*/
	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
			/* Look for content later in the content list with in-use video that overlaps this */
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->use_video()) {
					(*i)->set_ignore_video ((*j)->period().overlap((*i)->period()));
				}
			}
		}
	}

	/* Periods with no video / no audio, which we must fill with black / silence */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission state */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
320
321
322 optional<DCPTime>
323 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
324 {
325         boost::mutex::scoped_lock lm (_mutex);
326
327         for (auto i: _pieces) {
328                 auto dcp = i->content_time_to_dcp(content, t);
329                 if (dcp) {
330                         return *dcp;
331                 }
332         }
333
334         /* We couldn't find this content; perhaps things are being changed over */
335         return {};
336 }
337
338
339 void
340 Player::playlist_content_change (ChangeType type, int property, bool frequent)
341 {
342         if (property == VideoContentProperty::CROP) {
343                 if (type == ChangeType::DONE) {
344                         auto const vcs = video_container_size();
345                         boost::mutex::scoped_lock lm (_mutex);
346                         for (auto const& i: _delay) {
347                                 i.first->reset_metadata (_film, vcs);
348                         }
349                 }
350         } else {
351                 if (type == ChangeType::PENDING) {
352                         /* The player content is probably about to change, so we can't carry on
353                            until that has happened and we've rebuilt our pieces.  Stop pass()
354                            and seek() from working until then.
355                         */
356                         ++_suspended;
357                 } else if (type == ChangeType::DONE) {
358                         /* A change in our content has gone through.  Re-build our pieces. */
359                         setup_pieces ();
360                         --_suspended;
361                 } else if (type == ChangeType::CANCELLED) {
362                         --_suspended;
363                 }
364         }
365
366         Change (type, property, frequent);
367 }
368
369
/** Set the size of the frame into which video is placed, rebuilding the
 *  cached black frame to match.  Emits PENDING, then CANCELLED if the size
 *  is unchanged or DONE after it has been applied.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* Unlock before emitting the signal so handlers can take _mutex */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Black frame must match the new container size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
392
393
394 void
395 Player::playlist_change (ChangeType type)
396 {
397         if (type == ChangeType::DONE) {
398                 setup_pieces ();
399         }
400         Change (type, PlayerProperty::PLAYLIST, false);
401 }
402
403
404 void
405 Player::film_change (ChangeType type, Film::Property p)
406 {
407         /* Here we should notice Film properties that affect our output, and
408            alert listeners that our output now would be different to how it was
409            last time we were run.
410         */
411
412         if (p == Film::Property::CONTAINER) {
413                 Change (type, PlayerProperty::FILM_CONTAINER, false);
414         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
415                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
416                    so we need new pieces here.
417                 */
418                 if (type == ChangeType::DONE) {
419                         setup_pieces ();
420                 }
421                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
422         } else if (p == Film::Property::AUDIO_PROCESSOR) {
423                 if (type == ChangeType::DONE && _film->audio_processor ()) {
424                         boost::mutex::scoped_lock lm (_mutex);
425                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
426                 }
427         } else if (p == Film::Property::AUDIO_CHANNELS) {
428                 if (type == ChangeType::DONE) {
429                         boost::mutex::scoped_lock lm (_mutex);
430                         _audio_merger.clear ();
431                 }
432         }
433 }
434
435
/** @return a PlayerVideo wrapping our cached black image, sized to the
 *  current video container, for the given eye(s).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
454
455
456 vector<FontData>
457 Player::get_subtitle_fonts ()
458 {
459         boost::mutex::scoped_lock lm (_mutex);
460
461         vector<FontData> fonts;
462         for (auto i: _pieces) {
463                 /* XXX: things may go wrong if there are duplicate font IDs
464                    with different font files.
465                 */
466                 auto f = i->fonts ();
467                 copy (f.begin(), f.end(), back_inserter(fonts));
468         }
469
470         return fonts;
471 }
472
473
/** Set this player never to produce any video data.  Rebuilds pieces so the
 *  decoders pick up the flag.
 */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	setup_pieces_unlocked ();
}
482
483
/** Set this player never to produce any audio data.  Rebuilds pieces so the
 *  decoders pick up the flag.
 */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	setup_pieces_unlocked ();
}
491
492
/** Set this player never to produce any text data.  Rebuilds pieces so the
 *  decoders pick up the flag.
 */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	setup_pieces_unlocked ();
}
500
501
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
}
509
510
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Rebuilds pieces so the flag reaches the decoders.
 */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	setup_pieces_unlocked ();
}
519
520
/** Make the player decode content from referenced DCPs rather than skipping it.
 *  Rebuilds pieces so the DCP decoders pick up the flag.
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
}
528
529
530 static void
531 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
532 {
533         DCPOMATIC_ASSERT (r);
534         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
535         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
536         if (r->actual_duration() > 0) {
537                 a.push_back (
538                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
539                         );
540         }
541 }
542
543
/** @return reel assets from any DCP content which is marked to be referenced
 *  (rather than re-encoded), with each asset trimmed according to the
 *  content's trim settings.  See doc/design/trim_reels.svg.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		unique_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* Could not make a decoder for this DCP; give up with what we have so far */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		/* Content (DCP) frame rate, used to convert the trims to frames */
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* How much of the start/end trim falls within this reel.
			   See doc/design/trim_reels.svg
			*/
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* DCP time at which this reel starts */
			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
614
615
/** Ask the piece (or black/silence filler) which is furthest behind to emit
 *  some more data, then push out any audio that is now complete.
 *  @return true if playback has finished (nothing left to emit).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		auto time = i->decoder_before(earliest_time);
		if (time) {
			earliest_time = *time;
			earliest_content = i;
		}
	}

	bool done = false;

	/* What kind of thing will emit next */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black/silence take over if their gaps start before the earliest content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->pass();
		if (!_play_referenced && earliest_content->reference_dcp_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = earliest_content->end ();
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				/* Nothing left after the discard */
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback is over: flush anything held back for reordering or delay */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
758
759
760 /** @return Open subtitles for the frame at the given time, converted to images */
761 optional<PositionImage>
762 Player::open_subtitles_for_frame (DCPTime time) const
763 {
764         list<PositionImage> captions;
765         int const vfr = _film->video_frame_rate();
766
767         for (
768                 auto j:
769                 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
770                 ) {
771
772                 /* Bitmap subtitles */
773                 for (auto i: j.bitmap) {
774                         if (!i.image) {
775                                 continue;
776                         }
777
778                         /* i.image will already have been scaled to fit _video_container_size */
779                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
780
781                         captions.push_back (
782                                 PositionImage (
783                                         i.image,
784                                         Position<int> (
785                                                 lrint(_video_container_size.width * i.rectangle.x),
786                                                 lrint(_video_container_size.height * i.rectangle.y)
787                                                 )
788                                         )
789                                 );
790                 }
791
792                 /* String subtitles (rendered to an image) */
793                 if (!j.string.empty()) {
794                         auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
795                         copy (s.begin(), s.end(), back_inserter (captions));
796                 }
797         }
798
799         if (captions.empty()) {
800                 return {};
801         }
802
803         return merge (captions);
804 }
805
806
void
Player::video (weak_ptr<Piece> wp, PieceVideo video)
{
	/* Handle a video frame arriving from the piece `wp': discard it if it is
	   not wanted, fill any gap since the last video that we emitted, then emit
	   this frame (repeating it if the frame-rate change requires that).
	*/
	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	if (!piece->use_video()) {
		return;
	}

	/* frc.skip means every other source frame is dropped to halve the rate */
	auto frc = piece->frame_rate_change();
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(video.time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (video.time < piece->position() || (_last_video_time && video.time < *_last_video_time)) {
		return;
	}

	if (piece->ignore_video_at(video.time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (video.time, piece->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Prefer repeating this piece's last frame as filler; fall back to black */
			auto last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we fill eye-by-eye, alternating LEFT and RIGHT */
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->end()) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Step through (time, eye) pairs until we reach both fill_to and fill_to_eyes */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Time only advances once both eyes of a frame have been emitted */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated as filler later */
	_last_video[wp] = piece->player_video (video, _video_container_size);

	/* Emit the frame, repeated frc.repeat times, but never past the piece's end */
	DCPTime t = video.time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
900
901
void
Player::audio (weak_ptr<Piece> wp, PieceAudio audio)
{
	/* Handle a block of audio arriving from the piece `wp': trim it to the
	   piece's period, apply gain, remap channels, run any audio processor and
	   push the result into _audio_merger.
	*/
	DCPOMATIC_ASSERT (audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	int const rfr = piece->resampled_audio_frame_rate ();

	/* Compute time in the DCP */
	auto time = piece->resampled_audio_to_dcp (audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		auto cut = discard_audio (audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->end()) {
		/* The block straddles the end of the content: keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->end() - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		audio.audio = make_shared<AudioBuffers>(audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (audio.audio->frames() > 0);

	/* Gain */

	if (piece->audio_gain() != 0) {
		/* Copy the buffers before applying gain so that we don't modify the decoder's data */
		auto gain = make_shared<AudioBuffers>(audio.audio);
		gain->apply_gain (piece->audio_gain());
		audio.audio = gain;
	}

	/* Remap */

	audio.audio = remap (audio.audio, _film->audio_channels(), audio.stream->mapping());

	/* Process */

	if (_audio_processor) {
		audio.audio = _audio_processor->run (audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (audio.audio, time);
	piece->set_last_push_end (audio.stream, time + DCPTime::from_frames(audio.audio->frames(), _film->audio_frame_rate()));
}
966
967
void
Player::bitmap_text_start (weak_ptr<Piece> wp, PieceBitmapTextStart subtitle)
{
	/* Handle the start of a bitmap subtitle: apply the content's offsets and
	   scale, scale the image to fit _video_container_size and register it in
	   the appropriate _active_texts list.
	*/
	auto piece = wp.lock ();
	auto content = subtitle.content().lock();
	auto text = subtitle.text().lock();
	if (!piece || !content || !text) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	PlayerText ps;
	auto image = subtitle.sub.image;

	/* We will scale the subtitle up to fit _video_container_size */
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	if (width == 0 || height == 0) {
		/* Degenerate rectangle: nothing to show */
		return;
	}

	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	auto from = piece->content_time_to_dcp(content, subtitle.time());
	DCPOMATIC_ASSERT (from);

	/* NOTE(review): unlike string_text_start() there is no check here that *from
	   is before piece->end() — confirm whether bitmap subtitles starting after
	   the end of the piece should be dropped too.
	*/
	_active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
}
1007
1008
void
Player::string_text_start (weak_ptr<Piece> wp, PieceStringTextStart subtitle)
{
	/* Handle the start of some string (text-based) subtitles: apply the
	   content's position offsets and scale to each line, then register them
	   in the appropriate _active_texts list.
	*/
	auto piece = wp.lock ();
	auto content = subtitle.content().lock();
	auto text = subtitle.text().lock();
	if (!piece || !content || !text) {
		return;
	}

	PlayerText ps;
	auto const from = piece->content_time_to_dcp(content, subtitle.time());
	DCPOMATIC_ASSERT (from);

	/* Ignore subtitles which start after the end of this piece */
	if (from > piece->end()) {
		return;
	}

	for (auto s: subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset());
		s.set_v_position (s.v_position() + text->y_offset());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			/* NOTE(review): this expression is equivalent to size *= max(xs, ys),
			   so for xs = ys = 0.5 it halves the size; the example in the comment
			   above looks inverted — confirm which is intended.
			*/
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* dcp::Time here is expressed with millisecond (1/1000s) precision */
		s.set_in (dcp::Time(from->seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
}
1054
1055
1056 void
1057 Player::subtitle_stop (weak_ptr<Piece> wp, PieceTextStop stop)
1058 {
1059         auto content = stop.content().lock();
1060         auto text = stop.text().lock();
1061         if (!text) {
1062                 return;
1063         }
1064
1065         if (!_active_texts[static_cast<int>(text->type())].have(stop.text())) {
1066                 return;
1067         }
1068
1069         shared_ptr<Piece> piece = wp.lock ();
1070         if (!piece) {
1071                 return;
1072         }
1073
1074         auto const dcp_to = piece->content_time_to_dcp(content, stop.time());
1075         DCPOMATIC_ASSERT (dcp_to);
1076
1077         if (*dcp_to > piece->end()) {
1078                 return;
1079         }
1080
1081         auto from = _active_texts[static_cast<int>(text->type())].add_to(stop.text(), *dcp_to);
1082
1083         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1084         if (text->use() && !always && !text->burn()) {
1085                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
1086         }
1087 }
1088
1089
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the player to `time'.  If `accurate' is true the caller wants
	   frame-accurate output starting exactly at `time'; otherwise output may
	   start from a nearby convenient point.
	*/
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Discard any partially-assembled 3D frames */
	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Drop video frames waiting in the subtitle-delay queue */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Pending audio and active texts are no longer valid after a seek */
	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	/* Ask each piece to seek its own decoder */
	for (auto i: _pieces) {
		i->seek (time, accurate);
	}

	if (accurate) {
		/* We know exactly where the next output must start */
		_last_video_time = time;
		_last_video_eyes = Eyes::LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know what the decoders will produce first, so don't guess */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the frames we were keeping for gap-filling */
	_last_video.clear ();
}
1135
1136
1137 void
1138 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1139 {
1140         if (!_film->three_d()) {
1141                 if (pv->eyes() == Eyes::LEFT) {
1142                         /* Use left-eye images for both eyes... */
1143                         pv->set_eyes (Eyes::BOTH);
1144                 } else if (pv->eyes() == Eyes::RIGHT) {
1145                         /* ...and discard the right */
1146                         return;
1147                 }
1148         }
1149
1150         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1151            player before the video that requires them.
1152         */
1153         _delay.push_back (make_pair (pv, time));
1154
1155         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1156                 _last_video_time = time + one_video_frame();
1157         }
1158         _last_video_eyes = increment_eyes (pv->eyes());
1159
1160         if (_delay.size() < 3) {
1161                 return;
1162         }
1163
1164         auto to_do = _delay.front();
1165         _delay.pop_front();
1166         do_emit_video (to_do.first, to_do.second);
1167 }
1168
1169
1170 void
1171 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1172 {
1173         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1174                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1175                         _active_texts[i].clear_before (time);
1176                 }
1177         }
1178
1179         auto subtitles = open_subtitles_for_frame (time);
1180         if (subtitles) {
1181                 pv->set_text (subtitles.get ());
1182         }
1183
1184         Video (pv, time);
1185 }
1186
1187
1188 void
1189 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1190 {
1191         /* Log if the assert below is about to fail */
1192         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1193                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1194         }
1195
1196         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1197         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1198         Audio (data, time, _film->audio_frame_rate());
1199         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1200 }
1201
1202
1203 void
1204 Player::fill_audio (DCPTimePeriod period)
1205 {
1206         if (period.from == period.to) {
1207                 return;
1208         }
1209
1210         DCPOMATIC_ASSERT (period.from < period.to);
1211
1212         DCPTime t = period.from;
1213         while (t < period.to) {
1214                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1215                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1216                 if (samples) {
1217                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1218                         silence->make_silent ();
1219                         emit_audio (silence, t);
1220                 }
1221                 t += block;
1222         }
1223 }
1224
1225
1226 DCPTime
1227 Player::one_video_frame () const
1228 {
1229         return DCPTime::from_frames (1, _film->video_frame_rate ());
1230 }
1231
1232
1233 pair<shared_ptr<AudioBuffers>, DCPTime>
1234 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1235 {
1236         auto const discard_time = discard_to - time;
1237         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1238         auto remaining_frames = audio->frames() - discard_frames;
1239         if (remaining_frames <= 0) {
1240                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1241         }
1242         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1243         return make_pair(cut, time + discard_time);
1244 }
1245
1246
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Change _dcp_decode_reduction and rebuild the pieces, signalling the
	   change as PENDING then DONE (or CANCELLED if the value is unchanged).
	*/
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change: release the lock before signalling cancellation */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* Signal completion outside the lock */
	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1267
1268
1269 shared_ptr<const Playlist>
1270 Player::playlist () const
1271 {
1272         return _playlist ? _playlist : _film->playlist();
1273 }
1274
1275
1276 void
1277 Player::atmos (weak_ptr<Piece>, PieceAtmos data)
1278 {
1279         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1280 }
1281