/* src/lib/player.cc (dcpomatic.git) */
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include "dcpomatic_log.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
58 #include <stdint.h>
59 #include <algorithm>
60 #include <iostream>
61
62 #include "i18n.h"
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79
/* Property codes passed through the Player's Change signal to identify what
   aspect of the player's output changed.  Presumably numbered from 700 to
   avoid clashing with other property codes proxied through the same signal
   — TODO confirm against Film/Content property numbering.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
85
/** Construct a Player for a given film and playlist.
 *  Connects to the film's and playlist's change signals, builds the initial
 *  set of Pieces and seeks (accurately) to the start.
 *  @param film Film whose settings (frame rate, container etc.) shape our output.
 *  @param playlist Playlist giving the content to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's current audio processor (film_change clones it when
	   the property is AUDIO_PROCESSOR).
	*/
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	/* Build Pieces for the playlist content, then position everything at time zero */
	setup_pieces ();
	seek (DCPTime (), true);
}
112
Player::~Player ()
{
	/* _shuffler is a raw owning pointer (re-created in setup_pieces_unlocked()),
	   so it must be freed explicitly here.
	*/
	delete _shuffler;
}
117
118 void
119 Player::setup_pieces ()
120 {
121         boost::mutex::scoped_lock lm (_mutex);
122         setup_pieces_unlocked ();
123 }
124
125 bool
126 have_video (shared_ptr<Piece> piece)
127 {
128         return piece->decoder && piece->decoder->video;
129 }
130
131 bool
132 have_audio (shared_ptr<Piece> piece)
133 {
134         return piece->decoder && piece->decoder->audio;
135 }
136
/** Rebuild _pieces (decoder + content pairs) from the playlist and re-wire all
 *  decoder signals.  Also resets the black/silence fillers and our emission
 *  clocks back to time zero.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_pieces.clear ();

	/* Re-create the shuffler so no stale 3D L/R ordering state survives */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Skip content whose files are missing */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
		FrameRateChange frc (_film, i);

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			/* DCP content may be passed through ("referenced") rather than decoded */
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Wire up each text decoder's start/stop signals; weak_ptrs avoid
		   keeping the Piece alive from inside the connection.
		*/
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}
	}

	/* Record, per audio stream, the piece it belongs to and its start position */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods with no video / no audio, to be filled with black / silence by pass() */
	_black = Empty (_film, _pieces, bind(&have_video, _1));
	_silent = Empty (_film, _pieces, bind(&have_audio, _1));

	/* Reset our emission clocks to the start */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
}
236
237 void
238 Player::playlist_content_change (ChangeType type, int property, bool frequent)
239 {
240         if (type == CHANGE_TYPE_PENDING) {
241                 boost::mutex::scoped_lock lm (_mutex);
242                 /* The player content is probably about to change, so we can't carry on
243                    until that has happened and we've rebuilt our pieces.  Stop pass()
244                    and seek() from working until then.
245                 */
246                 _suspended = true;
247         } else if (type == CHANGE_TYPE_DONE) {
248                 /* A change in our content has gone through.  Re-build our pieces. */
249                 setup_pieces ();
250                 _suspended = false;
251         } else if (type == CHANGE_TYPE_CANCELLED) {
252                 boost::mutex::scoped_lock lm (_mutex);
253                 _suspended = false;
254         }
255
256         Change (type, property, frequent);
257 }
258
/** Set the size of the container that the video will be scaled into.
 *  Emits Change PENDING then DONE around the update, or CANCELLED if the
 *  size is unchanged.  Also re-renders the cached black frame at the new size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change: unlock before emitting, since handlers may call
			   back into the Player and try to take _mutex.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Pre-build a black frame at the new container size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
281
/** Called when the playlist's Change signal fires (see connection in the
 *  constructor).  Once a change has completed we must rebuild our pieces;
 *  in all cases we re-emit the change to our own listeners.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == CHANGE_TYPE_DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
290
291 void
292 Player::film_change (ChangeType type, Film::Property p)
293 {
294         /* Here we should notice Film properties that affect our output, and
295            alert listeners that our output now would be different to how it was
296            last time we were run.
297         */
298
299         if (p == Film::CONTAINER) {
300                 Change (type, PlayerProperty::FILM_CONTAINER, false);
301         } else if (p == Film::VIDEO_FRAME_RATE) {
302                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
303                    so we need new pieces here.
304                 */
305                 if (type == CHANGE_TYPE_DONE) {
306                         setup_pieces ();
307                 }
308                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
309         } else if (p == Film::AUDIO_PROCESSOR) {
310                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
311                         boost::mutex::scoped_lock lm (_mutex);
312                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
313                 }
314         } else if (p == Film::AUDIO_CHANNELS) {
315                 if (type == CHANGE_TYPE_DONE) {
316                         boost::mutex::scoped_lock lm (_mutex);
317                         _audio_merger.clear ();
318                 }
319         }
320 }
321
/** @return a PlayerVideo representing a black frame filling the whole
 *  container, for the given @p eyes value (both / left / right).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			/* _black_image is maintained at container size by set_video_container_size() */
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			boost::weak_ptr<Content>(),
			boost::optional<Frame>()
		)
	);
}
340
/** Convert a DCP time to a video frame index within a piece's content.
 *  @param piece Piece in question.
 *  @param t Time in the DCP.
 *  @return Frame index in the content's video.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Offset from the piece's position, clamped to its trimmed length, then
	   shifted forward by the start trim.
	*/
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
357
358 DCPTime
359 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
360 {
361         /* See comment in dcp_to_content_video */
362         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
363         return d + piece->content->position();
364 }
365
366 Frame
367 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
368 {
369         DCPTime s = t - piece->content->position ();
370         s = min (piece->content->length_after_trim(_film), s);
371         /* See notes in dcp_to_content_video */
372         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
373 }
374
375 DCPTime
376 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
377 {
378         /* See comment in dcp_to_content_video */
379         return DCPTime::from_frames (f, _film->audio_frame_rate())
380                 - DCPTime (piece->content->trim_start(), piece->frc)
381                 + piece->content->position();
382 }
383
384 ContentTime
385 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
386 {
387         DCPTime s = t - piece->content->position ();
388         s = min (piece->content->length_after_trim(_film), s);
389         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
390 }
391
392 DCPTime
393 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
394 {
395         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
396 }
397
398 list<shared_ptr<Font> >
399 Player::get_subtitle_fonts ()
400 {
401         boost::mutex::scoped_lock lm (_mutex);
402
403         list<shared_ptr<Font> > fonts;
404         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
405                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
406                         /* XXX: things may go wrong if there are duplicate font IDs
407                            with different font files.
408                         */
409                         list<shared_ptr<Font> > f = j->fonts ();
410                         copy (f.begin(), f.end(), back_inserter (fonts));
411                 }
412         }
413
414         return fonts;
415 }
416
417 /** Set this player never to produce any video data */
418 void
419 Player::set_ignore_video ()
420 {
421         boost::mutex::scoped_lock lm (_mutex);
422         _ignore_video = true;
423         setup_pieces_unlocked ();
424 }
425
426 void
427 Player::set_ignore_audio ()
428 {
429         boost::mutex::scoped_lock lm (_mutex);
430         _ignore_audio = true;
431         setup_pieces_unlocked ();
432 }
433
434 void
435 Player::set_ignore_text ()
436 {
437         boost::mutex::scoped_lock lm (_mutex);
438         _ignore_text = true;
439         setup_pieces_unlocked ();
440 }
441
442 /** Set the player to always burn open texts into the image regardless of the content settings */
443 void
444 Player::set_always_burn_open_subtitles ()
445 {
446         boost::mutex::scoped_lock lm (_mutex);
447         _always_burn_open_subtitles = true;
448 }
449
450 /** Sets up the player to be faster, possibly at the expense of quality */
451 void
452 Player::set_fast ()
453 {
454         boost::mutex::scoped_lock lm (_mutex);
455         _fast = true;
456         setup_pieces_unlocked ();
457 }
458
459 void
460 Player::set_play_referenced ()
461 {
462         boost::mutex::scoped_lock lm (_mutex);
463         _play_referenced = true;
464         setup_pieces_unlocked ();
465 }
466
467 static void
468 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
469 {
470         DCPOMATIC_ASSERT (r);
471         r->set_entry_point (r->entry_point() + reel_trim_start);
472         r->set_duration (r->duration() - reel_trim_start - reel_trim_end);
473         if (r->duration() > 0) {
474                 a.push_back (
475                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->duration(), ffr)))
476                         );
477         }
478 }
479
/** @return details of the reel assets of any DCP content which is being
 *  referenced (passed through) rather than re-encoded, along with the
 *  periods that they will occupy in the DCP being made.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false));
		} catch (...) {
			/* NOTE(review): failure to examine one piece of DCP content abandons
			   the whole scan and returns what we have so far; confirm that this
			   is intended rather than a `continue' to the next content.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* Where this reel will sit in the DCP being made */
			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
550
/** Ask the thing (decoder, black filler or silence filler) which is furthest
 *  behind to emit some more data, then push out any audio that is known to be
 *  complete.
 *  @return true when everything has been emitted and playback is finished.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playlist->length(_film) == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder currently is, expressed as a DCP time */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black / silence filler wins if its position is behind the earliest content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		/* Emit one black frame and advance the black filler by one frame */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush anything buffered in the shuffler
		   and the video delay queue.
		*/
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
704
705 /** @return Open subtitles for the frame at the given time, converted to images */
706 optional<PositionImage>
707 Player::open_subtitles_for_frame (DCPTime time) const
708 {
709         list<PositionImage> captions;
710         int const vfr = _film->video_frame_rate();
711
712         BOOST_FOREACH (
713                 PlayerText j,
714                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
715                 ) {
716
717                 /* Bitmap subtitles */
718                 BOOST_FOREACH (BitmapText i, j.bitmap) {
719                         if (!i.image) {
720                                 continue;
721                         }
722
723                         /* i.image will already have been scaled to fit _video_container_size */
724                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
725
726                         captions.push_back (
727                                 PositionImage (
728                                         i.image,
729                                         Position<int> (
730                                                 lrint (_video_container_size.width * i.rectangle.x),
731                                                 lrint (_video_container_size.height * i.rectangle.y)
732                                                 )
733                                         )
734                                 );
735                 }
736
737                 /* String subtitles (rendered to an image) */
738                 if (!j.string.empty ()) {
739                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
740                         copy (s.begin(), s.end(), back_inserter (captions));
741                 }
742         }
743
744         if (captions.empty ()) {
745                 return optional<PositionImage> ();
746         }
747
748         return merge (captions);
749 }
750
/** Handle one frame of video decoded from a piece of content.
 *
 *  Converts the content frame time to DCP time, discards frames from before
 *  the content's period or before the last accurate seek, fills any gap
 *  between the last emitted video and this frame (repeating the previous
 *  frame if we have one, otherwise emitting black), then emits this frame
 *  (possibly several times, for pulldown-style rate changes).
 *
 *  @param wp Piece that the video came from.
 *  @param video Decoded video frame.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has been destroyed (e.g. content removed); ignore */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* Rate conversion drops every other content frame; discard odd ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Previous frame from this piece, if any, to repeat into the gap */
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* 3D: we must fill eye-by-eye, stopping at the eye that this
				   new frame will supply.
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				/* Eye to start filling from; carries on from the last emitted frame */
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the last frame with the required eye */
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == EYES_RIGHT) {
						/* Both eyes done at this time; move on a frame */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill one whole frame at a time */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated into any future gap */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as required by the frame rate change, but
	   never after the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
850
/** Handle a block of audio decoded from a piece of content.
 *
 *  Converts the block's start to DCP time, trims anything outside the
 *  content's period, then applies gain, channel remapping and any audio
 *  processor before pushing the result into the merger.
 *
 *  @param wp Piece that the audio came from.
 *  @param stream Stream within the piece that the audio came from.
 *  @param content_audio Decoded audio (must be non-empty).
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has been destroyed; ignore */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate(_film));

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* Block overlaps the end of the content; keep only the part up to the end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		/* NOTE(review): looks like a temporary debug trace — confirm whether it should remain */
		LOG_GENERAL_NC("copy_from #8");
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record how far this stream has now been pushed, for later flushing decisions */
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
918
919 void
920 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
921 {
922         shared_ptr<Piece> piece = wp.lock ();
923         shared_ptr<const TextContent> text = wc.lock ();
924         if (!piece || !text) {
925                 return;
926         }
927
928         /* Apply content's subtitle offsets */
929         subtitle.sub.rectangle.x += text->x_offset ();
930         subtitle.sub.rectangle.y += text->y_offset ();
931
932         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
933         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
934         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
935
936         /* Apply content's subtitle scale */
937         subtitle.sub.rectangle.width *= text->x_scale ();
938         subtitle.sub.rectangle.height *= text->y_scale ();
939
940         PlayerText ps;
941         shared_ptr<Image> image = subtitle.sub.image;
942         /* We will scale the subtitle up to fit _video_container_size */
943         dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
944         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
945         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
946
947         _active_texts[text->type()].add_from (wc, ps, from);
948 }
949
950 void
951 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
952 {
953         shared_ptr<Piece> piece = wp.lock ();
954         shared_ptr<const TextContent> text = wc.lock ();
955         if (!piece || !text) {
956                 return;
957         }
958
959         PlayerText ps;
960         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
961
962         if (from > piece->content->end(_film)) {
963                 return;
964         }
965
966         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
967                 s.set_h_position (s.h_position() + text->x_offset ());
968                 s.set_v_position (s.v_position() + text->y_offset ());
969                 float const xs = text->x_scale();
970                 float const ys = text->y_scale();
971                 float size = s.size();
972
973                 /* Adjust size to express the common part of the scaling;
974                    e.g. if xs = ys = 0.5 we scale size by 2.
975                 */
976                 if (xs > 1e-5 && ys > 1e-5) {
977                         size *= 1 / min (1 / xs, 1 / ys);
978                 }
979                 s.set_size (size);
980
981                 /* Then express aspect ratio changes */
982                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
983                         s.set_aspect_adjust (xs / ys);
984                 }
985
986                 s.set_in (dcp::Time(from.seconds(), 1000));
987                 ps.string.push_back (StringText (s, text->outline_width()));
988                 ps.add_fonts (text->fonts ());
989         }
990
991         _active_texts[text->type()].add_from (wc, ps, from);
992 }
993
994 void
995 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
996 {
997         shared_ptr<const TextContent> text = wc.lock ();
998         if (!text) {
999                 return;
1000         }
1001
1002         if (!_active_texts[text->type()].have(wc)) {
1003                 return;
1004         }
1005
1006         shared_ptr<Piece> piece = wp.lock ();
1007         if (!piece) {
1008                 return;
1009         }
1010
1011         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1012
1013         if (dcp_to > piece->content->end(_film)) {
1014                 return;
1015         }
1016
1017         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1018
1019         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1020         if (text->use() && !always && !text->burn()) {
1021                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1022         }
1023 }
1024
1025 void
1026 Player::seek (DCPTime time, bool accurate)
1027 {
1028         boost::mutex::scoped_lock lm (_mutex);
1029
1030         if (_suspended) {
1031                 /* We can't seek in this state */
1032                 return;
1033         }
1034
1035         if (_shuffler) {
1036                 _shuffler->clear ();
1037         }
1038
1039         _delay.clear ();
1040
1041         if (_audio_processor) {
1042                 _audio_processor->flush ();
1043         }
1044
1045         _audio_merger.clear ();
1046         for (int i = 0; i < TEXT_COUNT; ++i) {
1047                 _active_texts[i].clear ();
1048         }
1049
1050         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1051                 if (time < i->content->position()) {
1052                         /* Before; seek to the start of the content */
1053                         i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1054                         i->done = false;
1055                 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1056                         /* During; seek to position */
1057                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
1058                         i->done = false;
1059                 } else {
1060                         /* After; this piece is done */
1061                         i->done = true;
1062                 }
1063         }
1064
1065         if (accurate) {
1066                 _last_video_time = time;
1067                 _last_video_eyes = EYES_LEFT;
1068                 _last_audio_time = time;
1069         } else {
1070                 _last_video_time = optional<DCPTime>();
1071                 _last_video_eyes = optional<Eyes>();
1072                 _last_audio_time = optional<DCPTime>();
1073         }
1074
1075         _black.set_position (time);
1076         _silent.set_position (time);
1077
1078         _last_video.clear ();
1079 }
1080
1081 void
1082 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1083 {
1084         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1085            player before the video that requires them.
1086         */
1087         _delay.push_back (make_pair (pv, time));
1088
1089         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1090                 _last_video_time = time + one_video_frame();
1091         }
1092         _last_video_eyes = increment_eyes (pv->eyes());
1093
1094         if (_delay.size() < 3) {
1095                 return;
1096         }
1097
1098         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1099         _delay.pop_front();
1100         do_emit_video (to_do.first, to_do.second);
1101 }
1102
1103 void
1104 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1105 {
1106         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1107                 for (int i = 0; i < TEXT_COUNT; ++i) {
1108                         _active_texts[i].clear_before (time);
1109                 }
1110         }
1111
1112         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1113         if (subtitles) {
1114                 pv->set_text (subtitles.get ());
1115         }
1116
1117         Video (pv, time);
1118 }
1119
1120 void
1121 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1122 {
1123         /* Log if the assert below is about to fail */
1124         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1125                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1126         }
1127
1128         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1129         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1130         Audio (data, time, _film->audio_frame_rate());
1131         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1132 }
1133
1134 void
1135 Player::fill_audio (DCPTimePeriod period)
1136 {
1137         if (period.from == period.to) {
1138                 return;
1139         }
1140
1141         DCPOMATIC_ASSERT (period.from < period.to);
1142
1143         DCPTime t = period.from;
1144         while (t < period.to) {
1145                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1146                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1147                 if (samples) {
1148                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1149                         silence->make_silent ();
1150                         emit_audio (silence, t);
1151                 }
1152                 t += block;
1153         }
1154 }
1155
1156 DCPTime
1157 Player::one_video_frame () const
1158 {
1159         return DCPTime::from_frames (1, _film->video_frame_rate ());
1160 }
1161
1162 pair<shared_ptr<AudioBuffers>, DCPTime>
1163 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1164 {
1165         DCPTime const discard_time = discard_to - time;
1166         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1167         Frame remaining_frames = audio->frames() - discard_frames;
1168         if (remaining_frames <= 0) {
1169                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1170         }
1171         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1172         LOG_GENERAL_NC("copy_from #9");
1173         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1174         return make_pair(cut, time + discard_time);
1175 }
1176
1177 void
1178 Player::set_dcp_decode_reduction (optional<int> reduction)
1179 {
1180         Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1181
1182         {
1183                 boost::mutex::scoped_lock lm (_mutex);
1184
1185                 if (reduction == _dcp_decode_reduction) {
1186                         lm.unlock ();
1187                         Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1188                         return;
1189                 }
1190
1191                 _dcp_decode_reduction = reduction;
1192                 setup_pieces_unlocked ();
1193         }
1194
1195         Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1196 }
1197
1198 optional<DCPTime>
1199 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1200 {
1201         boost::mutex::scoped_lock lm (_mutex);
1202
1203         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1204                 if (i->content == content) {
1205                         return content_time_to_dcp (i, t);
1206                 }
1207         }
1208
1209         /* We couldn't find this content; perhaps things are being changed over */
1210         return optional<DCPTime>();
1211 }