operator bool on Time is a really bad idea; removed it and fixed lots of bugs.
[dcpomatic.git] / src / lib / player.cc
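The commit message above refers to removing an implicit operator bool conversion from the project's Time type. As a rough, hypothetical sketch only (this is not DCP-o-matic's actual Time/DCPTime class), the following shows the kind of bug such a conversion invites: a perfectly valid time of zero converts to false, and two unequal times can compare equal via bool, so code that looks reasonable quietly does the wrong thing.

/* A minimal, hypothetical sketch (not DCP-o-matic's actual Time/DCPTime classes)
   of why an implicit operator bool on a time type is dangerous.
*/

#include <iostream>
#include <stdint.h>

class Time
{
public:
	Time () : _ticks (0) {}
	explicit Time (int64_t t) : _ticks (t) {}

	/* The problematic conversion: "false" now means both "zero" and, to a
	   careless reader, "not set".
	*/
	operator bool () const {
		return _ticks != 0;
	}

	int64_t ticks () const {
		return _ticks;
	}

private:
	int64_t _ticks;
};

int main ()
{
	Time const start (0);

	/* Looks like "do we have a start time?" but is really "is it non-zero?",
	   so a perfectly good time of 0 is silently ignored.
	*/
	if (!start) {
		std::cout << "no start time; not seeking\n";
	}

	/* Worse: with no operator== defined, both sides convert to bool, so any
	   two non-zero times compare equal.
	*/
	Time const a (3);
	Time const b (4);
	if (a == b) {
		std::cout << "3 == 4, apparently\n";
	}

	return 0;
}

With the conversion removed, both of the marked tests become compile errors, and the caller has to say what it means, e.g. start == Time () for "is it zero", or boost::optional<Time> for "is it set".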
1 /*
2     Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
3
4     This program is free software; you can redistribute it and/or modify
5     it under the terms of the GNU General Public License as published by
6     the Free Software Foundation; either version 2 of the License, or
7     (at your option) any later version.
8
9     This program is distributed in the hope that it will be useful,
10     but WITHOUT ANY WARRANTY; without even the implied warranty of
11     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12     GNU General Public License for more details.
13
14     You should have received a copy of the GNU General Public License
15     along with this program; if not, write to the Free Software
16     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17
18 */
19
20 #include <stdint.h>
21 #include <algorithm>
22 #include "player.h"
23 #include "film.h"
24 #include "ffmpeg_decoder.h"
25 #include "audio_buffers.h"
26 #include "ffmpeg_content.h"
27 #include "image_decoder.h"
28 #include "image_content.h"
29 #include "sndfile_decoder.h"
30 #include "sndfile_content.h"
31 #include "subtitle_content.h"
32 #include "subrip_decoder.h"
33 #include "subrip_content.h"
34 #include "playlist.h"
35 #include "job.h"
36 #include "image.h"
37 #include "ratio.h"
38 #include "log.h"
39 #include "scaler.h"
40 #include "render_subtitles.h"
41
42 using std::list;
43 using std::cout;
44 using std::min;
45 using std::max;
46 using std::vector;
47 using std::pair;
48 using std::map;
49 using boost::shared_ptr;
50 using boost::weak_ptr;
51 using boost::dynamic_pointer_cast;
52 using boost::optional;
53
54 class Piece
55 {
56 public:
57         Piece (shared_ptr<Content> c, shared_ptr<Decoder> d, FrameRateChange f)
58                 : content (c)
59                 , decoder (d)
60                 , frc (f)
61         {}
62
63         shared_ptr<Content> content;
64         shared_ptr<Decoder> decoder;
65         FrameRateChange frc;
66 };
67
68 Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
69         : _film (f)
70         , _playlist (p)
71         , _video (true)
72         , _audio (true)
73         , _have_valid_pieces (false)
74         , _video_position (0)
75         , _audio_position (0)
76         , _audio_merger (f->audio_channels(), f->audio_frame_rate ())
77         , _last_emit_was_black (false)
78         , _just_did_inaccurate_seek (false)
79         , _approximate_size (false)
80 {
81         _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
82         _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
83         _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
84         set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ()));
85 }
86
87 void
88 Player::disable_video ()
89 {
90         _video = false;
91 }
92
93 void
94 Player::disable_audio ()
95 {
96         _audio = false;
97 }
98
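/** Emit the earliest decoded video, audio or subtitle that our decoders have
 *  ready, adding black video or silence where necessary to keep the video and
 *  audio positions in step.
 *  @return true if every decoder is exhausted and the remaining audio has been
 *  flushed; false if there may be more to come.
 */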
99 bool
100 Player::pass ()
101 {
102         if (!_have_valid_pieces) {
103                 setup_pieces ();
104         }
105
106         /* Interrogate all our pieces to find the one with the earliest decoded data */
107
108         shared_ptr<Piece> earliest_piece;
109         shared_ptr<Decoded> earliest_decoded;
110         DCPTime earliest_time = DCPTime::max ();
111         DCPTime earliest_audio = DCPTime::max ();
112
113         for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
114
115                 DCPTime const offset = (*i)->content->position() - (*i)->content->trim_start();
116                 
117                 bool done = false;
118                 shared_ptr<Decoded> dec;
119                 while (!done) {
120                         dec = (*i)->decoder->peek ();
121                         if (!dec) {
122                                 /* Decoder has nothing else to give us */
123                                 break;
124                         }
125
126
127                         dec->set_dcp_times ((*i)->frc, offset);
128                         DCPTime const t = dec->dcp_time - offset;
129                         if (t >= ((*i)->content->full_length() - (*i)->content->trim_end ())) {
130                                 /* In the end-trimmed part; decoder has nothing else to give us */
131                                 dec.reset ();
132                                 done = true;
133                         } else if (t >= (*i)->content->trim_start ()) {
134                                 /* Within the un-trimmed part; everything's ok */
135                                 done = true;
136                         } else {
137                                 /* Within the start-trimmed part; get something else */
138                                 (*i)->decoder->consume ();
139                         }
140                 }
141
142                 if (!dec) {
143                         continue;
144                 }
145
146                 if (dec->dcp_time < earliest_time) {
147                         earliest_piece = *i;
148                         earliest_decoded = dec;
149                         earliest_time = dec->dcp_time;
150                 }
151
152                 if (dynamic_pointer_cast<DecodedAudio> (dec) && dec->dcp_time < earliest_audio) {
153                         earliest_audio = dec->dcp_time;
154                 }
155         }
156                 
157         if (!earliest_piece) {
158                 flush ();
159                 return true;
160         }
161
162         if (earliest_audio != DCPTime::max ()) {
163                 if (earliest_audio.get() < 0) {
164                         earliest_audio = DCPTime ();
165                 }
166                 TimedAudioBuffers<DCPTime> tb = _audio_merger.pull (earliest_audio);
167                 Audio (tb.audio, tb.time);
168                 /* This assumes that the audio-frames-to-time conversion is exact
169                    so that there are no accumulated errors caused by rounding.
170                 */
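                /* (For instance, if time is stored as an integer number of ticks at a
                    rate that is an exact multiple of the audio frame rate, converting
                    two blocks of 2000 frames gives the same total as converting one
                    block of 4000 frames, so this repeated += cannot drift.)
                */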
171                 _audio_position += DCPTime::from_frames (tb.audio->frames(), _film->audio_frame_rate ());
172         }
173
174         /* Emit the earliest thing */
175
176         shared_ptr<DecodedVideo> dv = dynamic_pointer_cast<DecodedVideo> (earliest_decoded);
177         shared_ptr<DecodedAudio> da = dynamic_pointer_cast<DecodedAudio> (earliest_decoded);
178         shared_ptr<DecodedImageSubtitle> dis = dynamic_pointer_cast<DecodedImageSubtitle> (earliest_decoded);
179         shared_ptr<DecodedTextSubtitle> dts = dynamic_pointer_cast<DecodedTextSubtitle> (earliest_decoded);
180
181         /* Will be set to false if we shouldn't consume the peeked DecodedThing */
182         bool consume = true;
183
184         if (dv && _video) {
185
186                 if (_just_did_inaccurate_seek) {
187
188                         /* Just emit; no subtlety */
189                         emit_video (earliest_piece, dv);
190                         step_video_position (dv);
191                         
192                 } else if (dv->dcp_time > _video_position) {
193
194                         /* Too far ahead */
195
196                         list<shared_ptr<Piece> >::iterator i = _pieces.begin();
197                         while (i != _pieces.end() && ((*i)->content->position() >= _video_position || _video_position >= (*i)->content->end())) {
198                                 ++i;
199                         }
200
201                         if (i == _pieces.end() || !_last_incoming_video.video || !_have_valid_pieces) {
202                                 /* We're outside all video content */
203                                 emit_black ();
204                                 _statistics.video.black++;
205                         } else {
206                                 /* We're inside some video; repeat the frame */
207                                 _last_incoming_video.video->dcp_time = _video_position;
208                                 emit_video (_last_incoming_video.weak_piece, _last_incoming_video.video);
209                                 step_video_position (_last_incoming_video.video);
210                                 _statistics.video.repeat++;
211                         }
212
213                         consume = false;
214
215                 } else if (dv->dcp_time == _video_position) {
216                         /* We're ok */
217                         emit_video (earliest_piece, dv);
218                         step_video_position (dv);
219                         _statistics.video.good++;
220                 } else {
221                         /* Too far behind: skip */
222                         _statistics.video.skip++;
223                 }
224
225                 _just_did_inaccurate_seek = false;
226
227         } else if (da && _audio) {
228
229                 if (da->dcp_time > _audio_position) {
230                         /* Too far ahead */
231                         emit_silence (da->dcp_time - _audio_position);
232                         consume = false;
233                         _statistics.audio.silence += (da->dcp_time - _audio_position);
234                 } else if (da->dcp_time == _audio_position) {
235                         /* We're ok */
236                         emit_audio (earliest_piece, da);
237                         _statistics.audio.good += da->data->frames();
238                 } else {
239                         /* Too far behind: skip */
240                         _statistics.audio.skip += da->data->frames();
241                 }
242                 
243         } else if (dis && _video) {
244                 _image_subtitle.piece = earliest_piece;
245                 _image_subtitle.subtitle = dis;
246                 update_subtitle_from_image ();
247         } else if (dts && _video) {
248                 _text_subtitle.piece = earliest_piece;
249                 _text_subtitle.subtitle = dts;
250                 update_subtitle_from_text ();
251         }
252
253         if (consume) {
254                 earliest_piece->decoder->consume ();
255         }                       
256         
257         return false;
258 }
259
260 void
261 Player::emit_video (weak_ptr<Piece> weak_piece, shared_ptr<DecodedVideo> video)
262 {
263         /* Keep a note of what came in so that we can repeat it if required */
264         _last_incoming_video.weak_piece = weak_piece;
265         _last_incoming_video.video = video;
266         
267         shared_ptr<Piece> piece = weak_piece.lock ();
268         if (!piece) {
269                 return;
270         }
271
272         shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
273         assert (content);
274
275         FrameRateChange frc (content->video_frame_rate(), _film->video_frame_rate());
276
277         dcp::Size image_size = content->scale().size (content, _video_container_size);
278         if (_approximate_size) {
279                 image_size.width &= ~3;
280                 image_size.height &= ~3;
281         }
282
283         shared_ptr<PlayerImage> pi (
284                 new PlayerImage (
285                         video->image,
286                         content->crop(),
287                         image_size,
288                         _video_container_size,
289                         _film->scaler()
290                         )
291                 );
292         
293         if (
294                 _film->with_subtitles () &&
295                 _out_subtitle.image &&
296                 video->dcp_time >= _out_subtitle.from && video->dcp_time <= _out_subtitle.to
297                 ) {
298
299                 Position<int> const container_offset (
300                         (_video_container_size.width - image_size.width) / 2,
301                         (_video_container_size.height - image_size.height) / 2
302                         );
303
304                 pi->set_subtitle (_out_subtitle.image, _out_subtitle.position + container_offset);
305         }
306                 
307                                             
308 #ifdef DCPOMATIC_DEBUG
309         _last_video = piece->content;
310 #endif
311
312         Video (pi, video->eyes, content->colour_conversion(), video->same, video->dcp_time);
313         
314         _last_emit_was_black = false;
315 }
316
317 void
318 Player::step_video_position (shared_ptr<DecodedVideo> video)
319 {
320         /* This is a bit of a hack; don't update _video_position if EYES_RIGHT is on its way */
321         if (video->eyes != EYES_LEFT) {
322                 /* This assumes that the video-frames-to-time conversion is exact
323                    so that there are no accumulated errors caused by rounding.
324                 */
325                 _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
326         }
327 }
328
329 void
330 Player::emit_audio (weak_ptr<Piece> weak_piece, shared_ptr<DecodedAudio> audio)
331 {
332         shared_ptr<Piece> piece = weak_piece.lock ();
333         if (!piece) {
334                 return;
335         }
336
337         shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
338         assert (content);
339
340         /* Gain */
341         if (content->audio_gain() != 0) {
342                 shared_ptr<AudioBuffers> gain (new AudioBuffers (audio->data));
343                 gain->apply_gain (content->audio_gain ());
344                 audio->data = gain;
345         }
346
347         /* Remap channels */
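        /* (The mapping holds a gain for each (content channel, DCP channel) pair;
            e.g. a stereo source passed straight through has get(0, 0) = get(1, 1) = 1
            and 0 everywhere else, so each accumulate_channel call below mixes one
            content channel into one DCP channel at that gain.)
        */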
348         shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->data->frames()));
349         dcp_mapped->make_silent ();
350         AudioMapping map = content->audio_mapping ();
351         for (int i = 0; i < map.content_channels(); ++i) {
352                 for (int j = 0; j < _film->audio_channels(); ++j) {
353                         if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
354                                 dcp_mapped->accumulate_channel (
355                                         audio->data.get(),
356                                         i,
357                                         static_cast<dcp::Channel> (j),
358                                         map.get (i, static_cast<dcp::Channel> (j))
359                                         );
360                         }
361                 }
362         }
363
364         audio->data = dcp_mapped;
365
366         /* Delay */
367         audio->dcp_time += DCPTime::from_seconds (content->audio_delay() / 1000.0);
368         if (audio->dcp_time < DCPTime (0)) {
369                 int const frames = - audio->dcp_time.frames (_film->audio_frame_rate());
370                 if (frames >= audio->data->frames ()) {
371                         return;
372                 }
373
374                 shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->data->channels(), audio->data->frames() - frames));
375                 trimmed->copy_from (audio->data.get(), audio->data->frames() - frames, frames, 0);
376
377                 audio->data = trimmed;
378                 audio->dcp_time = DCPTime ();
379         }
380
381         _audio_merger.push (audio->data, audio->dcp_time);
382 }
383
384 void
385 Player::flush ()
386 {
387         TimedAudioBuffers<DCPTime> tb = _audio_merger.flush ();
388         if (_audio && tb.audio) {
389                 Audio (tb.audio, tb.time);
390                 _audio_position += DCPTime::from_frames (tb.audio->frames (), _film->audio_frame_rate ());
391         }
392
393         while (_video && _video_position < _audio_position) {
394                 emit_black ();
395         }
396
397         while (_audio && _audio_position < _video_position) {
398                 emit_silence (_video_position - _audio_position);
399         }
400 }
401
402 /** Seek so that the next pass() will yield (approximately) the requested time.
403  *  Pass accurate = true to try harder to get close to the request; an
404  *  inaccurate seek may land some way from the requested time.
405  */
406 void
407 Player::seek (DCPTime t, bool accurate)
408 {
409         if (!_have_valid_pieces) {
410                 setup_pieces ();
411         }
412
413         if (_pieces.empty ()) {
414                 return;
415         }
416
417         for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
418                 /* s is the offset of t from the start position of this content */
419                 DCPTime s = t - (*i)->content->position ();
420                 s = max (static_cast<DCPTime> (0), s);
421                 s = min ((*i)->content->length_after_trim(), s);
422
423                 /* Convert this to the content time */
424                 ContentTime ct (s + (*i)->content->trim_start(), (*i)->frc);
425
426                 /* And seek the decoder */
427                 (*i)->decoder->seek (ct, accurate);
428         }
429
430         _video_position = t.round_up (_film->video_frame_rate());
431         _audio_position = t.round_up (_film->audio_frame_rate());
432
433         _audio_merger.clear (_audio_position);
434
435         if (!accurate) {
436                 /* We just did an inaccurate seek, so it's likely that the next thing seen
437                    out of pass() will be a fair distance from _{video,audio}_position.  Setting
438                    this flag stops pass() from trying to fix that: we assume that if it
439                    was an inaccurate seek then the caller does not care too much about
440                    inserting black/silence to keep the time tidy.
441                 */
442                 _just_did_inaccurate_seek = true;
443         }
444 }
445
446 void
447 Player::setup_pieces ()
448 {
449         list<shared_ptr<Piece> > old_pieces = _pieces;
450         _pieces.clear ();
451
452         ContentList content = _playlist->content ();
453
454         for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
455
456                 if (!(*i)->paths_valid ()) {
457                         continue;
458                 }
459                 
460                 shared_ptr<Decoder> decoder;
461                 optional<FrameRateChange> frc;
462
463                 /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
464                 DCPTime best_overlap_t;
465                 shared_ptr<VideoContent> best_overlap;
466                 for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
467                         shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
468                         if (!vc) {
469                                 continue;
470                         }
471                         
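                        /* Overlap between this piece of content and the video content vc;
                           e.g. content occupying [0s, 10s) and [4s, 12s) overlaps by
                           min(10, 12) - max(0, 4) = 6s.  A value <= 0 means no overlap.
                        */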
472                         DCPTime const overlap = min (vc->end(), (*i)->end()) - max (vc->position(), (*i)->position());
473                         if (overlap > best_overlap_t) {
474                                 best_overlap = vc;
475                                 best_overlap_t = overlap;
476                         }
477                 }
478
479                 optional<FrameRateChange> best_overlap_frc;
480                 if (best_overlap) {
481                         best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
482                 } else {
483                         /* No video overlap; e.g. if the DCP is just audio */
484                         best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
485                 }
486
487                 /* FFmpeg */
488                 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
489                 if (fc) {
490                         decoder.reset (new FFmpegDecoder (_film, fc, _video, _audio));
491                         frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
492                 }
493
494                 /* ImageContent */
495                 shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
496                 if (ic) {
497                         /* See if we can re-use an old ImageDecoder */
498                         for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
499                                 shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
500                                 if (imd && imd->content() == ic) {
501                                         decoder = imd;
502                                 }
503                         }
504
505                         if (!decoder) {
506                                 decoder.reset (new ImageDecoder (_film, ic));
507                         }
508
509                         frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
510                 }
511
512                 /* SndfileContent */
513                 shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
514                 if (sc) {
515                         decoder.reset (new SndfileDecoder (_film, sc));
516                         frc = best_overlap_frc;
517                 }
518
519                 /* SubRipContent */
520                 shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
521                 if (rc) {
522                         decoder.reset (new SubRipDecoder (_film, rc));
523                         frc = best_overlap_frc;
524                 }
525
526                 ContentTime st ((*i)->trim_start(), frc.get ());
527                 decoder->seek (st, true);
528                 
529                 _pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
530         }
531
532         _have_valid_pieces = true;
533
534         /* The Piece for the _last_incoming_video will no longer be valid */
535         _last_incoming_video.video.reset ();
536
537         _video_position = DCPTime ();
538         _audio_position = DCPTime ();
539 }
540
541 void
542 Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
543 {
544         shared_ptr<Content> c = w.lock ();
545         if (!c) {
546                 return;
547         }
548
549         if (
550                 property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
551                 property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END ||
552                 property == VideoContentProperty::VIDEO_FRAME_TYPE 
553                 ) {
554                 
555                 _have_valid_pieces = false;
556                 Changed (frequent);
557
558         } else if (
559                 property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
560                 property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
561                 property == SubtitleContentProperty::SUBTITLE_SCALE
562                 ) {
563
564                 update_subtitle_from_image ();
565                 update_subtitle_from_text ();
566                 Changed (frequent);
567
568         } else if (
569                 property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_SCALE ||
570                 property == VideoContentProperty::VIDEO_FRAME_RATE
571                 ) {
572                 
573                 Changed (frequent);
574
575         } else if (property == ContentProperty::PATH) {
576
577                 _have_valid_pieces = false;
578                 Changed (frequent);
579         }
580 }
581
582 void
583 Player::playlist_changed ()
584 {
585         _have_valid_pieces = false;
586         Changed (false);
587 }
588
589 void
590 Player::set_video_container_size (dcp::Size s)
591 {
592         _video_container_size = s;
593
594         shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
595         im->make_black ();
596         
597         _black_frame.reset (
598                 new PlayerImage (
599                         im,
600                         Crop(),
601                         _video_container_size,
602                         _video_container_size,
603                         Scaler::from_id ("bicubic")
604                         )
605                 );
606 }
607
608 void
609 Player::emit_black ()
610 {
611 #ifdef DCPOMATIC_DEBUG
612         _last_video.reset ();
613 #endif
614
615         Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
616         _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
617         _last_emit_was_black = true;
618 }
619
620 void
621 Player::emit_silence (DCPTime most)
622 {
623         if (most == DCPTime ()) {
624                 return;
625         }
626         
627         DCPTime t = min (most, DCPTime::from_seconds (0.5));
628         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), t.frames (_film->audio_frame_rate())));
629         silence->make_silent ();
630         Audio (silence, _audio_position);
631         
632         _audio_position += t;
633 }
634
635 void
636 Player::film_changed (Film::Property p)
637 {
638         /* Here we should notice Film properties that affect our output, and
639            alert listeners that our output would now be different from what it was
640            the last time we were run.
641         */
642
643         if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
644                 Changed (false);
645         }
646 }
647
648 void
649 Player::update_subtitle_from_image ()
650 {
651         shared_ptr<Piece> piece = _image_subtitle.piece.lock ();
652         if (!piece) {
653                 return;
654         }
655
656         if (!_image_subtitle.subtitle->image) {
657                 _out_subtitle.image.reset ();
658                 return;
659         }
660
661         shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
662         assert (sc);
663
664         dcpomatic::Rect<double> in_rect = _image_subtitle.subtitle->rect;
665         dcp::Size scaled_size;
666
667         in_rect.x += sc->subtitle_x_offset ();
668         in_rect.y += sc->subtitle_y_offset ();
669
670         /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
671         scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
672         scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();
673
674         /* Then we need a corrective translation, consisting of two parts:
675          *
676          * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
677          *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
678          *
679          * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
680          *     (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
681          *     (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
682          *
683          * Combining these two translations gives these expressions.
684          */
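        /* For example: with a 1998-pixel-wide container, in_rect.x = 0.1,
           in_rect.width = 0.5 and subtitle_scale = 0.8, position.x comes out as
           rint (1998 * (0.1 + 0.5 * (1 - 0.8) / 2)) = rint (1998 * 0.15) = 300.
        */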
685         
686         _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
687         _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
688         
689         _out_subtitle.image = _image_subtitle.subtitle->image->scale (
690                 scaled_size,
691                 Scaler::from_id ("bicubic"),
692                 _image_subtitle.subtitle->image->pixel_format (),
693                 true
694                 );
695         
696         _out_subtitle.from = _image_subtitle.subtitle->dcp_time + piece->content->position ();
697         _out_subtitle.to = _image_subtitle.subtitle->dcp_time_to + piece->content->position ();
698 }
699
700 /** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
701  *  @return false if this could not be done.
702  */
703 bool
704 Player::repeat_last_video ()
705 {
706         if (!_last_incoming_video.video || !_have_valid_pieces) {
707                 return false;
708         }
709
710         emit_video (
711                 _last_incoming_video.weak_piece,
712                 _last_incoming_video.video
713                 );
714
715         return true;
716 }
717
718 void
719 Player::update_subtitle_from_text ()
720 {
721         if (_text_subtitle.subtitle->subs.empty ()) {
722                 _out_subtitle.image.reset ();
723                 return;
724         }
725
726         render_subtitles (_text_subtitle.subtitle->subs, _video_container_size, _out_subtitle.image, _out_subtitle.position);
727 }
728
729 void
730 Player::set_approximate_size ()
731 {
732         _approximate_size = true;
733 }
734                               
735 PlayerImage::PlayerImage (
736         shared_ptr<const Image> in,
737         Crop crop,
738         dcp::Size inter_size,
739         dcp::Size out_size,
740         Scaler const * scaler
741         )
742         : _in (in)
743         , _crop (crop)
744         , _inter_size (inter_size)
745         , _out_size (out_size)
746         , _scaler (scaler)
747 {
748
749 }
750
751 void
752 PlayerImage::set_subtitle (shared_ptr<const Image> image, Position<int> pos)
753 {
754         _subtitle_image = image;
755         _subtitle_position = pos;
756 }
757
758 shared_ptr<Image>
759 PlayerImage::image (AVPixelFormat format, bool aligned)
760 {
761         shared_ptr<Image> out = _in->crop_scale_window (_crop, _inter_size, _out_size, _scaler, format, aligned);
762         
763         Position<int> const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.height) / 2);
764
765         if (_subtitle_image) {
766                 out->alpha_blend (_subtitle_image, _subtitle_position);
767         }
768
769         return out;
770 }
771
772 void
773 PlayerStatistics::dump (shared_ptr<Log> log) const
774 {
775         log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat));
776         log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()));
777 }
778
779 PlayerStatistics const &
780 Player::statistics () const
781 {
782         return _statistics;
783 }