[dcpomatic.git] / src / lib / player.cc
/*
    Copyright (C) 2013 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include <stdint.h>
#include <algorithm>
#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "subrip_decoder.h"
#include "subrip_content.h"
#include "playlist.h"
#include "job.h"
#include "image.h"
#include "ratio.h"
#include "log.h"
#include "scaler.h"
#include "render_subtitles.h"

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;

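/** A simple grouping of a piece of content, the decoder that will produce its data
 *  and the frame rate change needed to fit it into the DCP.
 */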
class Piece
{
public:
        Piece (shared_ptr<Content> c, shared_ptr<Decoder> d, FrameRateChange f)
                : content (c)
                , decoder (d)
                , frc (f)
        {}

        shared_ptr<Content> content;
        shared_ptr<Decoder> decoder;
        FrameRateChange frc;
};

Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
        : _film (f)
        , _playlist (p)
        , _video (true)
        , _audio (true)
        , _have_valid_pieces (false)
        , _video_position (0)
        , _audio_position (0)
        , _audio_merger (f->audio_channels(), f->audio_frame_rate ())
        , _last_emit_was_black (false)
        , _just_did_inaccurate_seek (false)
        , _approximate_size (false)
{
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        set_video_container_size (_film->frame_size ());
}

void
Player::disable_video ()
{
        _video = false;
}

void
Player::disable_audio ()
{
        _audio = false;
}

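/** Do one unit of work: find the piece with the earliest decoded data, emit a frame of
 *  video, a block of audio or a subtitle from it (or black / silence / a repeated frame
 *  if the positions need padding to stay in step) and consume the decoded data if it
 *  was used.
 *  @return true if the playlist is exhausted and any remaining audio has been flushed,
 *  otherwise false.
 */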
bool
Player::pass ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        /* Interrogate all our pieces to find the one with the earliest decoded data */

        shared_ptr<Piece> earliest_piece;
        shared_ptr<Decoded> earliest_decoded;
        DCPTime earliest_time = DCPTime::max ();
        DCPTime earliest_audio = DCPTime::max ();

        for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {

                DCPTime const offset = (*i)->content->position() - (*i)->content->trim_start();

                bool done = false;
                shared_ptr<Decoded> dec;
                while (!done) {
                        dec = (*i)->decoder->peek ();
                        if (!dec) {
                                /* Decoder has nothing else to give us */
                                break;
                        }

                        dec->set_dcp_times ((*i)->frc, offset);
                        DCPTime const t = dec->dcp_time - offset;
                        if (t >= ((*i)->content->full_length() - (*i)->content->trim_end ())) {
                                /* In the end-trimmed part; decoder has nothing else to give us */
                                dec.reset ();
                                done = true;
                        } else if (t >= (*i)->content->trim_start ()) {
                                /* Within the un-trimmed part; everything's ok */
                                done = true;
                        } else {
                                /* Within the start-trimmed part; get something else */
                                (*i)->decoder->consume ();
                        }
                }

                if (!dec) {
                        continue;
                }

                if (dec->dcp_time < earliest_time) {
                        earliest_piece = *i;
                        earliest_decoded = dec;
                        earliest_time = dec->dcp_time;
                }

                if (dynamic_pointer_cast<DecodedAudio> (dec) && dec->dcp_time < earliest_audio) {
                        earliest_audio = dec->dcp_time;
                }
        }

        if (!earliest_piece) {
                flush ();
                return true;
        }

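        /* Once we know the earliest time of any outstanding audio we can safely emit
           everything the merger has mixed up to that point, since no decoder can now
           produce audio earlier than that.
        */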
        if (earliest_audio != DCPTime::max ()) {
                if (earliest_audio.get() < 0) {
                        earliest_audio = DCPTime ();
                }
                TimedAudioBuffers tb = _audio_merger.pull (earliest_audio);
                Audio (tb.audio, tb.time);
                /* This assumes that the audio-frames-to-time conversion is exact
                   so that there are no accumulated errors caused by rounding.
                */
                _audio_position += DCPTime::from_frames (tb.audio->frames(), _film->audio_frame_rate ());
        }

        /* Emit the earliest thing */

        shared_ptr<DecodedVideo> dv = dynamic_pointer_cast<DecodedVideo> (earliest_decoded);
        shared_ptr<DecodedAudio> da = dynamic_pointer_cast<DecodedAudio> (earliest_decoded);
        shared_ptr<DecodedImageSubtitle> dis = dynamic_pointer_cast<DecodedImageSubtitle> (earliest_decoded);
        shared_ptr<DecodedTextSubtitle> dts = dynamic_pointer_cast<DecodedTextSubtitle> (earliest_decoded);

        /* Will be set to false if we shouldn't consume the peeked DecodedThing */
        bool consume = true;

        if (dv && _video) {

                if (_just_did_inaccurate_seek) {

                        /* Just emit; no subtlety */
                        emit_video (earliest_piece, dv);
                        step_video_position (dv);

                } else if (dv->dcp_time > _video_position) {

                        /* Too far ahead */

                        list<shared_ptr<Piece> >::iterator i = _pieces.begin();
                        while (i != _pieces.end() && ((*i)->content->position() >= _video_position || _video_position >= (*i)->content->end())) {
                                ++i;
                        }

                        if (i == _pieces.end() || !_last_incoming_video.video || !_have_valid_pieces) {
                                /* We're outside all video content */
                                emit_black ();
                                _statistics.video.black++;
                        } else {
                                /* We're inside some video; repeat the frame */
                                _last_incoming_video.video->dcp_time = _video_position;
                                emit_video (_last_incoming_video.weak_piece, _last_incoming_video.video);
                                step_video_position (_last_incoming_video.video);
                                _statistics.video.repeat++;
                        }

                        consume = false;

                } else if (dv->dcp_time == _video_position) {
                        /* We're ok */
                        emit_video (earliest_piece, dv);
                        step_video_position (dv);
                        _statistics.video.good++;
                } else {
                        /* Too far behind: skip */
                        _statistics.video.skip++;
                }

                _just_did_inaccurate_seek = false;

        } else if (da && _audio) {

                if (da->dcp_time > _audio_position) {
                        /* Too far ahead */
                        emit_silence (da->dcp_time - _audio_position);
                        consume = false;
                        _statistics.audio.silence += (da->dcp_time - _audio_position);
                } else if (da->dcp_time == _audio_position) {
                        /* We're ok */
                        emit_audio (earliest_piece, da);
                        _statistics.audio.good += da->data->frames();
                } else {
                        /* Too far behind: skip */
                        _statistics.audio.skip += da->data->frames();
                }

        } else if (dis && _video) {
                _image_subtitle.piece = earliest_piece;
                _image_subtitle.subtitle = dis;
                update_subtitle_from_image ();
        } else if (dts && _video) {
                _text_subtitle.piece = earliest_piece;
                _text_subtitle.subtitle = dts;
                update_subtitle_from_text ();
        }

        if (consume) {
                earliest_piece->decoder->consume ();
        }

        return false;
}

void
Player::emit_video (weak_ptr<Piece> weak_piece, shared_ptr<DecodedVideo> video)
{
        /* Keep a note of what came in so that we can repeat it if required */
        _last_incoming_video.weak_piece = weak_piece;
        _last_incoming_video.video = video;

        shared_ptr<Piece> piece = weak_piece.lock ();
        if (!piece) {
                return;
        }

        shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
        assert (content);

        FrameRateChange frc (content->video_frame_rate(), _film->video_frame_rate());

        dcp::Size image_size = content->scale().size (content, _video_container_size);
        if (_approximate_size) {
                image_size.width &= ~3;
                image_size.height &= ~3;
        }

        shared_ptr<PlayerImage> pi (
                new PlayerImage (
                        video->image,
                        content->crop(),
                        image_size,
                        _video_container_size,
                        _film->scaler()
                        )
                );

        if (
                _film->with_subtitles () &&
                _out_subtitle.image &&
                video->dcp_time >= _out_subtitle.from && video->dcp_time <= _out_subtitle.to
                ) {

                Position<int> const container_offset (
                        (_video_container_size.width - image_size.width) / 2,
                        (_video_container_size.height - image_size.height) / 2
                        );

                pi->set_subtitle (_out_subtitle.image, _out_subtitle.position + container_offset);
        }

#ifdef DCPOMATIC_DEBUG
        _last_video = piece->content;
#endif

        Video (pi, video->eyes, content->colour_conversion(), video->same, video->dcp_time);

        _last_emit_was_black = false;
}

void
Player::step_video_position (shared_ptr<DecodedVideo> video)
{
        /* This is a bit of a hack; don't update _video_position if EYES_RIGHT is on its way */
        if (video->eyes != EYES_LEFT) {
                /* This assumes that the video-frames-to-time conversion is exact
                   so that there are no accumulated errors caused by rounding.
                */
                _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
        }
}

void
Player::emit_audio (weak_ptr<Piece> weak_piece, shared_ptr<DecodedAudio> audio)
{
        shared_ptr<Piece> piece = weak_piece.lock ();
        if (!piece) {
                return;
        }

        shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
        assert (content);

        /* Gain */
        if (content->audio_gain() != 0) {
                shared_ptr<AudioBuffers> gain (new AudioBuffers (audio->data));
                gain->apply_gain (content->audio_gain ());
                audio->data = gain;
        }

        /* Remap channels */
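        /* For example (illustrative numbers only): stereo content going into a 6-channel
           film with a mapping of L->L = 1 and R->R = 1 accumulates content channel 0 into
           DCP channel 0 and content channel 1 into DCP channel 1, leaving the other four
           output channels silent.
        */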
        shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->data->frames()));
        dcp_mapped->make_silent ();
        AudioMapping map = content->audio_mapping ();
        for (int i = 0; i < map.content_channels(); ++i) {
                for (int j = 0; j < _film->audio_channels(); ++j) {
                        if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
                                dcp_mapped->accumulate_channel (
                                        audio->data.get(),
                                        i,
                                        static_cast<dcp::Channel> (j),
                                        map.get (i, static_cast<dcp::Channel> (j))
                                        );
                        }
                }
        }

        audio->data = dcp_mapped;

        /* Delay */
        audio->dcp_time += DCPTime::from_seconds (content->audio_delay() / 1000.0);
        if (audio->dcp_time < DCPTime (0)) {
                int const frames = - audio->dcp_time.frames (_film->audio_frame_rate());
                if (frames >= audio->data->frames ()) {
                        return;
                }

                shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->data->channels(), audio->data->frames() - frames));
                trimmed->copy_from (audio->data.get(), audio->data->frames() - frames, frames, 0);

                audio->data = trimmed;
                audio->dcp_time = DCPTime ();
        }

        _audio_merger.push (audio->data, audio->dcp_time);
}

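/** Flush any audio left in the merger and then pad with black or silence so that
 *  the video and audio positions finish at the same point.
 */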
void
Player::flush ()
{
        TimedAudioBuffers tb = _audio_merger.flush ();
        if (_audio && tb.audio) {
                Audio (tb.audio, tb.time);
                _audio_position += DCPTime::from_frames (tb.audio->frames (), _film->audio_frame_rate ());
        }

        while (_video && _video_position < _audio_position) {
                emit_black ();
        }

        while (_audio && _audio_position < _video_position) {
                emit_silence (_video_position - _audio_position);
        }
}

/** Seek so that the next pass() will yield (approximately) the requested time.
 *  Pass accurate = true to try harder to get close to the request.
 */
void
Player::seek (DCPTime t, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        if (_pieces.empty ()) {
                return;
        }

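        /* Work out the content-time seek point for each piece.  As an illustration
           (hypothetical numbers): content positioned at 10s with a 2s start trim, seeking
           to t = 15s, gives s = 5s and hence a content time of 7s, converted via the
           piece's FrameRateChange.
        */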
        for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
                /* s is the offset of t from the start position of this content */
                DCPTime s = t - (*i)->content->position ();
                s = max (static_cast<DCPTime> (0), s);
                s = min ((*i)->content->length_after_trim(), s);

                /* Convert this to the content time */
                ContentTime ct (s + (*i)->content->trim_start(), (*i)->frc);

                /* And seek the decoder */
                (*i)->decoder->seek (ct, accurate);
        }

        _video_position = t.round_up (_film->video_frame_rate());
        _audio_position = t.round_up (_film->audio_frame_rate());

        _audio_merger.clear (_audio_position);

        if (!accurate) {
                /* We just did an inaccurate seek, so it's likely that the next thing seen
                   out of pass() will be a fair distance from _{video,audio}_position.  Setting
                   this flag stops pass() from trying to fix that: we assume that if it
                   was an inaccurate seek then the caller does not care too much about
                   inserting black/silence to keep the time tidy.
                */
                _just_did_inaccurate_seek = true;
        }
}

void
Player::setup_pieces ()
{
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        ContentList content = _playlist->content ();

        for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {

                if (!(*i)->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder;
                optional<FrameRateChange> frc;

                /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
                DCPTime best_overlap_t;
                shared_ptr<VideoContent> best_overlap;
                for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
                        shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
                        if (!vc) {
                                continue;
                        }

                        DCPTime const overlap = min (vc->end(), (*i)->end()) - max (vc->position(), (*i)->position());
                        if (overlap > best_overlap_t) {
                                best_overlap = vc;
                                best_overlap_t = overlap;
                        }
                }

                optional<FrameRateChange> best_overlap_frc;
                if (best_overlap) {
                        best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
                } else {
                        /* No video overlap; e.g. if the DCP is just audio */
                        best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
                }

                /* FFmpeg */
                shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
                if (fc) {
                        decoder.reset (new FFmpegDecoder (fc, _film->log(), _video, _audio, _film->with_subtitles ()));
                        frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
                }

                /* ImageContent */
                shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
                if (ic) {
                        /* See if we can re-use an old ImageDecoder */
                        for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
                                shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
                                if (imd && imd->content() == ic) {
                                        decoder = imd;
                                }
                        }

                        if (!decoder) {
                                decoder.reset (new ImageDecoder (ic));
                        }

                        frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
                }

                /* SndfileContent */
                shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
                if (sc) {
                        decoder.reset (new SndfileDecoder (sc));
                        frc = best_overlap_frc;
                }

                /* SubRipContent */
                shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
                if (rc) {
                        decoder.reset (new SubRipDecoder (rc));
                        frc = best_overlap_frc;
                }

                ContentTime st ((*i)->trim_start(), frc.get ());
                decoder->seek (st, true);

                _pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
        }

        _have_valid_pieces = true;

        /* The Piece for the _last_incoming_video will no longer be valid */
        _last_incoming_video.video.reset ();

        _video_position = DCPTime ();
        _audio_position = DCPTime ();
}

void
Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

        if (
                property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END ||
                property == VideoContentProperty::VIDEO_FRAME_TYPE
                ) {

                _have_valid_pieces = false;
                Changed (frequent);

        } else if (
                property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_SCALE
                ) {

                update_subtitle_from_image ();
                update_subtitle_from_text ();
                Changed (frequent);

        } else if (
                property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_SCALE ||
                property == VideoContentProperty::VIDEO_FRAME_RATE
                ) {

                Changed (frequent);

        } else if (property == ContentProperty::PATH) {

                _have_valid_pieces = false;
                Changed (frequent);
        }
}

void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}

void
Player::set_video_container_size (dcp::Size s)
{
        _video_container_size = s;

        shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
        im->make_black ();

        _black_frame.reset (
                new PlayerImage (
                        im,
                        Crop(),
                        _video_container_size,
                        _video_container_size,
                        Scaler::from_id ("bicubic")
                        )
                );
}

void
Player::emit_black ()
{
#ifdef DCPOMATIC_DEBUG
        _last_video.reset ();
#endif

        Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
        _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
        _last_emit_was_black = true;
}

void
Player::emit_silence (DCPTime most)
{
        if (most == DCPTime ()) {
                return;
        }

        DCPTime t = min (most, DCPTime::from_seconds (0.5));
        shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), t.frames (_film->audio_frame_rate())));
        silence->make_silent ();
        Audio (silence, _audio_position);

        _audio_position += t;
}

void
Player::film_changed (Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
                Changed (false);
        }
}

void
Player::update_subtitle_from_image ()
{
        shared_ptr<Piece> piece = _image_subtitle.piece.lock ();
        if (!piece) {
                return;
        }

        if (!_image_subtitle.subtitle->image) {
                _out_subtitle.image.reset ();
                return;
        }

        shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
        assert (sc);

        dcpomatic::Rect<double> in_rect = _image_subtitle.subtitle->rect;
        dcp::Size scaled_size;

        in_rect.x += sc->subtitle_x_offset ();
        in_rect.y += sc->subtitle_y_offset ();

        /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
        scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
        scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();

        /* Then we need a corrective translation, consisting of two parts:
         *
         * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
         *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
         *
         * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
         *     (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
         *     (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
         *
         * Combining these two translations gives these expressions.
         */

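        /* As a worked example (hypothetical numbers): with a container 1998 pixels wide,
           a subtitle rect with x = 0.1 and width = 0.5 and subtitle_scale = 0.8, we get
           position.x = rint (1998 * (0.1 + 0.5 * 0.2 / 2)) = rint (1998 * 0.15) = 300.
        */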
        _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
        _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));

        _out_subtitle.image = _image_subtitle.subtitle->image->scale (
                scaled_size,
                Scaler::from_id ("bicubic"),
                _image_subtitle.subtitle->image->pixel_format (),
                true
                );

        _out_subtitle.from = _image_subtitle.subtitle->dcp_time + piece->content->position ();
        _out_subtitle.to = _image_subtitle.subtitle->dcp_time_to + piece->content->position ();
}

/** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
 *  @return false if this could not be done.
 */
bool
Player::repeat_last_video ()
{
        if (!_last_incoming_video.video || !_have_valid_pieces) {
                return false;
        }

        emit_video (
                _last_incoming_video.weak_piece,
                _last_incoming_video.video
                );

        return true;
}

void
Player::update_subtitle_from_text ()
{
        if (_text_subtitle.subtitle->subs.empty ()) {
                _out_subtitle.image.reset ();
                return;
        }

        render_subtitles (_text_subtitle.subtitle->subs, _video_container_size, _out_subtitle.image, _out_subtitle.position);
}

void
Player::set_approximate_size ()
{
        _approximate_size = true;
}

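/** PlayerImage wraps an input image together with the crop, scaling and optional
 *  subtitle that should be applied to it; the work is deferred until image() is called.
 */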
PlayerImage::PlayerImage (
        shared_ptr<const Image> in,
        Crop crop,
        dcp::Size inter_size,
        dcp::Size out_size,
        Scaler const * scaler
        )
        : _in (in)
        , _crop (crop)
        , _inter_size (inter_size)
        , _out_size (out_size)
        , _scaler (scaler)
{

}

void
PlayerImage::set_subtitle (shared_ptr<const Image> image, Position<int> pos)
{
        _subtitle_image = image;
        _subtitle_position = pos;
}

shared_ptr<Image>
PlayerImage::image (AVPixelFormat format, bool aligned)
{
        shared_ptr<Image> out = _in->crop_scale_window (_crop, _inter_size, _out_size, _scaler, format, aligned);

        Position<int> const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.height) / 2);

        if (_subtitle_image) {
                out->alpha_blend (_subtitle_image, _subtitle_position);
        }

        return out;
}

void
PlayerStatistics::dump (shared_ptr<Log> log) const
{
        log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat));
        log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()));
}

PlayerStatistics const &
Player::statistics () const
{
        return _statistics;
}