New DCPTime/ContentTime types.
[dcpomatic.git] / src / lib / player.cc
/*
    Copyright (C) 2013 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include <stdint.h>
#include <algorithm>
#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "subrip_decoder.h"
#include "subrip_content.h"
#include "playlist.h"
#include "job.h"
#include "image.h"
#include "ratio.h"
#include "log.h"
#include "scaler.h"
#include "render_subtitles.h"

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;

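/** A piece of content from the playlist, together with the decoder that produces
 *  its data and the frame rate change needed to fit that content into the DCP.
 */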
class Piece
{
public:
        Piece (shared_ptr<Content> c, shared_ptr<Decoder> d, FrameRateChange f)
                : content (c)
                , decoder (d)
                , frc (f)
        {}

        shared_ptr<Content> content;
        shared_ptr<Decoder> decoder;
        FrameRateChange frc;
};

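/** Construct a Player for a film and playlist.  The Player emits Video and Audio
 *  signals as pass() is called, and watches the film and playlist for changes.
 */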
Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
        : _film (f)
        , _playlist (p)
        , _video (true)
        , _audio (true)
        , _have_valid_pieces (false)
        , _video_position (0)
        , _audio_position (0)
        , _audio_merger (f->audio_channels(), f->audio_frame_rate ())
        , _last_emit_was_black (false)
        , _just_did_inaccurate_seek (false)
        , _approximate_size (false)
{
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ()));
}

void
Player::disable_video ()
{
        _video = false;
}

void
Player::disable_audio ()
{
        _audio = false;
}

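/** Run the player for one step: peek at every piece's decoder, find the earliest
 *  undecoded data in DCP time, and emit it (or black / silence / a repeated frame,
 *  as required to keep _video_position and _audio_position contiguous).
 *  @return true if the playlist is exhausted and any remaining audio has been flushed.
 */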
bool
Player::pass ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        /* Interrogate all our pieces to find the one with the earliest decoded data */

        shared_ptr<Piece> earliest_piece;
        shared_ptr<Decoded> earliest_decoded;
        DCPTime earliest_time = DCPTime::max ();
        DCPTime earliest_audio = DCPTime::max ();

        for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {

                DCPTime const offset = (*i)->content->position() - (*i)->content->trim_start();

                bool done = false;
                shared_ptr<Decoded> dec;
                while (!done) {
                        dec = (*i)->decoder->peek ();
                        if (!dec) {
                                /* Decoder has nothing else to give us */
                                break;
                        }

                        dec->set_dcp_times ((*i)->frc, offset);
                        DCPTime const t = dec->dcp_time - offset;
                        cout << "Peeked " << (*i)->content->paths()[0] << " for " << t << " cf " << ((*i)->content->full_length() - (*i)->content->trim_end ()) << "\n";
                        if (t >= ((*i)->content->full_length() - (*i)->content->trim_end ())) {
                                /* In the end-trimmed part; decoder has nothing else to give us */
                                dec.reset ();
                                done = true;
                        } else if (t >= (*i)->content->trim_start ()) {
                                /* Within the un-trimmed part; everything's ok */
                                done = true;
                        } else {
                                /* Within the start-trimmed part; get something else */
                                (*i)->decoder->consume ();
                        }
                }

                if (!dec) {
                        continue;
                }

                if (dec->dcp_time < earliest_time) {
                        earliest_piece = *i;
                        earliest_decoded = dec;
                        earliest_time = dec->dcp_time;
                }

                if (dynamic_pointer_cast<DecodedAudio> (dec) && dec->dcp_time < earliest_audio) {
                        earliest_audio = dec->dcp_time;
                }
        }

        if (!earliest_piece) {
                flush ();
                return true;
        }

        if (earliest_audio != DCPTime::max ()) {
                if (earliest_audio.get() < 0) {
                        earliest_audio = DCPTime ();
                }
                TimedAudioBuffers<DCPTime> tb = _audio_merger.pull (earliest_audio);
                Audio (tb.audio, tb.time);
                /* This assumes that the audio-frames-to-time conversion is exact
                   so that there are no accumulated errors caused by rounding.
                */
                _audio_position += DCPTime::from_frames (tb.audio->frames(), _film->audio_frame_rate ());
        }

        /* Emit the earliest thing */

        shared_ptr<DecodedVideo> dv = dynamic_pointer_cast<DecodedVideo> (earliest_decoded);
        shared_ptr<DecodedAudio> da = dynamic_pointer_cast<DecodedAudio> (earliest_decoded);
        shared_ptr<DecodedImageSubtitle> dis = dynamic_pointer_cast<DecodedImageSubtitle> (earliest_decoded);
        shared_ptr<DecodedTextSubtitle> dts = dynamic_pointer_cast<DecodedTextSubtitle> (earliest_decoded);

        /* Will be set to false if we shouldn't consume the peeked DecodedThing */
        bool consume = true;

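        /* Reconcile what the decoder gave us with where we think we are:
           - ahead of _video_position: pad with black (or repeat the last frame if we are
             inside some video content);
           - exactly at _video_position: emit it;
           - behind _video_position: skip it.
           Audio is handled the same way, padding with silence instead of black.
        */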
        if (dv && _video) {

                if (_just_did_inaccurate_seek) {

                        /* Just emit; no subtlety */
                        emit_video (earliest_piece, dv);
                        step_video_position (dv);

                } else if (dv->dcp_time > _video_position) {

                        /* Too far ahead */

                        list<shared_ptr<Piece> >::iterator i = _pieces.begin();
                        while (i != _pieces.end() && ((*i)->content->position() >= _video_position || _video_position >= (*i)->content->end())) {
                                ++i;
                        }

                        if (i == _pieces.end() || !_last_incoming_video.video || !_have_valid_pieces) {
                                /* We're outside all video content */
                                emit_black ();
                                _statistics.video.black++;
                        } else {
                                /* We're inside some video; repeat the frame */
                                _last_incoming_video.video->dcp_time = _video_position;
                                emit_video (_last_incoming_video.weak_piece, _last_incoming_video.video);
                                step_video_position (_last_incoming_video.video);
                                _statistics.video.repeat++;
                        }

                        consume = false;

                } else if (dv->dcp_time == _video_position) {
                        /* We're ok */
                        emit_video (earliest_piece, dv);
                        step_video_position (dv);
                        _statistics.video.good++;
                } else {
                        /* Too far behind: skip */
                        _statistics.video.skip++;
                }

                _just_did_inaccurate_seek = false;

        } else if (da && _audio) {

                if (da->dcp_time > _audio_position) {
                        /* Too far ahead */
                        emit_silence (da->dcp_time - _audio_position);
                        consume = false;
                        _statistics.audio.silence += (da->dcp_time - _audio_position);
                } else if (da->dcp_time == _audio_position) {
                        /* We're ok */
                        emit_audio (earliest_piece, da);
                        _statistics.audio.good += da->data->frames();
                } else {
                        /* Too far behind: skip */
                        _statistics.audio.skip += da->data->frames();
                }

        } else if (dis && _video) {
                _image_subtitle.piece = earliest_piece;
                _image_subtitle.subtitle = dis;
                update_subtitle_from_image ();
        } else if (dts && _video) {
                _text_subtitle.piece = earliest_piece;
                _text_subtitle.subtitle = dts;
                update_subtitle_from_text ();
        }

        if (consume) {
                earliest_piece->decoder->consume ();
        }

        return false;
}

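/** Crop, scale and (if necessary) overlay a subtitle onto a decoded video frame,
 *  then emit it via the Video signal.
 */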
void
Player::emit_video (weak_ptr<Piece> weak_piece, shared_ptr<DecodedVideo> video)
{
        /* Keep a note of what came in so that we can repeat it if required */
        _last_incoming_video.weak_piece = weak_piece;
        _last_incoming_video.video = video;

        shared_ptr<Piece> piece = weak_piece.lock ();
        if (!piece) {
                return;
        }

        shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
        assert (content);

        FrameRateChange frc (content->video_frame_rate(), _film->video_frame_rate());

        float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
        dcp::Size image_size = fit_ratio_within (ratio, _video_container_size);
        if (_approximate_size) {
                image_size.width &= ~3;
                image_size.height &= ~3;
        }

        shared_ptr<PlayerImage> pi (
                new PlayerImage (
                        video->image,
                        content->crop(),
                        image_size,
                        _video_container_size,
                        _film->scaler()
                        )
                );

        if (
                _film->with_subtitles () &&
                _out_subtitle.image &&
                video->dcp_time >= _out_subtitle.from && video->dcp_time <= _out_subtitle.to
                ) {

                Position<int> const container_offset (
                        (_video_container_size.width - image_size.width) / 2,
                        (_video_container_size.height - image_size.height) / 2
                        );

                pi->set_subtitle (_out_subtitle.image, _out_subtitle.position + container_offset);
        }

#ifdef DCPOMATIC_DEBUG
        _last_video = piece->content;
#endif

        Video (pi, video->eyes, content->colour_conversion(), video->same, video->dcp_time);

        _last_emit_was_black = false;
}

void
Player::step_video_position (shared_ptr<DecodedVideo> video)
{
        /* This is a bit of a hack; don't update _video_position if EYES_RIGHT is on its way */
        if (video->eyes != EYES_LEFT) {
                /* This assumes that the video-frames-to-time conversion is exact
                   so that there are no accumulated errors caused by rounding.
                */
                _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
        }
}

void
Player::emit_audio (weak_ptr<Piece> weak_piece, shared_ptr<DecodedAudio> audio)
{
        shared_ptr<Piece> piece = weak_piece.lock ();
        if (!piece) {
                return;
        }

        shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
        assert (content);

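        /* Audio passes through three steps before being pushed to the merger: apply the
           content's gain, remap its channels into the film's DCP channel layout, then
           shift it by the content's audio delay (trimming anything that would land
           before time zero).
        */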
        /* Gain */
        if (content->audio_gain() != 0) {
                shared_ptr<AudioBuffers> gain (new AudioBuffers (audio->data));
                gain->apply_gain (content->audio_gain ());
                audio->data = gain;
        }

        /* Remap channels */
        shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->data->frames()));
        dcp_mapped->make_silent ();
        AudioMapping map = content->audio_mapping ();
        for (int i = 0; i < map.content_channels(); ++i) {
                for (int j = 0; j < _film->audio_channels(); ++j) {
                        if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
                                dcp_mapped->accumulate_channel (
                                        audio->data.get(),
                                        i,
                                        static_cast<dcp::Channel> (j),
                                        map.get (i, static_cast<dcp::Channel> (j))
                                        );
                        }
                }
        }

        audio->data = dcp_mapped;

        /* Delay */
        audio->dcp_time += DCPTime::from_seconds (content->audio_delay() / 1000.0);
        if (audio->dcp_time < DCPTime (0)) {
                int const frames = - audio->dcp_time.frames (_film->audio_frame_rate());
                if (frames >= audio->data->frames ()) {
                        return;
                }

                shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->data->channels(), audio->data->frames() - frames));
                trimmed->copy_from (audio->data.get(), audio->data->frames() - frames, frames, 0);

                audio->data = trimmed;
                audio->dcp_time = DCPTime ();
        }

        _audio_merger.push (audio->data, audio->dcp_time);
}

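/** Flush any audio left in the merger and pad video/audio with black/silence so
 *  that both streams finish at the same time.
 */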
void
Player::flush ()
{
        TimedAudioBuffers<DCPTime> tb = _audio_merger.flush ();
        if (_audio && tb.audio) {
                Audio (tb.audio, tb.time);
                _audio_position += DCPTime::from_frames (tb.audio->frames (), _film->audio_frame_rate ());
        }

        while (_video && _video_position < _audio_position) {
                emit_black ();
        }

        while (_audio && _audio_position < _video_position) {
                emit_silence (_video_position - _audio_position);
        }
}

/** Seek so that the next pass() will yield (approximately) the requested time.
 *  Pass accurate = true to try harder to get close to the request.
 */
void
Player::seek (DCPTime t, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        if (_pieces.empty ()) {
                return;
        }

        for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
                /* s is the offset of t from the start position of this content */
                DCPTime s = t - (*i)->content->position ();
                s = max (static_cast<DCPTime> (0), s);
                s = min ((*i)->content->length_after_trim(), s);

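                /* Note: s is in DCP time, but the decoder wants a position in its own
                   content time, and the two differ by the frame rate change.  As a rough
                   example (assuming the ContentTime (DCPTime, FrameRateChange) constructor
                   applies the speed-up): 24fps content in a 25fps DCP runs 25/24 faster,
                   so one second of DCP time corresponds to 25/24 seconds of content.
                */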
                /* Convert this to the content time */
                ContentTime ct (s + (*i)->content->trim_start(), (*i)->frc);

                /* And seek the decoder */
                (*i)->decoder->seek (ct, accurate);
        }

        _video_position = t.round_up (_film->video_frame_rate());
        _audio_position = t.round_up (_film->audio_frame_rate());

        _audio_merger.clear (_audio_position);

        if (!accurate) {
                /* We just did an inaccurate seek, so it's likely that the next thing seen
                   out of pass() will be a fair distance from _{video,audio}_position.  Setting
                   this flag stops pass() from trying to fix that: we assume that if it
                   was an inaccurate seek then the caller does not care too much about
                   inserting black/silence to keep the time tidy.
                */
                _just_did_inaccurate_seek = true;
        }
}

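/** Rebuild _pieces from the playlist: create (or re-use) a decoder for each piece of
 *  content, work out its frame rate change, and seek it past its start trim.
 */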
void
Player::setup_pieces ()
{
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        ContentList content = _playlist->content ();

        for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {

                if (!(*i)->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder;
                optional<FrameRateChange> frc;

                /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
                DCPTime best_overlap_t;
                shared_ptr<VideoContent> best_overlap;
                for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
                        shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
                        if (!vc) {
                                continue;
                        }

                        DCPTime const overlap = max (vc->position(), (*i)->position()) - min (vc->end(), (*i)->end());
                        if (overlap > best_overlap_t) {
                                best_overlap = vc;
                                best_overlap_t = overlap;
                        }
                }

                optional<FrameRateChange> best_overlap_frc;
                if (best_overlap) {
                        best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
                } else {
                        /* No video overlap; e.g. if the DCP is just audio */
                        best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
                }

                /* FFmpeg */
                shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
                if (fc) {
                        decoder.reset (new FFmpegDecoder (_film, fc, _video, _audio));
                        frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
                }

                /* ImageContent */
                shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
                if (ic) {
                        /* See if we can re-use an old ImageDecoder */
                        for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
                                shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
                                if (imd && imd->content() == ic) {
                                        decoder = imd;
                                }
                        }

                        if (!decoder) {
                                decoder.reset (new ImageDecoder (_film, ic));
                        }

                        frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
                }

                /* SndfileContent */
                shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
                if (sc) {
                        decoder.reset (new SndfileDecoder (_film, sc));
                        frc = best_overlap_frc;
                }

                /* SubRipContent */
                shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
                if (rc) {
                        decoder.reset (new SubRipDecoder (_film, rc));
                        frc = best_overlap_frc;
                }

                ContentTime st ((*i)->trim_start(), frc.get ());
                decoder->seek (st, true);

                _pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
        }

        _have_valid_pieces = true;

        /* The Piece for the _last_incoming_video will no longer be valid */
        _last_incoming_video.video.reset ();

        _video_position = DCPTime ();
        _audio_position = DCPTime ();
}

void
Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

        if (
                property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END ||
                property == VideoContentProperty::VIDEO_FRAME_TYPE
                ) {

                _have_valid_pieces = false;
                Changed (frequent);

        } else if (
                property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_SCALE
                ) {

                update_subtitle_from_image ();
                update_subtitle_from_text ();
                Changed (frequent);

        } else if (
                property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_RATIO ||
                property == VideoContentProperty::VIDEO_FRAME_RATE
                ) {

                Changed (frequent);

        } else if (property == ContentProperty::PATH) {

                _have_valid_pieces = false;
                Changed (frequent);
        }
}

void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}

void
Player::set_video_container_size (dcp::Size s)
{
        _video_container_size = s;

        shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
        im->make_black ();

        _black_frame.reset (
                new PlayerImage (
                        im,
                        Crop(),
                        _video_container_size,
                        _video_container_size,
                        Scaler::from_id ("bicubic")
                        )
                );
}

void
Player::emit_black ()
{
#ifdef DCPOMATIC_DEBUG
        _last_video.reset ();
#endif

        Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
        _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
        _last_emit_was_black = true;
}

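/** Emit some silence starting at _audio_position: at most @p most, and never more
 *  than 0.5s in one go, so a large gap is filled over several calls to pass().
 */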
void
Player::emit_silence (DCPTime most)
{
        if (most == 0) {
                return;
        }

        DCPTime t = min (most, DCPTime::from_seconds (0.5));
        shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), t.frames (_film->audio_frame_rate())));
        silence->make_silent ();
        Audio (silence, _audio_position);

        _audio_position += t;
}

void
Player::film_changed (Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
                Changed (false);
        }
}

void
Player::update_subtitle_from_image ()
{
        shared_ptr<Piece> piece = _image_subtitle.piece.lock ();
        if (!piece) {
                return;
        }

        if (!_image_subtitle.subtitle->image) {
                _out_subtitle.image.reset ();
                return;
        }

        shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
        assert (sc);

        dcpomatic::Rect<double> in_rect = _image_subtitle.subtitle->rect;
        dcp::Size scaled_size;

        in_rect.x += sc->subtitle_x_offset ();
        in_rect.y += sc->subtitle_y_offset ();

        /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
        scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
        scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();

        /* Then we need a corrective translation, consisting of two parts:
         *
         * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
         *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
         *
         * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
         *     (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
         *     (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
         *
         * Combining these two translations gives these expressions.
         */
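        /* Worked example (hypothetical numbers): with in_rect.x = 0.1, in_rect.width = 0.8,
           subtitle_scale = 0.9 and a 1998-pixel-wide container, position.x comes out as
           rint (1998 * (0.1 + 0.8 * (1 - 0.9) / 2)) = rint (1998 * 0.14) = 280.
        */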
        _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
        _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));

        _out_subtitle.image = _image_subtitle.subtitle->image->scale (
                scaled_size,
                Scaler::from_id ("bicubic"),
                _image_subtitle.subtitle->image->pixel_format (),
                true
                );

        _out_subtitle.from = _image_subtitle.subtitle->dcp_time + piece->content->position ();
        _out_subtitle.to = _image_subtitle.subtitle->dcp_time_to + piece->content->position ();
}

/** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
 *  @return false if this could not be done.
 */
bool
Player::repeat_last_video ()
{
        if (!_last_incoming_video.video || !_have_valid_pieces) {
                return false;
        }

        emit_video (
                _last_incoming_video.weak_piece,
                _last_incoming_video.video
                );

        return true;
}

void
Player::update_subtitle_from_text ()
{
        if (_text_subtitle.subtitle->subs.empty ()) {
                _out_subtitle.image.reset ();
                return;
        }

        render_subtitles (_text_subtitle.subtitle->subs, _video_container_size, _out_subtitle.image, _out_subtitle.position);
}

void
Player::set_approximate_size ()
{
        _approximate_size = true;
}

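/** A frame of video ready for display, with the work needed to get it there (crop,
 *  scale to the inter size, pad to the out size, maybe overlay a subtitle) deferred
 *  until image() is called.
 */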
PlayerImage::PlayerImage (
        shared_ptr<const Image> in,
        Crop crop,
        dcp::Size inter_size,
        dcp::Size out_size,
        Scaler const * scaler
        )
        : _in (in)
        , _crop (crop)
        , _inter_size (inter_size)
        , _out_size (out_size)
        , _scaler (scaler)
{

}

void
PlayerImage::set_subtitle (shared_ptr<const Image> image, Position<int> pos)
{
        _subtitle_image = image;
        _subtitle_position = pos;
}

shared_ptr<Image>
PlayerImage::image (AVPixelFormat format, bool aligned)
{
        shared_ptr<Image> out = _in->crop_scale_window (_crop, _inter_size, _out_size, _scaler, format, aligned);

        Position<int> const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.height) / 2);

        if (_subtitle_image) {
                out->alpha_blend (_subtitle_image, _subtitle_position);
        }

        return out;
}

void
PlayerStatistics::dump (shared_ptr<Log> log) const
{
        log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat));
        log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence));
}

PlayerStatistics const &
Player::statistics () const
{
        return _statistics;
}