/*
    Copyright (C) 2013 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include <stdint.h>
#include <algorithm>
#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "subrip_decoder.h"
#include "subrip_content.h"
#include "playlist.h"
#include "job.h"
#include "image.h"
#include "ratio.h"
#include "log.h"
#include "scaler.h"
#include "render_subtitles.h"

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;

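/** A piece of content together with the decoder that will produce its data and
 *  the frame rate change needed to fit it into the DCP.
 */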
class Piece
{
public:
        Piece (shared_ptr<Content> c, shared_ptr<Decoder> d, FrameRateChange f)
                : content (c)
                , decoder (d)
                , frc (f)
        {}

        shared_ptr<Content> content;
        shared_ptr<Decoder> decoder;
        FrameRateChange frc;
};

Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
        : _film (f)
        , _playlist (p)
        , _video (true)
        , _audio (true)
        , _have_valid_pieces (false)
        , _video_position (0)
        , _audio_position (0)
        , _audio_merger (f->audio_channels(), f->audio_frame_rate ())
        , _last_emit_was_black (false)
        , _just_did_inaccurate_seek (false)
        , _approximate_size (false)
{
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ()));
}

void
Player::disable_video ()
{
        _video = false;
}

void
Player::disable_audio ()
{
        _audio = false;
}

bool
Player::pass ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        /* Interrogate all our pieces to find the one with the earliest decoded data */

        shared_ptr<Piece> earliest_piece;
        shared_ptr<Decoded> earliest_decoded;
        DCPTime earliest_time = DCPTime::max ();
        DCPTime earliest_audio = DCPTime::max ();

        for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {

                DCPTime const offset = (*i)->content->position() - (*i)->content->trim_start();

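                /* Peek at this piece's next decoded item, discarding anything which
                   falls within the start trim and stopping if we reach the end trim,
                   so that dec ends up as the first useful Decoded (or null).
                */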
                bool done = false;
                shared_ptr<Decoded> dec;
                while (!done) {
                        dec = (*i)->decoder->peek ();
                        if (!dec) {
                                /* Decoder has nothing else to give us */
                                break;
                        }

                        dec->set_dcp_times ((*i)->frc, offset);
                        DCPTime const t = dec->dcp_time - offset;
                        cout << "Peeked " << (*i)->content->paths()[0] << " for " << t << " cf " << ((*i)->content->full_length() - (*i)->content->trim_end ()) << "\n";
                        if (t >= ((*i)->content->full_length() - (*i)->content->trim_end ())) {
                                /* In the end-trimmed part; decoder has nothing else to give us */
                                dec.reset ();
                                done = true;
                        } else if (t >= (*i)->content->trim_start ()) {
                                /* Within the un-trimmed part; everything's ok */
                                done = true;
                        } else {
                                /* Within the start-trimmed part; get something else */
                                (*i)->decoder->consume ();
                        }
                }

                if (!dec) {
                        continue;
                }

                if (dec->dcp_time < earliest_time) {
                        earliest_piece = *i;
                        earliest_decoded = dec;
                        earliest_time = dec->dcp_time;
                }

                if (dynamic_pointer_cast<DecodedAudio> (dec) && dec->dcp_time < earliest_audio) {
                        earliest_audio = dec->dcp_time;
                }
        }

        if (!earliest_piece) {
                flush ();
                return true;
        }

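        /* No decoder can now give us any audio earlier than earliest_audio, so
           anything that the merger already has before that time is complete and
           can be emitted.
        */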
        if (earliest_audio != DCPTime::max ()) {
                if (earliest_audio.get() < 0) {
                        earliest_audio = DCPTime ();
                }
                TimedAudioBuffers<DCPTime> tb = _audio_merger.pull (earliest_audio);
                Audio (tb.audio, tb.time);
                /* This assumes that the audio-frames-to-time conversion is exact
                   so that there are no accumulated errors caused by rounding.
                */
                _audio_position += DCPTime::from_frames (tb.audio->frames(), _film->audio_frame_rate ());
        }

        /* Emit the earliest thing */

        shared_ptr<DecodedVideo> dv = dynamic_pointer_cast<DecodedVideo> (earliest_decoded);
        shared_ptr<DecodedAudio> da = dynamic_pointer_cast<DecodedAudio> (earliest_decoded);
        shared_ptr<DecodedImageSubtitle> dis = dynamic_pointer_cast<DecodedImageSubtitle> (earliest_decoded);
        shared_ptr<DecodedTextSubtitle> dts = dynamic_pointer_cast<DecodedTextSubtitle> (earliest_decoded);

        /* Will be set to false if we shouldn't consume the peeked DecodedThing */
        bool consume = true;

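        /* Video: if the decoded frame is ahead of _video_position we fill the gap
           with black (or a repeat of the last frame) without consuming it; if it
           matches _video_position we emit it; if it is behind we skip it.
        */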
        if (dv && _video) {

                if (_just_did_inaccurate_seek) {

                        /* Just emit; no subtlety */
                        emit_video (earliest_piece, dv);
                        step_video_position (dv);

                } else if (dv->dcp_time > _video_position) {

                        /* Too far ahead */

                        list<shared_ptr<Piece> >::iterator i = _pieces.begin();
                        while (i != _pieces.end() && ((*i)->content->position() >= _video_position || _video_position >= (*i)->content->end())) {
                                ++i;
                        }

                        if (i == _pieces.end() || !_last_incoming_video.video || !_have_valid_pieces) {
                                /* We're outside all video content */
                                emit_black ();
                                _statistics.video.black++;
                        } else {
                                /* We're inside some video; repeat the frame */
                                _last_incoming_video.video->dcp_time = _video_position;
                                emit_video (_last_incoming_video.weak_piece, _last_incoming_video.video);
                                step_video_position (_last_incoming_video.video);
                                _statistics.video.repeat++;
                        }

                        consume = false;

                } else if (dv->dcp_time == _video_position) {
                        /* We're ok */
                        emit_video (earliest_piece, dv);
                        step_video_position (dv);
                        _statistics.video.good++;
                } else {
                        /* Too far behind: skip */
                        _statistics.video.skip++;
                }

                _just_did_inaccurate_seek = false;

        } else if (da && _audio) {

                if (da->dcp_time > _audio_position) {
                        /* Too far ahead */
                        emit_silence (da->dcp_time - _audio_position);
                        consume = false;
                        _statistics.audio.silence += (da->dcp_time - _audio_position);
                } else if (da->dcp_time == _audio_position) {
                        /* We're ok */
                        emit_audio (earliest_piece, da);
                        _statistics.audio.good += da->data->frames();
                } else {
                        /* Too far behind: skip */
                        _statistics.audio.skip += da->data->frames();
                }

        } else if (dis && _video) {
                _image_subtitle.piece = earliest_piece;
                _image_subtitle.subtitle = dis;
                update_subtitle_from_image ();
        } else if (dts && _video) {
                _text_subtitle.piece = earliest_piece;
                _text_subtitle.subtitle = dts;
                update_subtitle_from_text ();
        }

        if (consume) {
                earliest_piece->decoder->consume ();
        }

        return false;
}

void
Player::emit_video (weak_ptr<Piece> weak_piece, shared_ptr<DecodedVideo> video)
{
        /* Keep a note of what came in so that we can repeat it if required */
        _last_incoming_video.weak_piece = weak_piece;
        _last_incoming_video.video = video;

        shared_ptr<Piece> piece = weak_piece.lock ();
        if (!piece) {
                return;
        }

        shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
        assert (content);

        FrameRateChange frc (content->video_frame_rate(), _film->video_frame_rate());

        dcp::Size image_size = content->scale().size (content, _video_container_size);
        if (_approximate_size) {
                image_size.width &= ~3;
                image_size.height &= ~3;
        }

        shared_ptr<PlayerImage> pi (
                new PlayerImage (
                        video->image,
                        content->crop(),
                        image_size,
                        _video_container_size,
                        _film->scaler()
                        )
                );

        if (
                _film->with_subtitles () &&
                _out_subtitle.image &&
                video->dcp_time >= _out_subtitle.from && video->dcp_time <= _out_subtitle.to
                ) {

                Position<int> const container_offset (
                        (_video_container_size.width - image_size.width) / 2,
                        (_video_container_size.height - image_size.height) / 2
                        );

                pi->set_subtitle (_out_subtitle.image, _out_subtitle.position + container_offset);
        }

#ifdef DCPOMATIC_DEBUG
        _last_video = piece->content;
#endif

        Video (pi, video->eyes, content->colour_conversion(), video->same, video->dcp_time);

        _last_emit_was_black = false;
}

void
Player::step_video_position (shared_ptr<DecodedVideo> video)
{
        /* This is a bit of a hack; don't update _video_position if EYES_RIGHT is on its way */
        if (video->eyes != EYES_LEFT) {
                /* This assumes that the video-frames-to-time conversion is exact
                   so that there are no accumulated errors caused by rounding.
                */
                _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
        }
}

void
Player::emit_audio (weak_ptr<Piece> weak_piece, shared_ptr<DecodedAudio> audio)
{
        shared_ptr<Piece> piece = weak_piece.lock ();
        if (!piece) {
                return;
        }

        shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
        assert (content);

        /* Gain */
        if (content->audio_gain() != 0) {
                shared_ptr<AudioBuffers> gain (new AudioBuffers (audio->data));
                gain->apply_gain (content->audio_gain ());
                audio->data = gain;
        }

        /* Remap channels */
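        /* Accumulate each content channel into every DCP channel that it is
           mapped to, scaled by the gain given in the mapping.
        */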
        shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->data->frames()));
        dcp_mapped->make_silent ();
        AudioMapping map = content->audio_mapping ();
        for (int i = 0; i < map.content_channels(); ++i) {
                for (int j = 0; j < _film->audio_channels(); ++j) {
                        if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
                                dcp_mapped->accumulate_channel (
                                        audio->data.get(),
                                        i,
                                        static_cast<dcp::Channel> (j),
                                        map.get (i, static_cast<dcp::Channel> (j))
                                        );
                        }
                }
        }

        audio->data = dcp_mapped;

        /* Delay */
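        /* If the delay pushes the start of this block before time zero, discard
           the frames which fall before zero and start the remainder at zero (or
           drop the whole block if all of it falls before zero).
        */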
        audio->dcp_time += DCPTime::from_seconds (content->audio_delay() / 1000.0);
        if (audio->dcp_time < DCPTime (0)) {
                int const frames = - audio->dcp_time.frames (_film->audio_frame_rate());
                if (frames >= audio->data->frames ()) {
                        return;
                }

                shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->data->channels(), audio->data->frames() - frames));
                trimmed->copy_from (audio->data.get(), audio->data->frames() - frames, frames, 0);

                audio->data = trimmed;
                audio->dcp_time = DCPTime ();
        }

        _audio_merger.push (audio->data, audio->dcp_time);
}

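/** Flush out any audio left in the merger, then pad with black and/or silence
 *  so that the video and audio positions finish level with each other.
 */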
void
Player::flush ()
{
        TimedAudioBuffers<DCPTime> tb = _audio_merger.flush ();
        if (_audio && tb.audio) {
                Audio (tb.audio, tb.time);
                _audio_position += DCPTime::from_frames (tb.audio->frames (), _film->audio_frame_rate ());
        }

        while (_video && _video_position < _audio_position) {
                emit_black ();
        }

        while (_audio && _audio_position < _video_position) {
                emit_silence (_video_position - _audio_position);
        }
}

/** Seek so that the next pass() will yield (approximately) the requested time.
 *  Pass accurate = true to try harder to get close to the request.
 */
void
Player::seek (DCPTime t, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        if (_pieces.empty ()) {
                return;
        }

        for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
                /* s is the offset of t from the start position of this content */
                DCPTime s = t - (*i)->content->position ();
                s = max (static_cast<DCPTime> (0), s);
                s = min ((*i)->content->length_after_trim(), s);

                /* Convert this to the content time */
                ContentTime ct (s + (*i)->content->trim_start(), (*i)->frc);

                /* And seek the decoder */
                (*i)->decoder->seek (ct, accurate);
        }

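        /* Round up to whole frames at the video and audio rates so that the next
           pass() starts on a frame boundary.
        */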
        _video_position = t.round_up (_film->video_frame_rate());
        _audio_position = t.round_up (_film->audio_frame_rate());

        _audio_merger.clear (_audio_position);

        if (!accurate) {
                /* We just did an inaccurate seek, so it's likely that the next thing seen
                   out of pass() will be a fair distance from _{video,audio}_position.  Setting
                   this flag stops pass() from trying to fix that: we assume that if it
                   was an inaccurate seek then the caller does not care too much about
                   inserting black/silence to keep the time tidy.
                */
                _just_did_inaccurate_seek = true;
        }
}

void
Player::setup_pieces ()
{
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        ContentList content = _playlist->content ();

        for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {

                if (!(*i)->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder;
                optional<FrameRateChange> frc;

                /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
                DCPTime best_overlap_t;
                shared_ptr<VideoContent> best_overlap;
                for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
                        shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
                        if (!vc) {
                                continue;
                        }

                        DCPTime const overlap = min (vc->end(), (*i)->end()) - max (vc->position(), (*i)->position());
                        if (overlap > best_overlap_t) {
                                best_overlap = vc;
                                best_overlap_t = overlap;
                        }
                }

                optional<FrameRateChange> best_overlap_frc;
                if (best_overlap) {
                        best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
                } else {
                        /* No video overlap; e.g. if the DCP is just audio */
                        best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
                }

                /* FFmpeg */
                shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
                if (fc) {
                        decoder.reset (new FFmpegDecoder (_film, fc, _video, _audio));
                        frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
                }

                /* ImageContent */
                shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
                if (ic) {
                        /* See if we can re-use an old ImageDecoder */
                        for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
                                shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
                                if (imd && imd->content() == ic) {
                                        decoder = imd;
                                }
                        }

                        if (!decoder) {
                                decoder.reset (new ImageDecoder (_film, ic));
                        }

                        frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
                }

                /* SndfileContent */
                shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
                if (sc) {
                        decoder.reset (new SndfileDecoder (_film, sc));
                        frc = best_overlap_frc;
                }

                /* SubRipContent */
                shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
                if (rc) {
                        decoder.reset (new SubRipDecoder (_film, rc));
                        frc = best_overlap_frc;
                }

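                /* Seek the new decoder past the start trim so that the first thing
                   it gives us is wanted content.
                */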
                ContentTime st ((*i)->trim_start(), frc.get ());
                decoder->seek (st, true);

                _pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
        }

        _have_valid_pieces = true;

        /* The Piece for the _last_incoming_video will no longer be valid */
        _last_incoming_video.video.reset ();

        _video_position = DCPTime ();
        _audio_position = DCPTime ();
}

void
Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

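        /* Changes to position, length, trims, frame type or paths invalidate our
           Pieces; subtitle placement changes update the stored subtitle; other
           purely visual changes just require listeners to refresh.
        */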
        if (
                property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END ||
                property == VideoContentProperty::VIDEO_FRAME_TYPE
                ) {

                _have_valid_pieces = false;
                Changed (frequent);

        } else if (
                property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_SCALE
                ) {

                update_subtitle_from_image ();
                update_subtitle_from_text ();
                Changed (frequent);

        } else if (
                property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_SCALE ||
                property == VideoContentProperty::VIDEO_FRAME_RATE
                ) {

                Changed (frequent);

        } else if (property == ContentProperty::PATH) {

                _have_valid_pieces = false;
                Changed (frequent);
        }
}

void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}

void
Player::set_video_container_size (dcp::Size s)
{
        _video_container_size = s;

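        /* Pre-render a black frame at the new container size so that emit_black()
           can re-use it without any per-frame work.
        */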
        shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
        im->make_black ();

        _black_frame.reset (
                new PlayerImage (
                        im,
                        Crop(),
                        _video_container_size,
                        _video_container_size,
                        Scaler::from_id ("bicubic")
                        )
                );
}

void
Player::emit_black ()
{
#ifdef DCPOMATIC_DEBUG
        _last_video.reset ();
#endif

        Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
        _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
        _last_emit_was_black = true;
}

void
Player::emit_silence (DCPTime most)
{
        if (most == 0) {
                return;
        }

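        /* Emit no more than half a second of silence at a time; longer gaps are
           filled across successive calls.
        */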
        DCPTime t = min (most, DCPTime::from_seconds (0.5));
        shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), t.frames (_film->audio_frame_rate())));
        silence->make_silent ();
        Audio (silence, _audio_position);

        _audio_position += t;
}

void
Player::film_changed (Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
                Changed (false);
        }
}

void
Player::update_subtitle_from_image ()
{
        shared_ptr<Piece> piece = _image_subtitle.piece.lock ();
        if (!piece) {
                return;
        }

        if (!_image_subtitle.subtitle->image) {
                _out_subtitle.image.reset ();
                return;
        }

        shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
        assert (sc);

        dcpomatic::Rect<double> in_rect = _image_subtitle.subtitle->rect;
        dcp::Size scaled_size;

        in_rect.x += sc->subtitle_x_offset ();
        in_rect.y += sc->subtitle_y_offset ();

        /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
        scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
        scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();

        /* Then we need a corrective translation, consisting of two parts:
         *
         * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
         *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
         *
         * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
         *     (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
         *     (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
         *
         * Combining these two translations gives these expressions.
         */
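        /* For example (illustrative figures): with in_rect.x = 0.1, in_rect.width = 0.5,
         * subtitle_scale = 0.8 and a 1998-pixel-wide container, position.x is
         * rint (1998 * (0.1 + 0.5 * (1 - 0.8) / 2)) = rint (299.7) = 300.
         */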

        _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
        _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));

        _out_subtitle.image = _image_subtitle.subtitle->image->scale (
                scaled_size,
                Scaler::from_id ("bicubic"),
                _image_subtitle.subtitle->image->pixel_format (),
                true
                );

        _out_subtitle.from = _image_subtitle.subtitle->dcp_time + piece->content->position ();
        _out_subtitle.to = _image_subtitle.subtitle->dcp_time_to + piece->content->position ();
}

/** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
 *  @return false if this could not be done.
 */
bool
Player::repeat_last_video ()
{
        if (!_last_incoming_video.video || !_have_valid_pieces) {
                return false;
        }

        emit_video (
                _last_incoming_video.weak_piece,
                _last_incoming_video.video
                );

        return true;
}

void
Player::update_subtitle_from_text ()
{
        if (_text_subtitle.subtitle->subs.empty ()) {
                _out_subtitle.image.reset ();
                return;
        }

        render_subtitles (_text_subtitle.subtitle->subs, _video_container_size, _out_subtitle.image, _out_subtitle.position);
}

void
Player::set_approximate_size ()
{
        _approximate_size = true;
}

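/** @param in Image to take as the source.
 *  @param crop Crop to apply to the source image.
 *  @param inter_size Size to scale the cropped image to.
 *  @param out_size Size of the overall output image that the scaled image is placed in.
 *  @param scaler Scaler to use.
 */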
PlayerImage::PlayerImage (
        shared_ptr<const Image> in,
        Crop crop,
        dcp::Size inter_size,
        dcp::Size out_size,
        Scaler const * scaler
        )
        : _in (in)
        , _crop (crop)
        , _inter_size (inter_size)
        , _out_size (out_size)
        , _scaler (scaler)
{

}

void
PlayerImage::set_subtitle (shared_ptr<const Image> image, Position<int> pos)
{
        _subtitle_image = image;
        _subtitle_position = pos;
}

shared_ptr<Image>
PlayerImage::image (AVPixelFormat format, bool aligned)
{
        shared_ptr<Image> out = _in->crop_scale_window (_crop, _inter_size, _out_size, _scaler, format, aligned);

        Position<int> const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.height) / 2);

        if (_subtitle_image) {
                out->alpha_blend (_subtitle_image, _subtitle_position);
        }

        return out;
}

void
PlayerStatistics::dump (shared_ptr<Log> log) const
{
        log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat));
        log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence));
}

PlayerStatistics const &
Player::statistics () const
{
        return _statistics;
}