Merge master.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
3
4     This program is free software; you can redistribute it and/or modify
5     it under the terms of the GNU General Public License as published by
6     the Free Software Foundation; either version 2 of the License, or
7     (at your option) any later version.
8
9     This program is distributed in the hope that it will be useful,
10     but WITHOUT ANY WARRANTY; without even the implied warranty of
11     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12     GNU General Public License for more details.
13
14     You should have received a copy of the GNU General Public License
15     along with this program; if not, write to the Free Software
16     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17
18 */
19
20 #include <stdint.h>
21 #include <algorithm>
22 #include "player.h"
23 #include "film.h"
24 #include "ffmpeg_decoder.h"
25 #include "audio_buffers.h"
26 #include "ffmpeg_content.h"
27 #include "image_decoder.h"
28 #include "image_content.h"
29 #include "sndfile_decoder.h"
30 #include "sndfile_content.h"
31 #include "subtitle_content.h"
32 #include "subrip_decoder.h"
33 #include "subrip_content.h"
34 #include "dcp_content.h"
35 #include "playlist.h"
36 #include "job.h"
37 #include "image.h"
38 #include "raw_image_proxy.h"
39 #include "ratio.h"
40 #include "log.h"
41 #include "scaler.h"
42 #include "render_subtitles.h"
43 #include "config.h"
44 #include "content_video.h"
45 #include "player_video.h"
46 #include "frame_rate_change.h"
47 #include "dcp_content.h"
48 #include "dcp_decoder.h"
49 #include "dcp_subtitle_content.h"
50 #include "dcp_subtitle_decoder.h"
51
/* Write a general-type message to the film's log.  NOTE(review): the trailing
   semicolon inside the macro means call sites written `LOG_GENERAL (...);`
   expand to a harmless extra empty statement — confirm no use inside an
   un-braced if/else.
*/
#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);
53
54 using std::list;
55 using std::cout;
56 using std::min;
57 using std::max;
58 using std::min;
59 using std::vector;
60 using std::pair;
61 using std::map;
62 using std::make_pair;
63 using boost::shared_ptr;
64 using boost::weak_ptr;
65 using boost::dynamic_pointer_cast;
66 using boost::optional;
67
/** Construct a Player which produces the video/audio/subtitle output of a Playlist
 *  in the context of a Film.
 *  @param f Film that supplies global parameters (frame rate, container size, etc.).
 *  @param p Playlist whose content this Player will play.
 */
Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
        : _film (f)
        , _playlist (p)
        , _have_valid_pieces (false)
        , _approximate_size (false)
{
        /* Listen for changes to the playlist, its content and the film so that we can
           invalidate our pieces and/or tell our own listeners that our output has changed.
        */
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        /* Start with the container size that the film specifies */
        set_video_container_size (_film->frame_size ());
}
79
80 void
81 Player::setup_pieces ()
82 {
83         list<shared_ptr<Piece> > old_pieces = _pieces;
84         _pieces.clear ();
85
86         ContentList content = _playlist->content ();
87
88         for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
89
90                 if (!(*i)->paths_valid ()) {
91                         continue;
92                 }
93                 
94                 shared_ptr<Decoder> decoder;
95                 optional<FrameRateChange> frc;
96
97                 /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
98                 DCPTime best_overlap_t;
99                 shared_ptr<VideoContent> best_overlap;
100                 for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
101                         shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
102                         if (!vc) {
103                                 continue;
104                         }
105                         
106                         DCPTime const overlap = max (vc->position(), (*i)->position()) - min (vc->end(), (*i)->end());
107                         if (overlap > best_overlap_t) {
108                                 best_overlap = vc;
109                                 best_overlap_t = overlap;
110                         }
111                 }
112
113                 optional<FrameRateChange> best_overlap_frc;
114                 if (best_overlap) {
115                         best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
116                 } else {
117                         /* No video overlap; e.g. if the DCP is just audio */
118                         best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
119                 }
120
121                 /* FFmpeg */
122                 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
123                 if (fc) {
124                         decoder.reset (new FFmpegDecoder (fc, _film->log()));
125                         frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
126                 }
127
128                 shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (*i);
129                 if (dc) {
130                         decoder.reset (new DCPDecoder (dc, _film->log ()));
131                         frc = FrameRateChange (dc->video_frame_rate(), _film->video_frame_rate());
132                 }
133
134                 /* ImageContent */
135                 shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
136                 if (ic) {
137                         /* See if we can re-use an old ImageDecoder */
138                         for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
139                                 shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
140                                 if (imd && imd->content() == ic) {
141                                         decoder = imd;
142                                 }
143                         }
144
145                         if (!decoder) {
146                                 decoder.reset (new ImageDecoder (ic));
147                         }
148
149                         frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
150                 }
151
152                 /* SndfileContent */
153                 shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
154                 if (sc) {
155                         decoder.reset (new SndfileDecoder (sc));
156                         frc = best_overlap_frc;
157                 }
158
159                 /* SubRipContent */
160                 shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
161                 if (rc) {
162                         decoder.reset (new SubRipDecoder (rc));
163                         frc = best_overlap_frc;
164                 }
165
166                 /* DCPSubtitleContent */
167                 shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (*i);
168                 if (dsc) {
169                         decoder.reset (new DCPSubtitleDecoder (dsc));
170                         frc = best_overlap_frc;
171                 }
172
173                 _pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
174         }
175
176         _have_valid_pieces = true;
177 }
178
179 void
180 Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
181 {
182         shared_ptr<Content> c = w.lock ();
183         if (!c) {
184                 return;
185         }
186
187         if (
188                 property == ContentProperty::POSITION ||
189                 property == ContentProperty::LENGTH ||
190                 property == ContentProperty::TRIM_START ||
191                 property == ContentProperty::TRIM_END ||
192                 property == ContentProperty::PATH ||
193                 property == VideoContentProperty::VIDEO_FRAME_TYPE ||
194                 property == DCPContentProperty::CAN_BE_PLAYED
195                 ) {
196                 
197                 _have_valid_pieces = false;
198                 Changed (frequent);
199
200         } else if (
201                 property == SubtitleContentProperty::USE_SUBTITLES ||
202                 property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
203                 property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
204                 property == SubtitleContentProperty::SUBTITLE_X_SCALE ||
205                 property == SubtitleContentProperty::SUBTITLE_Y_SCALE ||
206                 property == VideoContentProperty::VIDEO_CROP ||
207                 property == VideoContentProperty::VIDEO_SCALE ||
208                 property == VideoContentProperty::VIDEO_FRAME_RATE
209                 ) {
210                 
211                 Changed (frequent);
212         }
213 }
214
/** Called when the playlist's structure (rather than a single content property)
 *  has changed: invalidate our pieces and tell listeners our output has changed.
 */
void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}
222
/** Set the size of the container that video will be played back in,
 *  and rebuild the black frame used when no video content is present.
 *  @param s New container size in pixels.
 */
void
Player::set_video_container_size (dcp::Size s)
{
        _video_container_size = s;

        /* Pre-build a black RGB frame of the new size so that get_video() can
           return it cheaply whenever there is no video at the requested time.
        */
        _black_image.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
        _black_image->make_black ();
}
231
/** Called when a property of the film has changed.
 *  @param p The property that changed.
 */
void
Player::film_changed (Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::SCALER || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
                Changed (false);
        }
}
244
245 list<PositionImage>
246 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
247 {
248         list<PositionImage> all;
249         
250         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
251                 if (!i->image) {
252                         continue;
253                 }
254
255                 /* We will scale the subtitle up to fit _video_container_size */
256                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
257                 
258                 /* Then we need a corrective translation, consisting of two parts:
259                  *
260                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
261                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
262                  *
263                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
264                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
265                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
266                  *
267                  * Combining these two translations gives these expressions.
268                  */
269
270                 all.push_back (
271                         PositionImage (
272                                 i->image->scale (
273                                         scaled_size,
274                                         Scaler::from_id ("bicubic"),
275                                         i->image->pixel_format (),
276                                         true
277                                         ),
278                                 Position<int> (
279                                         rint (_video_container_size.width * i->rectangle.x),
280                                         rint (_video_container_size.height * i->rectangle.y)
281                                         )
282                                 )
283                         );
284         }
285
286         return all;
287 }
288
/** Tell this Player that approximately-sized (lower quality, faster) images are
 *  acceptable; get_video() will then scale to a reduced, 4-aligned size.
 *  There is no way to switch back to exact sizes once this is set.
 */
void
Player::set_approximate_size ()
{
        _approximate_size = true;
}
294
/** Make a PlayerVideo which represents a black frame filling the whole container.
 *  @param time DCP time at which the frame is to be shown.
 *  @return New black PlayerVideo.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (DCPTime time) const
{
        return shared_ptr<PlayerVideo> (
                new PlayerVideo (
                        /* Wrap the pre-built black image (see set_video_container_size) */
                        shared_ptr<const ImageProxy> (new RawImageProxy (_black_image, _film->log ())),
                        time,
                        Crop (),
                        _video_container_size,
                        _video_container_size,
                        Scaler::from_id ("bicubic"),
                        EYES_BOTH,
                        PART_WHOLE,
                        /* NOTE(review): assumes Config always has at least one colour
                           conversion preset — confirm; front() on an empty list is undefined.
                        */
                        Config::instance()->colour_conversions().front().conversion
                )
        );
}
312
313 /** @return All PlayerVideos at the given time (there may be two frames for 3D) */
314 list<shared_ptr<PlayerVideo> >
315 Player::get_video (DCPTime time, bool accurate)
316 {
317         if (!_have_valid_pieces) {
318                 setup_pieces ();
319         }
320         
321         list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
322                 time,
323                 time + DCPTime::from_frames (1, _film->video_frame_rate ())
324                 );
325
326         list<shared_ptr<PlayerVideo> > pvf;
327
328         if (ov.empty ()) {
329                 /* No video content at this time */
330                 pvf.push_back (black_player_video_frame (time));
331         } else {
332                 /* Create a PlayerVideo from the content's video at this time */
333
334                 shared_ptr<Piece> piece = ov.back ();
335                 shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
336                 assert (decoder);
337                 shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
338                 assert (content);
339
340                 list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
341                 if (content_video.empty ()) {
342                         pvf.push_back (black_player_video_frame (time));
343                         return pvf;
344                 }
345                 
346                 dcp::Size image_size = content->scale().size (content, _video_container_size, _film->frame_size (), _approximate_size ? 4 : 1);
347                 if (_approximate_size) {
348                         image_size.width &= ~3;
349                         image_size.height &= ~3;
350                 }
351                 
352                 for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
353                         pvf.push_back (
354                                 shared_ptr<PlayerVideo> (
355                                         new PlayerVideo (
356                                                 i->image,
357                                                 content_video_to_dcp (piece, i->frame),
358                                                 content->crop (),
359                                                 image_size,
360                                                 _video_container_size,
361                                                 _film->scaler(),
362                                                 i->eyes,
363                                                 i->part,
364                                                 content->colour_conversion ()
365                                                 )
366                                         )
367                                 );
368                 }
369         }
370
371         /* Add subtitles (for possible burn-in) to whatever PlayerVideos we got */
372
373         PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false);
374
375         list<PositionImage> sub_images;
376
377         /* Image subtitles */
378         list<PositionImage> c = transform_image_subtitles (ps.image);
379         copy (c.begin(), c.end(), back_inserter (sub_images));
380
381         /* Text subtitles (rendered to images) */
382         sub_images.push_back (render_subtitles (ps.text, _video_container_size));
383         
384         if (!sub_images.empty ()) {
385                 for (list<shared_ptr<PlayerVideo> >::const_iterator i = pvf.begin(); i != pvf.end(); ++i) {
386                         (*i)->set_subtitle (merge (sub_images));
387                 }
388         }       
389                 
390         return pvf;
391 }
392
/** Fetch mixed audio for a period of the DCP.
 *  @param time Start of the period.
 *  @param length Length of the period.
 *  @param accurate true to try hard to return exactly the right samples.
 *  @return Buffers of _film->audio_channels() channels and length.frames() frames,
 *  silent where no content contributes.
 */
shared_ptr<AudioBuffers>
Player::get_audio (DCPTime time, DCPTime length, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        AudioFrame const length_frames = length.frames (_film->audio_frame_rate ());

        /* Start from silence and accumulate each piece's contribution into it */
        shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
        audio->make_silent ();
        
        list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
        if (ov.empty ()) {
                return audio;
        }

        for (list<shared_ptr<Piece> >::iterator i = ov.begin(); i != ov.end(); ++i) {

                shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> ((*i)->content);
                assert (content);
                shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
                assert (decoder);

                if (content->audio_frame_rate() == 0) {
                        /* This AudioContent has no audio (e.g. if it is an FFmpegContent with no
                         * audio stream).
                         */
                        continue;
                }

                /* The time that we should request from the content; audio_delay() is in
                   milliseconds, positive values delaying the sound relative to the picture.
                */
                DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
                DCPTime offset;
                if (request < DCPTime ()) {
                        /* We went off the start of the content, so we will need to offset
                           the stuff we get back.
                        */
                        offset = -request;
                        request = DCPTime ();
                }

                AudioFrame const content_frame = dcp_to_content_audio (*i, request);

                /* Audio from this piece's decoder (which might be more or less than what we asked for) */
                shared_ptr<ContentAudio> all = decoder->get_audio (content_frame, length_frames, accurate);

                /* Gain */
                if (content->audio_gain() != 0) {
                        /* Copy before applying gain so we don't modify the decoder's buffers */
                        shared_ptr<AudioBuffers> gain (new AudioBuffers (all->audio));
                        gain->apply_gain (content->audio_gain ());
                        all->audio = gain;
                }

                /* Remap the content's channels onto the film's DCP channels according to
                   the content's AudioMapping, accumulating (so several content channels
                   may feed one DCP channel).
                */
                shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all->audio->frames()));
                dcp_mapped->make_silent ();
                AudioMapping map = content->audio_mapping ();
                /* NOTE(review): this `i` shadows the piece iterator above — confusing but
                   harmless here; consider renaming if this code is touched again.
                */
                for (int i = 0; i < map.content_channels(); ++i) {
                        for (int j = 0; j < _film->audio_channels(); ++j) {
                                if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
                                        dcp_mapped->accumulate_channel (
                                                all->audio.get(),
                                                i,
                                                j,
                                                map.get (i, static_cast<dcp::Channel> (j))
                                                );
                                }
                        }
                }
                
                all->audio = dcp_mapped;

                /* Mix this piece's audio into the output, shifting by `offset' if we had to
                   clamp the request to the start of the content.
                */
                audio->accumulate_frames (
                        all->audio.get(),
                        content_frame - all->frame,
                        offset.frames (_film->audio_frame_rate()),
                        min (AudioFrame (all->audio->frames()), length_frames) - offset.frames (_film->audio_frame_rate ())
                        );
        }

        return audio;
}
476
477 VideoFrame
478 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
479 {
480         /* s is the offset of t from the start position of this content */
481         DCPTime s = t - piece->content->position ();
482         s = DCPTime (max (DCPTime::Type (0), s.get ()));
483         s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));
484
485         /* Convert this to the content frame */
486         return DCPTime (s + piece->content->trim_start()).frames (_film->video_frame_rate()) * piece->frc.factor ();
487 }
488
489 DCPTime
490 Player::content_video_to_dcp (shared_ptr<const Piece> piece, VideoFrame f) const
491 {
492         DCPTime t = DCPTime::from_frames (f / piece->frc.factor (), _film->video_frame_rate()) - piece->content->trim_start () + piece->content->position ();
493         if (t < DCPTime ()) {
494                 t = DCPTime ();
495         }
496
497         return t;
498 }
499
500 AudioFrame
501 Player::dcp_to_content_audio (shared_ptr<const Piece> piece, DCPTime t) const
502 {
503         /* s is the offset of t from the start position of this content */
504         DCPTime s = t - piece->content->position ();
505         s = DCPTime (max (DCPTime::Type (0), s.get ()));
506         s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));
507
508         /* Convert this to the content frame */
509         return DCPTime (s + piece->content->trim_start()).frames (_film->audio_frame_rate());
510 }
511
512 ContentTime
513 Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
514 {
515         /* s is the offset of t from the start position of this content */
516         DCPTime s = t - piece->content->position ();
517         s = DCPTime (max (DCPTime::Type (0), s.get ()));
518         s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));
519
520         return ContentTime (s + piece->content->trim_start(), piece->frc);
521 }
522
/** Write a summary of these statistics to a log.
 *  @param log Log to write to.
 */
void
PlayerStatistics::dump (shared_ptr<Log> log) const
{
        log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat), Log::TYPE_GENERAL);
        log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()), Log::TYPE_GENERAL);
}
529
/** @return Statistics gathered by this Player */
PlayerStatistics const &
Player::statistics () const
{
        return _statistics;
}
535
536 PlayerSubtitles
537 Player::get_subtitles (DCPTime time, DCPTime length, bool starting)
538 {
539         list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (time, time + length);
540
541         PlayerSubtitles ps (time, length);
542
543         for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
544                 shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*j)->content);
545                 if (!subtitle_content->use_subtitles ()) {
546                         continue;
547                 }
548
549                 shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*j)->decoder);
550                 ContentTime const from = dcp_to_content_subtitle (*j, time);
551                 /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
552                 ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());
553
554                 list<ContentImageSubtitle> image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting);
555                 for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {
556                         
557                         /* Apply content's subtitle offsets */
558                         i->sub.rectangle.x += subtitle_content->subtitle_x_offset ();
559                         i->sub.rectangle.y += subtitle_content->subtitle_y_offset ();
560
561                         /* Apply content's subtitle scale */
562                         i->sub.rectangle.width *= subtitle_content->subtitle_x_scale ();
563                         i->sub.rectangle.height *= subtitle_content->subtitle_y_scale ();
564
565                         /* Apply a corrective translation to keep the subtitle centred after that scale */
566                         i->sub.rectangle.x -= i->sub.rectangle.width * (subtitle_content->subtitle_x_scale() - 1);
567                         i->sub.rectangle.y -= i->sub.rectangle.height * (subtitle_content->subtitle_y_scale() - 1);
568                         
569                         ps.image.push_back (i->sub);
570                 }
571
572                 list<ContentTextSubtitle> text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting);
573                 for (list<ContentTextSubtitle>::const_iterator i = text.begin(); i != text.end(); ++i) {
574                         copy (i->subs.begin(), i->subs.end(), back_inserter (ps.text));
575                 }
576         }
577
578         return ps;
579 }