+
+FFmpegSubtitlePeriod
+FFmpeg::subtitle_period (AVSubtitle const & sub)
+{
+ ContentTime const packet_time = ContentTime::from_seconds (static_cast<double> (sub.pts) / AV_TIME_BASE);
+
+ if (sub.end_display_time == static_cast<uint32_t> (-1)) {
+ /* End time is not known */
+ return FFmpegSubtitlePeriod (packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3));
+ }
+
+ return FFmpegSubtitlePeriod (
+ packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3),
+ packet_time + ContentTime::from_seconds (sub.end_display_time / 1e3)
+ );
+}
+
/** @return a digest which (hopefully) uniquely identifies the content of a subtitle,
 *  built from its PTS plus the position, size and raw image bytes of every rect.
 *  NOTE(review): the exact order in which bytes are fed to the Digester is part of
 *  the contract here — changing it would change every previously-computed ID.
 */
string
FFmpeg::subtitle_id (AVSubtitle const & sub)
{
	Digester digester;
	digester.add (sub.pts);
	for (unsigned int i = 0; i < sub.num_rects; ++i) {
		AVSubtitleRect* rect = sub.rects[i];
		digester.add (rect->x);
		digester.add (rect->y);
		digester.add (rect->w);
		digester.add (rect->h);
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
		/* Older FFmpeg API: the bitmap lives in rect->pict */
		int const line = rect->pict.linesize[0];
		for (int j = 0; j < rect->h; ++j) {
			/* Digest one row of image data at a time; linesize may include padding */
			digester.add (rect->pict.data[0] + j * line, line);
		}
#else
		/* Newer FFmpeg API: the bitmap lives directly in rect->data/linesize */
		int const line = rect->linesize[0];
		for (int j = 0; j < rect->h; ++j) {
			digester.add (rect->data[0] + j * line, line);
		}
#endif
	}
	return digester.get ();
}
+
+/** @return true if sub starts a new image subtitle */
+bool
+FFmpeg::subtitle_starts_image (AVSubtitle const & sub)
+{
+ bool image = false;
+ bool text = false;
+
+ for (unsigned int i = 0; i < sub.num_rects; ++i) {
+ switch (sub.rects[i]->type) {
+ case SUBTITLE_BITMAP:
+ image = true;
+ break;
+ case SUBTITLE_TEXT:
+ case SUBTITLE_ASS:
+ text = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* We can't cope with mixed image/text in one AVSubtitle */
+ DCPOMATIC_ASSERT (!image || !text);
+
+ return image;
+}
+
/** Compute the pts offset to use given a set of audio streams and some video details.
 * Sometimes these parameters will have just been determined by an Examiner, sometimes
 * they will have been retrieved from a piece of Content, hence the need for this method
 * in FFmpeg.
 *
 * @param audio_streams audio streams, each of which may know the PTS of its first audio.
 * @param first_video PTS of the first video frame, if known.
 * @param video_frame_rate video frame rate, used to round the first video frame onto a frame boundary.
 * @return offset to be added to every PTS value taken from this content.
 */
ContentTime
FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream> > audio_streams, optional<ContentTime> first_video, double video_frame_rate) const
{
	/* Audio and video frame PTS values may not start with 0. We want
	to fiddle them so that:

	1. One of them starts at time 0.
	2. The first video PTS value ends up on a frame boundary.

	Then we remove big initial gaps in PTS and we allow our
	insertion of black frames to work.

	We will do:
	audio_pts_to_use = audio_pts_from_ffmpeg + pts_offset;
	video_pts_to_use = video_pts_from_ffmpeg + pts_offset;
	*/

	/* First, make one of them start at 0 */

	/* Start from the smallest representable time so that any real first-PTS
	   value (video or audio) will replace it via max() below.
	   NOTE(review): if neither video nor any audio stream has a known first
	   PTS this stays at ContentTime::min() — presumably callers always have
	   at least one; confirm.
	*/
	ContentTime po = ContentTime::min ();

	if (first_video) {
		po = - first_video.get ();
	}

	/* Take the largest (least negative) offset needed by any stream, so that
	   no stream ends up starting before time 0 */
	BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, audio_streams) {
		if (i->first_audio) {
			po = max (po, - i->first_audio.get ());
		}
	}

	/* If the offset is positive we would be pushing things from a -ve PTS to be played.
	I don't think we ever want to do that, as it seems things at -ve PTS are not meant
	to be seen (use for alignment bars etc.); see mantis #418.
	*/
	if (po > ContentTime ()) {
		po = ContentTime ();
	}

	/* Now adjust so that the video pts starts on a frame */
	if (first_video) {
		/* fvc is where the first video frame would land after applying po;
		   nudge po forward so that it falls exactly on a frame boundary */
		ContentTime const fvc = first_video.get() + po;
		po += fvc.round_up (video_frame_rate) - fvc;
	}

	return po;
}