be emitted, instead of the time that the last thing was (#2268).
This avoids the problem shown in the test: just because a subtitle in
source A comes before a subtitle in source B, source A is pass()ed
next, and it may then emit a subtitle which should come after the next
one in B.
audio = make_shared<AudioDecoder>(this, content->audio, fast);
}
for (auto i: content->text) {
- /* XXX: this time here should be the time of the first subtitle, not 0 */
- text.push_back (make_shared<TextDecoder>(this, i, ContentTime()));
+ text.push_back (make_shared<TextDecoder>(this, i));
+ /* We should really call maybe_set_position() on this TextDecoder to set the time
+ * of the first subtitle, but it probably doesn't matter since we'll always
+ * have regularly occurring video (and maybe audio) content.
+ */
}
if (content->atmos) {
atmos = make_shared<AtmosDecoder>(this, content);
_subtitles = c->subtitles ();
_next = _subtitles.begin ();
- ContentTime first;
- if (_next != _subtitles.end()) {
- first = content_time_period(*_next).from;
- }
- text.push_back (make_shared<TextDecoder>(this, content->only_text(), first));
+ text.push_back (make_shared<TextDecoder>(this, content->only_text()));
+ update_position();
}
while (i != _subtitles.end() && ContentTime::from_seconds ((*_next)->in().as_seconds()) < time) {
++i;
}
+
+ update_position();
}
}
only_text()->emit_plain (p, s);
+
+ update_position();
+
return false;
}
return ContentTime::from_seconds(_subtitles[0]->in().as_seconds());
}
+
+/* Tell our TextDecoder the time of the next subtitle that this decoder will
+ * emit, via maybe_set_position() (which never moves the position backwards).
+ */
+void
+DCPSubtitleDecoder::update_position()
+{
+	/* _next points at the next subtitle to be emitted; if we have run off
+	 * the end of the list there is nothing to report.
+	 */
+	if (_next != _subtitles.end()) {
+		only_text()->maybe_set_position(
+			ContentTime::from_seconds((*_next)->in().as_seconds())
+		);
+	}
+}
+
private:
dcpomatic::ContentTimePeriod content_time_period (std::shared_ptr<const dcp::Subtitle> s) const;
+ void update_position();
std::vector<std::shared_ptr<const dcp::Subtitle>> _subtitles;
std::vector<std::shared_ptr<const dcp::Subtitle>>::const_iterator _next;
}
if (c->only_text()) {
- /* XXX: this time here should be the time of the first subtitle, not 0 */
- text.push_back (make_shared<TextDecoder>(this, c->only_text(), ContentTime()));
+ text.push_back (make_shared<TextDecoder>(this, c->only_text()));
+ /* XXX: we should be calling maybe_set_position() on this TextDecoder, but we can't easily find
+ * the time of the first subtitle at this point.
+ */
}
for (auto i: c->ffmpeg_audio_streams()) {
, StringTextFile (content)
, _next (0)
{
- ContentTime first;
- if (!_subtitles.empty()) {
- first = content_time_period(_subtitles[0]).from;
- }
- text.push_back (make_shared<TextDecoder>(this, content->only_text(), first));
+ text.push_back (make_shared<TextDecoder>(this, content->only_text()));
+ update_position();
}
while (_next < _subtitles.size() && ContentTime::from_seconds (_subtitles[_next].from.all_as_seconds ()) < time) {
++_next;
}
+
+ update_position();
}
only_text()->emit_plain (p, _subtitles[_next]);
++_next;
+
+ update_position();
+
return false;
}
ContentTime::from_seconds (s.to.all_as_seconds())
);
}
+
+
+/* Tell our TextDecoder the time of the next subtitle that this decoder will
+ * emit, via maybe_set_position() (which never moves the position backwards).
+ */
+void
+StringTextFileDecoder::update_position ()
+{
+	/* _next indexes the next subtitle to be emitted; if we have run off
+	 * the end of the list there is nothing to report.
+	 */
+	if (_next < _subtitles.size()) {
+		only_text()->maybe_set_position(
+			ContentTime::from_seconds(_subtitles[_next].from.all_as_seconds())
+		);
+	}
+}
+
private:
dcpomatic::ContentTimePeriod content_time_period (sub::Subtitle s) const;
+ void update_position();
size_t _next;
};
TextDecoder::TextDecoder (
Decoder* parent,
- shared_ptr<const TextContent> content,
- ContentTime first
+ shared_ptr<const TextContent> content
)
: DecoderPart (parent)
, _content (content)
- , _position (first)
{
}
TextDecoder::emit_bitmap_start (ContentBitmapText const& bitmap)
{
BitmapStart (bitmap);
- _position = bitmap.from();
+ maybe_set_position(bitmap.from());
}
}
PlainStart(ContentStringText(from, string_texts));
- _position = from;
+ maybe_set_position(from);
}
}
PlainStart(ContentStringText(from, string_texts));
- _position = from;
+ maybe_set_position(from);
}
{
_position = ContentTime ();
}
+
+
+/* Set our position to @position, unless we already have a position that is
+ * the same or later: the position can only ever move forwards.  Having no
+ * position at all counts as being earlier than anything.
+ */
+void
+TextDecoder::maybe_set_position (dcpomatic::ContentTime position)
+{
+	if (!_position || position > *_position) {
+		_position = position;
+	}
+}
+
class TextDecoder : public DecoderPart
{
public:
- TextDecoder (
- Decoder* parent,
- std::shared_ptr<const TextContent>,
- dcpomatic::ContentTime first
- );
+ TextDecoder (Decoder* parent, std::shared_ptr<const TextContent>);
boost::optional<dcpomatic::ContentTime> position (std::shared_ptr<const Film>) const override {
return _position;
void emit_plain (dcpomatic::ContentTimePeriod period, sub::Subtitle const & subtitle);
void emit_stop (dcpomatic::ContentTime to);
+ void maybe_set_position (dcpomatic::ContentTime position);
+
void seek () override;
std::shared_ptr<const TextContent> content () const {
BOOST_CHECK_NO_THROW(butler.rethrow());
}
+
+/* Make two overlapping SRT sources starting at the same time and check that
+ * the subtitles the player emits arrive in ascending time order even when the
+ * two sources interleave (#2268).
+ */
+BOOST_AUTO_TEST_CASE (interleaved_subtitle_are_emitted_correctly)
+{
+	boost::filesystem::path paths[2] = {
+		"build/test/interleaved_subtitle_are_emitted_correctly1.srt",
+		"build/test/interleaved_subtitle_are_emitted_correctly2.srt"
+	};
+
+	dcp::File subs_file[2] = { dcp::File(paths[0], "w"), dcp::File(paths[1], "w") };
+
+	/* SRT timing lines must use the "-->" separator or the files will not parse */
+	fprintf(subs_file[0].get(), "1\n00:00:01,000 --> 00:00:02,000\nSub 1/1\n\n");
+	fprintf(subs_file[0].get(), "2\n00:00:05,000 --> 00:00:06,000\nSub 1/2\n\n");
+
+	fprintf(subs_file[1].get(), "1\n00:00:00,500 --> 00:00:01,500\nSub 2/1\n\n");
+	fprintf(subs_file[1].get(), "2\n00:00:02,000 --> 00:00:03,000\nSub 2/2\n\n");
+
+	subs_file[0].close();
+	subs_file[1].close();
+
+	auto subs1 = content_factory(paths[0]).front();
+	auto subs2 = content_factory(paths[1]).front();
+	auto film = new_test_film2("interleaved_subtitle_are_emitted_correctly", { subs1, subs2 });
+	/* Place both pieces of content at time zero so their subtitles interleave */
+	film->set_sequence(false);
+	subs1->set_position(film, DCPTime());
+	subs2->set_position(film, DCPTime());
+
+	auto player = std::make_shared<Player>(film, Image::Alignment::COMPACT);
+	/* Every emitted subtitle must start no earlier than the one before it */
+	dcp::Time last;
+	player->Text.connect([&last](PlayerText text, TextType, optional<DCPTextTrack>, dcpomatic::DCPTimePeriod) {
+		for (auto sub: text.string) {
+			BOOST_CHECK(sub.in() >= last);
+			last = sub.in();
+		}
+	});
+	while (!player->pass()) {}
+}
+