Previously, if there were two subtitle images at the same time, we would
start them both, but the stop time would then be set only on the second
one and not the first. This meant that the first one would hang around
forever.
dcpomatic::ContentTime _from;
};
dcpomatic::ContentTime _from;
};
class ContentBitmapText : public ContentText
{
public:
class ContentBitmapText : public ContentText
{
public:
+ ContentBitmapText (dcpomatic::ContentTime from)
+ : ContentText(from)
+ {}
+
ContentBitmapText (dcpomatic::ContentTime f, std::shared_ptr<const Image> im, dcpomatic::Rect<double> r)
: ContentText (f)
, subs{ {im, r} }
ContentBitmapText (dcpomatic::ContentTime f, std::shared_ptr<const Image> im, dcpomatic::Rect<double> r)
: ContentText (f)
, subs{ {im, r} }
std::vector<BitmapText> subs;
};
std::vector<BitmapText> subs;
};
/** A text caption. We store the time period separately (as well as in the dcp::SubtitleStrings)
 * as the dcp::SubtitleString timings are sometimes quite heavily quantised and this causes problems
 * when we want to compare the quantised periods to the unquantised ones.
_have_current_subtitle = true;
}
_have_current_subtitle = true;
}
+ ContentBitmapText bitmap_text(from);
for (unsigned int i = 0; i < sub.num_rects; ++i) {
auto const rect = sub.rects[i];
for (unsigned int i = 0; i < sub.num_rects; ++i) {
auto const rect = sub.rects[i];
case SUBTITLE_NONE:
break;
case SUBTITLE_BITMAP:
case SUBTITLE_NONE:
break;
case SUBTITLE_BITMAP:
- process_bitmap_subtitle (rect, from);
+ bitmap_text.subs.push_back(process_bitmap_subtitle(rect));
break;
case SUBTITLE_TEXT:
cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
break;
case SUBTITLE_TEXT:
cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
+ if (!bitmap_text.subs.empty()) {
+ only_text()->emit_bitmap_start(bitmap_text);
+ }
+
if (_current_subtitle_to) {
only_text()->emit_stop (*_current_subtitle_to);
}
if (_current_subtitle_to) {
only_text()->emit_stop (*_current_subtitle_to);
}
-void
-FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
+BitmapText
+FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect)
{
/* Note BGRA is expressed little-endian, so the first byte in the word is B, second
G, third R, fourth A.
{
/* Note BGRA is expressed little-endian, so the first byte in the word is B, second
G, third R, fourth A.
static_cast<double>(rect->h) / target_height
);
static_cast<double>(rect->h) / target_height
);
- only_text()->emit_bitmap_start ({ from, image, scaled_rect });
+ return { image, scaled_rect };
+#include "bitmap_text.h"
#include "decoder.h"
#include "ffmpeg.h"
#include "util.h"
#include "decoder.h"
#include "ffmpeg.h"
#include "util.h"
void decode_and_process_audio_packet (AVPacket* packet);
void decode_and_process_subtitle_packet (AVPacket* packet);
void decode_and_process_audio_packet (AVPacket* packet);
void decode_and_process_subtitle_packet (AVPacket* packet);
- void process_bitmap_subtitle (AVSubtitleRect const * rect, dcpomatic::ContentTime from);
+ BitmapText process_bitmap_subtitle (AVSubtitleRect const * rect);
void process_ass_subtitle (std::string ass, dcpomatic::ContentTime from);
void maybe_add_subtitle ();
void process_ass_subtitle (std::string ass, dcpomatic::ContentTime from);
void maybe_add_subtitle ();