summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCarl Hetherington <cth@carlh.net>2025-12-06 11:11:47 +0100
committerCarl Hetherington <cth@carlh.net>2025-12-06 11:12:31 +0100
commit5029bf180a8b29c8e9f1aeef1c398b05e01eb663 (patch)
treeb599b89fe3ea1536031f15d7605c17d16d24a691
parent59e358ae25f84314afc6d8235aecaec124822f3f (diff)
Clarify and improve decoder/player/butler logging a bit.
-rw-r--r--src/lib/butler.cc2
-rw-r--r--src/lib/dcpomatic_log.h1
-rw-r--r--src/lib/ffmpeg_decoder.cc20
-rw-r--r--src/lib/log_entry.cc25
-rw-r--r--src/lib/log_entry.h1
-rw-r--r--src/lib/player.cc28
-rw-r--r--src/wx/full_config_dialog.cc8
7 files changed, 50 insertions, 35 deletions
diff --git a/src/lib/butler.cc b/src/lib/butler.cc
index a4f00eb08..2bcd818cc 100644
--- a/src/lib/butler.cc
+++ b/src/lib/butler.cc
@@ -134,6 +134,8 @@ Butler::~Butler()
bool
Butler::should_run() const
{
+ LOG_DEBUG_BUTLER("BUT: video={} audio={}", _video.size(), _audio.size());
+
if (_video.size() >= MAXIMUM_VIDEO_READAHEAD * 10) {
/* This is way too big */
auto pos = _audio.peek();
diff --git a/src/lib/dcpomatic_log.h b/src/lib/dcpomatic_log.h
index 237f5e5d0..4bffc981b 100644
--- a/src/lib/dcpomatic_log.h
+++ b/src/lib/dcpomatic_log.h
@@ -44,6 +44,7 @@ extern std::shared_ptr<Log> dcpomatic_log;
#define LOG_DEBUG_PLAYER_NC(...) dcpomatic_log->log(__VA_ARGS__, LogEntry::TYPE_DEBUG_PLAYER);
#define LOG_DEBUG_AUDIO_ANALYSIS(...) dcpomatic_log->log(fmt::format(__VA_ARGS__), LogEntry::TYPE_DEBUG_AUDIO_ANALYSIS);
#define LOG_DEBUG_AUDIO_ANALYSIS_NC(...) dcpomatic_log->log(__VA_ARGS__, LogEntry::TYPE_DEBUG_AUDIO_ANALYSIS);
+#define LOG_DEBUG_BUTLER(...) dcpomatic_log->log(fmt::format(__VA_ARGS__), LogEntry::TYPE_DEBUG_BUTLER);
#define LOG_HTTP(...) dcpomatic_log->log(fmt::format(__VA_ARGS__), LogEntry::TYPE_HTTP);
#define LOG_HTTP_NC(...) dcpomatic_log->log(__VA_ARGS__, LogEntry::TYPE_HTTP);
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index 3764cbb46..ebbc242e3 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -108,12 +108,12 @@ FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmp
FFmpegDecoder::FlushResult
FFmpegDecoder::flush ()
{
- LOG_DEBUG_PLAYER("Flush FFmpeg decoder: current state {}", static_cast<int>(_flush_state));
+ LOG_DEBUG_PLAYER("DEC: Flush FFmpeg decoder: current state {}", static_cast<int>(_flush_state));
switch (_flush_state) {
case FlushState::CODECS:
if (flush_codecs() == FlushResult::DONE) {
- LOG_DEBUG_PLAYER("Finished flushing codecs");
+ LOG_DEBUG_PLAYER("DEC: Finished flushing codecs");
_flush_state = FlushState::AUDIO_DECODER;
}
break;
@@ -121,12 +121,12 @@ FFmpegDecoder::flush ()
if (audio) {
audio->flush();
}
- LOG_DEBUG_PLAYER("Finished flushing audio decoder");
+ LOG_DEBUG_PLAYER("DEC: Finished flushing audio decoder");
_flush_state = FlushState::FILL;
break;
case FlushState::FILL:
if (flush_fill() == FlushResult::DONE) {
- LOG_DEBUG_PLAYER("Finished flushing fills");
+ LOG_DEBUG_PLAYER("DEC: Finished flushing fills");
return FlushResult::DONE;
}
break;
@@ -192,7 +192,7 @@ FFmpegDecoder::flush_fill()
here. I'm not sure if that's the right idea.
*/
if (a > ContentTime() && a < full_length) {
- LOG_DEBUG_PLAYER("Flush inserts silence at {}", to_string(a));
+ LOG_DEBUG_PLAYER("DEC: Flush inserts silence at {}", to_string(a));
auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
silence->make_silent ();
@@ -219,7 +219,7 @@ FFmpegDecoder::pass ()
Hence it makes sense to continue here in that case.
*/
if (r < 0 && r != AVERROR_INVALIDDATA) {
- LOG_DEBUG_PLAYER("FFpmegDecoder::pass flushes because av_read_frame returned {}", r);
+ LOG_DEBUG_PLAYER("DEC: FFmpegDecoder::pass flushes because av_read_frame returned {}", r);
if (r != AVERROR_EOF) {
/* Maybe we should fail here, but for now we'll just finish off instead */
char buf[256];
@@ -504,7 +504,7 @@ FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
av_q2d(time_base))
+ _pts_offset;
LOG_DEBUG_PLAYER(
- "Process audio with timestamp {} (BET {}, timebase {}/{}, (PTS offset {})",
+ "DEC: Process audio with timestamp {} (BET {}, timebase {}/{}, PTS offset {})",
to_string(ct),
frame->best_effort_timestamp,
time_base.num,
@@ -553,7 +553,7 @@ FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
auto context = _codec_context[stream->index(_format_context)];
auto frame = audio_frame (stream);
- LOG_DEBUG_PLAYER("Send audio packet on stream {}", stream->index(_format_context));
+ LOG_DEBUG_PLAYER("DEC: Send audio packet on stream {}", stream->index(_format_context));
int r = avcodec_send_packet (context, packet);
if (r < 0) {
LOG_WARNING("avcodec_send_packet returned {} for an audio packet", r);
@@ -562,7 +562,7 @@ FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
r = avcodec_receive_frame (context, frame);
if (r == AVERROR(EAGAIN)) {
/* More input is required */
- LOG_DEBUG_PLAYER("EAGAIN after trying to receive audio frame");
+ LOG_DEBUG_PLAYER("DEC: EAGAIN after trying to receive audio frame");
return;
}
@@ -584,6 +584,7 @@ FFmpegDecoder::decode_and_process_video_packet (AVPacket* packet)
bool pending = false;
do {
+ LOG_DEBUG_PLAYER("DEC: Send video packet");
int r = avcodec_send_packet (context, packet);
if (r < 0) {
LOG_WARNING("avcodec_send_packet returned {} for a video packet", r);
@@ -624,6 +625,7 @@ FFmpegDecoder::process_video_frame ()
if (i.second != AV_NOPTS_VALUE) {
double const pts = i.second * av_q2d(_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds();
+ LOG_DEBUG_PLAYER("DEC: Process video with timestamp {}", to_string(ContentTime::from_seconds(pts)));
video->emit (
film(),
make_shared<RawImageProxy>(image),
diff --git a/src/lib/log_entry.cc b/src/lib/log_entry.cc
index af9148e68..6d2710332 100644
--- a/src/lib/log_entry.cc
+++ b/src/lib/log_entry.cc
@@ -27,18 +27,19 @@
#include "i18n.h"
-int const LogEntry::TYPE_GENERAL = 0x001;
-int const LogEntry::TYPE_WARNING = 0x002;
-int const LogEntry::TYPE_ERROR = 0x004;
-int const LogEntry::TYPE_DEBUG_THREE_D = 0x008;
-int const LogEntry::TYPE_DEBUG_ENCODE = 0x010;
-int const LogEntry::TYPE_TIMING = 0x020;
-int const LogEntry::TYPE_DEBUG_EMAIL = 0x040;
-int const LogEntry::TYPE_DEBUG_VIDEO_VIEW = 0x080;
-int const LogEntry::TYPE_DISK = 0x100;
-int const LogEntry::TYPE_DEBUG_PLAYER = 0x200;
-int const LogEntry::TYPE_DEBUG_AUDIO_ANALYSIS = 0x400;
-int const LogEntry::TYPE_HTTP = 0x800;
+int const LogEntry::TYPE_GENERAL = 0x0001;
+int const LogEntry::TYPE_WARNING = 0x0002;
+int const LogEntry::TYPE_ERROR = 0x0004;
+int const LogEntry::TYPE_DEBUG_THREE_D = 0x0008;
+int const LogEntry::TYPE_DEBUG_ENCODE = 0x0010;
+int const LogEntry::TYPE_TIMING = 0x0020;
+int const LogEntry::TYPE_DEBUG_EMAIL = 0x0040;
+int const LogEntry::TYPE_DEBUG_VIDEO_VIEW = 0x0080;
+int const LogEntry::TYPE_DISK = 0x0100;
+int const LogEntry::TYPE_DEBUG_PLAYER = 0x0200;
+int const LogEntry::TYPE_DEBUG_AUDIO_ANALYSIS = 0x0400;
+int const LogEntry::TYPE_HTTP = 0x0800;
+int const LogEntry::TYPE_DEBUG_BUTLER = 0x1000;
using std::string;
diff --git a/src/lib/log_entry.h b/src/lib/log_entry.h
index 89357d201..b76579ff4 100644
--- a/src/lib/log_entry.h
+++ b/src/lib/log_entry.h
@@ -43,6 +43,7 @@ public:
static const int TYPE_DEBUG_PLAYER; ///< the Player class
static const int TYPE_DEBUG_AUDIO_ANALYSIS; ///< audio analysis job
static const int TYPE_HTTP;
+ static const int TYPE_DEBUG_BUTLER;
explicit LogEntry (int type);
virtual ~LogEntry () {}
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 417c30449..2cec90d93 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -711,7 +711,7 @@ Player::pass()
if (_suspended) {
/* We can't pass in this state */
- LOG_DEBUG_PLAYER("Player is suspended");
+ LOG_DEBUG_PLAYER("PLY: Player is suspended");
return false;
}
@@ -755,7 +755,7 @@ Player::pass()
switch (which) {
case CONTENT:
{
- LOG_DEBUG_PLAYER("Calling pass() on {}", earliest_content->content->path(0).string());
+ LOG_DEBUG_PLAYER("PLY: Calling pass() on {}", earliest_content->content->path(0).string());
earliest_content->done = earliest_content->decoder->pass();
auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
if (dcp && !_play_referenced) {
@@ -773,7 +773,7 @@ Player::pass()
break;
}
case BLACK:
- LOG_DEBUG_PLAYER("Emit black for gap at {}", to_string(_black.position()));
+ LOG_DEBUG_PLAYER("PLY: Emit black for gap at {}", to_string(_black.position()));
if (!_next_video_time) {
/* Deciding to emit black has the same effect as getting some video from the content
* when we are inaccurately seeking.
@@ -790,7 +790,7 @@ Player::pass()
break;
case SILENT:
{
- LOG_DEBUG_PLAYER("Emit silence for gap at {}", to_string(_silent.position()));
+ LOG_DEBUG_PLAYER("PLY: Emit silence for gap at {}", to_string(_silent.position()));
DCPTimePeriod period(_silent.period_at_position());
if (_next_audio_time) {
/* Sometimes the thing that happened last finishes fractionally before
@@ -845,7 +845,7 @@ Player::pass()
std::map<AudioStreamPtr, StreamState> alive_stream_states;
if (latest_last_push_end != have_pushed.end()) {
- LOG_DEBUG_PLAYER("Leading audio stream is in {} at {}", latest_last_push_end->second.piece->content->path(0).string(), to_string(latest_last_push_end->second.last_push_end.get()));
+ LOG_DEBUG_PLAYER("PLY: Leading audio stream is in {} at {}", latest_last_push_end->second.piece->content->path(0).string(), to_string(latest_last_push_end->second.last_push_end.get()));
auto const ignore_threshold = dcpomatic::DCPTime::from_seconds(ignore_streams_behind);
auto const leader_time = latest_last_push_end->second.last_push_end.get();
@@ -859,7 +859,7 @@ Player::pass()
if ((i.second.last_push_end && (leader_time - i.second.last_push_end.get()) < ignore_threshold) || ((leader_time - i.second.piece->content->position()) < ignore_threshold)) {
alive_stream_states.insert(i);
} else {
- LOG_DEBUG_PLAYER("Ignoring stream {} because it is too far behind", i.second.piece->content->path(0).string());
+ LOG_DEBUG_PLAYER("PLY: Ignoring stream {} because it is too far behind", i.second.piece->content->path(0).string());
}
}
}
@@ -875,7 +875,7 @@ Player::pass()
pull_to = _silent.position();
}
- LOG_DEBUG_PLAYER("Emitting audio up to {}", to_string(pull_to));
+ LOG_DEBUG_PLAYER("PLY: Emitting audio up to {}", to_string(pull_to));
auto audio = _audio_merger.pull(pull_to);
for (auto i = audio.begin(); i != audio.end(); ++i) {
if (_next_audio_time && i->second < *_next_audio_time) {
@@ -895,7 +895,7 @@ Player::pass()
if (done) {
if (_next_video_time) {
- LOG_DEBUG_PLAYER("Done: emit video until end of film at {}", to_string(film->length()));
+ LOG_DEBUG_PLAYER("PLY: Done: emit video until end of film at {}", to_string(film->length()));
emit_video_until(film->length());
}
@@ -971,7 +971,7 @@ Player::open_texts_for_frame(DCPTime time) const
void
Player::emit_video_until(DCPTime time)
{
- LOG_DEBUG_PLAYER("emit_video_until {}; next video time is {}", to_string(time), to_string(_next_video_time.get_value_or({})));
+ LOG_DEBUG_PLAYER("PLY: emit_video_until {}; next video time is {}", to_string(time), to_string(_next_video_time.get_value_or({})));
auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
player before the video that requires them.
@@ -1020,7 +1020,7 @@ Player::emit_video_until(DCPTime time)
frame(right.first, next);
} else if (both.first && (both.second - next) < age_threshold(both)) {
frame(both.first, next);
- LOG_DEBUG_PLAYER("Content {} selected for DCP {} (age {})", to_string(both.second), to_string(next), to_string(both.second - next));
+ LOG_DEBUG_PLAYER("PLY: Content {} selected for DCP {} (age {})", to_string(both.second), to_string(next), to_string(both.second - next));
} else {
auto film = _film.lock();
if (film && film->three_d()) {
@@ -1029,7 +1029,7 @@ Player::emit_video_until(DCPTime time)
} else {
frame(black_player_video_frame(Eyes::BOTH), next);
}
- LOG_DEBUG_PLAYER("Black selected for {}", to_string(next));
+ LOG_DEBUG_PLAYER("PLY: Black selected for {}", to_string(next));
}
}
}
@@ -1080,7 +1080,7 @@ Player::video(weak_ptr<Piece> weak_piece, ContentVideo video)
/* Time of the frame we just received within the DCP */
auto const time = content_time_to_dcp(piece, video.time);
- LOG_DEBUG_PLAYER("Received video frame {} {} eyes {}", to_string(video.time), to_string(time), static_cast<int>(video.eyes));
+ LOG_DEBUG_PLAYER("PLY: Received video frame {} {} eyes {}", to_string(video.time), to_string(time), static_cast<int>(video.eyes));
if (time < piece->content->position()) {
return;
@@ -1170,7 +1170,7 @@ Player::audio(weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio co
/* And the end of this block in the DCP */
auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
- LOG_DEBUG_PLAYER("Received audio frame {} covering {} to {} ({})", content_audio.frame, to_string(time), to_string(end), piece->content->path(0).filename().string());
+ LOG_DEBUG_PLAYER("PLY: Received audio frame {} covering {} to {} ({})", content_audio.frame, to_string(time), to_string(end), piece->content->path(0).filename().string());
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
@@ -1389,7 +1389,7 @@ void
Player::seek(DCPTime time, bool accurate)
{
boost::mutex::scoped_lock lm(_mutex);
- LOG_DEBUG_PLAYER("Seek to {} ({}accurate)", to_string(time), accurate ? "" : "in");
+ LOG_DEBUG_PLAYER("PLY: Seek to {} ({}accurate)", to_string(time), accurate ? "" : "in");
if (_suspended) {
/* We can't seek in this state */
diff --git a/src/wx/full_config_dialog.cc b/src/wx/full_config_dialog.cc
index eff3fdee2..95c4b7052 100644
--- a/src/wx/full_config_dialog.cc
+++ b/src/wx/full_config_dialog.cc
@@ -1298,6 +1298,8 @@ private:
t->Add(_log_debug_player, 1, wxEXPAND | wxALL);
_log_debug_audio_analysis = new CheckBox(_panel, _("Debug: audio analysis"));
t->Add(_log_debug_audio_analysis, 1, wxEXPAND | wxALL);
+ _log_debug_butler = new CheckBox(_panel, _("Debug: butler"));
+ t->Add(_log_debug_butler, 1, wxEXPAND | wxALL);
table->Add(t, 0, wxALL, 6);
}
@@ -1328,6 +1330,7 @@ private:
_log_debug_video_view->bind(&AdvancedPage::log_changed, this);
_log_debug_player->bind(&AdvancedPage::log_changed, this);
_log_debug_audio_analysis->bind(&AdvancedPage::log_changed, this);
+ _log_debug_butler->bind(&AdvancedPage::log_changed, this);
#ifdef DCPOMATIC_WINDOWS
_win32_console->bind(&AdvancedPage::win32_console_changed, this);
#endif
@@ -1358,6 +1361,7 @@ private:
checked_set(_log_debug_video_view, config->log_types() & LogEntry::TYPE_DEBUG_VIDEO_VIEW);
checked_set(_log_debug_player, config->log_types() & LogEntry::TYPE_DEBUG_PLAYER);
checked_set(_log_debug_audio_analysis, config->log_types() & LogEntry::TYPE_DEBUG_AUDIO_ANALYSIS);
+ checked_set(_log_debug_butler, config->log_types() & LogEntry::TYPE_DEBUG_BUTLER);
checked_set(_frames_in_memory_multiplier, config->frames_in_memory_multiplier());
#ifdef DCPOMATIC_WINDOWS
checked_set(_win32_console, config->win32_console());
@@ -1436,6 +1440,9 @@ private:
if (_log_debug_audio_analysis->GetValue()) {
types |= LogEntry::TYPE_DEBUG_AUDIO_ANALYSIS;
}
+ if (_log_debug_butler->GetValue()) {
+ types |= LogEntry::TYPE_DEBUG_BUTLER;
+ }
Config::instance()->set_log_types(types);
}
@@ -1463,6 +1470,7 @@ private:
CheckBox* _log_debug_video_view = nullptr;
CheckBox* _log_debug_player = nullptr;
CheckBox* _log_debug_audio_analysis = nullptr;
+ CheckBox* _log_debug_butler = nullptr;
#ifdef DCPOMATIC_WINDOWS
CheckBox* _win32_console = nullptr;
#endif