summaryrefslogtreecommitdiff
path: root/src/lib
diff options
context:
space:
mode:
authorCarl Hetherington <cth@carlh.net>2024-12-19 19:57:56 +0100
committerCarl Hetherington <cth@carlh.net>2025-09-03 00:25:58 +0200
commit8784debc7de4daeb04fb142a4097a34a20cf47a4 (patch)
tree1025ec7043cf8412507f81d5c90dc7f4f2196186 /src/lib
parent1bba4979cc19efaedf1258bc30f5e8f2d968dcd6 (diff)
Move to_string() from a free function into a member of Time.
Diffstat (limited to 'src/lib')
-rw-r--r--src/lib/audio_analyser.cc2
-rw-r--r--src/lib/audio_ring_buffers.cc2
-rw-r--r--src/lib/dcpomatic_time.cc30
-rw-r--r--src/lib/dcpomatic_time.h15
-rw-r--r--src/lib/ffmpeg_decoder.cc10
-rw-r--r--src/lib/j2k_encoder.cc8
-rw-r--r--src/lib/player.cc24
-rw-r--r--src/lib/shuffler.cc10
8 files changed, 42 insertions, 59 deletions
diff --git a/src/lib/audio_analyser.cc b/src/lib/audio_analyser.cc
index 7d4ee6ace..f869933eb 100644
--- a/src/lib/audio_analyser.cc
+++ b/src/lib/audio_analyser.cc
@@ -135,7 +135,7 @@ AudioAnalyser::AudioAnalyser(shared_ptr<const Film> film, shared_ptr<const Playl
void
AudioAnalyser::analyse (shared_ptr<AudioBuffers> b, DCPTime time)
{
- LOG_DEBUG_AUDIO_ANALYSIS("AudioAnalyser received {} frames at {}", b->frames(), to_string(time));
+ LOG_DEBUG_AUDIO_ANALYSIS("AudioAnalyser received {} frames at {}", b->frames(), time.to_string());
DCPOMATIC_ASSERT (time >= _start);
/* In bug #2364 we had a lot of frames arriving here (~47s worth) which
* caused an OOM error on Windows. Check for the number of frames being
diff --git a/src/lib/audio_ring_buffers.cc b/src/lib/audio_ring_buffers.cc
index a257edd38..b2976deec 100644
--- a/src/lib/audio_ring_buffers.cc
+++ b/src/lib/audio_ring_buffers.cc
@@ -51,7 +51,7 @@ AudioRingBuffers::put(shared_ptr<const AudioBuffers> data, DCPTime time, int fra
DCPOMATIC_ASSERT(_buffers.front().first->channels() == data->channels());
DCPTime const end = (_buffers.back().second + DCPTime::from_frames(_buffers.back().first->frames(), frame_rate));
if (labs(end.get() - time.get()) > 1) {
- cout << "bad put " << to_string(_buffers.back().second) << " " << _buffers.back().first->frames() << " " << to_string(time) << "\n";
+ cout << "bad put " << _buffers.back().second.to_string() << " " << _buffers.back().first->frames() << " " << time.to_string() << "\n";
}
DCPOMATIC_ASSERT(labs(end.get() - time.get()) < 2);
}
diff --git a/src/lib/dcpomatic_time.cc b/src/lib/dcpomatic_time.cc
index 60fc5342a..f1be46925 100644
--- a/src/lib/dcpomatic_time.cc
+++ b/src/lib/dcpomatic_time.cc
@@ -107,39 +107,13 @@ dcpomatic::max (ContentTime a, ContentTime b)
string
-dcpomatic::to_string (ContentTime t)
-{
- char buffer[64];
-#ifdef DCPOMATIC_WINDOWS
- __mingw_snprintf (buffer, sizeof(buffer), "[CONT %" PRId64 " %fs]", t.get(), t.seconds());
-#else
- snprintf (buffer, sizeof(buffer), "[CONT %" PRId64 " %fs]", t.get(), t.seconds());
-#endif
- return buffer;
-}
-
-
-string
-dcpomatic::to_string (DCPTime t)
-{
- char buffer[64];
-#ifdef DCPOMATIC_WINDOWS
- __mingw_snprintf (buffer, sizeof(buffer), "[DCP %" PRId64 " %fs]", t.get(), t.seconds());
-#else
- snprintf (buffer, sizeof(buffer), "[DCP %" PRId64 " %fs]", t.get(), t.seconds());
-#endif
- return buffer;
-}
-
-
-string
dcpomatic::to_string (DCPTimePeriod p)
{
char buffer[64];
#ifdef DCPOMATIC_WINDOWS
- __mingw_snprintf (buffer, sizeof(buffer), "[DCP %" PRId64 " %fs -> %" PRId64 " %fs]", p.from.get(), p.from.seconds(), p.to.get(), p.to.seconds());
+ __mingw_snprintf (buffer, sizeof(buffer), "[DCP %s %fs -> %s %fs]", p.from.to_string().c_str(), p.from.seconds(), p.to.to_string().c_str(), p.to.seconds());
#else
- snprintf (buffer, sizeof(buffer), "[DCP %" PRId64 " %fs -> %" PRId64 " %fs]", p.from.get(), p.from.seconds(), p.to.get(), p.to.seconds());
+ snprintf (buffer, sizeof(buffer), "[DCP %s %fs -> %s %fs]", p.from.to_string().c_str(), p.from.seconds(), p.to.to_string().c_str(), p.to.seconds());
#endif
return buffer;
}
diff --git a/src/lib/dcpomatic_time.h b/src/lib/dcpomatic_time.h
index 6de576246..f79242161 100644
--- a/src/lib/dcpomatic_time.h
+++ b/src/lib/dcpomatic_time.h
@@ -31,12 +31,13 @@
#include "dcpomatic_assert.h"
#include "frame_rate_change.h"
#include <boost/optional.hpp>
-#include <stdint.h>
#include <cmath>
#include <cstdio>
#include <iomanip>
+#include <inttypes.h>
#include <list>
#include <ostream>
+#include <stdint.h>
struct dcpomatic_time_ceil_test;
@@ -244,6 +245,16 @@ public:
return buffer;
}
+ std::string to_string() const {
+ char buffer[64];
+#ifdef DCPOMATIC_WINDOWS
+ __mingw_snprintf(buffer, sizeof(buffer), "[%" PRId64 " %fs]", get(), seconds());
+#else
+ snprintf(buffer, sizeof(buffer), "[%" PRId64 " %fs]", get(), seconds());
+#endif
+ return buffer;
+ }
+
static Time<S, O> from_seconds (double s) {
return Time<S, O> (llrint (s * HZ));
}
@@ -400,8 +411,6 @@ DCPTime min (DCPTime a, DCPTime b);
DCPTime max (DCPTime a, DCPTime b);
ContentTime min (ContentTime a, ContentTime b);
ContentTime max (ContentTime a, ContentTime b);
-std::string to_string (ContentTime t);
-std::string to_string (DCPTime t);
std::string to_string (DCPTimePeriod p);
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index 74836c1a8..a970e6d50 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -192,7 +192,7 @@ FFmpegDecoder::flush_fill()
here. I'm not sure if that's the right idea.
*/
if (a > ContentTime() && a < full_length) {
- LOG_DEBUG_PLAYER("Flush inserts silence at {}", to_string(a));
+ LOG_DEBUG_PLAYER("Flush inserts silence at {}", a.to_string());
auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
silence->make_silent ();
@@ -505,11 +505,11 @@ FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
+ _pts_offset;
LOG_DEBUG_PLAYER(
"Process audio with timestamp {} (BET {}, timebase {}/{}, (PTS offset {})",
- to_string(ct),
+ ct.to_string(),
frame->best_effort_timestamp,
time_base.num,
time_base.den,
- to_string(_pts_offset)
+ _pts_offset.to_string()
);
}
@@ -526,12 +526,12 @@ FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
if (ct < ContentTime()) {
LOG_WARNING (
"Crazy timestamp {} for {} samples in stream {} (ts={} tb={}, off={})",
- to_string(ct),
+ ct.to_string(),
data->frames(),
stream->id(),
frame->best_effort_timestamp,
av_q2d(time_base),
- to_string(_pts_offset)
+ _pts_offset.to_string()
);
}
diff --git a/src/lib/j2k_encoder.cc b/src/lib/j2k_encoder.cc
index 441e91827..2d7802068 100644
--- a/src/lib/j2k_encoder.cc
+++ b/src/lib/j2k_encoder.cc
@@ -312,19 +312,19 @@ J2KEncoder::encode (shared_ptr<PlayerVideo> pv, DCPTime time)
if (_writer.can_fake_write(position)) {
/* We can fake-write this frame */
- LOG_DEBUG_ENCODE("Frame @ {} FAKE", to_string(time));
+ LOG_DEBUG_ENCODE("Frame @ {} FAKE", time.to_string());
_writer.fake_write(position, pv->eyes ());
frame_done ();
} else if (pv->has_j2k() && !_film->reencode_j2k()) {
- LOG_DEBUG_ENCODE("Frame @ {} J2K", to_string(time));
+ LOG_DEBUG_ENCODE("Frame @ {} J2K", time.to_string());
/* This frame already has J2K data, so just write it */
_writer.write(pv->j2k(), position, pv->eyes ());
frame_done ();
} else if (_last_player_video[pv->eyes()] && _writer.can_repeat(position) && pv->same(_last_player_video[pv->eyes()])) {
- LOG_DEBUG_ENCODE("Frame @ {} REPEAT", to_string(time));
+ LOG_DEBUG_ENCODE("Frame @ {} REPEAT", time.to_string());
_writer.repeat(position, pv->eyes());
} else {
- LOG_DEBUG_ENCODE("Frame @ {} ENCODE", to_string(time));
+ LOG_DEBUG_ENCODE("Frame @ {} ENCODE", time.to_string());
/* Queue this new frame for encoding */
LOG_TIMING ("add-frame-to-queue queue={}", _queue.size ());
auto dcpv = DCPVideo(
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 985bd3a9c..5ff8ec779 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -773,7 +773,7 @@ Player::pass()
break;
}
case BLACK:
- LOG_DEBUG_PLAYER("Emit black for gap at {}", to_string(_black.position()));
+ LOG_DEBUG_PLAYER("Emit black for gap at {}", _black.position().to_string());
if (!_next_video_time) {
/* Deciding to emit black has the same effect as getting some video from the content
* when we are inaccurately seeking.
@@ -790,7 +790,7 @@ Player::pass()
break;
case SILENT:
{
- LOG_DEBUG_PLAYER("Emit silence for gap at {}", to_string(_silent.position()));
+ LOG_DEBUG_PLAYER("Emit silence for gap at {}", _silent.position().to_string());
DCPTimePeriod period(_silent.period_at_position());
if (_next_audio_time) {
/* Sometimes the thing that happened last finishes fractionally before
@@ -845,7 +845,7 @@ Player::pass()
std::map<AudioStreamPtr, StreamState> alive_stream_states;
if (latest_last_push_end != have_pushed.end()) {
- LOG_DEBUG_PLAYER("Leading audio stream is in {} at {}", latest_last_push_end->second.piece->content->path(0).string(), to_string(latest_last_push_end->second.last_push_end.get()));
+ LOG_DEBUG_PLAYER("Leading audio stream is in {} at {}", latest_last_push_end->second.piece->content->path(0).string(), latest_last_push_end->second.last_push_end->to_string());
/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
for (auto const& i: _stream_states) {
@@ -868,7 +868,7 @@ Player::pass()
pull_to = _silent.position();
}
- LOG_DEBUG_PLAYER("Emitting audio up to {}", to_string(pull_to));
+ LOG_DEBUG_PLAYER("Emitting audio up to {}", pull_to.to_string());
auto audio = _audio_merger.pull(pull_to);
for (auto i = audio.begin(); i != audio.end(); ++i) {
if (_next_audio_time && i->second < *_next_audio_time) {
@@ -888,7 +888,7 @@ Player::pass()
if (done) {
if (_next_video_time) {
- LOG_DEBUG_PLAYER("Done: emit video until end of film at {}", to_string(film->length()));
+ LOG_DEBUG_PLAYER("Done: emit video until end of film at {}", film->length().to_string());
emit_video_until(film->length());
}
@@ -964,7 +964,7 @@ Player::open_texts_for_frame(DCPTime time) const
void
Player::emit_video_until(DCPTime time)
{
- LOG_DEBUG_PLAYER("emit_video_until {}; next video time is {}", to_string(time), to_string(_next_video_time.get_value_or({})));
+ LOG_DEBUG_PLAYER("emit_video_until {}; next video time is {}", time.to_string(), _next_video_time.get_value_or({}).to_string());
auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
player before the video that requires them.
@@ -1013,7 +1013,7 @@ Player::emit_video_until(DCPTime time)
frame(right.first, next);
} else if (both.first && (both.second - next) < age_threshold(both)) {
frame(both.first, next);
- LOG_DEBUG_PLAYER("Content {} selected for DCP {} (age {})", to_string(both.second), to_string(next), to_string(both.second - next));
+ LOG_DEBUG_PLAYER("Content {} selected for DCP {} (age {})", both.second.to_string(), next.to_string(), DCPTime(both.second - next).to_string());
} else {
auto film = _film.lock();
if (film && film->three_d()) {
@@ -1022,7 +1022,7 @@ Player::emit_video_until(DCPTime time)
} else {
frame(black_player_video_frame(Eyes::BOTH), next);
}
- LOG_DEBUG_PLAYER("Black selected for DCP {}", to_string(next));
+ LOG_DEBUG_PLAYER("Black selected for DCP {}", next.to_string());
}
}
}
@@ -1073,7 +1073,7 @@ Player::video(weak_ptr<Piece> weak_piece, ContentVideo video)
/* Time of the frame we just received within the DCP */
auto const time = content_time_to_dcp(piece, video.time);
- LOG_DEBUG_PLAYER("Received video frame {} {} eyes {}", to_string(video.time), to_string(time), static_cast<int>(video.eyes));
+ LOG_DEBUG_PLAYER("Received video frame {} {} eyes {}", video.time.to_string(), time.to_string(), static_cast<int>(video.eyes));
if (time < piece->content->position()) {
return;
@@ -1163,7 +1163,7 @@ Player::audio(weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio co
/* And the end of this block in the DCP */
auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
- LOG_DEBUG_PLAYER("Received audio frame {} covering {} to {} ({})", content_audio.frame, to_string(time), to_string(end), piece->content->path(0).filename().string());
+ LOG_DEBUG_PLAYER("Received audio frame {} covering {} to {} ({})", content_audio.frame, time.to_string(), end.to_string(), piece->content->path(0).filename().string());
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
@@ -1382,7 +1382,7 @@ void
Player::seek(DCPTime time, bool accurate)
{
boost::mutex::scoped_lock lm(_mutex);
- LOG_DEBUG_PLAYER("Seek to {} ({}accurate)", to_string(time), accurate ? "" : "in");
+ LOG_DEBUG_PLAYER("Seek to {} ({}accurate)", time.to_string(), accurate ? "" : "in");
if (_suspended) {
/* We can't seek in this state */
@@ -1470,7 +1470,7 @@ Player::emit_audio(shared_ptr<AudioBuffers> data, DCPTime time)
/* Log if the assert below is about to fail */
if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
- film->log()->log(fmt::format("Out-of-sequence emit {} vs {}", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
+ film->log()->log(fmt::format("Out-of-sequence emit {} vs {}", time.to_string(), _next_audio_time->to_string()), LogEntry::TYPE_WARNING);
}
/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
diff --git a/src/lib/shuffler.cc b/src/lib/shuffler.cc
index 66f29a8e1..d017343f8 100644
--- a/src/lib/shuffler.cc
+++ b/src/lib/shuffler.cc
@@ -51,7 +51,7 @@ struct Comparator
void
Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
- LOG_DEBUG_THREE_D("Shuffler::video time={} eyes={} part={}", to_string(video.time), static_cast<int>(video.eyes), static_cast<int>(video.part));
+ LOG_DEBUG_THREE_D("Shuffler::video time={} eyes={} part={}", video.time.to_string(), static_cast<int>(video.eyes), static_cast<int>(video.part));
if (video.eyes != Eyes::LEFT && video.eyes != Eyes::RIGHT) {
/* Pass through anything that we don't care about */
@@ -84,8 +84,8 @@ Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video)
);
if (!store_front_in_sequence) {
- string const store = _store.empty() ? "store empty" : fmt::format("store front time={} eyes={}", to_string(_store.front().second.time), static_cast<int>(_store.front().second.eyes));
- string const last = _last ? fmt::format("last time={} eyes={}", to_string(_last->time), static_cast<int>(_last->eyes)) : "no last";
+ string const store = _store.empty() ? "store empty" : fmt::format("store front time={} eyes={}", _store.front().second.time.to_string(), static_cast<int>(_store.front().second.eyes));
+ string const last = _last ? fmt::format("last time={} eyes={}", _last->time.to_string(), static_cast<int>(_last->eyes)) : "no last";
LOG_DEBUG_THREE_D("Shuffler not in sequence: {} {}", store, last);
}
@@ -98,10 +98,10 @@ Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video)
}
if (_store.size() > _max_size) {
- LOG_WARNING("Shuffler is full after receiving frame at {}; 3D sync may be incorrect.", to_string(video.time));
+ LOG_WARNING("Shuffler is full after receiving frame at {}; 3D sync may be incorrect.", video.time.to_string());
}
- LOG_DEBUG_THREE_D("Shuffler emits time={} eyes={} store={}", to_string(_store.front().second.time), static_cast<int>(_store.front().second.eyes), _store.size());
+ LOG_DEBUG_THREE_D("Shuffler emits time={} eyes={} store={}", _store.front().second.time.to_string(), static_cast<int>(_store.front().second.eyes), _store.size());
Video (_store.front().first, _store.front().second);
_last = _store.front().second;
_store.pop_front ();