diff options
| author | Carl Hetherington <cth@carlh.net> | 2024-12-22 00:09:57 +0100 |
|---|---|---|
| committer | Carl Hetherington <cth@carlh.net> | 2025-09-02 22:40:13 +0200 |
| commit | 33b0928b20618da5bc295711bfdf3d638863afa5 (patch) | |
| tree | 24af21c6a7b863d914991ae837c36b6b9b50bcce /src/lib | |
| parent | 6784eb8de2451afd2dedc15c05eac043011b5afb (diff) | |
Untested conversion to num/den DCPTime (branch: arbitrary-hz)
Summary of required changes:
Replace ::from_frames with a constructor that takes num/den.
Provide and use member to_debug_string() instead of to_string().
Provide and use member to_serialisable_string() and a string constructor, instead of fmt::to_string on .get() and the number constructor.
Provide and use content_time() member instead of ContentTime constructor from DCPTime.
Use frames_round(96000) instead of get() when comparing times to see if they are "close enough".
Provide and use DCPTime(x, FrameRateChange) constructor when converting from ContentTime.
Use .seconds() when calculating proportions, and in some places when dividing by HZ.
Provide and use operator bool().
Pass explicit 96000 denominator in a lot of places.
Add member max() and use it instead of static max()
Change BOOST_CHECK_EQUAL to BOOST_CHECK
Provide operator/ and use it instead of .get() / 2.
Diffstat (limited to 'src/lib')
32 files changed, 465 insertions, 142 deletions
diff --git a/src/lib/active_text.cc b/src/lib/active_text.cc index 2a5c4d836..003f8f3a5 100644 --- a/src/lib/active_text.cc +++ b/src/lib/active_text.cc @@ -73,9 +73,9 @@ ActiveText::get_burnt (DCPTimePeriod period, bool always_burn_captions) const } for (auto j: i.second) { - DCPTimePeriod test (j.from, j.to.get_value_or(DCPTime::max())); + DCPTimePeriod test(j.from, j.to.get_value_or(j.from.max())); auto overlap = period.overlap (test); - if (overlap && overlap->duration() > DCPTime(period.duration().get() / 2)) { + if (overlap && overlap->duration() > DCPTime(period.duration() / 2)) { ps.push_back (j.subs); } } diff --git a/src/lib/atmos_mxf_content.cc b/src/lib/atmos_mxf_content.cc index 32e8a73b2..5d47a18ee 100644 --- a/src/lib/atmos_mxf_content.cc +++ b/src/lib/atmos_mxf_content.cc @@ -109,12 +109,12 @@ DCPTime AtmosMXFContent::full_length (shared_ptr<const Film> film) const { FrameRateChange const frc (film, shared_from_this()); - return DCPTime::from_frames (llrint(atmos->length() * frc.factor()), film->video_frame_rate()); + return DCPTime(llrint(atmos->length() * frc.factor()), film->video_frame_rate()); } DCPTime AtmosMXFContent::approximate_length () const { - return DCPTime::from_frames (atmos->length(), 24); + return DCPTime(atmos->length(), 24); } diff --git a/src/lib/audio_analyser.cc b/src/lib/audio_analyser.cc index 7d4ee6ace..b1ccef901 100644 --- a/src/lib/audio_analyser.cc +++ b/src/lib/audio_analyser.cc @@ -135,7 +135,7 @@ AudioAnalyser::AudioAnalyser(shared_ptr<const Film> film, shared_ptr<const Playl void AudioAnalyser::analyse (shared_ptr<AudioBuffers> b, DCPTime time) { - LOG_DEBUG_AUDIO_ANALYSIS("AudioAnalyser received {} frames at {}", b->frames(), to_string(time)); + LOG_DEBUG_AUDIO_ANALYSIS("AudioAnalyser received {} frames at {}", b->frames(), time.to_debug_string()); DCPOMATIC_ASSERT (time >= _start); /* In bug #2364 we had a lot of frames arriving here (~47s worth) which * caused an OOM error on Windows. 
Check for the number of frames being @@ -197,7 +197,7 @@ AudioAnalyser::finish () vector<AudioAnalysis::PeakTime> sample_peak; for (int i = 0; i < _film->audio_channels(); ++i) { sample_peak.push_back ( - AudioAnalysis::PeakTime (_sample_peak[i], DCPTime::from_frames (_sample_peak_frame[i], _film->audio_frame_rate ())) + AudioAnalysis::PeakTime(_sample_peak[i], DCPTime(_sample_peak_frame[i], _film->audio_frame_rate())) ); } _analysis.set_sample_peak (sample_peak); diff --git a/src/lib/audio_analysis.cc b/src/lib/audio_analysis.cc index 8375ae770..d5c6e3d2e 100644 --- a/src/lib/audio_analysis.cc +++ b/src/lib/audio_analysis.cc @@ -85,7 +85,7 @@ AudioAnalysis::AudioAnalysis (boost::filesystem::path filename) } for (auto i: f.node_children ("SamplePeak")) { - auto const time = number_attribute<Frame>(i, "Time", "time"); + auto const time = i->string_attribute("Time"); _sample_peak.push_back(PeakTime(dcp::raw_convert<float>(i->content()), DCPTime(time))); } @@ -153,7 +153,7 @@ AudioAnalysis::write (boost::filesystem::path filename) for (size_t i = 0; i < _sample_peak.size(); ++i) { auto n = cxml::add_child(root, "SamplePeak"); n->add_child_text(fmt::to_string(_sample_peak[i].peak)); - n->set_attribute("time", fmt::to_string(_sample_peak[i].time.get())); + n->set_attribute("time", _sample_peak[i].time.to_serialisable_string()); } for (auto i: _true_peak) { diff --git a/src/lib/audio_content.cc b/src/lib/audio_content.cc index 142dc0855..a290bbc13 100644 --- a/src/lib/audio_content.cc +++ b/src/lib/audio_content.cc @@ -307,7 +307,7 @@ AudioContent::add_properties (shared_ptr<const Film> film, list<UserProperty>& p } FrameRateChange const frc (_parent->active_video_frame_rate(film), film->video_frame_rate()); - ContentTime const c (_parent->full_length(film), frc); + auto const c = _parent->full_length(film).content_time(frc); p.push_back ( UserProperty (UserProperty::LENGTH, _("Full length in video frames at content rate"), c.frames_round(frc.source)) diff --git 
a/src/lib/audio_merger.cc b/src/lib/audio_merger.cc index 0bc1ad008..cbb28f918 100644 --- a/src/lib/audio_merger.cc +++ b/src/lib/audio_merger.cc @@ -83,7 +83,7 @@ AudioMerger::pull (DCPTime time) auto audio = make_shared<AudioBuffers>(i.audio, overlap, 0); out.push_back (make_pair(audio, i.time)); i.audio->trim_start (overlap); - i.time += DCPTime::from_frames(overlap, _frame_rate); + i.time += DCPTime(overlap, _frame_rate); DCPOMATIC_ASSERT (i.audio->frames() > 0); new_buffers.push_back (i); } @@ -110,7 +110,7 @@ AudioMerger::push (std::shared_ptr<const AudioBuffers> audio, DCPTime time) { DCPOMATIC_ASSERT (audio->frames() > 0); - DCPTimePeriod period (time, time + DCPTime::from_frames (audio->frames(), _frame_rate)); + DCPTimePeriod period(time, time + DCPTime(audio->frames(), _frame_rate)); /* Mix any overlapping parts of this new block with existing ones */ for (auto i: _buffers) { diff --git a/src/lib/audio_merger.h b/src/lib/audio_merger.h index a6b7637f6..5d69b6035 100644 --- a/src/lib/audio_merger.h +++ b/src/lib/audio_merger.h @@ -68,7 +68,7 @@ private: int frame_rate; dcpomatic::DCPTimePeriod period () const { - return dcpomatic::DCPTimePeriod (time, time + dcpomatic::DCPTime::from_frames (audio->frames(), frame_rate)); + return dcpomatic::DCPTimePeriod(time, time + dcpomatic::DCPTime(audio->frames(), frame_rate)); } }; diff --git a/src/lib/audio_ring_buffers.cc b/src/lib/audio_ring_buffers.cc index a257edd38..4e8fa2603 100644 --- a/src/lib/audio_ring_buffers.cc +++ b/src/lib/audio_ring_buffers.cc @@ -49,11 +49,11 @@ AudioRingBuffers::put(shared_ptr<const AudioBuffers> data, DCPTime time, int fra if (!_buffers.empty()) { DCPOMATIC_ASSERT(_buffers.front().first->channels() == data->channels()); - DCPTime const end = (_buffers.back().second + DCPTime::from_frames(_buffers.back().first->frames(), frame_rate)); - if (labs(end.get() - time.get()) > 1) { - cout << "bad put " << to_string(_buffers.back().second) << " " << _buffers.back().first->frames() << " " 
<< to_string(time) << "\n"; + DCPTime const end = (_buffers.back().second + DCPTime(_buffers.back().first->frames(), frame_rate)); + if (labs(end.frames_round(96000) - time.frames_round(96000)) > 1) { + cout << "bad put " << _buffers.back().second.to_debug_string() << " " << _buffers.back().first->frames() << " " << time.to_debug_string() << "\n"; } - DCPOMATIC_ASSERT(labs(end.get() - time.get()) < 2); + DCPOMATIC_ASSERT(labs(end.frames_round(96000) - time.frames_round(96000)) < 2); } _buffers.push_back(make_pair(data, time)); @@ -80,7 +80,7 @@ AudioRingBuffers::get(float* out, int channels, int frames) auto front = _buffers.front(); if (!time) { - time = front.second + DCPTime::from_frames(_used_in_head, 48000); + time = front.second + DCPTime(_used_in_head, 48000); } int const to_do = min(frames, front.first->frames() - _used_in_head); diff --git a/src/lib/butler.cc b/src/lib/butler.cc index a4f00eb08..70fae4dc4 100644 --- a/src/lib/butler.cc +++ b/src/lib/butler.cc @@ -136,10 +136,9 @@ Butler::should_run() const { if (_video.size() >= MAXIMUM_VIDEO_READAHEAD * 10) { /* This is way too big */ - auto pos = _audio.peek(); - if (pos) { + if (auto pos = _audio.peek()) { throw ProgrammingError - (__FILE__, __LINE__, fmt::format("Butler video buffers reached {} frames (audio is {} at {})", _video.size(), _audio.size(), pos->get())); + (__FILE__, __LINE__, fmt::format("Butler video buffers reached {} frames (audio is {} at {})", _video.size(), _audio.size(), pos->to_debug_string())); } else { throw ProgrammingError (__FILE__, __LINE__, fmt::format("Butler video buffers reached {} frames (audio is {})", _video.size(), _audio.size())); @@ -148,10 +147,9 @@ Butler::should_run() const if (_audio.size() >= MAXIMUM_AUDIO_READAHEAD * 10) { /* This is way too big */ - auto pos = _audio.peek(); - if (pos) { + if (auto pos = _audio.peek()) { throw ProgrammingError - (__FILE__, __LINE__, fmt::format("Butler audio buffers reached {} frames at {} (video is {})", _audio.size(), 
pos->get(), _video.size())); + (__FILE__, __LINE__, fmt::format("Butler audio buffers reached {} frames at {} (video is {})", _audio.size(), pos->to_debug_string(), _video.size())); } else { throw ProgrammingError (__FILE__, __LINE__, fmt::format("Butler audio buffers reached {} frames (video is {})", _audio.size(), _video.size())); diff --git a/src/lib/content.cc b/src/lib/content.cc index 6bb7c02c9..bb9aa361e 100644 --- a/src/lib/content.cc +++ b/src/lib/content.cc @@ -87,7 +87,7 @@ Content::Content(cxml::ConstNodePtr node, boost::optional<boost::filesystem::pat } } _digest = node->optional_string_child("Digest").get_value_or("X"); - _position = DCPTime(node->number_child<DCPTime::Type>("Position")); + _position = DCPTime(node->string_child("Position")); _trim_start = ContentTime(node->number_child<ContentTime::Type>("TrimStart")); _trim_end = ContentTime(node->number_child<ContentTime::Type>("TrimEnd")); _video_frame_rate = node->optional_number_child<double>("VideoFrameRate"); @@ -146,7 +146,7 @@ Content::as_xml(xmlpp::Element* element, bool with_paths, PathBehaviour path_beh } } cxml::add_text_child(element, "Digest", _digest); - cxml::add_text_child(element, "Position", fmt::to_string(_position.get())); + cxml::add_text_child(element, "Position", _position.to_serialisable_string()); cxml::add_text_child(element, "TrimStart", fmt::to_string(_trim_start.get())); cxml::add_text_child(element, "TrimEnd", fmt::to_string(_trim_end.get())); if (_video_frame_rate) { @@ -322,8 +322,8 @@ Content::identifier() const { char buffer[256]; snprintf( - buffer, sizeof(buffer), "%s_%" PRId64 "_%" PRId64 "_%" PRId64, - Content::digest().c_str(), position().get(), trim_start().get(), trim_end().get() + buffer, sizeof(buffer), "%s_%s_%" PRId64 "_%" PRId64, + Content::digest().c_str(), position().to_serialisable_string().c_str(), trim_start().get(), trim_end().get() ); return buffer; } diff --git a/src/lib/copy_dcp_details_to_film.cc b/src/lib/copy_dcp_details_to_film.cc index 
9e0ad79c1..f5334fc70 100644 --- a/src/lib/copy_dcp_details_to_film.cc +++ b/src/lib/copy_dcp_details_to_film.cc @@ -74,7 +74,7 @@ copy_dcp_markers_to_film(shared_ptr<const DCPContent> dcp, shared_ptr<Film> film { film->clear_markers(); for (auto const& i: dcp->markers()) { - film->set_marker(i.first, dcpomatic::DCPTime(i.second.get())); + film->set_marker(i.first, dcpomatic::DCPTime(i.second, FrameRateChange{})); } } diff --git a/src/lib/cover_sheet.cc b/src/lib/cover_sheet.cc index b0450dbd0..c21f28a4d 100644 --- a/src/lib/cover_sheet.cc +++ b/src/lib/cover_sheet.cc @@ -94,7 +94,7 @@ dcpomatic::write_cover_sheet(shared_ptr<const Film> film, boost::filesystem::pat } boost::algorithm::replace_all(text, "$AUDIO", description); - auto const hmsf = film->length().split(film->video_frame_rate()); + auto const hmsf = film->length().splitX(film->video_frame_rate()); string length; if (hmsf.h == 0 && hmsf.m == 0) { length = fmt::format("{}s", hmsf.s); diff --git a/src/lib/dcp_content.cc b/src/lib/dcp_content.cc index 5a4593ec2..64e749cc1 100644 --- a/src/lib/dcp_content.cc +++ b/src/lib/dcp_content.cc @@ -324,7 +324,7 @@ DCPContent::examine(shared_ptr<const Film> film, shared_ptr<Job> job, bool toler _cpl = examiner->cpl (); _reel_lengths = examiner->reel_lengths (); for (auto const& i: examiner->markers()) { - _markers[i.first] = ContentTime(i.second.as_editable_units_ceil(DCPTime::HZ)); + _markers[i.first] = ContentTime(i.second.as_editable_units_ceil(ContentTime::HZ)); } _ratings = examiner->ratings (); _content_versions = examiner->content_versions (); @@ -472,7 +472,7 @@ DCPContent::full_length (shared_ptr<const Film> film) const return {}; } FrameRateChange const frc (film, shared_from_this()); - return DCPTime::from_frames (llrint(video->length() * frc.factor()), film->video_frame_rate()); + return DCPTime(llrint(video->length() * frc.factor()), film->video_frame_rate()); } DCPTime @@ -481,7 +481,7 @@ DCPContent::approximate_length () const if (!video) { return {}; 
} - return DCPTime::from_frames (video->length(), 24); + return DCPTime(video->length(), 24); } string @@ -620,11 +620,11 @@ DCPContent::reels (shared_ptr<const Film> film) const */ /* The starting point of this content on the timeline */ - auto pos = position() - DCPTime (trim_start().get()); + auto pos = position() - DCPTime(trim_start(), FrameRateChange{}); for (auto i: reel_lengths) { /* This reel runs from `pos' to `to' */ - DCPTime const to = pos + DCPTime::from_frames (i, film->video_frame_rate()); + DCPTime const to = pos + DCPTime(i, film->video_frame_rate()); if (to > position()) { p.push_back (DCPTimePeriod(max(position(), pos), min(end(film), to))); if (to > end(film)) { diff --git a/src/lib/dcpomatic_time.cc b/src/lib/dcpomatic_time.cc index 60fc5342a..a969f4c19 100644 --- a/src/lib/dcpomatic_time.cc +++ b/src/lib/dcpomatic_time.cc @@ -20,10 +20,13 @@ #include "dcpomatic_time.h" +#include <dcp/raw_convert.h> +#include <boost/algorithm/string.hpp> #include <inttypes.h> using std::string; +using std::vector; using namespace dcpomatic; @@ -47,14 +50,6 @@ dcpomatic::operator<=(HMSF const& a, HMSF const& b) template <> -Time<ContentTimeDifferentiator, DCPTimeDifferentiator>::Time (DCPTime d, FrameRateChange f) - : _t (llrint(d.get() * f.speed_up)) -{ - -} - - -template <> Time<DCPTimeDifferentiator, ContentTimeDifferentiator>::Time (ContentTime d, FrameRateChange f) : _t (llrint(d.get() / f.speed_up)) { @@ -119,27 +114,280 @@ dcpomatic::to_string (ContentTime t) } +DCPTime::DCPTime(Type num) + : _num(num) + , _den(96000) +{ + +} + + +DCPTime::DCPTime(Type num, Type den) + : _num(num) + , _den(den) +{ + DCPOMATIC_ASSERT(_den); +} + + +DCPTime::DCPTime(ContentTime time, FrameRateChange frc) + : _num(llrint(time.get() / frc.speed_up)) + , _den(ContentTime::HZ) +{ + +} + + +DCPTime::DCPTime(string const& serializable_string) +{ + vector<string> parts; + boost::algorithm::split(parts, serializable_string, boost::is_any_of("_")); + if (parts.size() == 1) { + _num 
= dcp::raw_convert<int64_t>(parts[0]); + _den = 96000; + } else { + _num = dcp::raw_convert<int64_t>(parts[0]); + _den = dcp::raw_convert<int64_t>(parts[1]); + } +} + + +DCPTime::DCPTime(HMSF const& hmsf, float fps) +{ + *this = from_seconds(hmsf.h * 3600) + + from_seconds(hmsf.m * 60) + + from_seconds(hmsf.s) + + DCPTime(hmsf.f * 1000, fps * 1000); +} + + string -dcpomatic::to_string (DCPTime t) +DCPTime::to_serialisable_string() const { - char buffer[64]; -#ifdef DCPOMATIC_WINDOWS - __mingw_snprintf (buffer, sizeof(buffer), "[DCP %" PRId64 " %fs]", t.get(), t.seconds()); -#else - snprintf (buffer, sizeof(buffer), "[DCP %" PRId64 " %fs]", t.get(), t.seconds()); -#endif - return buffer; + return fmt::format("{}_{}", _num, _den); } string -dcpomatic::to_string (DCPTimePeriod p) +DCPTime::to_debug_string() const { - char buffer[64]; -#ifdef DCPOMATIC_WINDOWS - __mingw_snprintf (buffer, sizeof(buffer), "[DCP %" PRId64 " %fs -> %" PRId64 " %fs]", p.from.get(), p.from.seconds(), p.to.get(), p.to.seconds()); -#else - snprintf (buffer, sizeof(buffer), "[DCP %" PRId64 " %fs -> %" PRId64 " %fs]", p.from.get(), p.from.seconds(), p.to.get(), p.to.seconds()); -#endif + return fmt::format("[{}/{} {}]", _num, _den, seconds()); +} + + +double +DCPTime::seconds() const +{ + return static_cast<double>(_num) / _den; +} + +bool +DCPTime::operator<(DCPTime const& o) const +{ + DCPOMATIC_ASSERT(_den == o._den); + return _num < o._num; +} + + +bool +DCPTime::operator<=(DCPTime const& o) const +{ + DCPOMATIC_ASSERT(_den == o._den); + return _num <= o._num; +} + + +bool +DCPTime::operator==(DCPTime const& o) const +{ + DCPOMATIC_ASSERT(_den == o._den); + return _num == o._num; +} + + +bool +DCPTime::operator!=(DCPTime const& o) const +{ + DCPOMATIC_ASSERT(_den == o._den); + return _num != o._num; +} + + +bool +DCPTime::operator>=(DCPTime const& o) const +{ + DCPOMATIC_ASSERT(_den == o._den); + return _num >= o._num; +} + + +bool +DCPTime::operator>(DCPTime const& o) const +{ + 
DCPOMATIC_ASSERT(_den == o._den); + return _num > o._num; +} + + +int64_t +DCPTime::frames_floor(int r) const +{ + return (_num * r) / _den; +} + + +int64_t +DCPTime::frames_round(int r) const +{ + return ((_num * r) + (r / 2)) / _den; +} + + +int64_t +DCPTime::frames_ceil(int r) const +{ + return ((_num + 1) * r) / _den; +} + + +DCPTime +DCPTime::operator+(DCPTime const& o) const +{ + DCPOMATIC_ASSERT(_den == o._den); + return DCPTime(_num + o._num, _den); +} + + +DCPTime& +DCPTime::operator+=(DCPTime const& o) +{ + DCPOMATIC_ASSERT(_den == o._den); + _num += o._num; + return *this; +} + + +DCPTime +DCPTime::operator-(DCPTime const& o) const +{ + DCPOMATIC_ASSERT(_den == o._den); + return DCPTime(_num - o._num, _den); +} + + +DCPTime +DCPTime::operator-() const +{ + return DCPTime(-_num, _den); +} + + +DCPTime& +DCPTime::operator-=(DCPTime const& o) +{ + DCPOMATIC_ASSERT(_den == o._den); + _num -= o._num; + return *this; +} + + +DCPTime +DCPTime::operator*(int o) const +{ + return DCPTime(_num * o, _den); +} + + +DCPTime +DCPTime::operator/(int o) const +{ + return DCPTime(_num, _den * o); +} + + +DCPTime::operator bool() const +{ + return _num != 0; +} + + +DCPTime +DCPTime::max() const +{ + return DCPTime(INT64_MAX, _den); +} + + +DCPTime +DCPTime::from_seconds(double s) +{ + return DCPTime(s * 96000, 96000); +} + + +DCPTime +DCPTime::floor(int r) const +{ + return DCPTime(frames_floor(r), r); +} + + +DCPTime +DCPTime::round(int r) const +{ + return DCPTime(frames_round(r), r); +} + + +DCPTime +DCPTime::ceil(int r) const +{ + return DCPTime(frames_ceil(r), r); +} + + +DCPTime +DCPTime::abs() const +{ + return DCPTime(std::abs(_num), _den); +} + + +HMSF +DCPTime::splitX(int r) const +{ + /* Do this calculation with frames so that we can round + to a frame boundary at the start rather than the end. 
+ */ + auto ff = frames_round(r); + HMSF hmsf; + + hmsf.h = ff / (3600 * r); + ff -= static_cast<int64_t>(hmsf.h) * 3600 * r; + hmsf.m = ff / (60 * r); + ff -= static_cast<int64_t>(hmsf.m) * 60 * r; + hmsf.s = ff / r; + ff -= static_cast<int64_t>(hmsf.s) * r; + + hmsf.f = static_cast<int>(ff); + return hmsf; +} + + +string +DCPTime::timecodeX(int r) const +{ + auto hmsf = splitX(r); + + char buffer[128]; + snprintf(buffer, sizeof(buffer), "%02d:%02d:%02d:%02d", hmsf.h, hmsf.m, hmsf.s, hmsf.f); return buffer; } + + +ContentTime +DCPTime::content_time(FrameRateChange frc) const +{ + return ContentTime(frames_round(ContentTime::HZ) * frc.speed_up); +} + diff --git a/src/lib/dcpomatic_time.h b/src/lib/dcpomatic_time.h index 6de576246..685da778a 100644 --- a/src/lib/dcpomatic_time.h +++ b/src/lib/dcpomatic_time.h @@ -216,7 +216,7 @@ public: * @return Split time. */ template <typename T> - HMSF split (T r) const + HMSF splitX(T r) const { /* Do this calculation with frames so that we can round to a frame boundary at the start rather than the end. 
@@ -236,8 +236,8 @@ public: } template <typename T> - std::string timecode (T r) const { - auto hmsf = split (r); + std::string timecodeX(T r) const { + auto hmsf = splitX(r); char buffer[128]; snprintf (buffer, sizeof(buffer), "%02d:%02d:%02d:%02d", hmsf.h, hmsf.m, hmsf.s, hmsf.f); @@ -291,8 +291,7 @@ Time<DCPTimeDifferentiator, ContentTimeDifferentiator>::Time (Time<ContentTimeDi /** Time relative to the start or position of a piece of content in its native frame rate */ typedef Time<ContentTimeDifferentiator, DCPTimeDifferentiator> ContentTime; -/** Time relative to the start of the output DCP in its frame rate */ -typedef Time<DCPTimeDifferentiator, ContentTimeDifferentiator> DCPTime; + template <class T> class TimePeriod @@ -392,6 +391,66 @@ std::list<TimePeriod<T>> subtract (TimePeriod<T> A, std::list<TimePeriod<T>> con } +class DCPTime +{ +private: + using Type = int64_t; + +public: + DCPTime() = default; + explicit DCPTime(Type num); + DCPTime(Type num, Type den); + DCPTime(ContentTime time, FrameRateChange frc); + DCPTime(std::string const& serializable_string); + DCPTime(HMSF const& hmsf, float fps); + + static DCPTime from_seconds(double s); + + std::string to_serialisable_string() const; + std::string to_debug_string() const; + + double seconds() const; + + int64_t frames_floor(int r) const; + int64_t frames_round(int r) const; + int64_t frames_ceil(int r) const; + DCPTime floor(int r) const; + DCPTime round(int r) const; + DCPTime ceil(int r) const; + + DCPTime abs() const; + + bool operator<(DCPTime const& o) const; + bool operator<=(DCPTime const& o) const; + bool operator==(DCPTime const& o) const; + bool operator!=(DCPTime const& o) const; + bool operator>=(DCPTime const& o) const; + bool operator>(DCPTime const& o) const; + + DCPTime operator+(DCPTime const& o) const; + DCPTime& operator+=(DCPTime const& o); + DCPTime operator-() const; + DCPTime operator-(DCPTime const& o) const; + DCPTime& operator-=(DCPTime const& o); + DCPTime operator*(int o) 
const; + DCPTime operator/(int o) const; + + DCPTime max() const; + + explicit operator bool() const; + + HMSF splitX(int r) const; + std::string timecodeX(int r) const; + + ContentTime content_time(FrameRateChange frc) const; + +private: + Type _num = 0; + Type _den = 1; +}; + + + typedef TimePeriod<ContentTime> ContentTimePeriod; typedef TimePeriod<DCPTime> DCPTimePeriod; @@ -401,7 +460,6 @@ DCPTime max (DCPTime a, DCPTime b); ContentTime min (ContentTime a, ContentTime b); ContentTime max (ContentTime a, ContentTime b); std::string to_string (ContentTime t); -std::string to_string (DCPTime t); std::string to_string (DCPTimePeriod p); diff --git a/src/lib/encode_cli.cc b/src/lib/encode_cli.cc index 8bf1a4a26..bbd28094b 100644 --- a/src/lib/encode_cli.cc +++ b/src/lib/encode_cli.cc @@ -113,7 +113,7 @@ print_dump(function<void (string)> out, shared_ptr<Film> film) out(fmt::format("{}\n", film->dcp_name(true))); out(fmt::format("{} at {}\n", film->container().container_nickname(), film->resolution() == Resolution::TWO_K ? "2K" : "4K")); out(fmt::format("{}Mbit/s\n", film->video_bit_rate(film->video_encoding()) / 1000000)); - out(fmt::format("Duration {}\n", film->length().timecode(film->video_frame_rate()))); + out(fmt::format("Duration {}\n", film->length().timecodeX(film->video_frame_rate()))); out(fmt::format("Output {}fps {} {}kHz\n", film->video_frame_rate(), film->three_d() ? "3D" : "2D", film->audio_frame_rate() / 1000)); out(fmt::format("{} {}\n", film->interop() ? "Inter-Op" : "SMPTE", film->encrypted() ? 
"encrypted" : "unencrypted")); diff --git a/src/lib/ffmpeg_content.cc b/src/lib/ffmpeg_content.cc index 6261c4003..7c3431ffe 100644 --- a/src/lib/ffmpeg_content.cc +++ b/src/lib/ffmpeg_content.cc @@ -416,13 +416,13 @@ FFmpegContent::full_length (shared_ptr<const Film> film) const { FrameRateChange const frc (film, shared_from_this()); if (video) { - return DCPTime::from_frames (llrint (video->length_after_3d_combine() * frc.factor()), film->video_frame_rate()); + return DCPTime(llrint(video->length_after_3d_combine() * frc.factor()), film->video_frame_rate()); } if (audio) { DCPTime longest; for (auto i: audio->streams()) { - longest = max (longest, DCPTime::from_frames(llrint(i->length() / frc.speed_up), i->frame_rate())); + longest = max(longest, DCPTime(llrint(i->length() / frc.speed_up), i->frame_rate())); } return longest; } @@ -437,7 +437,7 @@ DCPTime FFmpegContent::approximate_length () const { if (video) { - return DCPTime::from_frames (video->length_after_3d_combine(), 24); + return DCPTime(video->length_after_3d_combine(), 24); } DCPOMATIC_ASSERT (audio); @@ -447,7 +447,7 @@ FFmpegContent::approximate_length () const longest = max (longest, Frame(llrint(i->length()))); } - return DCPTime::from_frames (longest, 24); + return DCPTime(longest, 24); } diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc index 74836c1a8..f4642355a 100644 --- a/src/lib/ffmpeg_decoder.cc +++ b/src/lib/ffmpeg_decoder.cc @@ -173,7 +173,7 @@ FFmpegDecoder::flush_fill() bool did_something = false; auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position()); - ContentTime full_length (_ffmpeg_content->full_length(film()), frc); + auto full_length = _ffmpeg_content->full_length(film()).content_time(frc); full_length = full_length.ceil (frc.source); if (video && !video->ignore()) { double const vfr = _ffmpeg_content->video_frame_rate().get(); diff --git a/src/lib/ffmpeg_file_encoder.cc b/src/lib/ffmpeg_file_encoder.cc index 9df078ad4..67523e250 100644 
--- a/src/lib/ffmpeg_file_encoder.cc +++ b/src/lib/ffmpeg_file_encoder.cc @@ -433,7 +433,7 @@ FFmpegFileEncoder::video (shared_ptr<PlayerVideo> video, DCPTime time) frame->height = image->size().height; frame->format = _pixel_format; DCPOMATIC_ASSERT (_video_stream->time_base.num == 1); - frame->pts = time.get() * _video_stream->time_base.den / DCPTime::HZ; + frame->pts = time.seconds() * _video_stream->time_base.den; int r = avcodec_send_frame (_video_codec_context, frame); av_frame_free (&frame); diff --git a/src/lib/ffmpeg_film_encoder.cc b/src/lib/ffmpeg_film_encoder.cc index 97a3209e6..c2ed1aa0d 100644 --- a/src/lib/ffmpeg_film_encoder.cc +++ b/src/lib/ffmpeg_film_encoder.cc @@ -171,7 +171,7 @@ FFmpegFilmEncoder::go() auto reel = reel_periods.begin (); auto encoder = file_encoders.begin (); - auto const video_frame = DCPTime::from_frames (1, _film->video_frame_rate ()); + auto const video_frame = DCPTime(1, _film->video_frame_rate ()); int const audio_frames = video_frame.frames_round(_film->audio_frame_rate()); std::vector<float> interleaved(_output_audio_channels * audio_frames); auto deinterleaved = make_shared<AudioBuffers>(_output_audio_channels, audio_frames); @@ -209,9 +209,8 @@ FFmpegFilmEncoder::go() _last_time = time; } - auto job = _job.lock (); - if (job) { - job->set_progress(float(time.get()) / _film->length().get()); + if (auto job = _job.lock()) { + job->set_progress(time.seconds() / _film->length().seconds()); } waker.nudge (); diff --git a/src/lib/film.cc b/src/lib/film.cc index e2e2de111..1de8b3665 100644 --- a/src/lib/film.cc +++ b/src/lib/film.cc @@ -288,7 +288,7 @@ Film::info_file(DCPTimePeriod period) const { boost::filesystem::path p; p /= "info"; - p /= video_identifier() + "_" + fmt::to_string(period.from.get()) + "_" + fmt::to_string(period.to.get()); + p /= video_identifier() + "_" + fmt::to_string(period.from.to_serialisable_string()) + "_" + fmt::to_string(period.to.to_serialisable_string()); return file(p); } @@ -318,7 +318,7 @@ 
Film::audio_analysis_path(shared_ptr<const Playlist> playlist) const /* Likewise we only care about position if we're looking at a * whole-project view. */ - digester.add(content->position().get()); + digester.add(content->position().to_serialisable_string()); digester.add(content->trim_start().get()); digester.add(content->trim_end().get()); } @@ -421,14 +421,14 @@ Film::metadata(bool with_content_paths) const cxml::add_text_child(root, "ReelType", fmt::to_string(static_cast<int>(_reel_type))); cxml::add_text_child(root, "ReelLength", fmt::to_string(_reel_length)); for (auto boundary: _custom_reel_boundaries) { - cxml::add_text_child(root, "CustomReelBoundary", fmt::to_string(boundary.get())); + cxml::add_text_child(root, "CustomReelBoundary", boundary.to_serialisable_string()); } cxml::add_text_child(root, "ReencodeJ2K", _reencode_j2k ? "1" : "0"); cxml::add_text_child(root, "UserExplicitVideoFrameRate", _user_explicit_video_frame_rate ? "1" : "0"); for (auto const& marker: _markers) { auto m = cxml::add_child(root, "Marker"); m->set_attribute("type", dcp::marker_to_string(marker.first)); - m->add_child_text(fmt::to_string(marker.second.get())); + m->add_child_text(marker.second.to_serialisable_string()); } for (auto i: _ratings) { i.as_xml(cxml::add_child(root, "Rating")); @@ -629,7 +629,7 @@ Film::read_metadata(optional<boost::filesystem::path> path) _reel_type = static_cast<ReelType>(f.optional_number_child<int>("ReelType").get_value_or(static_cast<int>(ReelType::SINGLE))); _reel_length = f.optional_number_child<int64_t>("ReelLength").get_value_or(2000000000); for (auto boundary: f.node_children("CustomReelBoundary")) { - _custom_reel_boundaries.push_back(DCPTime(raw_convert<int64_t>(boundary->content()))); + _custom_reel_boundaries.push_back(DCPTime(boundary->content())); } _reencode_j2k = f.optional_bool_child("ReencodeJ2K").get_value_or(false); _user_explicit_video_frame_rate = f.optional_bool_child("UserExplicitVideoFrameRate").get_value_or(false); @@ 
-639,7 +639,7 @@ Film::read_metadata(optional<boost::filesystem::path> path) if (!type) { type = i->string_attribute("type"); } - _markers[dcp::marker_from_string(*type)] = DCPTime(dcp::raw_convert<DCPTime::Type>(i->content())); + _markers[dcp::marker_from_string(*type)] = DCPTime(i->content()); } for (auto i: f.node_children("Rating")) { @@ -1673,7 +1673,7 @@ Film::check_reel_boundaries_for_atmos() if (remake_boundaries) { vector<dcpomatic::DCPTime> required_boundaries; std::copy_if(atmos_boundaries.begin(), atmos_boundaries.end(), std::back_inserter(required_boundaries), [this](dcpomatic::DCPTime time) { - return time.get() != 0 && time != length(); + return time && time != length(); }); if (!required_boundaries.empty()) { set_reel_type(ReelType::CUSTOM); @@ -2031,7 +2031,7 @@ Film::reels_for_type(ReelType type) const */ Frame const reel_in_frames = max(_reel_length / ((video_bit_rate(video_encoding()) / video_frame_rate()) / 8), static_cast<Frame>(video_frame_rate())); while (current < len) { - DCPTime end = min(len, current + DCPTime::from_frames(reel_in_frames, video_frame_rate())); + DCPTime end = min(len, current + DCPTime(reel_in_frames, video_frame_rate())); periods.emplace_back(current, end); current = end; } @@ -2295,11 +2295,11 @@ void Film::add_ffoc_lfoc(Markers& markers) const { if (markers.find(dcp::Marker::FFOC) == markers.end()) { - markers[dcp::Marker::FFOC] = dcpomatic::DCPTime::from_frames(1, video_frame_rate()); + markers[dcp::Marker::FFOC] = dcpomatic::DCPTime(1, video_frame_rate()); } if (markers.find(dcp::Marker::LFOC) == markers.end()) { - markers[dcp::Marker::LFOC] = length() - DCPTime::from_frames(1, video_frame_rate()); + markers[dcp::Marker::LFOC] = length() - DCPTime(1, video_frame_rate()); } } diff --git a/src/lib/image_content.cc b/src/lib/image_content.cc index 8d3092196..d49c1114f 100644 --- a/src/lib/image_content.cc +++ b/src/lib/image_content.cc @@ -147,14 +147,14 @@ DCPTime ImageContent::full_length (shared_ptr<const Film> 
film) const { FrameRateChange const frc (film, shared_from_this()); - return DCPTime::from_frames (llrint(video->length_after_3d_combine() * frc.factor()), film->video_frame_rate()); + return DCPTime(llrint(video->length_after_3d_combine() * frc.factor()), film->video_frame_rate()); } DCPTime ImageContent::approximate_length () const { - return DCPTime::from_frames (video->length_after_3d_combine(), 24); + return DCPTime(video->length_after_3d_combine(), 24); } diff --git a/src/lib/j2k_encoder.cc b/src/lib/j2k_encoder.cc index 441e91827..058e689c9 100644 --- a/src/lib/j2k_encoder.cc +++ b/src/lib/j2k_encoder.cc @@ -312,19 +312,19 @@ J2KEncoder::encode (shared_ptr<PlayerVideo> pv, DCPTime time) if (_writer.can_fake_write(position)) { /* We can fake-write this frame */ - LOG_DEBUG_ENCODE("Frame @ {} FAKE", to_string(time)); + LOG_DEBUG_ENCODE("Frame @ {} FAKE", time.to_debug_string()); _writer.fake_write(position, pv->eyes ()); frame_done (); } else if (pv->has_j2k() && !_film->reencode_j2k()) { - LOG_DEBUG_ENCODE("Frame @ {} J2K", to_string(time)); + LOG_DEBUG_ENCODE("Frame @ {} J2K", time.to_debug_string()); /* This frame already has J2K data, so just write it */ _writer.write(pv->j2k(), position, pv->eyes ()); frame_done (); } else if (_last_player_video[pv->eyes()] && _writer.can_repeat(position) && pv->same(_last_player_video[pv->eyes()])) { - LOG_DEBUG_ENCODE("Frame @ {} REPEAT", to_string(time)); + LOG_DEBUG_ENCODE("Frame @ {} REPEAT", time.to_debug_string()); _writer.repeat(position, pv->eyes()); } else { - LOG_DEBUG_ENCODE("Frame @ {} ENCODE", to_string(time)); + LOG_DEBUG_ENCODE("Frame @ {} ENCODE", time.to_debug_string()); /* Queue this new frame for encoding */ LOG_TIMING ("add-frame-to-queue queue={}", _queue.size ()); auto dcpv = DCPVideo( diff --git a/src/lib/mpeg2_encoder.cc b/src/lib/mpeg2_encoder.cc index 38388431d..6a35c1666 100644 --- a/src/lib/mpeg2_encoder.cc +++ b/src/lib/mpeg2_encoder.cc @@ -45,7 +45,7 @@ 
MPEG2Encoder::encode(shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time) auto image = pv->image(force(AV_PIX_FMT_YUV420P), VideoRange::VIDEO, false); - dcp::FFmpegImage ffmpeg_image(time.get() * _film->video_frame_rate() / dcpomatic::DCPTime::HZ); + dcp::FFmpegImage ffmpeg_image(time.frames_round(_film->video_frame_rate())); DCPOMATIC_ASSERT(image->size() == ffmpeg_image.size()); diff --git a/src/lib/player.cc b/src/lib/player.cc index 985bd3a9c..d2a0ff76e 100644 --- a/src/lib/player.cc +++ b/src/lib/player.cc @@ -548,7 +548,7 @@ DCPTime Player::content_video_to_dcp(shared_ptr<const Piece> piece, Frame f) const { /* See comment in dcp_to_content_video */ - auto const d = DCPTime::from_frames(f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc); + auto const d = DCPTime(f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc); return d + piece->content->position(); } @@ -573,7 +573,7 @@ Player::resampled_audio_to_dcp(shared_ptr<const Piece> piece, Frame f) const DCPOMATIC_ASSERT(film); /* See comment in dcp_to_content_video */ - return DCPTime::from_frames(f, film->audio_frame_rate()) + return DCPTime(f, film->audio_frame_rate()) - DCPTime(piece->content->trim_start(), piece->frc) + piece->content->position(); } @@ -587,7 +587,7 @@ Player::dcp_to_content_time(shared_ptr<const Piece> piece, DCPTime t) const auto s = t - piece->content->position(); s = min(piece->content->length_after_trim(film), s); - return max(ContentTime(), ContentTime(s, piece->frc) + piece->content->trim_start()); + return max(ContentTime(), s.content_time(piece->frc) + piece->content->trim_start()); } @@ -773,7 +773,7 @@ Player::pass() break; } case BLACK: - LOG_DEBUG_PLAYER("Emit black for gap at {}", to_string(_black.position())); + LOG_DEBUG_PLAYER("Emit black for gap at {}", _black.position().to_debug_string()); if (!_next_video_time) { /* Deciding to emit black has the same effect as getting some video from the 
content * when we are inaccurately seeking. @@ -790,7 +790,7 @@ Player::pass() break; case SILENT: { - LOG_DEBUG_PLAYER("Emit silence for gap at {}", to_string(_silent.position())); + LOG_DEBUG_PLAYER("Emit silence for gap at {}", _silent.position().to_debug_string()); DCPTimePeriod period(_silent.period_at_position()); if (_next_audio_time) { /* Sometimes the thing that happened last finishes fractionally before @@ -798,9 +798,9 @@ Player::pass() I think this is nothing to worry about since we will just add or remove a little silence at the end of some content. */ - int64_t const error = labs(period.from.get() - _next_audio_time->get()); + int64_t const error = labs(period.from.frames_round(96000) - _next_audio_time->frames_round(96000)); /* Let's not worry about less than a frame at 24fps */ - int64_t const too_much_error = DCPTime::from_frames(1, 24).get(); + int64_t constexpr too_much_error = 96000 / 24; if (error >= too_much_error) { film->log()->log(fmt::format("Silence starting before or after last audio by {}", error), LogEntry::TYPE_ERROR); } @@ -845,7 +845,11 @@ Player::pass() std::map<AudioStreamPtr, StreamState> alive_stream_states; if (latest_last_push_end != have_pushed.end()) { - LOG_DEBUG_PLAYER("Leading audio stream is in {} at {}", latest_last_push_end->second.piece->content->path(0).string(), to_string(latest_last_push_end->second.last_push_end.get())); + LOG_DEBUG_PLAYER( + "Leading audio stream is in {} at {}", + latest_last_push_end->second.piece->content->path(0).string(), + latest_last_push_end->second.last_push_end->to_debug_string() + ); /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */ for (auto const& i: _stream_states) { @@ -868,7 +872,7 @@ Player::pass() pull_to = _silent.position(); } - LOG_DEBUG_PLAYER("Emitting audio up to {}", to_string(pull_to)); + LOG_DEBUG_PLAYER("Emitting audio up to {}", pull_to.to_debug_string()); auto audio = _audio_merger.pull(pull_to); for (auto i = 
audio.begin(); i != audio.end(); ++i) { if (_next_audio_time && i->second < *_next_audio_time) { @@ -888,7 +892,7 @@ Player::pass() if (done) { if (_next_video_time) { - LOG_DEBUG_PLAYER("Done: emit video until end of film at {}", to_string(film->length())); + LOG_DEBUG_PLAYER("Done: emit video until end of film at {}", film->length().to_debug_string()); emit_video_until(film->length()); } @@ -919,7 +923,7 @@ Player::open_texts_for_frame(DCPTime time) const for (auto type: { TextType::OPEN_SUBTITLE, TextType::OPEN_CAPTION }) { for ( auto const& text: - _active_texts[type].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles) + _active_texts[type].get_burnt(DCPTimePeriod(time, time + DCPTime(1, vfr)), _always_burn_open_subtitles) ) { /* Bitmap texts */ @@ -964,7 +968,7 @@ Player::open_texts_for_frame(DCPTime time) const void Player::emit_video_until(DCPTime time) { - LOG_DEBUG_PLAYER("emit_video_until {}; next video time is {}", to_string(time), to_string(_next_video_time.get_value_or({}))); + LOG_DEBUG_PLAYER("emit_video_until {}; next video time is {}", time.to_debug_string(), _next_video_time.get_value_or({}).to_debug_string()); auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) { /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the player before the video that requires them. 
@@ -1013,7 +1017,12 @@ Player::emit_video_until(DCPTime time) frame(right.first, next); } else if (both.first && (both.second - next) < age_threshold(both)) { frame(both.first, next); - LOG_DEBUG_PLAYER("Content {} selected for DCP {} (age {})", to_string(both.second), to_string(next), to_string(both.second - next)); + LOG_DEBUG_PLAYER( + "Content {} selected for DCP {} (age {})", + both.second.to_debug_string(), + next.to_debug_string(), + DCPTime(both.second - next).to_debug_string() + ); } else { auto film = _film.lock(); if (film && film->three_d()) { @@ -1022,7 +1031,7 @@ Player::emit_video_until(DCPTime time) } else { frame(black_player_video_frame(Eyes::BOTH), next); } - LOG_DEBUG_PLAYER("Black selected for DCP {}", to_string(next)); + LOG_DEBUG_PLAYER("Black selected for DCP {}", next.to_debug_string()); } } } @@ -1073,7 +1082,7 @@ Player::video(weak_ptr<Piece> weak_piece, ContentVideo video) /* Time of the frame we just received within the DCP */ auto const time = content_time_to_dcp(piece, video.time); - LOG_DEBUG_PLAYER("Received video frame {} {} eyes {}", to_string(video.time), to_string(time), static_cast<int>(video.eyes)); + LOG_DEBUG_PLAYER("Received video frame {} {} eyes {}", to_string(video.time), time.to_debug_string(), static_cast<int>(video.eyes)); if (time < piece->content->position()) { return; @@ -1162,12 +1171,18 @@ Player::audio(weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio co auto time = resampled_audio_to_dcp(piece, content_audio.frame); /* And the end of this block in the DCP */ - auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr); - LOG_DEBUG_PLAYER("Received audio frame {} covering {} to {} ({})", content_audio.frame, to_string(time), to_string(end), piece->content->path(0).filename().string()); + auto end = time + DCPTime(content_audio.audio->frames(), rfr); + LOG_DEBUG_PLAYER( + "Received audio frame {} covering {} to {} ({})", + content_audio.frame, + time.to_debug_string(), + 
end.to_debug_string(), + piece->content->path(0).filename().string() + ); /* Remove anything that comes before the start or after the end of the content */ if (time < piece->content->position()) { - auto cut = discard_audio(content_audio.audio, time, piece->content->position()); + auto const cut = discard_audio(content_audio.audio, time, piece->content->position()); if (!cut.first) { /* This audio is entirely discarded */ return; @@ -1225,7 +1240,7 @@ Player::audio(weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio co _audio_merger.push(content_audio.audio, time); DCPOMATIC_ASSERT(_stream_states.find(stream) != _stream_states.end()); - _stream_states[stream].last_push_end = time + DCPTime::from_frames(content_audio.audio->frames(), film->audio_frame_rate()); + _stream_states[stream].last_push_end = time + DCPTime(content_audio.audio->frames(), film->audio_frame_rate()); } @@ -1382,7 +1397,7 @@ void Player::seek(DCPTime time, bool accurate) { boost::mutex::scoped_lock lm(_mutex); - LOG_DEBUG_PLAYER("Seek to {} ({}accurate)", to_string(time), accurate ? "" : "in"); + LOG_DEBUG_PLAYER("Seek to {} ({}accurate)", time.to_debug_string(), accurate ? 
"" : "in"); if (_suspended) { /* We can't seek in this state */ @@ -1469,14 +1484,21 @@ Player::emit_audio(shared_ptr<AudioBuffers> data, DCPTime time) DCPOMATIC_ASSERT(film); /* Log if the assert below is about to fail */ - if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) { - film->log()->log(fmt::format("Out-of-sequence emit {} vs {}", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING); + if (_next_audio_time && labs(time.frames_round(96000) - _next_audio_time->frames_round(96000)) > 1) { + film->log()->log( + fmt::format( + "Out-of-sequence emit {} vs {}", + time.to_debug_string(), + _next_audio_time->to_debug_string() + ), + LogEntry::TYPE_WARNING + ); } /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */ - DCPOMATIC_ASSERT(!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2); + DCPOMATIC_ASSERT(!_next_audio_time || labs(time.frames_round(96000) - _next_audio_time->frames_round(96000)) < 2); Audio(data, time, film->audio_frame_rate()); - _next_audio_time = time + DCPTime::from_frames(data->frames(), film->audio_frame_rate()); + _next_audio_time = time + DCPTime(data->frames(), film->audio_frame_rate()); } @@ -1512,7 +1534,7 @@ Player::one_video_frame() const auto film = _film.lock(); DCPOMATIC_ASSERT(film); - return DCPTime::from_frames(1, film->video_frame_rate()); + return DCPTime(1, film->video_frame_rate()); } @@ -1607,7 +1629,7 @@ Player::atmos(weak_ptr<Piece> weak_piece, ContentAtmos data) auto const vfr = film->video_frame_rate(); - DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr)); + DCPTime const dcp_time = DCPTime(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr)); if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(film))) { return; } @@ -1664,5 +1686,5 @@ Player::progress() const optional<DCPTime> 
earliest_time; std::tie(earliest_content, earliest_time) = earliest_piece_and_time(); - return static_cast<float>(earliest_time.get_value_or(film->length()).get()) / film->length().get(); + return earliest_time.get_value_or(film->length()).seconds() / film->length().seconds(); } diff --git a/src/lib/playlist.cc b/src/lib/playlist.cc index 8c60a5458..d210131f4 100644 --- a/src/lib/playlist.cc +++ b/src/lib/playlist.cc @@ -488,7 +488,7 @@ Playlist::start() const return {}; } - auto start = DCPTime::max(); + auto start = DCPTime().max(); for (auto i: cont) { start = min(start, i->position()); } @@ -595,7 +595,7 @@ Playlist::content() const void Playlist::repeat(shared_ptr<const Film> film, ContentList c, int n) { - pair<DCPTime, DCPTime> range(DCPTime::max(), DCPTime()); + pair<DCPTime, DCPTime> range(DCPTime().max(), DCPTime()); for (auto i: c) { range.first = min(range.first, i->position()); range.second = max(range.second, i->position()); @@ -700,9 +700,8 @@ Playlist::content_summary(shared_ptr<const Film> film, DCPTimePeriod period) con int best_score = -1; for (auto i: content()) { int score = 0; - auto const o = i->period(film).overlap(period); - if (o) { - score += 100 * o.get().duration().get() / period.duration().get(); + if (auto const o = i->period(film).overlap(period)) { + score += 100 * o.get().duration().seconds() / period.duration().seconds(); } if (i->video) { diff --git a/src/lib/reel_writer.cc b/src/lib/reel_writer.cc index 01a798676..9ec8522ac 100644 --- a/src/lib/reel_writer.cc +++ b/src/lib/reel_writer.cc @@ -514,7 +514,7 @@ ReelWriter::create_reel_picture(shared_ptr<dcp::Reel> reel, list<ReferencedReelA for (auto j: refs) { auto k = dynamic_pointer_cast<dcp::ReelPictureAsset>(j.asset); if (k) { - LOG_GENERAL("candidate picture asset period is {}-{}", j.period.from.get(), j.period.to.get()); + LOG_GENERAL("candidate picture asset period is {}-{}", j.period.from.to_debug_string(), j.period.to.to_debug_string()); } if (k && j.period == _period) { 
reel_asset = k; @@ -556,7 +556,7 @@ ReelWriter::create_reel_sound(shared_ptr<dcp::Reel> reel, list<ReferencedReelAss for (auto j: refs) { auto k = dynamic_pointer_cast<dcp::ReelSoundAsset>(j.asset); if (k) { - LOG_GENERAL("candidate sound asset period is {}-{}", j.period.from.get(), j.period.to.get()); + LOG_GENERAL("candidate sound asset period is {}-{}", j.period.from.to_debug_string(), j.period.to.to_debug_string()); } if (k && j.period == _period) { reel_asset = k; @@ -682,7 +682,7 @@ ReelWriter::create_reel_markers(shared_ptr<dcp::Reel> reel) const auto ma = make_shared<dcp::ReelMarkersAsset>(dcp::Fraction(film()->video_frame_rate(), 1), reel->duration()); for (auto const& i: reel_markers) { DCPTime relative = i.second - _period.from; - auto hmsf = relative.split(film()->video_frame_rate()); + auto const hmsf = relative.splitX(film()->video_frame_rate()); ma->set(i.first, dcp::Time(hmsf.h, hmsf.m, hmsf.s, hmsf.f, film()->video_frame_rate())); } reel->add(ma); @@ -701,7 +701,7 @@ ReelWriter::create_reel( set<DCPTextTrack> ensure_closed_captions ) { - LOG_GENERAL("create_reel for {}-{}; {} of {}", _period.from.get(), _period.to.get(), _reel_index, _reel_count); + LOG_GENERAL("create_reel for {}-{}; {} of {}", _period.from.to_debug_string(), _period.to.to_debug_string(), _reel_index, _reel_count); auto reel = make_shared<dcp::Reel>(); diff --git a/src/lib/referenced_reel_asset.cc b/src/lib/referenced_reel_asset.cc index 5ef3b9ae7..65a8bade5 100644 --- a/src/lib/referenced_reel_asset.cc +++ b/src/lib/referenced_reel_asset.cc @@ -50,7 +50,7 @@ maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Fra r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end); if (r->actual_duration() > 0) { a.push_back ( - ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr))) + ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime(r->actual_duration(), ffr))) ); } } @@ -106,7 +106,7 @@ 
get_referenced_reel_assets(shared_ptr<const Film> film, shared_ptr<const Playlis Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start)); Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end))); - auto const from = content->position() + std::max(DCPTime(), DCPTime::from_frames(offset_from_start - trim_start, frame_rate)); + auto const from = content->position() + std::max(DCPTime(), DCPTime(offset_from_start - trim_start, frame_rate)); if (dcp->reference_video()) { maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate); } diff --git a/src/lib/remembered_asset.cc b/src/lib/remembered_asset.cc index dfe0917c5..6e4648133 100644 --- a/src/lib/remembered_asset.cc +++ b/src/lib/remembered_asset.cc @@ -40,8 +40,8 @@ RememberedAsset::RememberedAsset(cxml::ConstNodePtr node) DCPOMATIC_ASSERT(period_node); _period = { - dcpomatic::DCPTime(period_node->number_child<int64_t>("From")), - dcpomatic::DCPTime(period_node->number_child<int64_t>("To")) + dcpomatic::DCPTime(period_node->string_child("From")), + dcpomatic::DCPTime(period_node->string_child("To")) }; _identifier = node->string_child("Identifier"); @@ -53,8 +53,8 @@ RememberedAsset::as_xml(xmlpp::Element* parent) const { cxml::add_text_child(parent, "Filename", _filename.string()); auto period_node = cxml::add_child(parent, "Period"); - cxml::add_text_child(period_node, "From", fmt::to_string(_period.from.get())); - cxml::add_text_child(period_node, "To", fmt::to_string(_period.to.get())); + cxml::add_text_child(period_node, "From", _period.from.to_serialisable_string()); + cxml::add_text_child(period_node, "To", _period.to.to_serialisable_string()); cxml::add_text_child(parent, "Identifier", _identifier); } diff --git a/src/lib/subtitle_film_encoder.cc b/src/lib/subtitle_film_encoder.cc index 2f1fc7099..1ac052e5a 100644 --- a/src/lib/subtitle_film_encoder.cc +++ 
b/src/lib/subtitle_film_encoder.cc @@ -183,9 +183,8 @@ SubtitleFilmEncoder::text(PlayerText subs, TextType type, optional<DCPTextTrack> _last = period.from; - auto job = _job.lock (); - if (job) { - job->set_progress (float(period.from.get()) / _length.get()); + if (auto job = _job.lock()) { + job->set_progress(period.from.seconds() / _length.seconds()); } } diff --git a/src/lib/video_mxf_content.cc b/src/lib/video_mxf_content.cc index a26c54473..968d26279 100644 --- a/src/lib/video_mxf_content.cc +++ b/src/lib/video_mxf_content.cc @@ -133,14 +133,14 @@ DCPTime VideoMXFContent::full_length (shared_ptr<const Film> film) const { FrameRateChange const frc (film, shared_from_this()); - return DCPTime::from_frames (llrint(video->length_after_3d_combine() * frc.factor()), film->video_frame_rate()); + return DCPTime(llrint(video->length_after_3d_combine() * frc.factor()), film->video_frame_rate()); } DCPTime VideoMXFContent::approximate_length () const { - return DCPTime::from_frames (video->length_after_3d_combine(), 24); + return DCPTime(video->length_after_3d_combine(), 24); } diff --git a/src/lib/writer.cc b/src/lib/writer.cc index dd5223670..3a6547207 100644 --- a/src/lib/writer.cc +++ b/src/lib/writer.cc @@ -263,7 +263,7 @@ Writer::write (shared_ptr<const AudioBuffers> audio, DCPTime const time) int const afr = film()->audio_frame_rate(); - DCPTime const end = time + DCPTime::from_frames(audio->frames(), afr); + DCPTime const end = time + DCPTime(audio->frames(), afr); /* The audio we get might span a reel boundary, and if so we have to write it in bits */ @@ -771,9 +771,9 @@ Writer::write(PlayerText text, TextType type, optional<DCPTextTrack> track, DCPT auto back_off = [this](DCPTimePeriod period) { auto const vfr = film()->video_frame_rate(); - period.to -= DCPTime::from_frames(2, vfr); + period.to -= DCPTime(2, vfr); if (period.duration().frames_floor(vfr) <= 0) { - period.to = period.from + DCPTime::from_frames(1, vfr); + period.to = period.from + DCPTime(1, 
vfr); } return period; }; @@ -887,7 +887,7 @@ Writer::write(ReferencedReelAsset asset) size_t Writer::video_reel(int frame) const { - auto t = DCPTime::from_frames(frame, film()->video_frame_rate()); + auto t = DCPTime(frame, film()->video_frame_rate()); size_t reel_index = 0; while (reel_index < _reels.size() && !_reels[reel_index].period().contains(t)) { ++reel_index; |
