summaryrefslogtreecommitdiff
path: root/src/lib
diff options
context:
space:
mode:
authorCarl Hetherington <cth@carlh.net>2026-01-02 00:29:20 +0100
committerCarl Hetherington <cth@carlh.net>2026-01-02 00:29:20 +0100
commitdd5ad39045f155d44f1fefb58670d5a3695e818c (patch)
tree3422fbdb2369ef4e1bff946a245b08bf92f50cd5 /src/lib
parenta8025ae69b2fc6c6f191eefc119582fbdae0b50a (diff)
White space: audio_content.{cc,h}
Diffstat (limited to 'src/lib')
-rw-r--r--src/lib/audio_content.cc180
-rw-r--r--src/lib/audio_content.h60
2 files changed, 120 insertions, 120 deletions
diff --git a/src/lib/audio_content.cc b/src/lib/audio_content.cc
index 142dc0855..1d54841aa 100644
--- a/src/lib/audio_content.cc
+++ b/src/lib/audio_content.cc
@@ -49,16 +49,16 @@ using boost::optional;
using namespace dcpomatic;
-AudioContent::AudioContent (Content* parent)
- : ContentPart (parent)
- , _delay (Config::instance()->default_audio_delay())
+AudioContent::AudioContent(Content* parent)
+ : ContentPart(parent)
+ , _delay(Config::instance()->default_audio_delay())
{
}
shared_ptr<AudioContent>
-AudioContent::from_xml (Content* parent, cxml::ConstNodePtr node, int version)
+AudioContent::from_xml(Content* parent, cxml::ConstNodePtr node, int version)
{
if (version < 34) {
/* With old metadata FFmpeg content has the audio-related tags even with no
@@ -71,7 +71,7 @@ AudioContent::from_xml (Content* parent, cxml::ConstNodePtr node, int version)
/* Otherwise we can drop through to the newer logic */
}
- if (!node->optional_number_child<double> ("AudioGain")) {
+ if (!node->optional_number_child<double>("AudioGain")) {
return {};
}
@@ -79,43 +79,43 @@ AudioContent::from_xml (Content* parent, cxml::ConstNodePtr node, int version)
}
-AudioContent::AudioContent (Content* parent, cxml::ConstNodePtr node)
- : ContentPart (parent)
+AudioContent::AudioContent(Content* parent, cxml::ConstNodePtr node)
+ : ContentPart(parent)
{
- _gain = node->number_child<double> ("AudioGain");
- _delay = node->number_child<int> ("AudioDelay");
+ _gain = node->number_child<double>("AudioGain");
+ _delay = node->number_child<int>("AudioDelay");
_fade_in = ContentTime(node->optional_number_child<ContentTime::Type>("AudioFadeIn").get_value_or(0));
_fade_out = ContentTime(node->optional_number_child<ContentTime::Type>("AudioFadeOut").get_value_or(0));
_use_same_fades_as_video = node->optional_bool_child("AudioUseSameFadesAsVideo").get_value_or(false);
}
-AudioContent::AudioContent (Content* parent, vector<shared_ptr<Content>> c)
- : ContentPart (parent)
+AudioContent::AudioContent(Content* parent, vector<shared_ptr<Content>> c)
+ : ContentPart(parent)
{
auto ref = c[0]->audio;
- DCPOMATIC_ASSERT (ref);
+ DCPOMATIC_ASSERT(ref);
for (size_t i = 1; i < c.size(); ++i) {
if (c[i]->audio->gain() != ref->gain()) {
- throw JoinError (_("Content to be joined must have the same audio gain."));
+ throw JoinError(_("Content to be joined must have the same audio gain."));
}
if (c[i]->audio->delay() != ref->delay()) {
- throw JoinError (_("Content to be joined must have the same audio delay."));
+ throw JoinError(_("Content to be joined must have the same audio delay."));
}
}
- _gain = ref->gain ();
- _delay = ref->delay ();
- _streams = ref->streams ();
+ _gain = ref->gain();
+ _delay = ref->delay();
+ _streams = ref->streams();
}
void
AudioContent::as_xml(xmlpp::Element* element) const
{
- boost::mutex::scoped_lock lm (_mutex);
+ boost::mutex::scoped_lock lm(_mutex);
cxml::add_text_child(element, "AudioGain", fmt::to_string(_gain));
cxml::add_text_child(element, "AudioDelay", fmt::to_string(_delay));
cxml::add_text_child(element, "AudioFadeIn", fmt::to_string(_fade_in.get()));
@@ -125,21 +125,21 @@ AudioContent::as_xml(xmlpp::Element* element) const
void
-AudioContent::set_gain (double g)
+AudioContent::set_gain(double g)
{
- maybe_set (_gain, g, AudioContentProperty::GAIN);
+ maybe_set(_gain, g, AudioContentProperty::GAIN);
}
void
-AudioContent::set_delay (int d)
+AudioContent::set_delay(int d)
{
- maybe_set (_delay, d, AudioContentProperty::DELAY);
+ maybe_set(_delay, d, AudioContentProperty::DELAY);
}
string
-AudioContent::technical_summary () const
+AudioContent::technical_summary() const
{
string s = "audio: ";
for (auto i: streams()) {
@@ -151,42 +151,42 @@ AudioContent::technical_summary () const
void
-AudioContent::set_mapping (AudioMapping mapping)
+AudioContent::set_mapping(AudioMapping mapping)
{
- ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
+ ContentChangeSignaller cc(_parent, AudioContentProperty::STREAMS);
int c = 0;
for (auto i: streams()) {
- AudioMapping stream_mapping (i->channels(), MAX_DCP_AUDIO_CHANNELS);
+ AudioMapping stream_mapping(i->channels(), MAX_DCP_AUDIO_CHANNELS);
for (int j = 0; j < i->channels(); ++j) {
for (int k = 0; k < MAX_DCP_AUDIO_CHANNELS; ++k) {
- stream_mapping.set (j, k, mapping.get(c, k));
+ stream_mapping.set(j, k, mapping.get(c, k));
}
++c;
}
- i->set_mapping (stream_mapping);
+ i->set_mapping(stream_mapping);
}
}
AudioMapping
-AudioContent::mapping () const
+AudioContent::mapping() const
{
int channels = 0;
for (auto i: streams()) {
- channels += i->channels ();
+ channels += i->channels();
}
- AudioMapping merged (channels, MAX_DCP_AUDIO_CHANNELS);
- merged.make_zero ();
+ AudioMapping merged(channels, MAX_DCP_AUDIO_CHANNELS);
+ merged.make_zero();
int c = 0;
for (auto i: streams()) {
- auto mapping = i->mapping ();
+ auto mapping = i->mapping();
for (int j = 0; j < mapping.input_channels(); ++j) {
for (int k = 0; k < MAX_DCP_AUDIO_CHANNELS; ++k) {
if (k < mapping.output_channels()) {
- merged.set (c, k, mapping.get(j, k));
+ merged.set(c, k, mapping.get(j, k));
}
}
++c;
@@ -201,11 +201,11 @@ AudioContent::mapping () const
* that it is in sync with the active video content at its start time.
*/
int
-AudioContent::resampled_frame_rate (shared_ptr<const Film> film) const
+AudioContent::resampled_frame_rate(shared_ptr<const Film> film) const
{
- double t = film->audio_frame_rate ();
+ double t = film->audio_frame_rate();
- FrameRateChange frc (film, _parent);
+ FrameRateChange frc(film, _parent);
/* Compensate if the DCP is being run at a different frame rate
to the source; that is, if the video is run such that it will
@@ -216,11 +216,11 @@ AudioContent::resampled_frame_rate (shared_ptr<const Film> film) const
t /= frc.speed_up;
}
- return lrint (t);
+ return lrint(t);
}
string
-AudioContent::processing_description (shared_ptr<const Film> film) const
+AudioContent::processing_description(shared_ptr<const Film> film) const
{
if (streams().empty()) {
return "";
@@ -245,10 +245,10 @@ AudioContent::processing_description (shared_ptr<const Film> film) const
not_resampled = true;
}
- if (common_frame_rate && common_frame_rate != i->frame_rate ()) {
+ if (common_frame_rate && common_frame_rate != i->frame_rate()) {
same = false;
}
- common_frame_rate = i->frame_rate ();
+ common_frame_rate = i->frame_rate();
}
if (not_resampled && !resampled) {
@@ -273,7 +273,7 @@ AudioContent::processing_description (shared_ptr<const Film> film) const
/** @return User-visible names of each of our audio channels */
vector<NamedChannel>
-AudioContent::channel_names () const
+AudioContent::channel_names() const
{
vector<NamedChannel> n;
@@ -281,7 +281,7 @@ AudioContent::channel_names () const
int stream = 1;
for (auto i: streams()) {
for (int j = 0; j < i->channels(); ++j) {
- n.push_back (NamedChannel(fmt::format("{}:{}", stream, j + 1), index++));
+ n.push_back(NamedChannel(fmt::format("{}:{}", stream, j + 1), index++));
}
++stream;
}
@@ -291,7 +291,7 @@ AudioContent::channel_names () const
void
-AudioContent::add_properties (shared_ptr<const Film> film, list<UserProperty>& p) const
+AudioContent::add_properties(shared_ptr<const Film> film, list<UserProperty>& p) const
{
shared_ptr<const AudioStream> stream;
if (streams().size() == 1) {
@@ -299,36 +299,36 @@ AudioContent::add_properties (shared_ptr<const Film> film, list<UserProperty>& p
}
if (stream) {
- p.push_back (UserProperty(UserProperty::AUDIO, _("Channels"), stream->channels()));
- p.push_back (UserProperty(UserProperty::AUDIO, _("Content sample rate"), stream->frame_rate(), _("Hz")));
+ p.push_back(UserProperty(UserProperty::AUDIO, _("Channels"), stream->channels()));
+ p.push_back(UserProperty(UserProperty::AUDIO, _("Content sample rate"), stream->frame_rate(), _("Hz")));
if (auto bits = stream->bit_depth()) {
p.push_back(UserProperty(UserProperty::AUDIO, _("Content bit depth"), *bits, _("bits")));
}
}
- FrameRateChange const frc (_parent->active_video_frame_rate(film), film->video_frame_rate());
- ContentTime const c (_parent->full_length(film), frc);
+ FrameRateChange const frc(_parent->active_video_frame_rate(film), film->video_frame_rate());
+ ContentTime const c(_parent->full_length(film), frc);
- p.push_back (
- UserProperty (UserProperty::LENGTH, _("Full length in video frames at content rate"), c.frames_round(frc.source))
+ p.push_back(
+ UserProperty(UserProperty::LENGTH, _("Full length in video frames at content rate"), c.frames_round(frc.source))
);
if (stream) {
- p.push_back (
- UserProperty (
+ p.push_back(
+ UserProperty(
UserProperty::LENGTH,
_("Full length in audio samples at content rate"),
- c.frames_round (stream->frame_rate ())
+ c.frames_round(stream->frame_rate())
)
);
}
- p.push_back (UserProperty(UserProperty::AUDIO, _("DCP sample rate"), resampled_frame_rate(film), _("Hz")));
- p.push_back (UserProperty(UserProperty::LENGTH, _("Full length in video frames at DCP rate"), c.frames_round (frc.dcp)));
+ p.push_back(UserProperty(UserProperty::AUDIO, _("DCP sample rate"), resampled_frame_rate(film), _("Hz")));
+ p.push_back(UserProperty(UserProperty::LENGTH, _("Full length in video frames at DCP rate"), c.frames_round(frc.dcp)));
- if (stream) {
- p.push_back (
- UserProperty (
+ if (stream) {
+ p.push_back(
+ UserProperty(
UserProperty::LENGTH,
_("Full length in audio samples at DCP rate"),
c.frames_round(resampled_frame_rate(film))
@@ -339,46 +339,46 @@ AudioContent::add_properties (shared_ptr<const Film> film, list<UserProperty>& p
AudioStreamPtr
-AudioContent::stream () const
+AudioContent::stream() const
{
- boost::mutex::scoped_lock lm (_mutex);
- DCPOMATIC_ASSERT (_streams.size() == 1);
- return _streams.front ();
+ boost::mutex::scoped_lock lm(_mutex);
+ DCPOMATIC_ASSERT(_streams.size() == 1);
+ return _streams.front();
}
void
-AudioContent::add_stream (AudioStreamPtr stream)
+AudioContent::add_stream(AudioStreamPtr stream)
{
- ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
+ ContentChangeSignaller cc(_parent, AudioContentProperty::STREAMS);
{
- boost::mutex::scoped_lock lm (_mutex);
- _streams.push_back (stream);
+ boost::mutex::scoped_lock lm(_mutex);
+ _streams.push_back(stream);
}
}
void
-AudioContent::set_stream (AudioStreamPtr stream)
+AudioContent::set_stream(AudioStreamPtr stream)
{
- ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
+ ContentChangeSignaller cc(_parent, AudioContentProperty::STREAMS);
{
- boost::mutex::scoped_lock lm (_mutex);
- _streams.clear ();
- _streams.push_back (stream);
+ boost::mutex::scoped_lock lm(_mutex);
+ _streams.clear();
+ _streams.push_back(stream);
}
}
void
-AudioContent::take_settings_from (shared_ptr<const AudioContent> c)
+AudioContent::take_settings_from(shared_ptr<const AudioContent> c)
{
- set_gain (c->_gain);
- set_delay (c->_delay);
- set_fade_in (c->fade_in());
- set_fade_out (c->fade_out());
+ set_gain(c->_gain);
+ set_delay(c->_delay);
+ set_fade_in(c->fade_in());
+ set_fade_out(c->fade_out());
auto const streams_to_take = std::min(_streams.size(), c->_streams.size());
@@ -391,9 +391,9 @@ AudioContent::take_settings_from (shared_ptr<const AudioContent> c)
void
-AudioContent::modify_position (shared_ptr<const Film> film, DCPTime& pos) const
+AudioContent::modify_position(shared_ptr<const Film> film, DCPTime& pos) const
{
- pos = pos.round (film->audio_frame_rate());
+ pos = pos.round(film->audio_frame_rate());
}
@@ -409,9 +409,9 @@ AudioContent::modify_trim_start(shared_ptr<const Film> film, ContentTime& trim)
ContentTime
-AudioContent::fade_in () const
+AudioContent::fade_in() const
{
- boost::mutex::scoped_lock lm (_mutex);
+ boost::mutex::scoped_lock lm(_mutex);
if (_use_same_fades_as_video && _parent->video) {
return dcpomatic::ContentTime::from_frames(_parent->video->fade_in(), _parent->video_frame_rate().get_value_or(24));
}
@@ -421,9 +421,9 @@ AudioContent::fade_in () const
ContentTime
-AudioContent::fade_out () const
+AudioContent::fade_out() const
{
- boost::mutex::scoped_lock lm (_mutex);
+ boost::mutex::scoped_lock lm(_mutex);
if (_use_same_fades_as_video && _parent->video) {
return dcpomatic::ContentTime::from_frames(_parent->video->fade_out(), _parent->video_frame_rate().get_value_or(24));
}
@@ -435,34 +435,34 @@ AudioContent::fade_out () const
bool
AudioContent::use_same_fades_as_video() const
{
- boost::mutex::scoped_lock lm (_mutex);
+ boost::mutex::scoped_lock lm(_mutex);
return _use_same_fades_as_video;
}
void
-AudioContent::set_fade_in (ContentTime t)
+AudioContent::set_fade_in(ContentTime t)
{
- maybe_set (_fade_in, t, AudioContentProperty::FADE_IN);
+ maybe_set(_fade_in, t, AudioContentProperty::FADE_IN);
}
void
-AudioContent::set_fade_out (ContentTime t)
+AudioContent::set_fade_out(ContentTime t)
{
- maybe_set (_fade_out, t, AudioContentProperty::FADE_OUT);
+ maybe_set(_fade_out, t, AudioContentProperty::FADE_OUT);
}
void
-AudioContent::set_use_same_fades_as_video (bool s)
+AudioContent::set_use_same_fades_as_video(bool s)
{
- maybe_set (_use_same_fades_as_video, s, AudioContentProperty::USE_SAME_FADES_AS_VIDEO);
+ maybe_set(_use_same_fades_as_video, s, AudioContentProperty::USE_SAME_FADES_AS_VIDEO);
}
vector<float>
-AudioContent::fade (AudioStreamPtr stream, Frame frame, Frame length, int frame_rate) const
+AudioContent::fade(AudioStreamPtr stream, Frame frame, Frame length, int frame_rate) const
{
auto const in = fade_in().frames_round(frame_rate);
auto const out = fade_out().frames_round(frame_rate);
diff --git a/src/lib/audio_content.h b/src/lib/audio_content.h
index 2bcf194de..1b96b1af0 100644
--- a/src/lib/audio_content.h
+++ b/src/lib/audio_content.h
@@ -53,56 +53,56 @@ public:
class AudioContent : public ContentPart
{
public:
- explicit AudioContent (Content* parent);
- AudioContent (Content* parent, std::vector<std::shared_ptr<Content>>);
- AudioContent (Content* parent, cxml::ConstNodePtr);
+ explicit AudioContent(Content* parent);
+ AudioContent(Content* parent, std::vector<std::shared_ptr<Content>>);
+ AudioContent(Content* parent, cxml::ConstNodePtr);
void as_xml(xmlpp::Element*) const;
- std::string technical_summary () const;
- void take_settings_from (std::shared_ptr<const AudioContent> c);
+ std::string technical_summary() const;
+ void take_settings_from(std::shared_ptr<const AudioContent> c);
- AudioMapping mapping () const;
- void set_mapping (AudioMapping);
- int resampled_frame_rate (std::shared_ptr<const Film> film) const;
- std::vector<NamedChannel> channel_names () const;
+ AudioMapping mapping() const;
+ void set_mapping(AudioMapping);
+ int resampled_frame_rate(std::shared_ptr<const Film> film) const;
+ std::vector<NamedChannel> channel_names() const;
/** Set gain in dB */
- void set_gain (double);
+ void set_gain(double);
/** Set delay in milliseconds (positive moves audio later) */
- void set_delay (int);
+ void set_delay(int);
- double gain () const {
- boost::mutex::scoped_lock lm (_mutex);
+ double gain() const {
+ boost::mutex::scoped_lock lm(_mutex);
return _gain;
}
- int delay () const {
- boost::mutex::scoped_lock lm (_mutex);
+ int delay() const {
+ boost::mutex::scoped_lock lm(_mutex);
return _delay;
}
- dcpomatic::ContentTime fade_in () const;
- dcpomatic::ContentTime fade_out () const;
+ dcpomatic::ContentTime fade_in() const;
+ dcpomatic::ContentTime fade_out() const;
bool use_same_fades_as_video() const;
- void set_fade_in (dcpomatic::ContentTime time);
- void set_fade_out (dcpomatic::ContentTime time);
- void set_use_same_fades_as_video (bool s);
+ void set_fade_in(dcpomatic::ContentTime time);
+ void set_fade_out(dcpomatic::ContentTime time);
+ void set_use_same_fades_as_video(bool s);
- std::string processing_description (std::shared_ptr<const Film> film) const;
+ std::string processing_description(std::shared_ptr<const Film> film) const;
- std::vector<AudioStreamPtr> streams () const {
- boost::mutex::scoped_lock lm (_mutex);
+ std::vector<AudioStreamPtr> streams() const {
+ boost::mutex::scoped_lock lm(_mutex);
return _streams;
}
- void add_stream (AudioStreamPtr stream);
- void set_stream (AudioStreamPtr stream);
- AudioStreamPtr stream () const;
+ void add_stream(AudioStreamPtr stream);
+ void set_stream(AudioStreamPtr stream);
+ AudioStreamPtr stream() const;
- void add_properties (std::shared_ptr<const Film> film, std::list<UserProperty> &) const;
+ void add_properties(std::shared_ptr<const Film> film, std::list<UserProperty> &) const;
- void modify_position (std::shared_ptr<const Film> film, dcpomatic::DCPTime& pos) const;
+ void modify_position(std::shared_ptr<const Film> film, dcpomatic::DCPTime& pos) const;
void modify_trim_start(std::shared_ptr<const Film> film, dcpomatic::ContentTime& pos) const;
/** @param frame frame within the whole (untrimmed) content.
@@ -110,9 +110,9 @@ public:
* @return a fade coefficient for @ref length samples starting at an offset @frame within
* the content, or an empty vector if the given section has no fade.
*/
- std::vector<float> fade (AudioStreamPtr stream, Frame frame, Frame length, int frame_rate) const;
+ std::vector<float> fade(AudioStreamPtr stream, Frame frame, Frame length, int frame_rate) const;
- static std::shared_ptr<AudioContent> from_xml (Content* parent, cxml::ConstNodePtr, int version);
+ static std::shared_ptr<AudioContent> from_xml(Content* parent, cxml::ConstNodePtr, int version);
private: