X-Git-Url: https://git.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Faudio_content.cc;h=bd857b313d418da8fcc674ee25f8abd3ed8f3221;hp=014221f2e892afc95ef5f31d57aa3f4e44bea997;hb=a60668a37f05a125954ac5e03db0530995ac5769;hpb=c4403784febdbdd42e9c32e67fadb147f11fe566

diff --git a/src/lib/audio_content.cc b/src/lib/audio_content.cc
index 014221f2e..bd857b313 100644
--- a/src/lib/audio_content.cc
+++ b/src/lib/audio_content.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -18,46 +18,56 @@
 
 */
 
+
 #include "audio_content.h"
-#include "film.h"
-#include "exceptions.h"
+#include "compose.hpp"
 #include "config.h"
+#include "constants.h"
+#include "exceptions.h"
+#include "film.h"
 #include "frame_rate_change.h"
-#include "compose.hpp"
+#include "maths_util.h"
+#include "video_content.h"
 #include <dcp/raw_convert.h>
 #include <libcxml/cxml.h>
 #include <libxml++/libxml++.h>
-#include <boost/foreach.hpp>
 #include <iostream>
 
 #include "i18n.h"
 
-using std::string;
+
 using std::cout;
-using std::vector;
+using std::dynamic_pointer_cast;
 using std::fixed;
 using std::list;
+using std::make_shared;
 using std::pair;
 using std::setprecision;
-using boost::shared_ptr;
-using boost::dynamic_pointer_cast;
+using std::shared_ptr;
+using std::string;
+using std::vector;
 using boost::optional;
 using dcp::raw_convert;
 using namespace dcpomatic;
 
+
 /** Something stream-related has changed */
 int const AudioContentProperty::STREAMS = 200;
 int const AudioContentProperty::GAIN = 201;
 int const AudioContentProperty::DELAY = 202;
+int const AudioContentProperty::FADE_IN = 203;
+int const AudioContentProperty::FADE_OUT = 204;
+int const AudioContentProperty::USE_SAME_FADES_AS_VIDEO = 205;
+
 
 AudioContent::AudioContent (Content* parent)
     : ContentPart (parent)
-    , _gain (0)
-    , _delay (Config::instance()->default_audio_delay ())
+    , _delay (Config::instance()->default_audio_delay())
 {
 
 }
 
+
 shared_ptr<AudioContent>
 AudioContent::from_xml (Content* parent, cxml::ConstNodePtr node, int version)
 {
@@ -66,36 +76,35 @@ AudioContent::from_xml (Content* parent, cxml::ConstNodePtr node, int version)
        audio streams, so check for that.
     */
     if (node->string_child("Type") == "FFmpeg" && node->node_children("AudioStream").empty()) {
-        return shared_ptr<AudioContent> ();
+        return {};
     }
 
     /* Otherwise we can drop through to the newer logic */
 }
 
 if (!node->optional_number_child<double> ("AudioGain")) {
-    return shared_ptr<AudioContent> ();
+    return {};
 }
 
-    return shared_ptr<AudioContent> (new AudioContent (parent, node));
+    return make_shared<AudioContent>(parent, node);
 }
 
+
 AudioContent::AudioContent (Content* parent, cxml::ConstNodePtr node)
     : ContentPart (parent)
 {
     _gain = node->number_child<double> ("AudioGain");
     _delay = node->number_child<int> ("AudioDelay");
-
-    /* Backwards compatibility */
-    optional<double> r = node->optional_number_child<double> ("AudioVideoFrameRate");
-    if (r) {
-        _parent->set_video_frame_rate (r.get ());
-    }
+    _fade_in = ContentTime(node->optional_number_child<ContentTime::Type>("AudioFadeIn").get_value_or(0));
+    _fade_out = ContentTime(node->optional_number_child<ContentTime::Type>("AudioFadeOut").get_value_or(0));
+    _use_same_fades_as_video = node->optional_bool_child("AudioUseSameFadesAsVideo").get_value_or(false);
 }
 
-AudioContent::AudioContent (Content* parent, vector<shared_ptr<Content> > c)
+
+AudioContent::AudioContent (Content* parent, vector<shared_ptr<Content>> c)
     : ContentPart (parent)
 {
-    shared_ptr<AudioContent> ref = c[0]->audio;
+    auto ref = c[0]->audio;
     DCPOMATIC_ASSERT (ref);
 
     for (size_t i = 1; i < c.size(); ++i) {
@@ -113,48 +122,56 @@ AudioContent::AudioContent (Content* parent, vector<shared_ptr<Content> > c)
     _streams = ref->streams ();
 }
 
+
 void
 AudioContent::as_xml (xmlpp::Node* node) const
 {
     boost::mutex::scoped_lock lm (_mutex);
-    node->add_child("AudioGain")->add_child_text (raw_convert<string> (_gain));
-    node->add_child("AudioDelay")->add_child_text (raw_convert<string> (_delay));
+    node->add_child("AudioGain")->add_child_text(raw_convert<string>(_gain));
+    node->add_child("AudioDelay")->add_child_text(raw_convert<string>(_delay));
+    node->add_child("AudioFadeIn")->add_child_text(raw_convert<string>(_fade_in.get()));
+    node->add_child("AudioFadeOut")->add_child_text(raw_convert<string>(_fade_out.get()));
+    node->add_child("AudioUseSameFadesAsVideo")->add_child_text(_use_same_fades_as_video ? "1" : "0");
"1" : "0"); } + void AudioContent::set_gain (double g) { maybe_set (_gain, g, AudioContentProperty::GAIN); } + void AudioContent::set_delay (int d) { maybe_set (_delay, d, AudioContentProperty::DELAY); } + string AudioContent::technical_summary () const { string s = "audio: "; - BOOST_FOREACH (AudioStreamPtr i, streams ()) { + for (auto i: streams()) { s += String::compose ("stream channels %1 rate %2 ", i->channels(), i->frame_rate()); } return s; } + void AudioContent::set_mapping (AudioMapping mapping) { - ChangeSignaller cc (_parent, AudioContentProperty::STREAMS); + ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS); int c = 0; - BOOST_FOREACH (AudioStreamPtr i, streams ()) { - AudioMapping stream_mapping (i->channels (), MAX_DCP_AUDIO_CHANNELS); + for (auto i: streams()) { + AudioMapping stream_mapping (i->channels(), MAX_DCP_AUDIO_CHANNELS); for (int j = 0; j < i->channels(); ++j) { for (int k = 0; k < MAX_DCP_AUDIO_CHANNELS; ++k) { - stream_mapping.set (j, k, mapping.get (c, k)); + stream_mapping.set (j, k, mapping.get(c, k)); } ++c; } @@ -162,11 +179,12 @@ AudioContent::set_mapping (AudioMapping mapping) } } + AudioMapping AudioContent::mapping () const { int channels = 0; - BOOST_FOREACH (AudioStreamPtr i, streams ()) { + for (auto i: streams()) { channels += i->channels (); } @@ -175,12 +193,12 @@ AudioContent::mapping () const int c = 0; int s = 0; - BOOST_FOREACH (AudioStreamPtr i, streams ()) { - AudioMapping mapping = i->mapping (); + for (auto i: streams()) { + auto mapping = i->mapping (); for (int j = 0; j < mapping.input_channels(); ++j) { for (int k = 0; k < MAX_DCP_AUDIO_CHANNELS; ++k) { if (k < mapping.output_channels()) { - merged.set (c, k, mapping.get (j, k)); + merged.set (c, k, mapping.get(j, k)); } } ++c; @@ -191,6 +209,7 @@ AudioContent::mapping () const return merged; } + /** @return the frame rate that this content should be resampled to in order * that it is in sync with the active video content at its start time. 
  */
@@ -216,7 +235,7 @@ AudioContent::resampled_frame_rate (shared_ptr<const Film> film) const
 
 string
 AudioContent::processing_description (shared_ptr<const Film> film) const
 {
-    if (streams().empty ()) {
+    if (streams().empty()) {
         return "";
     }
 
@@ -232,7 +251,7 @@ AudioContent::processing_description (shared_ptr<const Film> film) const
     bool same = true;
 
     optional<int> common_frame_rate;
-    BOOST_FOREACH (AudioStreamPtr i, streams()) {
+    for (auto i: streams()) {
         if (i->frame_rate() != resampled_frame_rate(film)) {
             resampled = true;
         } else {
@@ -264,34 +283,40 @@ AudioContent::processing_description (shared_ptr<const Film> film) const
     return "";
 }
 
+
 /** @return User-visible names of each of our audio channels */
-vector<string>
+vector<NamedChannel>
 AudioContent::channel_names () const
 {
-    vector<string> n;
+    vector<NamedChannel> n;
 
-    int t = 1;
-    BOOST_FOREACH (AudioStreamPtr i, streams ()) {
+    int index = 0;
+    int stream = 1;
+    for (auto i: streams()) {
         for (int j = 0; j < i->channels(); ++j) {
-            n.push_back (String::compose ("%1:%2", t, j + 1));
+            n.push_back (NamedChannel(String::compose ("%1:%2", stream, j + 1), index++));
         }
-        ++t;
+        ++stream;
     }
 
     return n;
 }
 
+
 void
 AudioContent::add_properties (shared_ptr<const Film> film, list<UserProperty>& p) const
 {
     shared_ptr<AudioStream> stream;
     if (streams().size() == 1) {
-        stream = streams().front ();
+        stream = streams().front();
     }
 
     if (stream) {
-        p.push_back (UserProperty (UserProperty::AUDIO, _("Channels"), stream->channels ()));
-        p.push_back (UserProperty (UserProperty::AUDIO, _("Content audio sample rate"), stream->frame_rate(), _("Hz")));
+        p.push_back (UserProperty(UserProperty::AUDIO, _("Channels"), stream->channels()));
+        p.push_back (UserProperty(UserProperty::AUDIO, _("Content sample rate"), stream->frame_rate(), _("Hz")));
+        if (auto bits = stream->bit_depth()) {
+            p.push_back(UserProperty(UserProperty::AUDIO, _("Content bit depth"), *bits, _("bits")));
+        }
     }
 
     FrameRateChange const frc (_parent->active_video_frame_rate(film), film->video_frame_rate());
@@ -311,8 +336,8 @@ AudioContent::add_properties (shared_ptr<const Film> film, list<UserProperty>& p
         );
     }
 
-    p.push_back (UserProperty (UserProperty::AUDIO, _("DCP sample rate"), resampled_frame_rate(film), _("Hz")));
-    p.push_back (UserProperty (UserProperty::LENGTH, _("Full length in video frames at DCP rate"), c.frames_round (frc.dcp)));
+    p.push_back (UserProperty(UserProperty::AUDIO, _("DCP sample rate"), resampled_frame_rate(film), _("Hz")));
+    p.push_back (UserProperty(UserProperty::LENGTH, _("Full length in video frames at DCP rate"), c.frames_round (frc.dcp)));
 
     if (stream) {
         p.push_back (
@@ -325,10 +350,11 @@ AudioContent::add_properties (shared_ptr<const Film> film, list<UserProperty>& p
     }
 }
 
+
 void
 AudioContent::set_streams (vector<AudioStreamPtr> streams)
 {
-    ChangeSignaller<Content> cc (_parent, AudioContentProperty::STREAMS);
+    ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
 
     {
         boost::mutex::scoped_lock lm (_mutex);
@@ -336,6 +362,7 @@ AudioContent::set_streams (vector<AudioStreamPtr> streams)
     }
 }
 
+
 AudioStreamPtr
 AudioContent::stream () const
 {
@@ -344,10 +371,11 @@ AudioContent::stream () const
     return _streams.front ();
 }
 
+
 void
 AudioContent::add_stream (AudioStreamPtr stream)
 {
-    ChangeSignaller<Content> cc (_parent, AudioContentProperty::STREAMS);
+    ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
 
     {
         boost::mutex::scoped_lock lm (_mutex);
@@ -355,10 +383,11 @@ AudioContent::add_stream (AudioStreamPtr stream)
     }
 }
 
+
 void
 AudioContent::set_stream (AudioStreamPtr stream)
 {
-    ChangeSignaller<Content> cc (_parent, AudioContentProperty::STREAMS);
+    ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
 
     {
         boost::mutex::scoped_lock lm (_mutex);
@@ -367,32 +396,123 @@ AudioContent::set_stream (AudioStreamPtr stream)
     }
 }
 
+
 void
 AudioContent::take_settings_from (shared_ptr<const AudioContent> c)
 {
     set_gain (c->_gain);
     set_delay (c->_delay);
+    set_fade_in (c->fade_in());
+    set_fade_out (c->fade_out());
 
-    size_t i = 0;
-    size_t j = 0;
+    auto const streams_to_take = std::min(_streams.size(), c->_streams.size());
 
-    while (i < _streams.size() && j < c->_streams.size()) {
-        _streams[i]->set_mapping (c->_streams[j]->mapping());
-        ++i;
-        ++j;
+    for (auto i = 0U; i < streams_to_take; ++i) {
+        auto mapping = _streams[i]->mapping();
+        mapping.take_from(c->_streams[i]->mapping());
+        _streams[i]->set_mapping(mapping);
     }
 }
 
+
 void
 AudioContent::modify_position (shared_ptr<const Film> film, DCPTime& pos) const
 {
     pos = pos.round (film->audio_frame_rate());
 }
 
+
 void
-AudioContent::modify_trim_start (ContentTime& trim) const
+AudioContent::modify_trim_start(shared_ptr<const Film> film, ContentTime& trim) const
 {
-    DCPOMATIC_ASSERT (!_streams.empty());
-    /* XXX: we're in trouble if streams have different rates */
-    trim = trim.round (_streams.front()->frame_rate());
+    /* When this trim is used the audio will have been resampled, and using the
+     * DCP rate here reduces the chance of rounding errors causing audio glitches
+     * due to errors in placement of audio frames (#2373).
+     */
+    trim = trim.round(film ? film->audio_frame_rate() : 48000);
 }
+
+
+ContentTime
+AudioContent::fade_in () const
+{
+    boost::mutex::scoped_lock lm (_mutex);
+    if (_use_same_fades_as_video && _parent->video) {
+        return dcpomatic::ContentTime::from_frames(_parent->video->fade_in(), _parent->video_frame_rate().get_value_or(24));
+    }
+
+    return _fade_in;
+}
+
+
+ContentTime
+AudioContent::fade_out () const
+{
+    boost::mutex::scoped_lock lm (_mutex);
+    if (_use_same_fades_as_video && _parent->video) {
+        return dcpomatic::ContentTime::from_frames(_parent->video->fade_out(), _parent->video_frame_rate().get_value_or(24));
+    }
+
+    return _fade_out;
+}
+
+
+void
+AudioContent::set_fade_in (ContentTime t)
+{
+    maybe_set (_fade_in, t, AudioContentProperty::FADE_IN);
+}
+
+
+void
+AudioContent::set_fade_out (ContentTime t)
+{
+    maybe_set (_fade_out, t, AudioContentProperty::FADE_OUT);
+}
+
+
+void
+AudioContent::set_use_same_fades_as_video (bool s)
+{
+    maybe_set (_use_same_fades_as_video, s, AudioContentProperty::USE_SAME_FADES_AS_VIDEO);
+}
+
+
+vector<float>
+AudioContent::fade (AudioStreamPtr stream, Frame frame, Frame length, int frame_rate) const
+{
+    auto const in = fade_in().frames_round(frame_rate);
+    auto const out = fade_out().frames_round(frame_rate);
+
+    /* Where the start trim ends, at frame_rate */
+    auto const trim_start = _parent->trim_start().frames_round(frame_rate);
+    /* Where the end trim starts within the whole length of the content, at frame_rate */
+    auto const trim_end = ContentTime(ContentTime::from_frames(stream->length(), stream->frame_rate()) - _parent->trim_end()).frames_round(frame_rate);
+
+    if (
+        (in == 0 || (frame >= (trim_start + in))) &&
+        (out == 0 || ((frame + length) < (trim_end - out)))
+       ) {
+        /* This section starts after the fade in and ends before the fade out */
+        return {};
+    }
+
+    /* Start position relative to the start of the fade in */
+    auto in_start = frame - trim_start;
+    /* Start position relative to the start of the fade out */
+    auto out_start = frame - (trim_end - out);
+
+    vector<float> coeffs(length);
+    for (auto coeff = 0; coeff < length; ++coeff) {
+        coeffs[coeff] = 1.0;
+        if (in) {
+            coeffs[coeff] *= logarithmic_fade_in_curve(static_cast<float>(in_start + coeff) / in);
+        }
+        if (out) {
+            coeffs[coeff] *= logarithmic_fade_out_curve(static_cast<float>(out_start + coeff) / out);
+        }
+    }
+
+    return coeffs;
+}
+
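
Note (not part of the commit above): AudioContent::fade() returns one gain coefficient per audio frame for the section [frame, frame + length), or an empty vector when that section lies entirely between the fade-in and fade-out regions. Below is a minimal sketch of how a caller might apply those coefficients to a block of planar float audio. The helper name apply_fade_sketch and the buffer layout are illustrative assumptions, not DCP-o-matic's actual API; in DCP-o-matic the coefficients are applied elsewhere in the audio pipeline.

// Illustrative sketch only: multiply each frame of a planar float buffer by the
// matching coefficient from AudioContent::fade().  Assumes coeffs.size() == frames
// when the vector is non-empty; an empty vector means "no fade covers this section".
#include <vector>

static void
apply_fade_sketch (float* const* data, int channels, int frames, std::vector<float> const& coeffs)
{
    if (coeffs.empty()) {
        /* No fade over this section, so leave the audio untouched */
        return;
    }

    for (int i = 0; i < frames; ++i) {
        for (int c = 0; c < channels; ++c) {
            data[c][i] *= coeffs[i];
        }
    }
}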