+ return lrint (t);
+}
+
+string
+AudioContent::processing_description (shared_ptr<const Film> film) const
+{
+ if (streams().empty()) {
+ return "";
+ }
+
+ /* Possible answers are:
+ 1. all audio will be resampled from x to y.
+ 2. all audio will be resampled to y (from a variety of rates)
+ 3. some audio will be resampled to y (from a variety of rates)
+ 4. nothing will be resampled.
+ */
+
+ bool not_resampled = false;
+ bool resampled = false;
+ bool same = true;
+
+ optional<int> common_frame_rate;
+ for (auto i: streams()) {
+ if (i->frame_rate() != resampled_frame_rate(film)) {
+ resampled = true;
+ } else {
+ not_resampled = true;
+ }
+
+ if (common_frame_rate && common_frame_rate != i->frame_rate ()) {
+ same = false;
+ }
+ common_frame_rate = i->frame_rate ();
+ }
+
+ if (not_resampled && !resampled) {
+ return _("Audio will not be resampled");
+ }
+
+ if (not_resampled && resampled) {
+ return String::compose (_("Some audio will be resampled to %1Hz"), resampled_frame_rate(film));
+ }
+
+ if (!not_resampled && resampled) {
+ if (same) {
+ return String::compose (_("Audio will be resampled from %1Hz to %2Hz"), common_frame_rate.get(), resampled_frame_rate(film));
+ } else {
+ return String::compose (_("Audio will be resampled to %1Hz"), resampled_frame_rate(film));
+ }
+ }
+
+ return "";
+}
+
+
+/** @return User-visible names of each of our audio channels */
+vector<NamedChannel>
+AudioContent::channel_names () const
+{
+ vector<NamedChannel> n;
+
+ int index = 0;
+ int stream = 1;
+ for (auto i: streams()) {
+ for (int j = 0; j < i->channels(); ++j) {
+ n.push_back (NamedChannel(String::compose ("%1:%2", stream, j + 1), index++));
+ }
+ ++stream;
+ }
+
+ return n;
+}
+
+
+void
+AudioContent::add_properties (shared_ptr<const Film> film, list<UserProperty>& p) const
+{
+ shared_ptr<const AudioStream> stream;
+ if (streams().size() == 1) {
+ stream = streams().front();
+ }
+
+ if (stream) {
+ p.push_back (UserProperty(UserProperty::AUDIO, _("Channels"), stream->channels()));
+ p.push_back (UserProperty(UserProperty::AUDIO, _("Content sample rate"), stream->frame_rate(), _("Hz")));
+ if (auto bits = stream->bit_depth()) {
+ p.push_back(UserProperty(UserProperty::AUDIO, _("Content bit depth"), *bits, _("bits")));
+ }
+ }
+
+ FrameRateChange const frc (_parent->active_video_frame_rate(film), film->video_frame_rate());
+ ContentTime const c (_parent->full_length(film), frc);
+
+ p.push_back (
+ UserProperty (UserProperty::LENGTH, _("Full length in video frames at content rate"), c.frames_round(frc.source))
+ );
+
+ if (stream) {
+ p.push_back (
+ UserProperty (
+ UserProperty::LENGTH,
+ _("Full length in audio samples at content rate"),
+ c.frames_round (stream->frame_rate ())
+ )
+ );
+ }
+
+ p.push_back (UserProperty(UserProperty::AUDIO, _("DCP sample rate"), resampled_frame_rate(film), _("Hz")));
+ p.push_back (UserProperty(UserProperty::LENGTH, _("Full length in video frames at DCP rate"), c.frames_round (frc.dcp)));
+
+ if (stream) {
+ p.push_back (
+ UserProperty (
+ UserProperty::LENGTH,
+ _("Full length in audio samples at DCP rate"),
+ c.frames_round(resampled_frame_rate(film))
+ )
+ );
+ }
+}
+
+
+void
void
AudioContent::set_streams (vector<AudioStreamPtr> streams)
+{
+ ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
+
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+ _streams = streams;
+ }
+}
+
+
+AudioStreamPtr
+AudioContent::stream () const
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ DCPOMATIC_ASSERT (_streams.size() == 1);
+ return _streams.front ();
+}
+
+
+void
+AudioContent::add_stream (AudioStreamPtr stream)
+{
+ ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
+
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+ _streams.push_back (stream);
+ }
+}
+
+
+void
+AudioContent::set_stream (AudioStreamPtr stream)
+{
+ ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
+
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+ _streams.clear ();
+ _streams.push_back (stream);
+ }
+}
+
+
+void
+AudioContent::take_settings_from (shared_ptr<const AudioContent> c)
+{
+ set_gain (c->_gain);
+ set_delay (c->_delay);
+ set_fade_in (c->fade_in());
+ set_fade_out (c->fade_out());
+
+ auto const streams_to_take = std::min(_streams.size(), c->_streams.size());
+
+ for (auto i = 0U; i < streams_to_take; ++i) {
+ auto mapping = _streams[i]->mapping();
+ mapping.take_from(c->_streams[i]->mapping());
+ _streams[i]->set_mapping(mapping);
+ }
+}
+
+
/** Round a proposed content position to a whole number of audio frames
 *  at the film's audio sample rate, so that the content starts exactly
 *  on an audio frame boundary.
 */
void
AudioContent::modify_position (shared_ptr<const Film> film, DCPTime& pos) const
{
 pos = pos.round (film->audio_frame_rate());
}
+
+
/** Round a proposed trim-start to a whole number of audio frames.
 *  @param film Film, or null; if null we assume a 48kHz rate.
 */
void
AudioContent::modify_trim_start(shared_ptr<const Film> film, ContentTime& trim) const
{
 /* When this trim is used the audio will have been resampled, and using the
  * DCP rate here reduces the chance of rounding errors causing audio glitches
  * due to errors in placement of audio frames (#2373).
  */
 trim = trim.round(film ? film->audio_frame_rate() : 48000);
}
+
+
+ContentTime
+AudioContent::fade_in () const
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ if (_use_same_fades_as_video && _parent->video) {
+ return dcpomatic::ContentTime::from_frames(_parent->video->fade_in(), _parent->video_frame_rate().get_value_or(24));
+ }
+
+ return _fade_in;
+}
+
+
+ContentTime
+AudioContent::fade_out () const
+{
+ boost::mutex::scoped_lock lm (_mutex);
+ if (_use_same_fades_as_video && _parent->video) {
+ return dcpomatic::ContentTime::from_frames(_parent->video->fade_out(), _parent->video_frame_rate().get_value_or(24));
+ }
+
+ return _fade_out;
+}
+
+
/** Set the audio fade-in time, emitting FADE_IN if it changed */
void
AudioContent::set_fade_in (ContentTime t)
{
 maybe_set (_fade_in, t, AudioContentProperty::FADE_IN);
}
+
+
/** Set the audio fade-out time, emitting FADE_OUT if it changed */
void
AudioContent::set_fade_out (ContentTime t)
{
 maybe_set (_fade_out, t, AudioContentProperty::FADE_OUT);
}
+
+
/** Set whether the audio fades should mirror the video fades,
 *  emitting USE_SAME_FADES_AS_VIDEO if it changed.
 */
void
AudioContent::set_use_same_fades_as_video (bool s)
{
 maybe_set (_use_same_fades_as_video, s, AudioContentProperty::USE_SAME_FADES_AS_VIDEO);
}
+
+
+vector<float>
+AudioContent::fade (AudioStreamPtr stream, Frame frame, Frame length, int frame_rate) const
+{
+ auto const in = fade_in().frames_round(frame_rate);
+ auto const out = fade_out().frames_round(frame_rate);
+
+ /* Where the start trim ends, at frame_rate */
+ auto const trim_start = _parent->trim_start().frames_round(frame_rate);
+ /* Where the end trim starts within the whole length of the content, at frame_rate */
+ auto const trim_end = ContentTime(ContentTime::from_frames(stream->length(), stream->frame_rate()) - _parent->trim_end()).frames_round(frame_rate);
+
+ if (
+ (in == 0 || (frame >= (trim_start + in))) &&
+ (out == 0 || ((frame + length) < (trim_end - out)))
+ ) {
+ /* This section starts after the fade in and ends before the fade out */
+ return {};
+ }
+
+ /* Start position relative to the start of the fade in */
+ auto in_start = frame - trim_start;
+ /* Start position relative to the start of the fade out */
+ auto out_start = frame - (trim_end - out);
+
+ vector<float> coeffs(length);
+ for (auto coeff = 0; coeff < length; ++coeff) {
+ coeffs[coeff] = 1.0;
+ if (in) {
+ coeffs[coeff] *= logarithmic_fade_in_curve(static_cast<float>(in_start + coeff) / in);
+ }
+ if (out) {
+ coeffs[coeff] *= logarithmic_fade_out_curve(static_cast<float>(out_start + coeff) / out);
+ }
+ }
+
+ return coeffs;