X-Git-Url: https://git.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Faudio_content.cc;h=82a6a84d1d38d90c875371b9592d9f16d3d7305a;hp=bd4bb565ff73c9b8e553369a9a40d5f7d3968365;hb=597e27d830ebe88d5c06b3099ab5e429c97d0e3b;hpb=a976df9b5ebd2fc9b37db053faf4e8f33d59b41e

diff --git a/src/lib/audio_content.cc b/src/lib/audio_content.cc
index bd4bb565f..82a6a84d1 100644
--- a/src/lib/audio_content.cc
+++ b/src/lib/audio_content.cc
@@ -1,182 +1,195 @@
 /*
-    Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
 
-    This program is free software; you can redistribute it and/or modify
+    This file is part of DCP-o-matic.
+
+    DCP-o-matic is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
 
-    This program is distributed in the hope that it will be useful,
+    DCP-o-matic is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
 
 */
 
+
 #include "audio_content.h"
-#include "film.h"
-#include "exceptions.h"
+#include "compose.hpp"
 #include "config.h"
+#include "exceptions.h"
+#include "film.h"
 #include "frame_rate_change.h"
-#include "raw_convert.h"
-#include "compose.hpp"
+#include "maths_util.h"
+#include "video_content.h"
+#include <dcp/raw_convert.h>
 #include <libcxml/cxml.h>
 #include <libxml++/libxml++.h>
-#include <boost/foreach.hpp>
 #include <iostream>
 
 #include "i18n.h"
 
-using std::string;
+
 using std::cout;
-using std::vector;
-using std::stringstream;
+using std::dynamic_pointer_cast;
 using std::fixed;
 using std::list;
+using std::make_shared;
 using std::pair;
 using std::setprecision;
-using boost::shared_ptr;
-using boost::dynamic_pointer_cast;
+using std::shared_ptr;
+using std::string;
+using std::vector;
 using boost::optional;
+using dcp::raw_convert;
+using namespace dcpomatic;
+
 
 /** Something stream-related has changed */
-int const AudioContentProperty::AUDIO_STREAMS = 200;
-int const AudioContentProperty::AUDIO_GAIN = 201;
-int const AudioContentProperty::AUDIO_DELAY = 202;
-int const AudioContentProperty::AUDIO_VIDEO_FRAME_RATE = 203;
-
-AudioContent::AudioContent (shared_ptr<Film> film)
-	: Content (film)
-	, _audio_gain (0)
-	, _audio_delay (Config::instance()->default_audio_delay ())
-{
+int const AudioContentProperty::STREAMS = 200;
+int const AudioContentProperty::GAIN = 201;
+int const AudioContentProperty::DELAY = 202;
+int const AudioContentProperty::FADE_IN = 203;
+int const AudioContentProperty::FADE_OUT = 204;
+int const AudioContentProperty::USE_SAME_FADES_AS_VIDEO = 205;
 
-}
 
-AudioContent::AudioContent (shared_ptr<Film> film, DCPTime s)
-	: Content (film, s)
-	, _audio_gain (0)
-	, _audio_delay (Config::instance()->default_audio_delay ())
+AudioContent::AudioContent (Content* parent)
+	: ContentPart (parent)
+	, _delay (Config::instance()->default_audio_delay())
 {
 
 }
 
-AudioContent::AudioContent (shared_ptr<Film> film, boost::filesystem::path p)
-	: Content (film, p)
-	, _audio_gain (0)
-	, _audio_delay (Config::instance()->default_audio_delay ())
+
+shared_ptr<AudioContent>
+AudioContent::from_xml (Content* parent, cxml::ConstNodePtr node, int version)
 {
+	if (version < 34) {
+		/* With old metadata FFmpeg content has the audio-related tags even with no
+		   audio streams, so check for that.
+		*/
+		if (node->string_child("Type") == "FFmpeg" && node->node_children("AudioStream").empty()) {
+			return {};
+		}
+
+		/* Otherwise we can drop through to the newer logic */
+	}
+
+	if (!node->optional_number_child<double>("AudioGain")) {
+		return {};
+	}
+
+	return make_shared<AudioContent>(parent, node);
 }
 
-AudioContent::AudioContent (shared_ptr<Film> film, cxml::ConstNodePtr node)
-	: Content (film, node)
+
+AudioContent::AudioContent (Content* parent, cxml::ConstNodePtr node)
+	: ContentPart (parent)
 {
-	_audio_gain = node->number_child<double> ("AudioGain");
-	_audio_delay = node->number_child<int> ("AudioDelay");
-	_audio_video_frame_rate = node->optional_number_child<double> ("AudioVideoFrameRate");
+	_gain = node->number_child<double> ("AudioGain");
+	_delay = node->number_child<int> ("AudioDelay");
+	_fade_in = ContentTime(node->optional_number_child<ContentTime::Type>("AudioFadeIn").get_value_or(0));
+	_fade_out = ContentTime(node->optional_number_child<ContentTime::Type>("AudioFadeOut").get_value_or(0));
+	_use_same_fades_as_video = node->optional_bool_child("AudioUseSameFadesAsVideo").get_value_or(false);
+
+	/* Backwards compatibility */
+	auto r = node->optional_number_child<double>("AudioVideoFrameRate");
+	if (r) {
+		_parent->set_video_frame_rate (r.get());
+	}
 }
 
-AudioContent::AudioContent (shared_ptr<Film> film, vector<shared_ptr<Content> > c)
-	: Content (film, c)
+
+AudioContent::AudioContent (Content* parent, vector<shared_ptr<Content>> c)
+	: ContentPart (parent)
 {
-	shared_ptr<AudioContent> ref = dynamic_pointer_cast<AudioContent> (c[0]);
+	auto ref = c[0]->audio;
 	DCPOMATIC_ASSERT (ref);
 
-	for (size_t i = 0; i < c.size(); ++i) {
-		shared_ptr<AudioContent> ac = dynamic_pointer_cast<AudioContent> (c[i]);
-
-		if (ac->audio_gain() != ref->audio_gain()) {
+	for (size_t i = 1; i < c.size(); ++i) {
+		if (c[i]->audio->gain() != ref->gain()) {
 			throw JoinError (_("Content to be joined must have the same audio gain."));
 		}
 
-		if (ac->audio_delay() != ref->audio_delay()) {
+		if (c[i]->audio->delay() != ref->delay()) {
 			throw JoinError (_("Content to be joined must have the same audio delay."));
 		}
-
-		if (ac->audio_video_frame_rate() != ref->audio_video_frame_rate()) {
-			throw JoinError (_("Content to be joined must have the same video frame rate."));
-		}
 	}
 
-	_audio_gain = ref->audio_gain ();
-	_audio_delay = ref->audio_delay ();
-	/* Preserve the optional<> part of this */
-	_audio_video_frame_rate = ref->_audio_video_frame_rate;
+	_gain = ref->gain ();
+	_delay = ref->delay ();
+	_streams = ref->streams ();
 }
 
+
 void
 AudioContent::as_xml (xmlpp::Node* node) const
 {
 	boost::mutex::scoped_lock lm (_mutex);
-	node->add_child("AudioGain")->add_child_text (raw_convert<string> (_audio_gain));
-	node->add_child("AudioDelay")->add_child_text (raw_convert<string> (_audio_delay));
-	if (_audio_video_frame_rate) {
-		node->add_child("AudioVideoFrameRate")->add_child_text (raw_convert<string> (_audio_video_frame_rate.get()));
-	}
+	node->add_child("AudioGain")->add_child_text(raw_convert<string>(_gain));
+	node->add_child("AudioDelay")->add_child_text(raw_convert<string>(_delay));
+	node->add_child("AudioFadeIn")->add_child_text(raw_convert<string>(_fade_in.get()));
+	node->add_child("AudioFadeOut")->add_child_text(raw_convert<string>(_fade_out.get()));
+	node->add_child("AudioUseSameFadesAsVideo")->add_child_text(_use_same_fades_as_video ? "1" : "0");
 }
 
+
 void
-AudioContent::set_audio_gain (double g)
+AudioContent::set_gain (double g)
 {
-	{
-		boost::mutex::scoped_lock lm (_mutex);
-		_audio_gain = g;
-	}
-
-	signal_changed (AudioContentProperty::AUDIO_GAIN);
+	maybe_set (_gain, g, AudioContentProperty::GAIN);
 }
 
+
 void
-AudioContent::set_audio_delay (int d)
+AudioContent::set_delay (int d)
 {
-	{
-		boost::mutex::scoped_lock lm (_mutex);
-		_audio_delay = d;
-	}
-
-	signal_changed (AudioContentProperty::AUDIO_DELAY);
+	maybe_set (_delay, d, AudioContentProperty::DELAY);
 }
 
+
 string
 AudioContent::technical_summary () const
 {
-	string s = "audio :";
-	BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
-		s += String::compose ("stream channels %1 rate %2", i->channels(), i->frame_rate());
+	string s = "audio: ";
+	for (auto i: streams()) {
+		s += String::compose ("stream channels %1 rate %2 ", i->channels(), i->frame_rate());
 	}
 
 	return s;
 }
 
+
 void
-AudioContent::set_audio_mapping (AudioMapping mapping)
+AudioContent::set_mapping (AudioMapping mapping)
 {
+	ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
+
 	int c = 0;
-	BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
-		AudioMapping stream_mapping (i->channels (), MAX_DCP_AUDIO_CHANNELS);
+	for (auto i: streams()) {
+		AudioMapping stream_mapping (i->channels(), MAX_DCP_AUDIO_CHANNELS);
 		for (int j = 0; j < i->channels(); ++j) {
 			for (int k = 0; k < MAX_DCP_AUDIO_CHANNELS; ++k) {
-				stream_mapping.set (j, k, mapping.get (c, k));
+				stream_mapping.set (j, k, mapping.get(c, k));
 			}
 			++c;
 		}
 		i->set_mapping (stream_mapping);
 	}
-
-	signal_changed (AudioContentProperty::AUDIO_STREAMS);
 }
 
+
 AudioMapping
-AudioContent::audio_mapping () const
+AudioContent::mapping () const
 {
 	int channels = 0;
-	BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
+	for (auto i: streams()) {
 		channels += i->channels ();
 	}
 
@@ -185,12 +198,12 @@ AudioContent::audio_mapping () const
 	int c = 0;
 	int s = 0;
-	BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
-		AudioMapping mapping = i->mapping ();
+	for (auto i: streams()) {
+		auto mapping = i->mapping ();
 		for (int j = 0; j < mapping.input_channels(); ++j) {
 			for (int k = 0; k < MAX_DCP_AUDIO_CHANNELS; ++k) {
 				if (k < mapping.output_channels()) {
-					merged.set (c, k, mapping.get (j, k));
+					merged.set (c, k, mapping.get(j, k));
 				}
 			}
 			++c;
@@ -201,16 +214,16 @@ AudioContent::audio_mapping () const
 	return merged;
 }
 
+
 /** @return the frame rate that this content should be resampled to in order
  *  that it is in sync with the active video content at its start time.
  */
 int
-AudioContent::resampled_audio_frame_rate () const
+AudioContent::resampled_frame_rate (shared_ptr<const Film> film) const
 {
-	/* Resample to a DCI-approved sample rate */
-	double t = has_rate_above_48k() ? 96000 : 48000;
+	double t = film->audio_frame_rate ();
 
-	FrameRateChange frc (audio_video_frame_rate(), film()->video_frame_rate());
+	FrameRateChange frc (film, _parent);
 
 	/* Compensate if the DCP is being run at a different frame rate
 	   to the source; that is, if the video is run such that it will
@@ -225,10 +238,9 @@ AudioContent::resampled_audio_frame_rate () const
 	}
 
 string
-AudioContent::processing_description () const
+AudioContent::processing_description (shared_ptr<const Film> film) const
 {
-	vector<AudioStreamPtr> streams = audio_streams ();
-	if (streams.empty ()) {
+	if (streams().empty()) {
 		return "";
 	}
@@ -244,8 +256,8 @@ AudioContent::processing_description () const
 	bool same = true;
 
 	optional<int> common_frame_rate;
-	BOOST_FOREACH (AudioStreamPtr i, streams) {
-		if (i->frame_rate() != resampled_audio_frame_rate()) {
+	for (auto i: streams()) {
+		if (i->frame_rate() != resampled_frame_rate(film)) {
 			resampled = true;
 		} else {
 			not_resampled = true;
 		}
@@ -262,117 +274,246 @@ AudioContent::processing_description () const
 	}
 
 	if (not_resampled && resampled) {
-		return String::compose (_("Some audio will be resampled to %1kHz"), resampled_audio_frame_rate ());
+		return String::compose (_("Some audio will be resampled to %1Hz"), resampled_frame_rate(film));
 	}
 
 	if (!not_resampled && resampled) {
 		if (same) {
-			return String::compose (_("Audio will be resampled from %1kHz to %2kHz"), common_frame_rate.get(), resampled_audio_frame_rate ());
+			return String::compose (_("Audio will be resampled from %1Hz to %2Hz"), common_frame_rate.get(), resampled_frame_rate(film));
 		} else {
-			return String::compose (_("Audio will be resampled to %1kHz"), resampled_audio_frame_rate ());
+			return String::compose (_("Audio will be resampled to %1Hz"), resampled_frame_rate(film));
 		}
 	}
 
 	return "";
 }
 
-/** @return true if any stream in this content has a sampling rate of more than 48kHz */
-bool
-AudioContent::has_rate_above_48k () const
-{
-	BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
-		if (i->frame_rate() > 48000) {
-			return true;
-		}
-	}
-
-	return false;
-}
 
 /** @return User-visible names of each of our audio channels */
-vector<string>
-AudioContent::audio_channel_names () const
+vector<NamedChannel>
+AudioContent::channel_names () const
 {
-	vector<string> n;
+	vector<NamedChannel> n;
 
-	int t = 1;
-	BOOST_FOREACH (AudioStreamPtr i, audio_streams ()) {
+	int index = 0;
+	int stream = 1;
+	for (auto i: streams()) {
 		for (int j = 0; j < i->channels(); ++j) {
-			n.push_back (String::compose ("%1:%2", t, j + 1));
+			n.push_back (NamedChannel(String::compose ("%1:%2", stream, j + 1), index++));
 		}
-		++t;
+		++stream;
 	}
 
 	return n;
 }
 
+
 void
-AudioContent::add_properties (list<UserProperty>& p) const
+AudioContent::add_properties (shared_ptr<const Film> film, list<UserProperty>& p) const
 {
 	shared_ptr<AudioStream> stream;
-	if (audio_streams().size() == 1) {
-		stream = audio_streams().front ();
+	if (streams().size() == 1) {
+		stream = streams().front();
 	}
 
 	if (stream) {
-		p.push_back (UserProperty (_("Audio"), _("Channels"), stream->channels ()));
-		p.push_back (UserProperty (_("Audio"), _("Content audio frame rate"), stream->frame_rate(), _("Hz")));
+		p.push_back (UserProperty(UserProperty::AUDIO, _("Channels"), stream->channels()));
+		p.push_back (UserProperty(UserProperty::AUDIO, _("Content audio sample rate"), stream->frame_rate(), _("Hz")));
 	}
 
-	FrameRateChange const frc (audio_video_frame_rate(), film()->video_frame_rate());
-	ContentTime const c (full_length(), frc);
+	FrameRateChange const frc (_parent->active_video_frame_rate(film), film->video_frame_rate());
+	ContentTime const c (_parent->full_length(film), frc);
 
 	p.push_back (
-		UserProperty (_("Length"), _("Full length in video frames at content rate"), c.frames_round(frc.source))
+		UserProperty (UserProperty::LENGTH, _("Full length in video frames at content rate"), c.frames_round(frc.source))
 		);
 
 	if (stream) {
 		p.push_back (
 			UserProperty (
-				_("Length"),
-				_("Full length in audio frames at content rate"),
+				UserProperty::LENGTH,
+				_("Full length in audio samples at content rate"),
 				c.frames_round (stream->frame_rate ())
 				)
 			);
 	}
 
-	p.push_back (UserProperty (_("Audio"), _("DCP frame rate"), resampled_audio_frame_rate (), _("Hz")));
-	p.push_back (UserProperty (_("Length"), _("Full length in video frames at DCP rate"), c.frames_round (frc.dcp)));
+	p.push_back (UserProperty(UserProperty::AUDIO, _("DCP sample rate"), resampled_frame_rate(film), _("Hz")));
+	p.push_back (UserProperty(UserProperty::LENGTH, _("Full length in video frames at DCP rate"), c.frames_round (frc.dcp)));
 
 	if (stream) {
 		p.push_back (
 			UserProperty (
-				_("Length"),
-				_("Full length in audio frames at DCP rate"),
-				c.frames_round (resampled_audio_frame_rate ())
+				UserProperty::LENGTH,
+				_("Full length in audio samples at DCP rate"),
+				c.frames_round(resampled_frame_rate(film))
 				)
 			);
 	}
 }
 
+
 void
-AudioContent::set_audio_video_frame_rate (double r)
+AudioContent::set_streams (vector<AudioStreamPtr> streams)
 {
+	ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
+
 	{
 		boost::mutex::scoped_lock lm (_mutex);
-		_audio_video_frame_rate = r;
+		_streams = streams;
 	}
+}
 
-	signal_changed (AudioContentProperty::AUDIO_VIDEO_FRAME_RATE);
+
+AudioStreamPtr
+AudioContent::stream () const
+{
+	boost::mutex::scoped_lock lm (_mutex);
+	DCPOMATIC_ASSERT (_streams.size() == 1);
+	return _streams.front ();
 }
 
-double
-AudioContent::audio_video_frame_rate () const
+
+void
+AudioContent::add_stream (AudioStreamPtr stream)
 {
+	ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
+
+	{
 		boost::mutex::scoped_lock lm (_mutex);
-	if (_audio_video_frame_rate) {
-		return _audio_video_frame_rate.get ();
+		_streams.push_back (stream);
+	}
+}
+
+
+void
+AudioContent::set_stream (AudioStreamPtr stream)
+{
+	ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
+
+	{
+		boost::mutex::scoped_lock lm (_mutex);
+		_streams.clear ();
+		_streams.push_back (stream);
+	}
+}
+
+
+void
+AudioContent::take_settings_from (shared_ptr<const AudioContent> c)
+{
+	set_gain (c->_gain);
+	set_delay (c->_delay);
+	set_fade_in (c->fade_in());
+	set_fade_out (c->fade_out());
+
+	size_t i = 0;
+	size_t j = 0;
+
+	while (i < _streams.size() && j < c->_streams.size()) {
+		_streams[i]->set_mapping (c->_streams[j]->mapping());
+		++i;
+		++j;
+	}
+}
+
+
+void
+AudioContent::modify_position (shared_ptr<const Film> film, DCPTime& pos) const
+{
+	pos = pos.round (film->audio_frame_rate());
+}
+
+
+void
+AudioContent::modify_trim_start (ContentTime& trim) const
+{
+	DCPOMATIC_ASSERT (!_streams.empty());
+	/* XXX: we're in trouble if streams have different rates */
+	trim = trim.round (_streams.front()->frame_rate());
+}
+
+
+ContentTime
+AudioContent::fade_in () const
+{
+	boost::mutex::scoped_lock lm (_mutex);
+	if (_use_same_fades_as_video && _parent->video) {
+		return dcpomatic::ContentTime::from_frames(_parent->video->fade_in(), _parent->video_frame_rate().get_value_or(24));
+	}
+
+	return _fade_in;
+}
+
+
+ContentTime
+AudioContent::fade_out () const
+{
+	boost::mutex::scoped_lock lm (_mutex);
+	if (_use_same_fades_as_video && _parent->video) {
+		return dcpomatic::ContentTime::from_frames(_parent->video->fade_out(), _parent->video_frame_rate().get_value_or(24));
+	}
+
+	return _fade_out;
+}
+
+
+void
+AudioContent::set_fade_in (ContentTime t)
+{
+	maybe_set (_fade_in, t, AudioContentProperty::FADE_IN);
+}
+
+
+void
+AudioContent::set_fade_out (ContentTime t)
+{
+	maybe_set (_fade_out, t, AudioContentProperty::FADE_OUT);
+}
+
+
+void
+AudioContent::set_use_same_fades_as_video (bool s)
+{
+	maybe_set (_use_same_fades_as_video, s, AudioContentProperty::USE_SAME_FADES_AS_VIDEO);
+}
+
+
+vector<float>
+AudioContent::fade (AudioStreamPtr stream, Frame frame, Frame length, int frame_rate) const
+{
+	auto const in = fade_in().frames_round(frame_rate);
+	auto const out = fade_out().frames_round(frame_rate);
+
+	/* Where the start trim ends, at frame_rate */
+	auto const trim_start = _parent->trim_start().frames_round(frame_rate);
+	/* Where the end trim starts within the whole length of the content, at frame_rate */
+	auto const trim_end = ContentTime(ContentTime::from_frames(stream->length(), stream->frame_rate()) - _parent->trim_end()).frames_round(frame_rate);
+
+	if (
+		(in == 0 || (frame >= (trim_start + in))) &&
+		(out == 0 || ((frame + length) < (trim_end - out)))
+	   ) {
+		/* This section starts after the fade in and ends before the fade out */
+		return {};
+	}
+
+	/* Start position relative to the start of the fade in */
+	auto in_start = frame - trim_start;
+	/* Start position relative to the start of the fade out */
+	auto out_start = frame - (trim_end - out);
+
+	vector<float> coeffs(length);
+	for (auto coeff = 0; coeff < length; ++coeff) {
+		coeffs[coeff] = 1.0;
+		if (in) {
+			coeffs[coeff] *= logarithmic_fade_in_curve(static_cast<float>(in_start + coeff) / in);
+		}
+		if (out) {
+			coeffs[coeff] *= logarithmic_fade_out_curve(static_cast<float>(out_start + coeff) / out);
 		}
 	}
 
-	/* No frame rate specified, so assume this content has been
-	   prepared for any concurrent video content.
-	*/
-	return film()->active_frame_rate_change(position()).source;
+	return coeffs;
 }
+