2 Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
#include "audio_content.h"
#include "compose.hpp"
#include "exceptions.h"
#include "frame_rate_change.h"
#include "maths_util.h"
#include <dcp/raw_convert.h>
#include <libcxml/cxml.h>
#include <libxml++/libxml++.h>
#include <cmath>
38 using std::dynamic_pointer_cast;
41 using std::make_shared;
43 using std::setprecision;
44 using std::shared_ptr;
47 using boost::optional;
48 using dcp::raw_convert;
49 using namespace dcpomatic;
52 /** Something stream-related has changed */
53 int const AudioContentProperty::STREAMS = 200;
54 int const AudioContentProperty::GAIN = 201;
55 int const AudioContentProperty::DELAY = 202;
56 int const AudioContentProperty::FADE_IN = 203;
57 int const AudioContentProperty::FADE_OUT = 204;
/* Construct an AudioContent with no streams; the audio delay starts from
 * the configured default.
 * NOTE(review): this listing is truncated — the constructor body and its
 * closing brace are missing from this chunk.
 */
60 AudioContent::AudioContent (Content* parent)
61 : ContentPart (parent)
62 , _delay (Config::instance()->default_audio_delay())
/** Create an AudioContent from a <Content> XML node, or a null pointer if
 *  the node describes content with no audio.
 *  @param parent Content that will own the new AudioContent.
 *  @param node <Content> node to read.
 *  @param version Metadata version of the file being read.
 *  NOTE(review): lines are missing from this listing (the early returns
 *  and closing braces of both `if` branches); documented from what is
 *  visible only.
 */
68 shared_ptr<AudioContent>
69 AudioContent::from_xml (Content* parent, cxml::ConstNodePtr node, int version)
72 /* With old metadata FFmpeg content has the audio-related tags even with no
73 audio streams, so check for that.
75 if (node->string_child("Type") == "FFmpeg" && node->node_children("AudioStream").empty()) {
79 /* Otherwise we can drop through to the newer logic */
82 if (!node->optional_number_child<double> ("AudioGain")) {
86 return make_shared<AudioContent>(parent, node);
/* Construct an AudioContent from a <Content> XML node: reads gain, delay
 * and optional fade-in/out times (fades default to 0 when absent).  The
 * old AudioVideoFrameRate tag, if present, is forwarded to the parent
 * Content for backwards compatibility.
 * NOTE(review): listing truncated — the check on `r` before line 101 and
 * the closing brace are missing here.
 */
90 AudioContent::AudioContent (Content* parent, cxml::ConstNodePtr node)
91 : ContentPart (parent)
93 _gain = node->number_child<double> ("AudioGain");
94 _delay = node->number_child<int> ("AudioDelay");
95 _fade_in = ContentTime(node->optional_number_child<ContentTime::Type>("AudioFadeIn").get_value_or(0));
96 _fade_out = ContentTime(node->optional_number_child<ContentTime::Type>("AudioFadeOut").get_value_or(0));
98 /* Backwards compatibility */
99 auto r = node->optional_number_child<double>("AudioVideoFrameRate");
101 _parent->set_video_frame_rate (r.get());
/* "Join" constructor: build an AudioContent from several pieces of
 * content being joined together.  Throws JoinError unless every piece
 * has the same gain and delay as the first; settings are then copied
 * from the first piece.
 * NOTE(review): listing truncated — inner closing braces (and possibly
 * further equality checks, e.g. on fades) are missing.
 */
106 AudioContent::AudioContent (Content* parent, vector<shared_ptr<Content>> c)
107 : ContentPart (parent)
109 auto ref = c[0]->audio;
110 DCPOMATIC_ASSERT (ref);
112 for (size_t i = 1; i < c.size(); ++i) {
113 if (c[i]->audio->gain() != ref->gain()) {
114 throw JoinError (_("Content to be joined must have the same audio gain."));
117 if (c[i]->audio->delay() != ref->delay()) {
118 throw JoinError (_("Content to be joined must have the same audio delay."));
122 _gain = ref->gain ();
123 _delay = ref->delay ();
124 _streams = ref->streams ();
129 AudioContent::as_xml (xmlpp::Node* node) const
131 boost::mutex::scoped_lock lm (_mutex);
132 node->add_child("AudioGain")->add_child_text(raw_convert<string>(_gain));
133 node->add_child("AudioDelay")->add_child_text(raw_convert<string>(_delay));
134 node->add_child("AudioFadeIn")->add_child_text(raw_convert<string>(_fade_in.get()));
135 node->add_child("AudioFadeOut")->add_child_text(raw_convert<string>(_fade_out.get()));
140 AudioContent::set_gain (double g)
142 maybe_set (_gain, g, AudioContentProperty::GAIN);
147 AudioContent::set_delay (int d)
149 maybe_set (_delay, d, AudioContentProperty::DELAY);
154 AudioContent::technical_summary () const
156 string s = "audio: ";
157 for (auto i: streams()) {
158 s += String::compose ("stream channels %1 rate %2 ", i->channels(), i->frame_rate());
/* Apply a whole-content AudioMapping across all our streams: each stream
 * receives a sub-mapping covering its own channels, taken from consecutive
 * input rows of `mapping`.
 * NOTE(review): the overall input-channel counter `c` used at line 175 is
 * declared and incremented on lines missing from this listing, as are the
 * loop closing braces.
 */
166 AudioContent::set_mapping (AudioMapping mapping)
168 ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
171 for (auto i: streams()) {
172 AudioMapping stream_mapping (i->channels(), MAX_DCP_AUDIO_CHANNELS);
173 for (int j = 0; j < i->channels(); ++j) {
174 for (int k = 0; k < MAX_DCP_AUDIO_CHANNELS; ++k) {
175 stream_mapping.set (j, k, mapping.get(c, k));
179 i->set_mapping (stream_mapping);
/* Merge the per-stream mappings into one AudioMapping with an input row
 * for every channel of every stream.
 * NOTE(review): the running input-channel index `c` used at line 202, the
 * declaration of `channels`, and the closing braces/return are on lines
 * missing from this listing.
 */
185 AudioContent::mapping () const
188 for (auto i: streams()) {
189 channels += i->channels ();
192 AudioMapping merged (channels, MAX_DCP_AUDIO_CHANNELS);
197 for (auto i: streams()) {
198 auto mapping = i->mapping ();
199 for (int j = 0; j < mapping.input_channels(); ++j) {
200 for (int k = 0; k < MAX_DCP_AUDIO_CHANNELS; ++k) {
201 if (k < mapping.output_channels()) {
202 merged.set (c, k, mapping.get(j, k));
214 /** @return the frame rate that this content should be resampled to in order
215 * that it is in sync with the active video content at its start time.
218 AudioContent::resampled_frame_rate (shared_ptr<const Film> film) const
220 double t = film->audio_frame_rate ();
222 FrameRateChange frc (film, _parent);
224 /* Compensate if the DCP is being run at a different frame rate
225 to the source; that is, if the video is run such that it will
226 look different in the DCP compared to the source (slower or faster).
229 if (frc.change_speed) {
/* Build a user-visible description of what resampling will be applied to
 * this content's audio for the given film, covering the four cases listed
 * in the comment below.
 * NOTE(review): listing truncated — the early return for empty streams,
 * the `resampled = true` branch bodies, the reset of common_frame_rate
 * when rates differ, and various closing braces are missing here.
 */
237 AudioContent::processing_description (shared_ptr<const Film> film) const
239 if (streams().empty()) {
243 /* Possible answers are:
244 1. all audio will be resampled from x to y.
245 2. all audio will be resampled to y (from a variety of rates)
246 3. some audio will be resampled to y (from a variety of rates)
247 4. nothing will be resampled.
250 bool not_resampled = false;
251 bool resampled = false;
254 optional<int> common_frame_rate;
255 for (auto i: streams()) {
256 if (i->frame_rate() != resampled_frame_rate(film)) {
259 not_resampled = true;
262 if (common_frame_rate && common_frame_rate != i->frame_rate ()) {
265 common_frame_rate = i->frame_rate ();
268 if (not_resampled && !resampled) {
269 return _("Audio will not be resampled");
272 if (not_resampled && resampled) {
273 return String::compose (_("Some audio will be resampled to %1Hz"), resampled_frame_rate(film));
276 if (!not_resampled && resampled) {
278 return String::compose (_("Audio will be resampled from %1Hz to %2Hz"), common_frame_rate.get(), resampled_frame_rate(film));
280 return String::compose (_("Audio will be resampled to %1Hz"), resampled_frame_rate(film));
288 /** @return User-visible names of each of our audio channels */
/* Each name is "<stream number>:<channel number>", both 1-based.
 * NOTE(review): listing truncated — the declarations of the `stream` and
 * `index` counters, the per-stream increment of `stream`, and the final
 * `return n;` are on lines missing from this chunk.
 */
290 AudioContent::channel_names () const
292 vector<NamedChannel> n;
296 for (auto i: streams()) {
297 for (int j = 0; j < i->channels(); ++j) {
298 n.push_back (NamedChannel(String::compose ("%1:%2", stream, j + 1), index++));
/* Append user-visible properties of this audio (channel count, sample
 * rates, lengths at content and DCP rates) to `p`.  Stream-specific
 * properties are only added when there is exactly one stream.
 * NOTE(review): listing heavily truncated — the p.push_back(...) wrappers
 * around the UserProperty constructions at lines 324, 330-332 and
 * 343-345, plus the surrounding braces, are on missing lines.
 */
308 AudioContent::add_properties (shared_ptr<const Film> film, list<UserProperty>& p) const
310 shared_ptr<const AudioStream> stream;
311 if (streams().size() == 1) {
312 stream = streams().front();
316 p.push_back (UserProperty(UserProperty::AUDIO, _("Channels"), stream->channels()));
317 p.push_back (UserProperty(UserProperty::AUDIO, _("Content audio sample rate"), stream->frame_rate(), _("Hz")));
320 FrameRateChange const frc (_parent->active_video_frame_rate(film), film->video_frame_rate());
321 ContentTime const c (_parent->full_length(film), frc);
324 UserProperty (UserProperty::LENGTH, _("Full length in video frames at content rate"), c.frames_round(frc.source))
330 UserProperty::LENGTH,
331 _("Full length in audio samples at content rate"),
332 c.frames_round (stream->frame_rate ())
337 p.push_back (UserProperty(UserProperty::AUDIO, _("DCP sample rate"), resampled_frame_rate(film), _("Hz")));
338 p.push_back (UserProperty(UserProperty::LENGTH, _("Full length in video frames at DCP rate"), c.frames_round (frc.dcp)));
343 UserProperty::LENGTH,
344 _("Full length in audio samples at DCP rate"),
345 c.frames_round(resampled_frame_rate(film))
353 AudioContent::set_streams (vector<AudioStreamPtr> streams)
355 ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
358 boost::mutex::scoped_lock lm (_mutex);
365 AudioContent::stream () const
367 boost::mutex::scoped_lock lm (_mutex);
368 DCPOMATIC_ASSERT (_streams.size() == 1);
369 return _streams.front ();
374 AudioContent::add_stream (AudioStreamPtr stream)
376 ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
379 boost::mutex::scoped_lock lm (_mutex);
380 _streams.push_back (stream);
386 AudioContent::set_stream (AudioStreamPtr stream)
388 ContentChangeSignaller cc (_parent, AudioContentProperty::STREAMS);
391 boost::mutex::scoped_lock lm (_mutex);
393 _streams.push_back (stream);
/* Copy user-adjustable audio settings (delay, per-stream mappings, ...)
 * from another AudioContent, pairing our streams with the other
 * content's streams in order.
 * NOTE(review): listing truncated — at least the gain copy, the
 * declarations of counters `i`/`j`, their increments, and closing braces
 * are on missing lines.
 */
399 AudioContent::take_settings_from (shared_ptr<const AudioContent> c)
402 set_delay (c->_delay);
407 while (i < _streams.size() && j < c->_streams.size()) {
408 _streams[i]->set_mapping (c->_streams[j]->mapping());
416 AudioContent::modify_position (shared_ptr<const Film> film, DCPTime& pos) const
418 pos = pos.round (film->audio_frame_rate());
423 AudioContent::modify_trim_start (ContentTime& trim) const
425 DCPOMATIC_ASSERT (!_streams.empty());
426 /* XXX: we're in trouble if streams have different rates */
427 trim = trim.round (_streams.front()->frame_rate());
432 AudioContent::set_fade_in (ContentTime t)
434 maybe_set (_fade_in, t, AudioContentProperty::FADE_IN);
439 AudioContent::set_fade_out (ContentTime t)
441 maybe_set (_fade_out, t, AudioContentProperty::FADE_OUT);
446 AudioContent::fade (AudioStreamPtr stream, Frame frame, Frame length, int frame_rate) const
448 auto const in = fade_in().frames_round(frame_rate);
449 auto const out = fade_out().frames_round(frame_rate);
451 /* Where the start trim ends, at frame_rate */
452 auto const trim_start = _parent->trim_start().frames_round(frame_rate);
453 /* Where the end trim starts within the whole length of the content, at frame_rate */
454 auto const trim_end = ContentTime(ContentTime::from_frames(stream->length(), stream->frame_rate()) - _parent->trim_end()).frames_round(frame_rate);
457 (in == 0 || (frame >= (trim_start + in))) &&
458 (out == 0 || ((frame + length) < (trim_end - out)))
460 /* This section starts after the fade in and ends before the fade out */
464 /* Start position relative to the start of the fade in */
465 auto in_start = frame - trim_start;
466 /* Start position relative to the start of the fade out */
467 auto out_start = frame - (trim_end - out);
469 vector<float> coeffs(length);
470 for (auto coeff = 0; coeff < length; ++coeff) {
473 coeffs[coeff] *= logarithmic_fade_in_curve(static_cast<float>(in_start + coeff) / in);
476 coeffs[coeff] *= logarithmic_fade_out_curve(static_cast<float>(out_start + coeff) / out);