#include <set>
+#include <boost/scoped_array.hpp>
-#include <glibmm/thread.h>
+#include <glibmm/threads.h>
#include "pbd/basename.h"
#include "pbd/xml++.h"
#include "ardour/audioregion.h"
#include "ardour/session.h"
-#include "ardour/gain.h"
#include "ardour/dB.h"
+#include "ardour/debug.h"
#include "ardour/playlist.h"
#include "ardour/audiofilesource.h"
#include "ardour/region_factory.h"
#include "ardour/runtime_functions.h"
#include "ardour/transient_detector.h"
+#include "ardour/progress.h"
#include "i18n.h"
#include <locale.h>
using namespace ARDOUR;
using namespace PBD;
-/* a Session will reset these to its chosen defaults by calling AudioRegion::set_default_fade() */
+/* Property descriptors for AudioRegion's stateful properties.  These are
+ * plain declarations; the GQuark ids they carry are assigned later by
+ * AudioRegion::make_property_quarks().
+ */
+namespace ARDOUR {
+	namespace Properties {
+		PBD::PropertyDescriptor<bool> envelope_active;
+		PBD::PropertyDescriptor<bool> default_fade_in;
+		PBD::PropertyDescriptor<bool> default_fade_out;
+		PBD::PropertyDescriptor<bool> fade_in_active;
+		PBD::PropertyDescriptor<bool> fade_out_active;
+		PBD::PropertyDescriptor<float> scale_amplitude;
+		PBD::PropertyDescriptor<boost::shared_ptr<AutomationList> > fade_in;
+		PBD::PropertyDescriptor<boost::shared_ptr<AutomationList> > inverse_fade_in;
+		PBD::PropertyDescriptor<boost::shared_ptr<AutomationList> > fade_out;
+		PBD::PropertyDescriptor<boost::shared_ptr<AutomationList> > inverse_fade_out;
+		PBD::PropertyDescriptor<boost::shared_ptr<AutomationList> > envelope;
+	}
+}
-Change AudioRegion::FadeInChanged = ARDOUR::new_change();
-Change AudioRegion::FadeOutChanged = ARDOUR::new_change();
-Change AudioRegion::FadeInActiveChanged = ARDOUR::new_change();
-Change AudioRegion::FadeOutActiveChanged = ARDOUR::new_change();
-Change AudioRegion::EnvelopeActiveChanged = ARDOUR::new_change();
-Change AudioRegion::ScaleAmplitudeChanged = ARDOUR::new_change();
-Change AudioRegion::EnvelopeChanged = ARDOUR::new_change();
+static const double VERY_SMALL_SIGNAL = 0.0000001; //-140dB
-void
-AudioRegion::init ()
-{
- _scale_amplitude = 1.0;
+/* Curve manipulations */
- set_default_fades ();
- set_default_envelope ();
-
- listen_to_my_curves ();
- connect_to_analysis_changed ();
+/* Copy `src' into `dst' reversed in time: an event at time t in src is added
+ * to dst at (len - t), where len is the time of src's last event.
+ * NOTE(review): dereferences src->back(), so src is assumed non-empty —
+ * confirm all callers guarantee this.
+ */
+static void
+reverse_curve (boost::shared_ptr<Evoral::ControlList> dst, boost::shared_ptr<const Evoral::ControlList> src)
+{
+	size_t len = src->back()->when;
+	for (Evoral::ControlList::const_reverse_iterator it = src->rbegin(); it!=src->rend(); it++) {
+		dst->fast_simple_add (len - (*it)->when, (*it)->value);
+	}
}
-/** Constructor for use by derived types only */
-AudioRegion::AudioRegion (Session& s, nframes_t start, nframes_t length, string name)
- : Region (s, start, length, name, DataType::AUDIO)
- , _automatable(s)
- , _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
- , _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
- , _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation)))
+/* For each point (t, v) in `src', add (t, sqrt(1 - v^2)) to `dst' — the
+ * equal-power complement of src, so the two gains sum to unity in power terms.
+ */
+static void
+generate_inverse_power_curve (boost::shared_ptr<Evoral::ControlList> dst, boost::shared_ptr<const Evoral::ControlList> src)
{
-	init ();
-	assert (_sources.size() == _master_sources.size());
+	// calc inverse curve using sum of squares
+	for (Evoral::ControlList::const_iterator it = src->begin(); it!=src->end(); ++it ) {
+		float value = (*it)->value;
+		value = 1 - powf(value,2);
+		value = sqrtf(value);
+		dst->fast_simple_add ( (*it)->when, value );
+	}
}
-/** Basic AudioRegion constructor (one channel) */
-AudioRegion::AudioRegion (boost::shared_ptr<AudioSource> src, nframes_t start, nframes_t length)
- : Region (src, start, length, PBD::basename_nosuffix(src->name()), DataType::AUDIO, 0, Region::Flag(Region::DefaultFlags|Region::External))
- , _automatable(src->session())
- , _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
- , _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
- , _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation)))
+/* Fill `dst' with a fade-out curve of length `len' that drops `dB_drop'
+ * decibels over `num_steps' equal time steps: it starts at unity gain,
+ * applies a constant per-step gain ratio, and ends at VERY_SMALL_SIGNAL
+ * (about -140dB).  Any existing contents of `dst' are cleared first.
+ */
+static void
+generate_db_fade (boost::shared_ptr<Evoral::ControlList> dst, double len, int num_steps, float dB_drop)
{
-	boost::shared_ptr<AudioFileSource> afs = boost::dynamic_pointer_cast<AudioFileSource> (src);
-	if (afs) {
-		afs->HeaderPositionOffsetChanged.connect_same_thread (*this, boost::bind (&AudioRegion::source_offset_changed, this));
+	dst->clear ();
+	dst->fast_simple_add (0, 1);
+
+	//generate a fade-out curve by successively applying a gain drop
+	float fade_speed = dB_to_coefficient(dB_drop / (float) num_steps);
+	for (int i = 1; i < (num_steps-1); i++) {
+		float coeff = 1.0;
+		for (int j = 0; j < i; j++) {
+			coeff *= fade_speed;
+		}
+		dst->fast_simple_add (len*(double)i/(double)num_steps, coeff);
	}
-	init ();
-	assert (_sources.size() == _master_sources.size());
+	dst->fast_simple_add (len, VERY_SMALL_SIGNAL);
}
-/* Basic AudioRegion constructor (one channel) */
-AudioRegion::AudioRegion (boost::shared_ptr<AudioSource> src, nframes_t start, nframes_t length, const string& name, layer_t layer, Flag flags)
- : Region (src, start, length, name, DataType::AUDIO, layer, flags)
- , _automatable(src->session())
- , _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
- , _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
- , _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation)))
+/* Combine `curve1' and `curve2' into `dst' by linearly cross-fading between
+ * them in the dB domain: points early in the curves are weighted towards
+ * curve1, later points towards curve2.  The two input curves must have the
+ * same number of points; otherwise dst is left untouched.
+ */
+static void
+merge_curves (boost::shared_ptr<Evoral::ControlList> dst,
+	      boost::shared_ptr<const Evoral::ControlList> curve1,
+	      boost::shared_ptr<const Evoral::ControlList> curve2)
{
-	boost::shared_ptr<AudioFileSource> afs = boost::dynamic_pointer_cast<AudioFileSource> (src);
-	if (afs) {
-		afs->HeaderPositionOffsetChanged.connect_same_thread (*this, boost::bind (&AudioRegion::source_offset_changed, this));
+	Evoral::ControlList::EventList::size_type size = curve1->size();
+
+	//curve lengths must match for now
+	if (size != curve2->size()) {
+		return;
	}
+
+	Evoral::ControlList::const_iterator c1 = curve1->begin();
+	int count = 0;
+	for (Evoral::ControlList::const_iterator c2 = curve2->begin(); c2!=curve2->end(); c2++ ) {
+		float v1 = accurate_coefficient_to_dB((*c1)->value);
+		float v2 = accurate_coefficient_to_dB((*c2)->value);
+
+		double interp = v1 * ( 1.0-( (double)count / (double)size) );
+		interp += v2 * ( (double)count / (double)size );
-	init ();
-	assert (_sources.size() == _master_sources.size());
+		interp = dB_to_coefficient(interp);
+		dst->fast_simple_add ( (*c1)->when, interp );
+		c1++;
+		count++;
+	}
}
-/** Basic AudioRegion constructor (many channels) */
-AudioRegion::AudioRegion (const SourceList& srcs, nframes_t start, nframes_t length, const string& name, layer_t layer, Flag flags)
- : Region (srcs, start, length, name, DataType::AUDIO, layer, flags)
- , _automatable(srcs[0]->session())
- , _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
- , _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
- , _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation)))
+/* Intern the GQuark property ids for all AudioRegion properties.  Must be
+ * called once, before any AudioRegion property is used, so that the
+ * descriptors declared in ARDOUR::Properties carry valid ids.
+ */
+void
+AudioRegion::make_property_quarks ()
{
-	init ();
-	connect_to_analysis_changed ();
-	assert (_sources.size() == _master_sources.size());
+	Properties::envelope_active.property_id = g_quark_from_static_string (X_("envelope-active"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for envelope-active = %1\n", Properties::envelope_active.property_id));
+	Properties::default_fade_in.property_id = g_quark_from_static_string (X_("default-fade-in"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for default-fade-in = %1\n", Properties::default_fade_in.property_id));
+	Properties::default_fade_out.property_id = g_quark_from_static_string (X_("default-fade-out"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for default-fade-out = %1\n", Properties::default_fade_out.property_id));
+	Properties::fade_in_active.property_id = g_quark_from_static_string (X_("fade-in-active"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-in-active = %1\n", Properties::fade_in_active.property_id));
+	Properties::fade_out_active.property_id = g_quark_from_static_string (X_("fade-out-active"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for fade-out-active = %1\n", Properties::fade_out_active.property_id));
+	Properties::scale_amplitude.property_id = g_quark_from_static_string (X_("scale-amplitude"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for scale-amplitude = %1\n", Properties::scale_amplitude.property_id));
+	Properties::fade_in.property_id = g_quark_from_static_string (X_("FadeIn"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for FadeIn = %1\n", Properties::fade_in.property_id));
+	Properties::inverse_fade_in.property_id = g_quark_from_static_string (X_("InverseFadeIn"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for InverseFadeIn = %1\n", Properties::inverse_fade_in.property_id));
+	Properties::fade_out.property_id = g_quark_from_static_string (X_("FadeOut"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for FadeOut = %1\n", Properties::fade_out.property_id));
+	Properties::inverse_fade_out.property_id = g_quark_from_static_string (X_("InverseFadeOut"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for InverseFadeOut = %1\n", Properties::inverse_fade_out.property_id));
+	Properties::envelope.property_id = g_quark_from_static_string (X_("Envelope"));
+	DEBUG_TRACE (DEBUG::Properties, string_compose ("quark for Envelope = %1\n", Properties::envelope.property_id));
}
-/** Create a new AudioRegion, that is part of an existing one */
-AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other, nframes_t offset, nframes_t length, const string& name, layer_t layer, Flag flags)
- : Region (other, offset, length, name, layer, flags)
- , _automatable(other->session())
- , _fade_in (new AutomationList(*other->_fade_in))
- , _fade_out (new AutomationList(*other->_fade_out))
- , _envelope (new AutomationList(*other->_envelope, offset, offset + length))
+/* Register this region's property members with the Stateful property
+ * machinery so they participate in state save/restore and change
+ * signalling.  Called from init() and from the copy constructors.
+ */
+void
+AudioRegion::register_properties ()
{
-	connect_to_header_position_offset_changed ();
-
-	/* return to default fades if the existing ones are too long */
-
-	if (_flags & LeftOfSplit) {
-		if (_fade_in->back()->when >= _length) {
-			set_default_fade_in ();
-		} else {
-			_fade_in_disabled = other->_fade_in_disabled;
-		}
-		set_default_fade_out ();
-		_flags = Flag (_flags & ~Region::LeftOfSplit);
-	}
+	/* no need to register parent class properties */
+
+	add_property (_envelope_active);
+	add_property (_default_fade_in);
+	add_property (_default_fade_out);
+	add_property (_fade_in_active);
+	add_property (_fade_out_active);
+	add_property (_scale_amplitude);
+	add_property (_fade_in);
+	add_property (_inverse_fade_in);
+	add_property (_fade_out);
+	add_property (_inverse_fade_out);
+	add_property (_envelope);
+}
- if (_flags & RightOfSplit) {
- if (_fade_out->back()->when >= _length) {
- set_default_fade_out ();
- } else {
- _fade_out_disabled = other->_fade_out_disabled;
- }
- set_default_fade_in ();
- _flags = Flag (_flags & ~Region::RightOfSplit);
- }
+/* Member-initialiser list fragment giving every AudioRegion property its
+ * default value; used by the "fresh region" constructors.
+ */
+#define AUDIOREGION_STATE_DEFAULT \
+	_envelope_active (Properties::envelope_active, false) \
+	, _default_fade_in (Properties::default_fade_in, true) \
+	, _default_fade_out (Properties::default_fade_out, true) \
+	, _fade_in_active (Properties::fade_in_active, true) \
+	, _fade_out_active (Properties::fade_out_active, true) \
+	, _scale_amplitude (Properties::scale_amplitude, 1.0) \
+	, _fade_in (Properties::fade_in, boost::shared_ptr<AutomationList> (new AutomationList (Evoral::Parameter (FadeInAutomation)))) \
+	, _inverse_fade_in (Properties::inverse_fade_in, boost::shared_ptr<AutomationList> (new AutomationList (Evoral::Parameter (FadeInAutomation)))) \
+	, _fade_out (Properties::fade_out, boost::shared_ptr<AutomationList> (new AutomationList (Evoral::Parameter (FadeOutAutomation)))) \
+	, _inverse_fade_out (Properties::inverse_fade_out, boost::shared_ptr<AutomationList> (new AutomationList (Evoral::Parameter (FadeOutAutomation))))
+
+/* Member-initialiser list fragment copying every AudioRegion property from
+ * another region; used by the copy constructors.
+ *
+ * Fixed a copy-paste defect: _inverse_fade_in, _fade_out and
+ * _inverse_fade_out were all constructed with Properties::fade_in as their
+ * descriptor, making three distinct properties share the fade-in property
+ * id (wrong identity for state save/restore and change signalling).  Each
+ * now uses its own descriptor, matching AUDIOREGION_STATE_DEFAULT above.
+ */
+#define AUDIOREGION_COPY_STATE(other) \
+	_envelope_active (Properties::envelope_active, other->_envelope_active) \
+	, _default_fade_in (Properties::default_fade_in, other->_default_fade_in) \
+	, _default_fade_out (Properties::default_fade_out, other->_default_fade_out) \
+	, _fade_in_active (Properties::fade_in_active, other->_fade_in_active) \
+	, _fade_out_active (Properties::fade_out_active, other->_fade_out_active) \
+	, _scale_amplitude (Properties::scale_amplitude, other->_scale_amplitude) \
+	, _fade_in (Properties::fade_in, boost::shared_ptr<AutomationList> (new AutomationList (*other->_fade_in.val()))) \
+	, _inverse_fade_in (Properties::inverse_fade_in, boost::shared_ptr<AutomationList> (new AutomationList (*other->_inverse_fade_in.val()))) \
+	, _fade_out (Properties::fade_out, boost::shared_ptr<AutomationList> (new AutomationList (*other->_fade_out.val()))) \
+	, _inverse_fade_out (Properties::inverse_fade_out, boost::shared_ptr<AutomationList> (new AutomationList (*other->_inverse_fade_out.val())))
+/* a Session will reset these to its chosen defaults by calling AudioRegion::set_default_fade() */
- _scale_amplitude = other->_scale_amplitude;
+/* Shared initialisation for the "fresh region" constructors: registers
+ * properties, installs the default fades and envelope (with change
+ * notification suspended so no signals fire mid-construction), and hooks
+ * up the curve/analysis/source-offset signal connections.  The copy
+ * constructors deliberately do NOT call this, since they take their fades
+ * from the source region.
+ */
+void
+AudioRegion::init ()
+{
+	register_properties ();
-	assert(_type == DataType::AUDIO);
+	suspend_property_changes();
+	set_default_fades ();
+	set_default_envelope ();
+	resume_property_changes();
	listen_to_my_curves ();
	connect_to_analysis_changed ();
+	connect_to_header_position_offset_changed ();
+}
+/** Constructor for use by derived types only */
+AudioRegion::AudioRegion (Session& s, framepos_t start, framecnt_t len, std::string name)
+ : Region (s, start, len, name, DataType::AUDIO)
+ , AUDIOREGION_STATE_DEFAULT
+ , _envelope (Properties::envelope, boost::shared_ptr<AutomationList> (new AutomationList (Evoral::Parameter(EnvelopeAutomation))))
+ , _automatable (s)
+ , _fade_in_suspended (0)
+ , _fade_out_suspended (0)
+{
+ init ();
+ assert (_sources.size() == _master_sources.size());
+}
+
+/** Basic AudioRegion constructor */
+AudioRegion::AudioRegion (const SourceList& srcs)
+ : Region (srcs)
+ , AUDIOREGION_STATE_DEFAULT
+ , _envelope (Properties::envelope, boost::shared_ptr<AutomationList> (new AutomationList (Evoral::Parameter(EnvelopeAutomation))))
+ , _automatable(srcs[0]->session())
+ , _fade_in_suspended (0)
+ , _fade_out_suspended (0)
+{
+ init ();
assert (_sources.size() == _master_sources.size());
}
AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other)
: Region (other)
+ , AUDIOREGION_COPY_STATE (other)
+ /* As far as I can see, the _envelope's times are relative to region position, and have nothing
+ to do with sources (and hence _start). So when we copy the envelope, we just use the supplied offset.
+ */
+ , _envelope (Properties::envelope, boost::shared_ptr<AutomationList> (new AutomationList (*other->_envelope.val(), 0, other->_length)))
, _automatable (other->session())
- , _fade_in (new AutomationList (*other->_fade_in))
- , _fade_out (new AutomationList (*other->_fade_out))
- , _envelope (new AutomationList (*other->_envelope))
+ , _fade_in_suspended (0)
+ , _fade_out_suspended (0)
{
- assert(_type == DataType::AUDIO);
- _scale_amplitude = other->_scale_amplitude;
-
+ /* don't use init here, because we got fade in/out from the other region
+ */
+ register_properties ();
listen_to_my_curves ();
connect_to_analysis_changed ();
+ connect_to_header_position_offset_changed ();
+ assert(_type == DataType::AUDIO);
assert (_sources.size() == _master_sources.size());
}
-AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other, const SourceList& /*srcs*/,
- nframes_t length, const string& name, layer_t layer, Flag flags)
- : Region (other, length, name, layer, flags)
+AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other, framecnt_t offset)
+ : Region (other, offset)
+ , AUDIOREGION_COPY_STATE (other)
+ /* As far as I can see, the _envelope's times are relative to region position, and have nothing
+ to do with sources (and hence _start). So when we copy the envelope, we just use the supplied offset.
+ */
+ , _envelope (Properties::envelope, boost::shared_ptr<AutomationList> (new AutomationList (*other->_envelope.val(), offset, other->_length)))
, _automatable (other->session())
- , _fade_in (new AutomationList (*other->_fade_in))
- , _fade_out (new AutomationList (*other->_fade_out))
- , _envelope (new AutomationList (*other->_envelope))
+ , _fade_in_suspended (0)
+ , _fade_out_suspended (0)
{
- /* make-a-sort-of-copy-with-different-sources constructor (used by audio filter) */
-
- for (SourceList::const_iterator i = _sources.begin(); i != _sources.end(); ++i) {
-
- boost::shared_ptr<AudioFileSource> afs = boost::dynamic_pointer_cast<AudioFileSource> ((*i));
- if (afs) {
- afs->HeaderPositionOffsetChanged.connect_same_thread (*this, boost::bind (&AudioRegion::source_offset_changed, this));
- }
- }
-
- _scale_amplitude = other->_scale_amplitude;
-
- _fade_in_disabled = 0;
- _fade_out_disabled = 0;
-
+ /* don't use init here, because we got fade in/out from the other region
+ */
+ register_properties ();
listen_to_my_curves ();
connect_to_analysis_changed ();
+ connect_to_header_position_offset_changed ();
+ assert(_type == DataType::AUDIO);
assert (_sources.size() == _master_sources.size());
}
-AudioRegion::AudioRegion (boost::shared_ptr<AudioSource> src, const XMLNode& node)
- : Region (src, node)
- , _automatable(src->session())
- , _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
- , _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
- , _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation)))
+AudioRegion::AudioRegion (boost::shared_ptr<const AudioRegion> other, const SourceList& srcs)
+ : Region (boost::static_pointer_cast<const Region>(other), srcs)
+ , AUDIOREGION_COPY_STATE (other)
+ , _envelope (Properties::envelope, boost::shared_ptr<AutomationList> (new AutomationList (*other->_envelope.val())))
+ , _automatable (other->session())
+ , _fade_in_suspended (0)
+ , _fade_out_suspended (0)
{
- boost::shared_ptr<AudioFileSource> afs = boost::dynamic_pointer_cast<AudioFileSource> (src);
- if (afs) {
- afs->HeaderPositionOffsetChanged.connect_same_thread (*this, boost::bind (&AudioRegion::source_offset_changed, this));
- }
-
- init ();
+ /* make-a-sort-of-copy-with-different-sources constructor (used by audio filter) */
- if (set_state (node, Stateful::loading_state_version)) {
- throw failed_constructor();
- }
+ register_properties ();
- assert(_type == DataType::AUDIO);
+ listen_to_my_curves ();
connect_to_analysis_changed ();
+ connect_to_header_position_offset_changed ();
assert (_sources.size() == _master_sources.size());
}
-AudioRegion::AudioRegion (SourceList& srcs, const XMLNode& node)
- : Region (srcs, node)
+AudioRegion::AudioRegion (SourceList& srcs)
+ : Region (srcs)
+ , AUDIOREGION_STATE_DEFAULT
+ , _envelope (Properties::envelope, boost::shared_ptr<AutomationList> (new AutomationList(Evoral::Parameter(EnvelopeAutomation))))
, _automatable(srcs[0]->session())
- , _fade_in (new AutomationList(Evoral::Parameter(FadeInAutomation)))
- , _fade_out (new AutomationList(Evoral::Parameter(FadeOutAutomation)))
- , _envelope (new AutomationList(Evoral::Parameter(EnvelopeAutomation)))
+ , _fade_in_suspended (0)
+ , _fade_out_suspended (0)
{
init ();
- if (set_state (node, Stateful::loading_state_version)) {
- throw failed_constructor();
- }
-
assert(_type == DataType::AUDIO);
- connect_to_analysis_changed ();
assert (_sources.size() == _master_sources.size());
}
{
}
+/* Called after a batch of property values has been applied (e.g. during
+ * state restore or after a split), to re-establish derived invariants:
+ *  - an unmarked sync point tracks _start;
+ *  - on the left/right halves of a split, fades that no longer fit inside
+ *    _length revert to defaults, and the split flags are cleared;
+ *  - the gain envelope is truncated to the (possibly changed) _length.
+ */
+void
+AudioRegion::post_set (const PropertyChange& /*ignored*/)
+{
+	if (!_sync_marked) {
+		_sync_position = _start;
+	}
+
+	/* return to default fades if the existing ones are too long */
+
+	if (_left_of_split) {
+		if (_fade_in->back()->when >= _length) {
+			set_default_fade_in ();
+		}
+		set_default_fade_out ();
+		_left_of_split = false;
+	}
+
+	if (_right_of_split) {
+		if (_fade_out->back()->when >= _length) {
+			set_default_fade_out ();
+		}
+
+		set_default_fade_in ();
+		_right_of_split = false;
+	}
+
+	/* If _length changed, adjust our gain envelope accordingly */
+	_envelope->truncate_end (_length);
+}
+
void
AudioRegion::connect_to_analysis_changed ()
{
for (SourceList::const_iterator i = _sources.begin(); i != _sources.end(); ++i) {
+ /* connect only once to HeaderPositionOffsetChanged, even if sources are replicated
+ */
+
if (unique_srcs.find (*i) == unique_srcs.end ()) {
unique_srcs.insert (*i);
boost::shared_ptr<AudioFileSource> afs = boost::dynamic_pointer_cast<AudioFileSource> (*i);
void
AudioRegion::listen_to_my_curves ()
{
- cerr << _name << ": listeing my own curves\n";
-
_envelope->StateChanged.connect_same_thread (*this, boost::bind (&AudioRegion::envelope_changed, this));
_fade_in->StateChanged.connect_same_thread (*this, boost::bind (&AudioRegion::fade_in_changed, this));
_fade_out->StateChanged.connect_same_thread (*this, boost::bind (&AudioRegion::fade_out_changed, this));
AudioRegion::set_envelope_active (bool yn)
{
if (envelope_active() != yn) {
- char buf[64];
- if (yn) {
- snprintf (buf, sizeof (buf), "envelope active");
- _flags = Flag (_flags|EnvelopeActive);
- } else {
- snprintf (buf, sizeof (buf), "envelope off");
- _flags = Flag (_flags & ~EnvelopeActive);
- }
- send_change (EnvelopeActiveChanged);
+ _envelope_active = yn;
+ send_change (PropertyChange (Properties::envelope_active));
}
}
-ARDOUR::nframes_t
-AudioRegion::read_peaks (PeakData *buf, nframes_t npeaks, nframes_t offset, nframes_t cnt, uint32_t chan_n, double samples_per_unit) const
+ARDOUR::framecnt_t
+AudioRegion::read_peaks (PeakData *buf, framecnt_t npeaks, framecnt_t offset, framecnt_t cnt, uint32_t chan_n, double samples_per_unit) const
{
if (chan_n >= _sources.size()) {
return 0;
if (audio_source(chan_n)->read_peaks (buf, npeaks, offset, cnt, samples_per_unit)) {
return 0;
} else {
- if (_scale_amplitude != 1.0) {
- for (nframes_t n = 0; n < npeaks; ++n) {
+ if (_scale_amplitude != 1.0f) {
+ for (framecnt_t n = 0; n < npeaks; ++n) {
buf[n].max *= _scale_amplitude;
buf[n].min *= _scale_amplitude;
}
}
}
-nframes_t
-AudioRegion::read (Sample* buf, sframes_t timeline_position, nframes_t cnt, int channel) const
+/** @param buf Buffer to write data to (existing data will be overwritten).
+ * @param pos Position to read from as an offset from the region position.
+ * @param cnt Number of frames to read.
+ * @param channel Channel to read from.
+ */
+framecnt_t
+AudioRegion::read (Sample* buf, framepos_t pos, framecnt_t cnt, int channel) const
{
/* raw read, no fades, no gain, nada */
- return _read_at (_sources, _length, buf, 0, 0, _position + timeline_position, cnt, channel, 0, 0, ReadOps (0));
+ return read_from_sources (_sources, _length, buf, _position + pos, cnt, channel);
}
-nframes_t
-AudioRegion::read_with_ops (Sample* buf, sframes_t file_position, nframes_t cnt, int channel, ReadOps rops) const
-{
- return _read_at (_sources, _length, buf, 0, 0, file_position, cnt, channel, 0, 0, rops);
-}
-
-nframes_t
-AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer,
- sframes_t file_position, nframes_t cnt, uint32_t chan_n,
- nframes_t read_frames, nframes_t skip_frames) const
-{
- /* regular diskstream/butler read complete with fades etc */
- return _read_at (_sources, _length, buf, mixdown_buffer, gain_buffer,
- file_position, cnt, chan_n, read_frames, skip_frames, ReadOps (~0));
-}
-
-nframes_t
-AudioRegion::master_read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer,
- sframes_t position, nframes_t cnt, uint32_t chan_n) const
+framecnt_t
+AudioRegion::master_read_at (Sample *buf, Sample* /*mixdown_buffer*/, float* /*gain_buffer*/,
+ framepos_t position, framecnt_t cnt, uint32_t chan_n) const
{
/* do not read gain/scaling/fades and do not count this disk i/o in statistics */
- return _read_at (_master_sources, _master_sources.front()->length(_master_sources.front()->timeline_position()),
- buf, mixdown_buffer, gain_buffer, position, cnt, chan_n, 0, 0, ReadOps (0));
+ assert (cnt >= 0);
+ return read_from_sources (
+ _master_sources, _master_sources.front()->length (_master_sources.front()->timeline_position()),
+ buf, position, cnt, chan_n
+ );
}
-nframes_t
-AudioRegion::_read_at (const SourceList& /*srcs*/, nframes_t limit,
- Sample *buf, Sample *mixdown_buffer, float *gain_buffer,
- sframes_t position, nframes_t cnt,
- uint32_t chan_n,
- nframes_t /*read_frames*/,
- nframes_t /*skip_frames*/,
- ReadOps rops) const
+/** @param buf Buffer to mix data into.
+ * @param mixdown_buffer Scratch buffer for audio data.
+ * @param gain_buffer Scratch buffer for gain data.
+ * @param position Position within the session to read from.
+ * @param cnt Number of frames to read.
+ * @param chan_n Channel number to read.
+ */
+framecnt_t
+AudioRegion::read_at (Sample *buf, Sample *mixdown_buffer, float *gain_buffer,
+ framepos_t position,
+ framecnt_t cnt,
+ uint32_t chan_n) const
{
- nframes_t internal_offset;
- nframes_t buf_offset;
- nframes_t to_read;
- bool raw = (rops == ReadOpsNone);
-
- if (muted() && !raw) {
- return 0; /* read nothing */
- }
+ /* We are reading data from this region into buf (possibly via mixdown_buffer).
+ The caller has verified that we cover the desired section.
+ */
- /* precondition: caller has verified that we cover the desired section */
+ /* See doc/region_read.svg for a drawing which might help to explain
+ what is going on.
+ */
- if (position < _position) {
- internal_offset = 0;
- buf_offset = _position - position;
- cnt -= buf_offset;
- } else {
- internal_offset = position - _position;
- buf_offset = 0;
+ assert (cnt >= 0);
+
+ if (n_channels() == 0) {
+ return 0;
}
- if (internal_offset >= limit) {
- return 0; /* read nothing */
- }
+ /* WORK OUT WHERE TO GET DATA FROM */
- if ((to_read = min (cnt, limit - internal_offset)) == 0) {
- return 0; /* read nothing */
- }
+ framecnt_t to_read;
- if (opaque() || raw) {
- /* overwrite whatever is there */
- mixdown_buffer = buf + buf_offset;
- } else {
- mixdown_buffer += buf_offset;
- }
+ assert (position >= _position);
+ frameoffset_t const internal_offset = position - _position;
- if (rops & ReadOpsCount) {
- _read_data_count = 0;
+ if (internal_offset >= _length) {
+ return 0; /* read nothing */
}
- if (chan_n < n_channels()) {
-
- boost::shared_ptr<AudioSource> src = audio_source(chan_n);
- if (src->read (mixdown_buffer, _start + internal_offset, to_read) != to_read) {
- return 0; /* "read nothing" */
- }
-
- if (rops & ReadOpsCount) {
- _read_data_count += src->read_data_count();
- }
-
- } else {
-
- /* track is N-channel, this region has less channels; silence the ones
- we don't have.
- */
-
- memset (mixdown_buffer, 0, sizeof (Sample) * cnt);
+ if ((to_read = min (cnt, _length - internal_offset)) == 0) {
+ return 0; /* read nothing */
}
- if (rops & ReadOpsFades) {
-
- /* fade in */
-
- if ((_flags & FadeIn) && _session.config.get_use_region_fades()) {
-
- nframes_t fade_in_length = (nframes_t) _fade_in->back()->when;
-
- /* see if this read is within the fade in */
- if (internal_offset < fade_in_length) {
+ /* COMPUTE DETAILS OF ANY FADES INVOLVED IN THIS READ */
- nframes_t fi_limit;
+ /* Amount (length) of fade in that we are dealing with in this read */
+ framecnt_t fade_in_limit = 0;
- fi_limit = min (to_read, fade_in_length - internal_offset);
+ /* Offset from buf / mixdown_buffer of the start
+ of any fade out that we are dealing with
+ */
+ frameoffset_t fade_out_offset = 0;
+
+ /* Amount (length) of fade out that we are dealing with in this read */
+ framecnt_t fade_out_limit = 0;
+ framecnt_t fade_interval_start = 0;
- _fade_in->curve().get_vector (internal_offset, internal_offset+fi_limit, gain_buffer, fi_limit);
+ /* Fade in */
+
+ if (_fade_in_active && _session.config.get_use_region_fades()) {
+
+ framecnt_t fade_in_length = (framecnt_t) _fade_in->back()->when;
- for (nframes_t n = 0; n < fi_limit; ++n) {
- mixdown_buffer[n] *= gain_buffer[n];
- }
- }
+ /* see if this read is within the fade in */
+
+ if (internal_offset < fade_in_length) {
+ fade_in_limit = min (to_read, fade_in_length - internal_offset);
}
-
- /* fade out */
-
- if ((_flags & FadeOut) && _session.config.get_use_region_fades()) {
-
- /* see if some part of this read is within the fade out */
+ }
+
+ /* Fade out */
+
+ if (_fade_out_active && _session.config.get_use_region_fades()) {
+
+ /* see if some part of this read is within the fade out */
/* ................. >| REGION
- limit
+ _length
{ } FADE
fade_out_length
^
- limit - fade_out_length
+ _length - fade_out_length
|--------------|
^internal_offset
^internal_offset + to_read
we need the intersection of [internal_offset,internal_offset+to_read] with
- [limit - fade_out_length, limit]
+ [_length - fade_out_length, _length]
*/
- nframes_t fade_out_length = (nframes_t) _fade_out->back()->when;
- nframes_t fade_interval_start = max(internal_offset, limit-fade_out_length);
- nframes_t fade_interval_end = min(internal_offset + to_read, limit);
-
- if (fade_interval_end > fade_interval_start) {
- /* (part of the) the fade out is in this buffer */
-
- nframes_t fo_limit = fade_interval_end - fade_interval_start;
- nframes_t curve_offset = fade_interval_start - (limit-fade_out_length);
- nframes_t fade_offset = fade_interval_start - internal_offset;
-
- _fade_out->curve().get_vector (curve_offset, curve_offset+fo_limit, gain_buffer, fo_limit);
+ fade_interval_start = max (internal_offset, _length - framecnt_t (_fade_out->back()->when));
+ framecnt_t fade_interval_end = min(internal_offset + to_read, _length.val());
+
+ if (fade_interval_end > fade_interval_start) {
+ /* (part of the) the fade out is in this buffer */
+ fade_out_limit = fade_interval_end - fade_interval_start;
+ fade_out_offset = fade_interval_start - internal_offset;
+ }
+ }
- for (nframes_t n = 0, m = fade_offset; n < fo_limit; ++n, ++m) {
- mixdown_buffer[m] *= gain_buffer[n];
- }
- }
+ /* READ DATA FROM THE SOURCE INTO mixdown_buffer.
+ We can never read directly into buf, since it may contain data
+ from a region `below' this one in the stack, and our fades (if they exist)
+ may need to mix with the existing data.
+ */
- }
+ if (read_from_sources (_sources, _length, mixdown_buffer, position, to_read, chan_n) != to_read) {
+ return 0;
}
- /* Regular gain curves and scaling */
+ /* APPLY REGULAR GAIN CURVES AND SCALING TO mixdown_buffer */
- if ((rops & ReadOpsOwnAutomation) && envelope_active()) {
+ if (envelope_active()) {
_envelope->curve().get_vector (internal_offset, internal_offset + to_read, gain_buffer, to_read);
- if ((rops & ReadOpsOwnScaling) && _scale_amplitude != 1.0f) {
- for (nframes_t n = 0; n < to_read; ++n) {
+ if (_scale_amplitude != 1.0f) {
+ for (framecnt_t n = 0; n < to_read; ++n) {
mixdown_buffer[n] *= gain_buffer[n] * _scale_amplitude;
}
} else {
- for (nframes_t n = 0; n < to_read; ++n) {
+ for (framecnt_t n = 0; n < to_read; ++n) {
mixdown_buffer[n] *= gain_buffer[n];
}
}
- } else if ((rops & ReadOpsOwnScaling) && _scale_amplitude != 1.0f) {
-
- // XXX this should be using what in 2.0 would have been:
- // Session::apply_gain_to_buffer (mixdown_buffer, to_read, _scale_amplitude);
+ } else if (_scale_amplitude != 1.0f) {
+ apply_gain_to_buffer (mixdown_buffer, to_read, _scale_amplitude);
+ }
+
+ /* APPLY FADES TO THE DATA IN mixdown_buffer AND MIX THE RESULTS INTO
+ * buf. The key things to realize here: (1) the fade being applied is
+ * (as of April 26th 2012) just the inverse of the fade in curve (2)
+ * "buf" contains data from lower regions already. So this operation
+ * fades out the existing material.
+ */
+
+ if (fade_in_limit != 0) {
+
+ if (opaque()) {
+ if (_inverse_fade_in) {
+
+ /* explicit inverse fade in curve (e.g. for constant
+ * power), so we have to fetch it.
+ */
+
+ _inverse_fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit);
+
+ /* Fade the data from lower layers out */
+ for (framecnt_t n = 0; n < fade_in_limit; ++n) {
+ buf[n] *= gain_buffer[n];
+ }
+
+ /* refill gain buffer with the fade in */
+
+ _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit);
+
+ } else {
+
+ /* no explicit inverse fade in, so just use (1 - fade
+ * in) for the fade out of lower layers
+ */
+
+ _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit);
+
+ for (framecnt_t n = 0; n < fade_in_limit; ++n) {
+ buf[n] *= 1 - gain_buffer[n];
+ }
+ }
+ } else {
+ _fade_in->curve().get_vector (internal_offset, internal_offset + fade_in_limit, gain_buffer, fade_in_limit);
+ }
- for (nframes_t n = 0; n < to_read; ++n) {
- mixdown_buffer[n] *= _scale_amplitude;
+ /* Mix our newly-read data in, with the fade */
+ for (framecnt_t n = 0; n < fade_in_limit; ++n) {
+ buf[n] += mixdown_buffer[n] * gain_buffer[n];
}
}
- if (!opaque()) {
+ if (fade_out_limit != 0) {
- /* gack. the things we do for users.
- */
+ framecnt_t const curve_offset = fade_interval_start - (_length - _fade_out->back()->when);
+
+ if (opaque()) {
+ if (_inverse_fade_out) {
+
+ _inverse_fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit);
+
+ /* Fade the data from lower levels in */
+ for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) {
+ buf[m] *= gain_buffer[n];
+ }
+
+ /* fetch the actual fade out */
+
+ _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit);
+
+ } else {
- buf += buf_offset;
+ /* no explicit inverse fade out (which is
+ * actually a fade in), so just use (1 - fade
+ * out) for the fade in of lower layers
+ */
+
+ _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit);
+
+ for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) {
+ buf[m] *= 1 - gain_buffer[n];
+ }
+ }
+ } else {
+ _fade_out->curve().get_vector (curve_offset, curve_offset + fade_out_limit, gain_buffer, fade_out_limit);
+ }
- for (nframes_t n = 0; n < to_read; ++n) {
- buf[n] += mixdown_buffer[n];
+ /* Mix our newly-read data with whatever was already there,
+ with the fade out applied to our data.
+ */
+ for (framecnt_t n = 0, m = fade_out_offset; n < fade_out_limit; ++n, ++m) {
+ buf[m] += mixdown_buffer[m] * gain_buffer[n];
+ }
+ }
+
+ /* MIX OR COPY THE REGION BODY FROM mixdown_buffer INTO buf */
+
+ framecnt_t const N = to_read - fade_in_limit - fade_out_limit;
+ if (N > 0) {
+ if (opaque ()) {
+ DEBUG_TRACE (DEBUG::AudioPlayback, string_compose ("Region %1 memcpy into buf @ %2 + %3, from mixdown buffer @ %4 + %5, len = %6 cnt was %7\n",
+ name(), buf, fade_in_limit, mixdown_buffer, fade_in_limit, N, cnt));
+ memcpy (buf + fade_in_limit, mixdown_buffer + fade_in_limit, N * sizeof (Sample));
+ } else {
+ mix_buffers_no_gain (buf + fade_in_limit, mixdown_buffer + fade_in_limit, N);
}
}
return to_read;
}
-XMLNode&
-AudioRegion::state (bool full)
+/** Read data directly from one of our sources, accounting for the situation when the track has a different channel
+ * count to the region.
+ *
+ * @param srcs Source list to get our source from.
+ * @param limit Furthest that we should read, as an offset from the region position.
+ * @param buf Buffer to write data into (existing contents of the buffer will be overwritten)
+ * @param position Position to read from, in session frames.
+ * @param cnt Number of frames to read.
+ * @param chan_n Channel to read from.
+ * @return Number of frames read.
+ */
+
+framecnt_t
+AudioRegion::read_from_sources (SourceList const & srcs, framecnt_t limit, Sample* buf, framepos_t position, framecnt_t cnt, uint32_t chan_n) const
 {
-	XMLNode& node (Region::state (full));
-	XMLNode *child;
-	char buf[64];
-	char buf2[64];
-	LocaleGuard lg (X_("POSIX"));
+	frameoffset_t const internal_offset = position - _position;
+	if (internal_offset >= limit) {
+		return 0;
+	}
-	node.add_property ("flags", enum_2_string (_flags));
+	framecnt_t const to_read = min (cnt, limit - internal_offset);
+	if (to_read == 0) {
+		return 0;
+	}
+
+	if (chan_n < n_channels()) {
-	snprintf (buf, sizeof(buf), "%.12g", _scale_amplitude);
-	node.add_property ("scale-gain", buf);
+		boost::shared_ptr<AudioSource> src = boost::dynamic_pointer_cast<AudioSource> (srcs[chan_n]);
+		if (src->read (buf, _start + internal_offset, to_read) != to_read) {
+			return 0; /* "read nothing" */
+		}
-	// XXX these should move into Region
+	} else {
-	for (uint32_t n=0; n < _sources.size(); ++n) {
-		snprintf (buf2, sizeof(buf2), "source-%d", n);
-		_sources[n]->id().print (buf, sizeof (buf));
-		node.add_property (buf2, buf);
-	}
+		/* track is N-channel, this region has fewer channels; silence the ones
+		   we don't have.
+		*/
-	for (uint32_t n=0; n < _master_sources.size(); ++n) {
-		snprintf (buf2, sizeof(buf2), "master-source-%d", n);
-		_master_sources[n]->id().print (buf, sizeof (buf));
-		node.add_property (buf2, buf);
-	}
+		if (Config->get_replicate_missing_region_channels()) {
-	snprintf (buf, sizeof (buf), "%u", (uint32_t) _sources.size());
-	node.add_property ("channels", buf);
+			/* copy an existing channel's data in for this non-existent one; wrap chan_n into our channel count */
-	if (full) {
+			uint32_t channel = chan_n % n_channels();
+			boost::shared_ptr<AudioSource> src = boost::dynamic_pointer_cast<AudioSource> (srcs[channel]);
-		child = node.add_child (X_("FadeIn"));
+			if (src->read (buf, _start + internal_offset, to_read) != to_read) {
+				return 0; /* "read nothing" */
+			}
-		if ((_flags & DefaultFadeIn)) {
-			child->add_property (X_("default"), X_("yes"));
		} else {
-			child->add_child_nocopy (_fade_in->get_state ());
+
+			/* use silence */
+			memset (buf, 0, sizeof (Sample) * to_read);
		}
+	}
-		child->add_property (X_("active"), fade_in_active () ? X_("yes") : X_("no"));
+	return to_read;
+}
- child = node.add_child (X_("FadeOut"));
+XMLNode&
+AudioRegion::get_basic_state ()
+{
+ XMLNode& node (Region::state ());
+ char buf[64];
+ LocaleGuard lg (X_("POSIX"));
- if ((_flags & DefaultFadeOut)) {
- child->add_property (X_("default"), X_("yes"));
- } else {
- child->add_child_nocopy (_fade_out->get_state ());
- }
+ snprintf (buf, sizeof (buf), "%u", (uint32_t) _sources.size());
+ node.add_property ("channels", buf);
- child->add_property (X_("active"), fade_out_active () ? X_("yes") : X_("no"));
- }
+ return node;
+}
+
+XMLNode&
+AudioRegion::state ()
+{
+ XMLNode& node (get_basic_state());
+ XMLNode *child;
+ LocaleGuard lg (X_("POSIX"));
child = node.add_child ("Envelope");
- if (full) {
- bool default_env = false;
+ bool default_env = false;
- // If there are only two points, the points are in the start of the region and the end of the region
- // so, if they are both at 1.0f, that means the default region.
+ // If there are only two points, the points are in the start of the region and the end of the region
+ // so, if they are both at 1.0f, that means the default region.
- if (_envelope->size() == 2 &&
- _envelope->front()->value == 1.0f &&
- _envelope->back()->value==1.0f) {
- if (_envelope->front()->when == 0 && _envelope->back()->when == _length) {
- default_env = true;
- }
+ if (_envelope->size() == 2 &&
+ _envelope->front()->value == 1.0f &&
+ _envelope->back()->value==1.0f) {
+ if (_envelope->front()->when == 0 && _envelope->back()->when == _length) {
+ default_env = true;
}
+ }
- if (default_env) {
- child->add_property ("default", "yes");
- } else {
- child->add_child_nocopy (_envelope->get_state ());
- }
+ if (default_env) {
+ child->add_property ("default", "yes");
+ } else {
+ child->add_child_nocopy (_envelope->get_state ());
+ }
+ child = node.add_child (X_("FadeIn"));
+
+ if (_default_fade_in) {
+ child->add_property ("default", "yes");
} else {
+ child->add_child_nocopy (_fade_in->get_state ());
+ }
+
+ if (_inverse_fade_in) {
+ child = node.add_child (X_("InverseFadeIn"));
+ child->add_child_nocopy (_inverse_fade_in->get_state ());
+ }
+
+ child = node.add_child (X_("FadeOut"));
+
+ if (_default_fade_out) {
child->add_property ("default", "yes");
+ } else {
+ child->add_child_nocopy (_fade_out->get_state ());
}
- if (full && _extra_xml) {
- node.add_child_copy (*_extra_xml);
+ if (_inverse_fade_out) {
+ child = node.add_child (X_("InverseFadeOut"));
+ child->add_child_nocopy (_inverse_fade_out->get_state ());
}
return node;
}
int
-AudioRegion::set_live_state (const XMLNode& node, int version, Change& what_changed, bool send)
+AudioRegion::_set_state (const XMLNode& node, int version, PropertyChange& what_changed, bool send)
{
const XMLNodeList& nlist = node.children();
const XMLProperty *prop;
LocaleGuard lg (X_("POSIX"));
- boost::shared_ptr<Playlist> the_playlist (_playlist.lock());
+ boost::shared_ptr<Playlist> the_playlist (_playlist.lock());
+
+ suspend_property_changes ();
- freeze ();
if (the_playlist) {
the_playlist->freeze ();
}
- Region::set_live_state (node, version, what_changed, false);
- cerr << "After region SLS, wc = " << what_changed << endl;
-
- uint32_t old_flags = _flags;
- if ((prop = node.property ("flags")) != 0) {
- _flags = Flag (string_2_enum (prop->value(), _flags));
-
- //_flags = Flag (strtol (prop->value().c_str(), (char **) 0, 16));
-
- _flags = Flag (_flags & ~Region::LeftOfSplit);
- _flags = Flag (_flags & ~Region::RightOfSplit);
- }
-
- /* leave this flag setting in place, no matter what */
-
- if ((old_flags & DoNotSendPropertyChanges)) {
- _flags = Flag (_flags | DoNotSendPropertyChanges);
- }
-
- /* find out if any flags changed that we signal about */
+ /* this will set all our State members and stuff controlled by the Region.
+ It should NOT send any changed signals - that is our responsibility.
+ */
- if ((old_flags ^ _flags) & Muted) {
- what_changed = Change (what_changed|MuteChanged);
- cerr << _name << " mute changed\n";
- }
- if ((old_flags ^ _flags) & Opaque) {
- what_changed = Change (what_changed|OpacityChanged);
- cerr << _name << " opacity changed\n";
- }
- if ((old_flags ^ _flags) & Locked) {
- what_changed = Change (what_changed|LockChanged);
- cerr << _name << " lock changed\n";
- }
+ Region::_set_state (node, version, what_changed, false);
if ((prop = node.property ("scale-gain")) != 0) {
float a = atof (prop->value().c_str());
if (a != _scale_amplitude) {
_scale_amplitude = a;
- what_changed = Change (what_changed|ScaleAmplitudeChanged);
- cerr << _name << " amp changed\n";
+ what_changed.add (Properties::scale_amplitude);
}
}
- /* Now find envelope description and other misc child items */
+ /* Now find envelope description and other related child items */
_envelope->freeze ();
for (XMLNodeConstIterator niter = nlist.begin(); niter != nlist.end(); ++niter) {
-#if 0
XMLNode *child;
XMLProperty *prop;
set_default_envelope ();
}
- _envelope->set_max_xval (_length);
_envelope->truncate_end (_length);
- cerr << _name << " envelope changd\n";
-
} else if (child->name() == "FadeIn") {
_fade_in->clear ();
- if ((prop = child->property ("default")) != 0 || (prop = child->property ("steepness")) != 0) {
+ if (((prop = child->property ("default")) != 0 && string_is_affirmative (prop->value())) || (prop = child->property ("steepness")) != 0) {
set_default_fade_in ();
} else {
XMLNode* grandchild = child->child ("AutomationList");
set_fade_in_active (false);
}
}
- cerr << _name << " fadein changd\n";
} else if (child->name() == "FadeOut") {
_fade_out->clear ();
- if ((prop = child->property ("default")) != 0 || (prop = child->property ("steepness")) != 0) {
+ if (((prop = child->property ("default")) != 0 && (string_is_affirmative (prop->value()))) || (prop = child->property ("steepness")) != 0) {
set_default_fade_out ();
} else {
XMLNode* grandchild = child->child ("AutomationList");
_fade_out->set_state (*grandchild, version);
}
}
-
+
if ((prop = child->property ("active")) != 0) {
if (string_is_affirmative (prop->value())) {
set_fade_out_active (true);
set_fade_out_active (false);
}
}
- cerr << _name << " fadeout changd\n";
-
+
+ } else if (child->name() == "InverseFadeIn") {
+ XMLNode* grandchild = child->child ("AutomationList");
+ if (grandchild) {
+ _inverse_fade_in->set_state (*grandchild, version);
+ }
+ } else if (child->name() == "InverseFadeOut") {
+ XMLNode* grandchild = child->child ("AutomationList");
+ if (grandchild) {
+ _inverse_fade_out->set_state (*grandchild, version);
+ }
}
-#endif
}
_envelope->thaw ();
- thaw ();
+ resume_property_changes ();
if (send) {
- cerr << _name << ": audio final change: " << hex << what_changed << dec << endl;
send_change (what_changed);
}
int
AudioRegion::set_state (const XMLNode& node, int version)
{
- /* Region::set_state() calls the virtual set_live_state(),
- which will get us back to AudioRegion::set_live_state()
- to handle the relevant stuff.
- */
-
- return Region::set_state (node, version);
+ PropertyChange what_changed;
+ return _set_state (node, version, what_changed, true);
}
void
AudioRegion::set_fade_in_shape (FadeShape shape)
{
- set_fade_in (shape, (nframes_t) _fade_in->back()->when);
+ set_fade_in (shape, (framecnt_t) _fade_in->back()->when);
}
void
AudioRegion::set_fade_out_shape (FadeShape shape)
{
- set_fade_out (shape, (nframes_t) _fade_out->back()->when);
+ set_fade_out (shape, (framecnt_t) _fade_out->back()->when);
}
void
AudioRegion::set_fade_in (boost::shared_ptr<AutomationList> f)
{
_fade_in->freeze ();
- *_fade_in = *f;
+ *(_fade_in.val()) = *f;
_fade_in->thaw ();
-
- send_change (FadeInChanged);
+ _default_fade_in = false;
+
+ send_change (PropertyChange (Properties::fade_in));
}
void
-AudioRegion::set_fade_in (FadeShape shape, nframes_t len)
+AudioRegion::set_fade_in (FadeShape shape, framecnt_t len)
{
+ boost::shared_ptr<Evoral::ControlList> c1 (new Evoral::ControlList (FadeInAutomation));
+ boost::shared_ptr<Evoral::ControlList> c2 (new Evoral::ControlList (FadeInAutomation));
+ boost::shared_ptr<Evoral::ControlList> c3 (new Evoral::ControlList (FadeInAutomation));
+
_fade_in->freeze ();
_fade_in->clear ();
+ _inverse_fade_in->clear ();
switch (shape) {
- case Linear:
+ case FadeLinear:
_fade_in->fast_simple_add (0.0, 0.0);
_fade_in->fast_simple_add (len, 1.0);
+ reverse_curve (_inverse_fade_in.val(), _fade_in.val());
break;
- case Fast:
- _fade_in->fast_simple_add (0, 0);
- _fade_in->fast_simple_add (len * 0.389401, 0.0333333);
- _fade_in->fast_simple_add (len * 0.629032, 0.0861111);
- _fade_in->fast_simple_add (len * 0.829493, 0.233333);
- _fade_in->fast_simple_add (len * 0.9447, 0.483333);
- _fade_in->fast_simple_add (len * 0.976959, 0.697222);
- _fade_in->fast_simple_add (len, 1);
+ case FadeFast:
+ generate_db_fade (_fade_in.val(), len, 10, -60);
+ reverse_curve (c1, _fade_in.val());
+ _fade_in->copy_events (*c1);
+ generate_inverse_power_curve (_inverse_fade_in.val(), _fade_in.val());
break;
- case Slow:
- _fade_in->fast_simple_add (0, 0);
- _fade_in->fast_simple_add (len * 0.0207373, 0.197222);
- _fade_in->fast_simple_add (len * 0.0645161, 0.525);
- _fade_in->fast_simple_add (len * 0.152074, 0.802778);
- _fade_in->fast_simple_add (len * 0.276498, 0.919444);
- _fade_in->fast_simple_add (len * 0.481567, 0.980556);
- _fade_in->fast_simple_add (len * 0.767281, 1);
- _fade_in->fast_simple_add (len, 1);
+ case FadeSlow:
+ generate_db_fade (c1, len, 10, -1); // start off with a slow fade
+ generate_db_fade (c2, len, 10, -80); // end with a fast fade
+ merge_curves (_fade_in.val(), c1, c2);
+ reverse_curve (c3, _fade_in.val());
+ _fade_in->copy_events (*c3);
+ generate_inverse_power_curve (_inverse_fade_in.val(), _fade_in.val());
break;
- case LogA:
- _fade_in->fast_simple_add (0, 0);
- _fade_in->fast_simple_add (len * 0.0737327, 0.308333);
- _fade_in->fast_simple_add (len * 0.246544, 0.658333);
- _fade_in->fast_simple_add (len * 0.470046, 0.886111);
- _fade_in->fast_simple_add (len * 0.652074, 0.972222);
- _fade_in->fast_simple_add (len * 0.771889, 0.988889);
- _fade_in->fast_simple_add (len, 1);
+ case FadeConstantPower:
+ for (int i = 0; i < 9; ++i) {
+ float dist = (float) i / 10.0f;
+ _fade_in->fast_simple_add (len*dist, sin (dist*M_PI/2));
+ }
+ _fade_in->fast_simple_add (len, 1.0);
+ reverse_curve (_inverse_fade_in.val(), _fade_in.val());
break;
-
- case LogB:
- _fade_in->fast_simple_add (0, 0);
- _fade_in->fast_simple_add (len * 0.304147, 0.0694444);
- _fade_in->fast_simple_add (len * 0.529954, 0.152778);
- _fade_in->fast_simple_add (len * 0.725806, 0.333333);
- _fade_in->fast_simple_add (len * 0.847926, 0.558333);
- _fade_in->fast_simple_add (len * 0.919355, 0.730556);
- _fade_in->fast_simple_add (len, 1);
+
+ case FadeSymmetric:
+	//start with a nearly linear curve
+ _fade_in->fast_simple_add (0, 1);
+ _fade_in->fast_simple_add (0.5*len, 0.6);
+ //now generate a fade-out curve by successively applying a gain drop
+ const float breakpoint = 0.7; //linear for first 70%
+ const int num_steps = 9;
+ for (int i = 2; i < num_steps; i++) {
+ float coeff = (1.0-breakpoint);
+ for (int j = 0; j < i; j++) {
+ coeff *= 0.5; //6dB drop per step
+ }
+ _fade_in->fast_simple_add (len* (breakpoint+((1.0-breakpoint)*(double)i/(double)num_steps)), coeff);
+ }
+ _fade_in->fast_simple_add (len, VERY_SMALL_SIGNAL);
+ reverse_curve (c3, _fade_in.val());
+ _fade_in->copy_events (*c3);
+ reverse_curve (_inverse_fade_in.val(), _fade_in.val());
break;
}
+ _default_fade_in = false;
_fade_in->thaw ();
+ send_change (PropertyChange (Properties::fade_in));
}
void
AudioRegion::set_fade_out (boost::shared_ptr<AutomationList> f)
{
	_fade_out->freeze ();
-	*_fade_out = *f;
+	*(_fade_out.val()) = *f;
	_fade_out->thaw ();
+	_default_fade_out = false;
-	send_change (FadeInChanged);
+	/* signal the fade-OUT property (the old code wrongly signalled fade-in) */
+	send_change (PropertyChange (Properties::fade_out));
}
void
-AudioRegion::set_fade_out (FadeShape shape, nframes_t len)
+AudioRegion::set_fade_out (FadeShape shape, framecnt_t len)
{
+ boost::shared_ptr<Evoral::ControlList> c1 (new Evoral::ControlList (FadeOutAutomation));
+ boost::shared_ptr<Evoral::ControlList> c2 (new Evoral::ControlList (FadeOutAutomation));
+
_fade_out->freeze ();
_fade_out->clear ();
+ _inverse_fade_out->clear ();
switch (shape) {
- case Fast:
- _fade_out->fast_simple_add (len * 0, 1);
- _fade_out->fast_simple_add (len * 0.023041, 0.697222);
- _fade_out->fast_simple_add (len * 0.0553, 0.483333);
- _fade_out->fast_simple_add (len * 0.170507, 0.233333);
- _fade_out->fast_simple_add (len * 0.370968, 0.0861111);
- _fade_out->fast_simple_add (len * 0.610599, 0.0333333);
- _fade_out->fast_simple_add (len * 1, 0);
+ case FadeLinear:
+ _fade_out->fast_simple_add (0.0, 1.0);
+ _fade_out->fast_simple_add (len, VERY_SMALL_SIGNAL);
+ reverse_curve (_inverse_fade_out.val(), _fade_out.val());
break;
-
- case LogA:
- _fade_out->fast_simple_add (len * 0, 1);
- _fade_out->fast_simple_add (len * 0.228111, 0.988889);
- _fade_out->fast_simple_add (len * 0.347926, 0.972222);
- _fade_out->fast_simple_add (len * 0.529954, 0.886111);
- _fade_out->fast_simple_add (len * 0.753456, 0.658333);
- _fade_out->fast_simple_add (len * 0.9262673, 0.308333);
- _fade_out->fast_simple_add (len * 1, 0);
+
+ case FadeFast:
+ generate_db_fade (_fade_out.val(), len, 10, -60);
+ generate_inverse_power_curve (_inverse_fade_out.val(), _fade_out.val());
break;
-
- case Slow:
- _fade_out->fast_simple_add (len * 0, 1);
- _fade_out->fast_simple_add (len * 0.305556, 1);
- _fade_out->fast_simple_add (len * 0.548611, 0.991736);
- _fade_out->fast_simple_add (len * 0.759259, 0.931129);
- _fade_out->fast_simple_add (len * 0.918981, 0.68595);
- _fade_out->fast_simple_add (len * 0.976852, 0.22865);
- _fade_out->fast_simple_add (len * 1, 0);
+
+ case FadeSlow:
+ generate_db_fade (c1, len, 10, -1); //start off with a slow fade
+ generate_db_fade (c2, len, 10, -80); //end with a fast fade
+ merge_curves (_fade_out.val(), c1, c2);
+ generate_inverse_power_curve (_inverse_fade_out.val(), _fade_out.val());
break;
- case LogB:
- _fade_out->fast_simple_add (len * 0, 1);
- _fade_out->fast_simple_add (len * 0.080645, 0.730556);
- _fade_out->fast_simple_add (len * 0.277778, 0.289256);
- _fade_out->fast_simple_add (len * 0.470046, 0.152778);
- _fade_out->fast_simple_add (len * 0.695853, 0.0694444);
- _fade_out->fast_simple_add (len * 1, 0);
+ case FadeConstantPower:
+ //constant-power fades use a sin/cos relationship
+ //the cutoff is abrupt but it has the benefit of being symmetrical
+ _fade_out->fast_simple_add (0.0, 1.0);
+ for (int i = 1; i < 9; i++ ) {
+ float dist = (float)i/10.0;
+ _fade_out->fast_simple_add ((len * dist), cos(dist*M_PI/2));
+ }
+ _fade_out->fast_simple_add (len, VERY_SMALL_SIGNAL);
+ reverse_curve (_inverse_fade_out.val(), _fade_out.val());
break;
-
- case Linear:
- _fade_out->fast_simple_add (len * 0, 1);
- _fade_out->fast_simple_add (len * 1, 0);
+
+ case FadeSymmetric:
+	//start with a nearly linear curve
+ _fade_out->fast_simple_add (0, 1);
+ _fade_out->fast_simple_add (0.5*len, 0.6);
+
+ //now generate a fade-out curve by successively applying a gain drop
+ const float breakpoint = 0.7; //linear for first 70%
+ const int num_steps = 9;
+ for (int i = 2; i < num_steps; i++) {
+ float coeff = (1.0-breakpoint);
+ for (int j = 0; j < i; j++) {
+ coeff *= 0.5; //6dB drop per step
+ }
+ _fade_out->fast_simple_add (len* (breakpoint+((1.0-breakpoint)*(double)i/(double)num_steps)), coeff);
+ }
+ _fade_out->fast_simple_add (len, VERY_SMALL_SIGNAL);
+ reverse_curve (_inverse_fade_out.val(), _fade_out.val());
break;
}
+ _default_fade_out = false;
_fade_out->thaw ();
+ send_change (PropertyChange (Properties::fade_out));
}
void
-AudioRegion::set_fade_in_length (nframes_t len)
+AudioRegion::set_fade_in_length (framecnt_t len)
{
if (len > _length) {
len = _length - 1;
}
+
+ if (len < 64) {
+ len = 64;
+ }
bool changed = _fade_in->extend_to (len);
if (changed) {
- _flags = Flag (_flags & ~DefaultFadeIn);
- send_change (FadeInChanged);
+ if (_inverse_fade_in) {
+ _inverse_fade_in->extend_to (len);
+ }
+
+ _default_fade_in = false;
+ send_change (PropertyChange (Properties::fade_in));
}
}
void
-AudioRegion::set_fade_out_length (nframes_t len)
+AudioRegion::set_fade_out_length (framecnt_t len)
{
if (len > _length) {
len = _length - 1;
}
+ if (len < 64) {
+ len = 64;
+ }
+
bool changed = _fade_out->extend_to (len);
if (changed) {
- _flags = Flag (_flags & ~DefaultFadeOut);
- send_change (FadeOutChanged);
+
+ if (_inverse_fade_out) {
+ _inverse_fade_out->extend_to (len);
+ }
+ _default_fade_out = false;
+
+ send_change (PropertyChange (Properties::fade_out));
}
}
void
AudioRegion::set_fade_in_active (bool yn)
{
- if (yn == (_flags & FadeIn)) {
+ if (yn == _fade_in_active) {
return;
}
- if (yn) {
- _flags = Flag (_flags|FadeIn);
- } else {
- _flags = Flag (_flags & ~FadeIn);
- }
- send_change (FadeInActiveChanged);
+ _fade_in_active = yn;
+ send_change (PropertyChange (Properties::fade_in_active));
}
void
AudioRegion::set_fade_out_active (bool yn)
{
- if (yn == (_flags & FadeOut)) {
+ if (yn == _fade_out_active) {
return;
}
- if (yn) {
- _flags = Flag (_flags | FadeOut);
- } else {
- _flags = Flag (_flags & ~FadeOut);
- }
-
- send_change (FadeOutActiveChanged);
+ _fade_out_active = yn;
+ send_change (PropertyChange (Properties::fade_out_active));
}
bool
void
AudioRegion::set_default_fade_in ()
{
- _fade_in_disabled = 0;
- set_fade_in (Linear, 64);
+ _fade_in_suspended = 0;
+ set_fade_in (FadeLinear, 64);
}
void
AudioRegion::set_default_fade_out ()
{
- _fade_out_disabled = 0;
- set_fade_out (Linear, 64);
+ _fade_out_suspended = 0;
+ set_fade_out (FadeLinear, 64);
}
void
_envelope->freeze ();
_envelope->truncate_end (_length);
- _envelope->set_max_xval (_length);
_envelope->thaw ();
+ suspend_property_changes();
+
+ if (_left_of_split) {
+ set_default_fade_out ();
+ _left_of_split = false;
+ } else if (_fade_out->back()->when > _length) {
+ _fade_out->extend_to (_length);
+ send_change (PropertyChange (Properties::fade_out));
+ }
+
if (_fade_in->back()->when > _length) {
_fade_in->extend_to (_length);
- send_change (FadeInChanged);
+ send_change (PropertyChange (Properties::fade_in));
}
- if (_fade_out->back()->when > _length) {
- _fade_out->extend_to (_length);
- send_change (FadeOutChanged);
- }
+ resume_property_changes();
}
void
_envelope->truncate_start (_length);
- if (_fade_in->back()->when > _length) {
+ suspend_property_changes();
+
+ if (_right_of_split) {
+ set_default_fade_in ();
+ _right_of_split = false;
+ } else if (_fade_in->back()->when > _length) {
_fade_in->extend_to (_length);
- send_change (FadeInChanged);
+ send_change (PropertyChange (Properties::fade_in));
}
if (_fade_out->back()->when > _length) {
_fade_out->extend_to (_length);
- send_change (FadeOutChanged);
+ send_change (PropertyChange (Properties::fade_out));
}
+
+ resume_property_changes();
}
int
"whole file" even if it covers the entire source file(s).
*/
- Flag f = Flag (_flags & ~WholeFile);
+ PropertyList plist;
+
+ plist.add (Properties::start, _start.val());
+ plist.add (Properties::length, _length.val());
+ plist.add (Properties::name, new_name);
+ plist.add (Properties::layer, layer ());
- v.push_back(RegionFactory::create (srcs, _start, _length, new_name, _layer, f));
+ v.push_back(RegionFactory::create (srcs, plist));
+ v.back()->set_whole_file (false);
++n;
}
return 0;
}
-nframes_t
-AudioRegion::read_raw_internal (Sample* buf, sframes_t pos, nframes_t cnt, int channel) const
+framecnt_t
+AudioRegion::read_raw_internal (Sample* buf, framepos_t pos, framecnt_t cnt, int channel) const
{
- return audio_source()->read (buf, pos, cnt, channel);
-}
-
-int
-AudioRegion::exportme (Session& /*session*/, ARDOUR::ExportSpecification& /*spec*/)
-{
- // TODO EXPORT
-// const nframes_t blocksize = 4096;
-// nframes_t to_read;
-// int status = -1;
-//
-// spec.channels = _sources.size();
-//
-// if (spec.prepare (blocksize, session.frame_rate())) {
-// goto out;
-// }
-//
-// spec.pos = 0;
-// spec.total_frames = _length;
-//
-// while (spec.pos < _length && !spec.stop) {
-//
-//
-// /* step 1: interleave */
-//
-// to_read = min (_length - spec.pos, blocksize);
-//
-// if (spec.channels == 1) {
-//
-// if (read_raw_internal (spec.dataF, _start + spec.pos, to_read) != to_read) {
-// goto out;
-// }
-//
-// } else {
-//
-// Sample buf[blocksize];
-//
-// for (uint32_t chan = 0; chan < spec.channels; ++chan) {
-//
-// if (audio_source(chan)->read (buf, _start + spec.pos, to_read) != to_read) {
-// goto out;
-// }
-//
-// for (nframes_t x = 0; x < to_read; ++x) {
-// spec.dataF[chan+(x*spec.channels)] = buf[x];
-// }
-// }
-// }
-//
-// if (spec.process (to_read)) {
-// goto out;
-// }
-//
-// spec.pos += to_read;
-// spec.progress = (double) spec.pos /_length;
-//
-// }
-//
-// status = 0;
-//
-// out:
-// spec.running = false;
-// spec.status = status;
-// spec.clear();
-//
-// return status;
- return 0;
+ return audio_source(channel)->read (buf, pos, cnt);
}
void
/* tell everybody else */
- send_change (ScaleAmplitudeChanged);
+ send_change (PropertyChange (Properties::scale_amplitude));
}
-void
-AudioRegion::normalize_to (float target_dB)
+/** @return the maximum (linear) amplitude of the region, or a -ve
+ * number if the Progress object reports that the process was cancelled.
+ */
+double
+AudioRegion::maximum_amplitude (Progress* p) const
{
- const nframes_t blocksize = 64 * 1024;
- Sample buf[blocksize];
- nframes_t fpos;
- nframes_t fend;
- nframes_t to_read;
+ framepos_t fpos = _start;
+ framepos_t const fend = _start + _length;
double maxamp = 0;
- gain_t target = dB_to_coefficient (target_dB);
- if (target == 1.0f) {
- /* do not normalize to precisely 1.0 (0 dBFS), to avoid making it appear
- that we may have clipped.
- */
- target -= FLT_EPSILON;
- }
-
- fpos = _start;
- fend = _start + _length;
-
- /* first pass: find max amplitude */
+ framecnt_t const blocksize = 64 * 1024;
+ Sample buf[blocksize];
while (fpos < fend) {
uint32_t n;
- to_read = min (fend - fpos, blocksize);
+ framecnt_t const to_read = min (fend - fpos, blocksize);
for (n = 0; n < n_channels(); ++n) {
/* read it in */
- if (read_raw_internal (buf, fpos, to_read, 0) != to_read) {
- return;
+ if (read_raw_internal (buf, fpos, to_read, n) != to_read) {
+ return 0;
}
maxamp = compute_peak (buf, to_read, maxamp);
}
fpos += to_read;
- };
+ if (p) {
+ p->set_progress (float (fpos - _start) / _length);
+ if (p->cancelled ()) {
+ return -1;
+ }
+ }
+ }
+
+ return maxamp;
+}
- if (maxamp == 0.0f) {
+/** Normalize using a given maximum amplitude and target, so that region
+ * _scale_amplitude becomes target / max_amplitude.
+ */
+void
+AudioRegion::normalize (float max_amplitude, float target_dB)
+{
+ gain_t target = dB_to_coefficient (target_dB);
+
+ if (target == 1.0f) {
+ /* do not normalize to precisely 1.0 (0 dBFS), to avoid making it appear
+ that we may have clipped.
+ */
+ target -= FLT_EPSILON;
+ }
+
+ if (max_amplitude == 0.0f) {
/* don't even try */
return;
}
- if (maxamp == target) {
+ if (max_amplitude == target) {
/* we can't do anything useful */
return;
}
- /* compute scale factor */
-
- _scale_amplitude = target/maxamp;
-
- /* tell the diskstream we're in */
-
- boost::shared_ptr<Playlist> pl (playlist());
-
- if (pl) {
- pl->ContentsChanged();
- }
-
- /* tell everybody else */
-
- send_change (ScaleAmplitudeChanged);
+ set_scale_amplitude (target / max_amplitude);
}
void
AudioRegion::fade_in_changed ()
{
- send_change (FadeInChanged);
+ send_change (PropertyChange (Properties::fade_in));
}
void
AudioRegion::fade_out_changed ()
{
- send_change (FadeOutChanged);
+ send_change (PropertyChange (Properties::fade_out));
}
void
AudioRegion::envelope_changed ()
{
- send_change (EnvelopeChanged);
+ send_change (PropertyChange (Properties::envelope));
}
void
AudioRegion::suspend_fade_in ()
{
- if (++_fade_in_disabled == 1) {
+ if (++_fade_in_suspended == 1) {
if (fade_in_is_default()) {
set_fade_in_active (false);
}
void
AudioRegion::resume_fade_in ()
{
-	if (--_fade_in_disabled == 0 && _fade_in_disabled) {
+	/* reactivate only when the last suspension is released; guard against underflow */
+	if (_fade_in_suspended > 0 && --_fade_in_suspended == 0) {
		set_fade_in_active (true);
	}
}
void
AudioRegion::suspend_fade_out ()
{
- if (++_fade_out_disabled == 1) {
+ if (++_fade_out_suspended == 1) {
if (fade_out_is_default()) {
set_fade_out_active (false);
}
void
AudioRegion::resume_fade_out ()
{
-	if (--_fade_out_disabled == 0 &&_fade_out_disabled) {
+	/* reactivate only when the last suspension is released; guard against underflow */
+	if (_fade_out_suspended > 0 && --_fade_out_suspended == 0) {
		set_fade_out_active (true);
	}
}
if (afs && afs->destructive()) {
// set_start (source()->natural_position(), this);
- set_position (source()->natural_position(), this);
+ set_position (source()->natural_position());
}
}
return boost::dynamic_pointer_cast<AudioSource>(source(n));
}
+int
+AudioRegion::adjust_transients (frameoffset_t delta)
+{
+ for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) {
+ (*x) = (*x) + delta;
+ }
+
+ send_change (PropertyChange (Properties::valid_transients));
+
+ return 0;
+}
+
+int
+AudioRegion::update_transient (framepos_t old_position, framepos_t new_position)
+{
+ for (AnalysisFeatureList::iterator x = _transients.begin(); x != _transients.end(); ++x) {
+ if ((*x) == old_position) {
+ (*x) = new_position;
+ send_change (PropertyChange (Properties::valid_transients));
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+void
+AudioRegion::add_transient (framepos_t where)
+{
+ _transients.push_back(where);
+ _valid_transients = true;
+
+ send_change (PropertyChange (Properties::valid_transients));
+}
+
+void
+AudioRegion::remove_transient (framepos_t where)
+{
+ _transients.remove(where);
+ _valid_transients = true;
+
+ send_change (PropertyChange (Properties::valid_transients));
+}
+
+int
+AudioRegion::set_transients (AnalysisFeatureList& results)
+{
+ _transients.clear();
+ _transients = results;
+ _valid_transients = true;
+
+ send_change (PropertyChange (Properties::valid_transients));
+
+ return 0;
+}
+
int
AudioRegion::get_transients (AnalysisFeatureList& results, bool force_new)
{
/* no existing/complete transient info */
+ static bool analyse_dialog_shown = false; /* global per instance of Ardour */
+
if (!Config->get_auto_analyse_audio()) {
- pl->session().Dialog (_("\
+ if (!analyse_dialog_shown) {
+ pl->session().Dialog (_("\
You have requested an operation that requires audio analysis.\n\n\
-You currently have \"auto-analyse-audio\" disabled, which means\n\
+You currently have \"auto-analyse-audio\" disabled, which means \
that transient data must be generated every time it is required.\n\n\
-If you are doing work that will require transient data on a\n\
-regular basis, you should probably enable \"auto-analyse-audio\"\n\
-then quit ardour and restart."));
+If you are doing work that will require transient data on a \
+regular basis, you should probably enable \"auto-analyse-audio\" \
+then quit ardour and restart.\n\n\
+This dialog will not display again. But you may notice a slight delay \
+in this and future transient-detection operations.\n\
+"));
+ analyse_dialog_shown = true;
+ }
}
TransientDetector t (pl->session().frame_rate());
*
* @param threshold Threshold below which signal is considered silence (as a sample value)
* @param min_length Minimum length of silent period to be reported.
- * @return Silent periods; first of pair is the offset within the region, second is the length of the period
+ * @return Silent intervals, measured relative to the region start in the source
*/
-std::list<std::pair<nframes_t, nframes_t> >
-AudioRegion::find_silence (Sample threshold, nframes_t min_length) const
+AudioIntervalResult
+AudioRegion::find_silence (Sample threshold, framecnt_t min_length, InterThreadInfo& itt) const
{
+ /* scan in fixed-size blocks; the scratch buffers are heap-allocated
+    (scoped_array) rather than placed on the stack, since 64k Samples
+    per buffer is too large for a stack frame */
- nframes_t const block_size = 64 * 1024;
- Sample loudest[block_size];
- Sample buf[block_size];
+ framecnt_t const block_size = 64 * 1024;
+ boost::scoped_array<Sample> loudest (new Sample[block_size]);
+ boost::scoped_array<Sample> buf (new Sample[block_size]);
+ /* pos/end are offsets within the source, like _start */
- nframes_t pos = _start;
- nframes_t const end = _start + _length - 1;
+ framepos_t pos = _start;
+ framepos_t const end = _start + _length - 1;
- std::list<std::pair<nframes_t, nframes_t> > silent_periods;
+ AudioIntervalResult silent_periods;
bool in_silence = false;
- nframes_t silence_start = 0;
- bool silence;
+ frameoffset_t silence_start = 0;
+ /* the caller (e.g. a UI thread) can abort a long scan via itt.cancel */
- while (pos < end) {
+ while (pos < end && !itt.cancel) {
/* fill `loudest' with the loudest absolute sample at each instant, across all channels */
- memset (loudest, 0, sizeof (Sample) * block_size);
+ memset (loudest.get(), 0, sizeof (Sample) * block_size);
for (uint32_t n = 0; n < n_channels(); ++n) {
- read_raw_internal (buf, pos, block_size, n);
- for (nframes_t i = 0; i < block_size; ++i) {
+ read_raw_internal (buf.get(), pos, block_size, n);
+ for (framecnt_t i = 0; i < block_size; ++i) {
loudest[i] = max (loudest[i], abs (buf[i]));
}
}
/* now look for silence */
- for (nframes_t i = 0; i < block_size; ++i) {
- silence = abs (loudest[i]) < threshold;
+ for (framecnt_t i = 0; i < block_size; ++i) {
+ bool const silence = abs (loudest[i]) < threshold;
if (silence && !in_silence) {
/* non-silence to silence */
in_silence = true;
+ /* NOTE(review): this hunk looks truncated — silence_start is never
+    assigned in the visible code, the silence->non-silence transition
+    (which should close and record an interval) is missing, and the
+    braces do not balance. Verify against the complete diff before
+    relying on this excerpt. */
}
pos += block_size;
+ /* NOTE(review): progress falls from ~1 towards 0 here, the reverse of
+    the usual 0 -> 1 convention — confirm what consumers of
+    InterThreadInfo::progress expect */
+ itt.progress = (end-pos)/(double)_length;
}
+ /* a silent stretch still open at the region end was never explicitly
+    closed above, so record it here if it is long enough */
if (in_silence && end - 1 - silence_start >= min_length) {
silent_periods.push_back (std::make_pair (silence_start, end));
}
+ itt.done = true;
+
return silent_periods;
}
+/** @return the range of frames between the end of our fade-in and the
+ *  start of our fade-out — presumably the "body" of the region heard at
+ *  full (un-faded) level. Derived from the `when' of the last point of
+ *  each fade's AutomationList.
+ */
+Evoral::Range<framepos_t>
+AudioRegion::body_range () const
+{
+	return Evoral::Range<framepos_t> (first_frame() + _fade_in->back()->when + 1, last_frame() - _fade_out->back()->when);
+}
+
+/** Find the single other region stacked with us at one end of this region,
+ *  for crossfade purposes.
+ *
+ *  @param start true to look at the regions present at our position(),
+ *  false to look at those present at our last_frame().
+ *  @return the one other region found there, or a null pointer if we are
+ *  not in a playlist, or if anything other than exactly two regions
+ *  (ourselves plus one other) overlap that point.
+ */
+boost::shared_ptr<Region>
+AudioRegion::get_single_other_xfade_region (bool start) const
+{
+	boost::shared_ptr<Playlist> pl (playlist());
+
+	if (!pl) {
+		/* not currently in a playlist - xfade length is unbounded
+		   (and irrelevant)
+		*/
+		return boost::shared_ptr<AudioRegion> ();
+	}
+
+	boost::shared_ptr<RegionList> rl;
+
+	if (start) {
+		rl = pl->regions_at (position());
+	} else {
+		rl = pl->regions_at (last_frame());
+	}
+
+	RegionList::iterator i;
+	boost::shared_ptr<Region> other;
+	uint32_t n = 0;
+
+	/* count and find the other region in a single pass through the list */
+
+	for (i = rl->begin(); i != rl->end(); ++i) {
+		if ((*i).get() != this) {
+			other = *i;
+		}
+		++n;
+	}
+
+	/* `other' is only meaningful when exactly two regions were seen */
+	if (n != 2) {
+		/* zero or multiple regions stacked here - don't care about xfades */
+		return boost::shared_ptr<AudioRegion> ();
+	}
+
+	return other;
+}
+
+/** Clamp a proposed crossfade length to what is legal at one end of this
+ *  region.
+ *
+ *  @param len proposed xfade length in frames.
+ *  @param start true for an xfade at our start, false for one at our end.
+ *  @return the legal length: never more than the region's own length,
+ *  and — when exactly one other region overlaps us at that end — never
+ *  more than the maximum possible overlap with that region.
+ */
+framecnt_t
+AudioRegion::verify_xfade_bounds (framecnt_t len, bool start)
+{
+	/* this is called from a UI to check on whether a new proposed
+	   length for an xfade is legal or not. it returns the legal
+	   length corresponding to @a len which may be shorter than or
+	   equal to @a len itself.
+	*/
+
+	boost::shared_ptr<Region> other = get_single_other_xfade_region (start);
+	framecnt_t maxlen;
+
+	if (!other) {
+		/* zero or > 2 regions here, don't care about len, but
+		   it can't be longer than the region itself.
+		*/
+		return min (length(), len);
+	}
+
+	/* we overlap a single region. clamp the length of an xfade to
+	   the maximum possible duration of the overlap (if the other
+	   region were trimmed appropriately).
+	*/
+
+	if (start) {
+		maxlen = other->latest_possible_frame() - position();
+	} else {
+		maxlen = last_frame() - other->earliest_possible_position();
+	}
+
+	return min (length(), min (maxlen, len));
+
+}
extern "C" {
+/* C-linkage trampoline: `arg' is really an AudioRegion* (see the cast
+   below); forwards to AudioRegion::read_peaks. Presumably called from
+   non-C++ (waveform display) code — confirm against the caller. */
int region_read_peaks_from_c (void *arg, uint32_t npeaks, uint32_t start, uint32_t cnt, intptr_t data, uint32_t n_chan, double samples_per_unit)
{
- return ((AudioRegion *) arg)->read_peaks ((PeakData *) data, (nframes_t) npeaks, (nframes_t) start, (nframes_t) cnt, n_chan,samples_per_unit);
+ return ((AudioRegion *) arg)->read_peaks ((PeakData *) data, (framecnt_t) npeaks, (framepos_t) start, (framecnt_t) cnt, n_chan,samples_per_unit);
}
uint32_t region_length_from_c (void *arg)