move (_frames - frames, frames, 0);
set_frames (_frames - frames);
}
+
+
+/** Discard frames from the end of these buffers, leaving the data at
+ *  the start untouched.
+ *  @param frames Number of frames to remove; must not exceed the current
+ *  frame count (asserted below).
+ */
+void
+AudioBuffers::trim_end (int32_t frames)
+{
+ DCPOMATIC_ASSERT (frames <= _frames);
+ set_frames (_frames - frames);
+}
+
void accumulate_frames (AudioBuffers const * from, int32_t frames, int32_t read_offset, int32_t write_offset);
void append (std::shared_ptr<const AudioBuffers> other);
void trim_start (int32_t frames);
+ /** Remove frames from the end of the buffers (counterpart of trim_start) */
+ void trim_end (int32_t frames);
private:
void allocate (int channels, int32_t frames);
*/
-#include "audio_decoder.h"
#include "audio_buffers.h"
#include "audio_content.h"
+#include "audio_decoder.h"
+#include "compose.hpp"
#include "dcpomatic_log.h"
#include "log.h"
-#include "resampler.h"
-#include "compose.hpp"
#include <iostream>
#include "i18n.h"
/** @param time_already_delayed true if the delay should not be added to time */
void
-AudioDecoder::emit (shared_ptr<const Film> film, AudioStreamPtr stream, shared_ptr<const AudioBuffers> data, ContentTime time, bool time_already_delayed)
+AudioDecoder::emit (AudioStreamPtr stream, shared_ptr<const AudioBuffers> data, ContentTime time, bool time_already_delayed)
{
if (ignore ()) {
return;
*/
static Frame const slack_frames = 48000 / 24;
- int const resampled_rate = _content->resampled_frame_rate(film);
+ auto const frame_rate = stream->frame_rate();
if (!time_already_delayed) {
time += ContentTime::from_seconds (_content->delay() / 1000.0);
}
silence (_content->delay ());
}
reset = true;
- } else if (std::abs(_positions[stream] - time.frames_round(resampled_rate)) > slack_frames) {
+ } else if (std::abs(_positions[stream] - time.frames_round(frame_rate)) > slack_frames) {
reset = true;
LOG_GENERAL (
"Reset audio position: was %1, new data at %2, slack: %3 frames",
_positions[stream],
- time.frames_round(resampled_rate),
- std::abs(_positions[stream] - time.frames_round(resampled_rate))
+ time.frames_round(frame_rate),
+ std::abs(_positions[stream] - time.frames_round(frame_rate))
);
}
if (reset) {
- _positions[stream] = time.frames_round (resampled_rate);
+ _positions[stream] = time.frames_round (frame_rate);
}
Data(stream, data, _positions[stream]);
{
auto i = _positions.find (stream);
DCPOMATIC_ASSERT (i != _positions.end ());
- return ContentTime::from_frames (i->second, _content->resampled_frame_rate(film));
+ return ContentTime::from_frames (i->second, stream->frame_rate());
}
AudioDecoder (Decoder* parent, std::shared_ptr<const AudioContent> content);
boost::optional<dcpomatic::ContentTime> position (std::shared_ptr<const Film> film) const;
- void emit (std::shared_ptr<const Film> film, AudioStreamPtr stream, std::shared_ptr<const AudioBuffers>, dcpomatic::ContentTime, bool time_already_delayed = false);
+ void emit (AudioStreamPtr stream, std::shared_ptr<const AudioBuffers>, dcpomatic::ContentTime, bool time_already_delayed = false);
void seek ();
void flush ();
}
}
- audio->emit (film(), _dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
+ audio->emit (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
}
if (_atmos_reader) {
auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
silence->make_silent ();
- audio->emit (film(), i, silence, a, true);
+ audio->emit (i, silence, a, true);
a += to_do;
}
}
data->move (data->frames() - remove, remove, 0);
data->set_frames (data->frames() - remove);
ct += ContentTime::from_frames (remove, stream->frame_rate());
- std::cout << "FF discarded " << remove << "\n";
}
if (ct < ContentTime()) {
/* Give this data provided there is some, and its time is sane */
if (ct >= ContentTime() && data->frames() > 0) {
- audio->emit (film(), stream, data, ct);
+ audio->emit (stream, data, ct);
}
}
* start thinking about frame indices into the piece. Here's the last chance for us to apply this content's
* trim, so we'll take it.
*/
- auto const start_trim = content->trim_start().frames_round(stream_ptr->frame_rate());
- auto const remove_from_start = std::max(Frame(0), start_trim - frame);
- if (remove_from_start > 0) {
+ auto const sfr = stream_ptr->frame_rate();
+ /* Frames to drop from the front of this block to honour the content's start trim */
+ auto const remove_from_start = std::max(Frame(0), content->trim_start().frames_round(sfr) - frame);
+ /* Frames to drop from the back of this block to honour the content's end trim:
+  * how far this block extends past (full length - end trim), clamped to zero.
+  */
+ auto const remove_from_end = std::max(Frame(0), frame + audio->frames() - ContentTime(content->full_length_content() - content->trim_end()).frames_round(sfr));
+ if ((remove_from_start + remove_from_end) >= audio->frames()) {
+ /* The whole block falls inside the trimmed region; emit nothing */
+ return;
+ }
+ if (remove_from_start || remove_from_end) {
auto trimmed = make_shared<AudioBuffers>(audio);
trimmed->trim_start (remove_from_start);
- frame += remove_from_start;
+ trimmed->trim_end (remove_from_end);
+ audio = trimmed;
}
- auto const end_trim = content->trim_send().frames_round(stream_ptr->frame_rate());
-
-
auto content_streams = content->audio->streams();
auto content_stream_iter = std::find(content_streams.begin(), content_streams.end(), stream_ptr);
DCPOMATIC_ASSERT (content_stream_iter != content_streams.end());
stream.position = frame;
}
- std::cout << "piece sends frame " << stream.position << " " << to_string(resampled_audio_to_dcp(stream.position) << "\n";
Audio (PieceAudio(index, audio, resampled_audio_to_dcp(stream.position), stream_ptr->mapping()));
stream.position += audio->frames();
}
/* The end of this block in the DCP */
int const rfr = piece->resampled_audio_frame_rate ();
auto end = audio.time + DCPTime::from_frames(audio.audio->frames(), rfr);
- std::cout << "Player gets " << to_string(audio.time) << "\n";
+
+ /* XXX: is this still necessary? don't the checks in Piece take care of this now?
+ * Maybe replace with some assertions & run tests.
+ */
/* Remove anything that comes before the start or after the end of the content */
if (audio.time < piece->position()) {
for (int i = 0; i < fr->samples(); ++i) {
for (int j = 0; j < 6; ++j) {
if (j == 2) {
- BOOST_CHECK_EQUAL ((fr->get(j, i) + 128) >> 8, stair * 2);
+ BOOST_CHECK_MESSAGE (((fr->get(j, i) + 128) >> 8) == (stair * 2), "sample=" << i << " channel=" << j << " frame has " << ((fr->get(j, i) + 128) >> 8) << " instead of " << (stair * 2));
++stair;
} else {
BOOST_CHECK_EQUAL (fr->get(j, i), 0);