AudioDecoder::AudioDecoder (shared_ptr<const Film> film, shared_ptr<const AudioContent> content)
: Decoder (film)
, _audio_content (content)
- , _audio_position (0)
{
if (content->output_audio_frame_rate() != content->content_audio_frame_rate() && content->audio_channels ()) {
_resampler.reset (new Resampler (content->content_audio_frame_rate(), content->output_audio_frame_rate(), content->audio_channels ()));
/** Audio timestamping is made hard by many factors, but the final nail in the coffin is resampling.
* We have to assume that we are feeding continuous data into the resampler, and so we get continuous
* data out. Hence we do the timestamping here, post-resampler, just by counting samples.
+ *
+ * The ContentTime is passed in so that, after a seek, the first call to audio() can
+ * re-establish _audio_position from the decoder's timestamp; once it has been set,
+ * the time parameter is ignored and we just count samples.
*/
void
-AudioDecoder::audio (shared_ptr<const AudioBuffers> data)
+AudioDecoder::audio (shared_ptr<const AudioBuffers> data, ContentTime time)
{
if (_resampler) {
data = _resampler->run (data);
}
- _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (data, _audio_position)));
- _audio_position += data->frames ();
+ if (!_audio_position) {
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ FrameRateChange frc = film->active_frame_rate_change (_audio_content->position ());
+ _audio_position = (double (time) / frc.speed_up) * film->audio_frame_rate() / TIME_HZ;
+ }
+
+ _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (data, _audio_position.get ())));
+ _audio_position = _audio_position.get() + data->frames ();
}
void
shared_ptr<const AudioBuffers> b = _resampler->flush ();
if (b) {
- _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (b, _audio_position)));
- _audio_position += b->frames ();
+ _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (b, _audio_position.get ())));
+ _audio_position = _audio_position.get() + b->frames ();
}
}
void
-AudioDecoder::seek (ContentTime t, bool)
+AudioDecoder::seek (ContentTime, bool)
{
- shared_ptr<const Film> film = _film.lock ();
- assert (film);
-
- FrameRateChange frc = film->active_frame_rate_change (_audio_content->position ());
- _audio_position = ((t + first_audio()) / frc.speed_up) * film->audio_frame_rate() / TIME_HZ;
+ _audio_position.reset ();
}
protected:
- virtual ContentTime first_audio () const = 0;
- void audio (boost::shared_ptr<const AudioBuffers>);
+ void audio (boost::shared_ptr<const AudioBuffers>, ContentTime);
void flush ();
boost::shared_ptr<const AudioContent> _audio_content;
boost::shared_ptr<Resampler> _resampler;
- AudioFrame _audio_position;
+ boost::optional<AudioFrame> _audio_position;
};
#endif
}
+struct DecodedSorter
+{
+ bool operator() (shared_ptr<Decoded> a, shared_ptr<Decoded> b)
+ {
+ return a->dcp_time < b->dcp_time;
+ }
+};
+
shared_ptr<Decoded>
Decoder::peek ()
{
return shared_ptr<Decoded> ();
}
+ _pending.sort (DecodedSorter ());
return _pending.front ();
}
}
if (frame_finished) {
+ ContentTime const ct = (
+ av_frame_get_best_effort_timestamp (_frame) *
+ av_q2d (_ffmpeg_content->audio_stream()->stream (_format_context)->time_base)
+ + _pts_offset
+ ) * TIME_HZ;
+
int const data_size = av_samples_get_buffer_size (
0, audio_codec_context()->channels, _frame->nb_samples, audio_sample_format (), 1
);
-
- audio (deinterleave_audio (_frame->data, data_size));
+
+ audio (deinterleave_audio (_frame->data, data_size), ct);
}
copy_packet.data += decode_result;
avsubtitle_free (&sub);
}
-
-ContentTime
-FFmpegDecoder::first_audio () const
-{
- if (!_ffmpeg_content->audio_stream ()) {
- return 0;
- }
-
- return _ffmpeg_content->audio_stream()->first_audio.get_value_or(0) + _pts_offset;
-}
bool pass ();
void flush ();
- ContentTime first_audio () const;
void setup_subtitle ();
/* Will be set to false if we shouldn't consume the peeked DecodedThing */
bool consume = true;
- /* This is the margin either side of _{video,audio}_position that we will accept
- as a starting point for a frame consecutive to the previous.
- */
- DCPTime const margin = TIME_HZ / (2 * _film->video_frame_rate ());
-
if (dv && _video) {
if (_just_did_inaccurate_seek) {
emit_video (earliest_piece, dv);
step_video_position (dv);
- } else if (dv->dcp_time - _video_position > margin) {
+ } else if (dv->dcp_time > _video_position) {
/* Too far ahead */
consume = false;
- } else if (abs (dv->dcp_time - _video_position) < margin) {
+ } else if (dv->dcp_time == _video_position) {
/* We're ok */
emit_video (earliest_piece, dv);
step_video_position (dv);
} else if (da && _audio) {
- if (da->dcp_time - _audio_position > margin) {
+ if (da->dcp_time > _audio_position) {
/* Too far ahead */
emit_silence (da->dcp_time - _audio_position);
consume = false;
_statistics.audio.silence += (da->dcp_time - _audio_position);
- } else if (abs (da->dcp_time - _audio_position) < margin) {
+ } else if (da->dcp_time == _audio_position) {
/* We're ok */
emit_audio (earliest_piece, da);
_statistics.audio.good += da->data->frames();
throw DecodeError (_("could not open audio file for reading"));
}
+ _done = 0;
_remaining = _info.frames;
}
}
data->set_frames (this_time);
- audio (data);
+ audio (data, _done * TIME_HZ / audio_frame_rate ());
+ _done += this_time;
_remaining -= this_time;
return _remaining == 0;
Decoder::seek (t, accurate);
AudioDecoder::seek (t, accurate);
+ _done = t * audio_frame_rate() / TIME_HZ;
_remaining = _info.frames - _done;
}
int audio_frame_rate () const;
private:
- ContentTime first_audio () const {
- return 0;
- }
-
bool pass ();
boost::shared_ptr<const SndfileContent> _sndfile_content;
SNDFILE* _sndfile;
SF_INFO _info;
+ AudioFrame _done;
AudioFrame _remaining;
float* _deinterleave_buffer;
};