X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Faudio_decoder.cc;h=c0ef02f65d5ab5518dcb7e53aa145b1ff3f9a598;hb=cadf2d574d144098fffa3c61e0a2be88f496cac6;hp=e1c93ac77bcb23de9a5888cdc2babd36ffdef442;hpb=237a0052c60af768f4d62b00321932918b7ba4d9;p=dcpomatic.git

diff --git a/src/lib/audio_decoder.cc b/src/lib/audio_decoder.cc
index e1c93ac77..c0ef02f65 100644
--- a/src/lib/audio_decoder.cc
+++ b/src/lib/audio_decoder.cc
@@ -21,113 +21,38 @@
 #include "audio_buffers.h"
 #include "exceptions.h"
 #include "log.h"
+#include "resampler.h"
 #include "i18n.h"
 
 using std::stringstream;
+using std::list;
+using std::pair;
+using std::cout;
 using boost::optional;
 using boost::shared_ptr;
 
-AudioDecoder::AudioDecoder (shared_ptr<const Film> f, shared_ptr<const AudioContent> c)
-	: Decoder (f)
-	, _audio_content (c)
-	, _output_audio_frame_rate (_audio_content->output_audio_frame_rate (f))
+AudioDecoder::AudioDecoder (shared_ptr<const Film> film, shared_ptr<const AudioContent> content)
+	: Decoder (film)
+	, _audio_content (content)
+	, _audio_position (0)
 {
-	if (_audio_content->content_audio_frame_rate() != _output_audio_frame_rate) {
-		stringstream s;
-		s << String::compose ("Will resample audio from %1 to %2", _audio_content->content_audio_frame_rate(), _output_audio_frame_rate);
-		_film->log()->log (s.str ());
-
-		/* We will be using planar float data when we call the
-		   resampler.  As far as I can see, the audio channel
-		   layout is not necessary for our purposes; it seems
-		   only to be used to get the number of channels and
-		   decide if rematrixing is needed.  It won't be, since
-		   input and output layouts are the same.
-		*/
-
-		_swr_context = swr_alloc_set_opts (
-			0,
-			av_get_default_channel_layout (MAX_AUDIO_CHANNELS),
-			AV_SAMPLE_FMT_FLTP,
-			_output_audio_frame_rate,
-			av_get_default_channel_layout (MAX_AUDIO_CHANNELS),
-			AV_SAMPLE_FMT_FLTP,
-			_audio_content->content_audio_frame_rate(),
-			0, 0
-			);
-
-		swr_init (_swr_context);
-	} else {
-		_swr_context = 0;
-	}
-}
-
-AudioDecoder::~AudioDecoder ()
-{
-	if (_swr_context) {
-		swr_free (&_swr_context);
-	}
 }
 
-#if 0
 void
-AudioDecoder::process_end ()
+AudioDecoder::audio (shared_ptr<const AudioBuffers> data, AudioContent::Frame frame)
 {
-	if (_swr_context) {
-
-		shared_ptr<AudioBuffers> out (new AudioBuffers (_film->audio_mapping().dcp_channels(), 256));
-
-		while (1) {
-			int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
-
-			if (frames < 0) {
-				throw EncodeError (_("could not run sample-rate converter"));
-			}
-
-			if (frames == 0) {
-				break;
-			}
-
-			out->set_frames (frames);
-			_writer->write (out);
-		}
-
-	}
+	Audio (data, frame);
+	_audio_position = frame + data->frames ();
 }
-#endif
 
-void
-AudioDecoder::emit_audio (shared_ptr<AudioBuffers> data, Time time)
+/** This is a bit odd, but necessary when we have (e.g.) FFmpegDecoders with no audio.
+ *  The player needs to know that there is no audio, otherwise it will keep trying to
+ *  pass() the decoder to get it to emit audio.
+ */
+bool
+AudioDecoder::has_audio () const
 {
-	/* XXX: map audio to 5.1 */
-
-	/* Maybe sample-rate convert */
-	if (_swr_context) {
-
-		/* Compute the resampled frame count and add 32 for luck */
-		int const max_resampled_frames = ceil ((int64_t) data->frames() * _output_audio_frame_rate / _audio_content->content_audio_frame_rate()) + 32;
-
-		shared_ptr<AudioBuffers> resampled (new AudioBuffers (MAX_AUDIO_CHANNELS, max_resampled_frames));
-
-		/* Resample audio */
-		int const resampled_frames = swr_convert (
-			_swr_context, (uint8_t **) resampled->data(), max_resampled_frames, (uint8_t const **) data->data(), data->frames()
-			);
-
-		if (resampled_frames < 0) {
-			throw EncodeError (_("could not run sample-rate converter"));
-		}
-
-		resampled->set_frames (resampled_frames);
-
-		/* And point our variables at the resampled audio */
-		data = resampled;
-	}
-
-	Audio (data, time);
+	return _audio_content->audio_channels () > 0;
 }
-
-
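
Note on the refactor: the libswresample logic deleted above does not disappear; this commit moves it behind the newly included resampler.h. Since that header is not shown in this diff, here is a minimal sketch of how such a standalone resampler can be built from the same calls (swr_alloc_set_opts, swr_init, swr_convert, swr_free). The class shape, method names and raw float** buffers are assumptions for illustration only; the real dcpomatic Resampler presumably works on its AudioBuffers type.

extern "C" {
#include <libavutil/channel_layout.h>
#include <libswresample/swresample.h>
}

#include <cstdint>
#include <stdexcept>

/* Hypothetical stand-in for the class declared in resampler.h */
class Resampler
{
public:
	Resampler (int in_rate, int out_rate, int channels)
		: _in_rate (in_rate)
		, _out_rate (out_rate)
	{
		/* Planar float on both sides; as the removed comment notes, the
		   channel layout is only used to find the channel count and to
		   decide whether rematrixing is needed (it is not, since the
		   input and output layouts are identical).
		*/
		_swr = swr_alloc_set_opts (
			0,
			av_get_default_channel_layout (channels), AV_SAMPLE_FMT_FLTP, out_rate,
			av_get_default_channel_layout (channels), AV_SAMPLE_FMT_FLTP, in_rate,
			0, 0
			);

		if (!_swr || swr_init (_swr) < 0) {
			throw std::runtime_error ("could not set up sample-rate converter");
		}
	}

	~Resampler ()
	{
		swr_free (&_swr);
	}

	/** Resample planar float audio; in[c] points to channel c's samples.
	 *  Returns the number of frames actually written to out.
	 */
	int run (float** out, int max_out_frames, float const ** in, int in_frames)
	{
		int const n = swr_convert (
			_swr,
			reinterpret_cast<uint8_t**> (out), max_out_frames,
			reinterpret_cast<uint8_t const **> (in), in_frames
			);

		if (n < 0) {
			throw std::runtime_error ("could not run sample-rate converter");
		}

		return n;
	}

	/** Drain frames buffered inside libswresample at end of input,
	 *  which is what the old, disabled process_end() loop above did
	 *  256 frames at a time.
	 */
	int flush (float** out, int max_out_frames)
	{
		return run (out, max_out_frames, 0, 0);
	}

	/** Upper bound on the frames one run() call can produce, using the
	 *  same headroom as the removed code: truncating integer division,
	 *  plus 32 frames "for luck" to absorb rounding and internal delay.
	 */
	int max_output_frames (int in_frames) const
	{
		return static_cast<int> ((int64_t) in_frames * _out_rate / _in_rate + 32);
	}

private:
	SwrContext* _swr;
	int _in_rate;
	int _out_rate;
};

A caller would size its output buffer with max_output_frames() before each run() and call flush() once at end of stream; keeping that state in one object per content item is what lets this commit reduce AudioDecoder to plain bookkeeping of _audio_position.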