X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fj2k_wav_encoder.cc;h=c0c785d118d8254be4eb3489bca2806ca8f9a230;hb=44b57d623dec97a3f9955082f0b8a7a8d27b7518;hp=e2a3a5ed7193dd3b591d0785999aff4c48dfacad;hpb=13511ed2fcc23f4d5f9c507c775c3c5cfd82d155;p=dcpomatic.git

diff --git a/src/lib/j2k_wav_encoder.cc b/src/lib/j2k_wav_encoder.cc
index e2a3a5ed7..c0c785d11 100644
--- a/src/lib/j2k_wav_encoder.cc
+++ b/src/lib/j2k_wav_encoder.cc
@@ -49,16 +49,15 @@ J2KWAVEncoder::J2KWAVEncoder (shared_ptr<const FilmState> s, shared_ptr<const Options> o, Log* l)
-	for (int i = 0; i < _fs->audio_channels; ++i) {
+	for (int i = 0; i < _fs->audio_channels(); ++i) {
 		SF_INFO sf_info;
-		sf_info.samplerate = dcp_audio_sample_rate (_fs->audio_sample_rate);
+		sf_info.samplerate = dcp_audio_sample_rate (_fs->audio_sample_rate());
 		/* We write mono files */
 		sf_info.channels = 1;
 		sf_info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_24;
@@ -68,15 +67,11 @@ J2KWAVEncoder::process_video (shared_ptr<Image> yuv, int frame, shared_ptr<Subtitle> sub)
 	if (boost::filesystem::exists (_opt->frame_out_path (frame, false))) {
-	pair<string, string> const s = Filter::ffmpeg_strings (_fs->filters);
+	pair<string, string> const s = Filter::ffmpeg_strings (_fs->filters());
 	TIMING ("adding to queue of %1", _queue.size ());
 	_queue.push_back (boost::shared_ptr<DCPVideoFrame> (
 		new DCPVideoFrame (
-			yuv, sub, _opt->out_size, _opt->padding, _fs->subtitle_offset, _fs->subtitle_scale,
-			_fs->scaler, frame, _fs->frames_per_second, s.second,
+			yuv, sub, _opt->out_size, _opt->padding, _fs->subtitle_offset(), _fs->subtitle_scale(),
+			_fs->scaler(), frame, _fs->frames_per_second(), s.second,
 			Config::instance()->colour_lut_index (), Config::instance()->j2k_bandwidth (),
 			_log
 			)
@@ -222,23 +217,24 @@ J2KWAVEncoder::encoder_thread (ServerDescription* server)
 }
 
 void
-J2KWAVEncoder::process_begin (int64_t audio_channel_layout, AVSampleFormat audio_sample_format)
+J2KWAVEncoder::process_begin (int64_t audio_channel_layout)
 {
-	if (_fs->audio_sample_rate != _fs->target_sample_rate ()) {
+	if (_fs->audio_sample_rate() != _fs->target_audio_sample_rate()) {
 #ifdef HAVE_SWRESAMPLE
 		stringstream s;
-		s << "Will resample audio from " << _fs->audio_sample_rate << " to " << _fs->target_sample_rate();
+		s << "Will resample audio from " << _fs->audio_sample_rate() << " to " << _fs->target_audio_sample_rate();
 		_log->log (s.str ());
-		
+
+		/* We will be using planar float data when we call the resampler */
 		_swr_context = swr_alloc_set_opts (
 			0,
 			audio_channel_layout,
-			audio_sample_format,
-			_fs->target_sample_rate(),
+			AV_SAMPLE_FMT_FLTP,
+			_fs->target_audio_sample_rate(),
 			audio_channel_layout,
-			audio_sample_format,
-			_fs->audio_sample_rate,
+			AV_SAMPLE_FMT_FLTP,
+			_fs->audio_sample_rate(),
 			0, 0
 			);
@@ -308,14 +304,10 @@ J2KWAVEncoder::process_end ()
 #if HAVE_SWRESAMPLE
 	if (_swr_context) {
+		shared_ptr<AudioBuffers> out (new AudioBuffers (_fs->audio_channels(), 256));
+
 		while (1) {
-			uint8_t buffer[256 * _fs->bytes_per_sample() * _fs->audio_channels];
-			uint8_t* out[2] = {
-				buffer,
-				0
-			};
-
-			int const frames = swr_convert (_swr_context, out, 256, 0, 0);
+			int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
 
 			if (frames < 0) {
 				throw EncodeError ("could not run sample-rate converter");
 			}
@@ -325,17 +317,23 @@ J2KWAVEncoder::process_end ()
 				break;
 			}
 
-			write_audio (buffer, frames * _fs->bytes_per_sample() * _fs->audio_channels);
+			write_audio (out);
 		}
 
 		swr_free (&_swr_context);
 	}
-#endif
+#endif
+
+	int const dcp_sr = dcp_audio_sample_rate (_fs->audio_sample_rate ());
+	int64_t const extra_audio_frames = dcp_sr - (_audio_frames_written % dcp_sr);
+	shared_ptr<AudioBuffers> silence (new AudioBuffers (_fs->audio_channels(), extra_audio_frames));
+	silence->make_silent ();
+	write_audio (silence);
 
 	close_sound_files ();
 
 	/* Rename .wav.tmp files to .wav */
-	for (int i = 0; i < _fs->audio_channels; ++i) {
+	for (int i = 0; i < _fs->audio_channels(); ++i) {
 		if (boost::filesystem::exists (_opt->multichannel_audio_out_path (i, false))) {
 			boost::filesystem::remove (_opt->multichannel_audio_out_path (i, false));
 		}
@@ -344,97 +342,45 @@ J2KWAVEncoder::process_end ()
 }
 
 void
-J2KWAVEncoder::process_audio (uint8_t* data, int size)
+J2KWAVEncoder::process_audio (shared_ptr<AudioBuffers> audio)
 {
-	/* This is a buffer we might use if we are sample-rate converting;
-	   it will need freeing if so.
-	*/
-	uint8_t* out_buffer = 0;
+	shared_ptr<AudioBuffers> resampled;
 
+#if HAVE_SWRESAMPLE
 	/* Maybe sample-rate convert */
-#if HAVE_SWRESAMPLE
 	if (_swr_context) {
-		uint8_t const * in[2] = {
-			data,
-			0
-		};
-
-		/* Here's samples per channel */
-		int const samples = size / _fs->bytes_per_sample();
-
-		/* And here's frames (where 1 frame is a collection of samples, 1 for each channel,
-		   so for 5.1 a frame would be 6 samples)
-		*/
-		int const frames = samples / _fs->audio_channels;
+		/* Compute the resampled frames count and add 32 for luck */
+		int const max_resampled_frames = ceil (audio->frames() * _fs->target_audio_sample_rate() / _fs->audio_sample_rate()) + 32;
 
-		/* Compute the resampled frame count and add 32 for luck */
-		int const out_buffer_size_frames = ceil (frames * _fs->target_sample_rate() / _fs->audio_sample_rate) + 32;
-		int const out_buffer_size_bytes = out_buffer_size_frames * _fs->audio_channels * _fs->bytes_per_sample();
-		out_buffer = new uint8_t[out_buffer_size_bytes];
-
-		uint8_t* out[2] = {
-			out_buffer,
-			0
-		};
+		resampled.reset (new AudioBuffers (_fs->audio_channels(), max_resampled_frames));
 
 		/* Resample audio */
-		int out_frames = swr_convert (_swr_context, out, out_buffer_size_frames, in, frames);
-		if (out_frames < 0) {
+		int const resampled_frames = swr_convert (
+			_swr_context, (uint8_t **) resampled->data(), max_resampled_frames, (uint8_t const **) audio->data(), audio->frames()
+			);
+
+		if (resampled_frames < 0) {
 			throw EncodeError ("could not run sample-rate converter");
 		}
 
+		resampled->set_frames (resampled_frames);
+
 		/* And point our variables at the resampled audio */
-		data = out_buffer;
-		size = out_frames * _fs->audio_channels * _fs->bytes_per_sample();
+		audio = resampled;
 	}
 #endif
 
-	write_audio (data, size);
-
-	/* Delete the sample-rate conversion buffer, if it exists */
-	delete[] out_buffer;
+	write_audio (audio);
 }
 
 void
-J2KWAVEncoder::write_audio (uint8_t* data, int size)
+J2KWAVEncoder::write_audio (shared_ptr<AudioBuffers> audio)
 {
-	/* XXX: we are assuming that the _deinterleave_buffer_size is a multiple
-	   of the sample size and that size is a multiple of _fs->audio_channels * sample_size.
-	*/
-
-	assert ((size % (_fs->audio_channels * _fs->bytes_per_sample())) == 0);
-	assert ((_deinterleave_buffer_size % _fs->bytes_per_sample()) == 0);
-
-	/* XXX: this code is very tricksy and it must be possible to make it simpler ...
-	*/
-
-	/* Number of bytes left to read this time */
-	int remaining = size;
-	/* Our position in the output buffers, in bytes */
-	int position = 0;
-	while (remaining > 0) {
-		/* How many bytes of the deinterleaved data to do this time */
-		int this_time = min (remaining / _fs->audio_channels, _deinterleave_buffer_size);
-		for (int i = 0; i < _fs->audio_channels; ++i) {
-			for (int j = 0; j < this_time; j += _fs->bytes_per_sample()) {
-				for (int k = 0; k < _fs->bytes_per_sample(); ++k) {
-					int const to = j + k;
-					int const from = position + (i * _fs->bytes_per_sample()) + (j * _fs->audio_channels) + k;
-					_deinterleave_buffer[to] = data[from];
-				}
-			}
-
-			switch (_fs->audio_sample_format) {
-			case AV_SAMPLE_FMT_S16:
-				sf_write_short (_sound_files[i], (const short *) _deinterleave_buffer, this_time / _fs->bytes_per_sample());
-				break;
-			default:
-				throw EncodeError ("unknown audio sample format");
-			}
-		}
-
-		position += this_time;
-		remaining -= this_time * _fs->audio_channels;
+	for (int i = 0; i < _fs->audio_channels(); ++i) {
+		sf_write_float (_sound_files[i], audio->data(i), audio->frames());
 	}
+
+	_audio_frames_written += audio->frames ();
 }
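
The resampler changes above all follow one libswresample pattern: configure planar float on both sides with swr_alloc_set_opts(), feed blocks through swr_convert(), then drain by calling swr_convert() with no input until it returns 0 (which is when the loop in process_end() stops). Planar float is also what makes the casts of AudioBuffers::data() legal: each channel is a separate float array. A minimal standalone sketch against the pre-FFmpeg-5.1 API the patch itself uses; the stereo layout, 44.1 kHz to 48 kHz rates and test tone are illustrative assumptions, not taken from the patch:

extern "C" {
#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
}
#include <cmath>
#include <cstdio>
#include <vector>

int main ()
{
	int const in_rate = 44100;
	int const out_rate = 48000;

	/* Planar float on both sides, as in process_begin() */
	SwrContext* swr = swr_alloc_set_opts (
		0,
		AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, out_rate,
		AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, in_rate,
		0, 0
		);
	swr_init (swr);

	/* One second of a 440Hz tone as input */
	std::vector<float> in_l (in_rate), in_r (in_rate);
	for (int i = 0; i < in_rate; ++i) {
		in_l[i] = in_r[i] = sin (2 * M_PI * 440 * i / in_rate);
	}
	uint8_t const * in[] = { (uint8_t const *) in_l.data(), (uint8_t const *) in_r.data() };

	/* Output buffer with some headroom, like the patch's "+ 32 for luck" */
	int const max_out = int (ceil (double (in_rate) * out_rate / in_rate)) + 32;
	std::vector<float> out_l (max_out), out_r (max_out);
	uint8_t* out[] = { (uint8_t *) out_l.data(), (uint8_t *) out_r.data() };

	int total = swr_convert (swr, out, max_out, in, in_rate);

	/* Drain: passing no input flushes whatever the resampler has
	   buffered; 0 means it is empty.
	*/
	while (true) {
		int const frames = swr_convert (swr, out, max_out, 0, 0);
		if (frames <= 0) {
			break;
		}
		total += frames;
	}

	printf ("%d frames out\n", total);
	swr_free (&swr);
	return 0;
}

The 256-frame buffer in process_end() plays the role of max_out here; any size works, since the drain loop simply repeats until the converter is empty.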
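The silence block appended in process_end() pads _audio_frames_written up to the next whole second at the DCP sample rate, so each channel's WAV covers an integral number of seconds. Worked through with assumed figures (48 kHz DCP rate, 100000 frames already written):

#include <cstdint>
#include <cstdio>

int main ()
{
	int const dcp_sr = 48000;       /* assumed DCP audio sample rate */
	int64_t const written = 100000; /* assumed frames already written */

	/* Same expression as the patch */
	int64_t const extra = dcp_sr - (written % dcp_sr);

	/* 48000 - (100000 % 48000) = 48000 - 4000 = 44000 frames of
	   silence, giving 144000 frames total, i.e. exactly 3 seconds.
	   Note that the expression yields a full dcp_sr (one whole extra
	   second) when written is already an exact multiple of dcp_sr.
	*/
	printf ("%lld\n", (long long) extra);
	return 0;
}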
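On the libsndfile side, write_audio() now hands each channel's float array straight to sf_write_float(), which converts to the 24-bit PCM WAV format declared when the files were opened in the constructor, replacing the manual deinterleave-and-sf_write_short path. A self-contained sketch of that per-channel setup; the filename and the one-second tone are hypothetical:

#include <sndfile.h>
#include <cmath>
#include <vector>

int main ()
{
	SF_INFO sf_info = {};
	sf_info.samplerate = 48000;
	/* We write mono files, one per channel */
	sf_info.channels = 1;
	sf_info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_24;

	/* .tmp suffix, renamed on completion as in process_end() */
	SNDFILE* f = sf_open ("audio-channel-0.wav.tmp", SFM_WRITE, &sf_info);
	if (!f) {
		return 1;
	}

	std::vector<float> samples (48000);
	for (int i = 0; i < 48000; ++i) {
		samples[i] = sin (2 * M_PI * 440 * i / 48000.0);
	}

	/* libsndfile converts the float samples to PCM_24 on write */
	sf_write_float (f, samples.data(), samples.size());
	sf_close (f);
	return 0;
}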