Go to the documentation of this file.
40 #define BITSTREAM_WRITER_LE
60 }
else if (avctx->
bit_rate == 5300) {
85 *iir = (
buf[
i] << 15) + ((-*fir) << 15) +
MULL2(*iir, 0x7f00);
87 buf[
i] = av_clipl_int32((int64_t)*iir + (1 << 15)) >> 16;
116 autocorr[0] = av_clipl_int32((int64_t) (
temp << scale) +
121 memset(autocorr + 1, 0,
LPC_ORDER *
sizeof(int16_t));
126 autocorr[
i] = av_clipl_int32((int64_t)
temp + (1 << 15)) >> 16;
142 int16_t partial_corr;
145 memset(lpc, 0,
LPC_ORDER *
sizeof(int16_t));
150 for (j = 0; j <
i; j++)
151 temp -= lpc[j] * autocorr[
i - j - 1];
152 temp = ((autocorr[
i] << 13) +
temp) << 3;
159 lpc[
i] = av_clipl_int32((int64_t) (partial_corr << 14) +
167 memcpy(vector, lpc,
i *
sizeof(int16_t));
168 for (j = 0; j <
i; j++) {
169 temp = partial_corr * vector[
i - j - 1] << 1;
170 lpc[j] = av_clipl_int32((int64_t) (lpc[j] << 16) -
temp +
186 int16_t *autocorr_ptr = autocorr;
187 int16_t *lpc_ptr = lpc;
199 static void lpc2lsp(int16_t *lpc, int16_t *prev_lsp, int16_t *lsp)
214 f[0] =
f[1] = 1 << 25;
221 f[2 *
i + 3] =
f[2 *
i + 1] - ((lsp[
i] - lsp[
LPC_ORDER - 1 -
i]) << 12);
236 f[
i] = av_clipl_int32((int64_t) (
f[
i] <<
shift) + (1 << 15)) >> 16;
246 prev_val = av_clipl_int32(
temp << 1);
253 cur_val = av_clipl_int32(
temp << 1);
256 if ((cur_val ^ prev_val) < 0) {
257 int abs_cur =
FFABS(cur_val);
258 int abs_prev =
FFABS(prev_val);
259 int sum = abs_cur + abs_prev;
263 abs_prev = abs_prev << shift >> 8;
264 lsp[
count++] = ((
i - 1) << 7) + (abs_prev >> 1) / (sum >> 16);
277 cur_val = av_clipl_int32(
temp << 1);
283 memcpy(lsp, prev_lsp,
LPC_ORDER *
sizeof(int16_t));
293 #define get_index(num, offset, size) \
295 int error, max = -1; \
299 for (i = 0; i < LSP_CB_SIZE; i++) { \
300 for (j = 0; j < size; j++){ \
301 temp[j] = (weight[j + (offset)] * lsp_band##num[i][j] + \
304 error = ff_g723_1_dot_product(lsp + (offset), temp, size) << 1; \
305 error -= ff_g723_1_dot_product(lsp_band##num[i], temp, size); \
308 lsp_index[num] = i; \
326 weight[0] = (1 << 20) / (lsp[1] - lsp[0]);
351 (((prev_lsp[
i] -
dc_lsp[
i]) * 12288 + (1 << 14)) >> 15);
368 int16_t *
src, int16_t *dest)
376 iir_coef[
n - 1] * dest[m -
n];
379 dest[m] = av_clipl_int32((
src[m] << 16) + (
filter << 3) +
391 int16_t *unq_lpc, int16_t *
buf)
404 flt_coef[k + 2 * l +
LPC_ORDER] = (unq_lpc[k + l] *
409 vector +
i,
buf +
i);
425 int max_ccr = 0x4000;
426 int max_eng = 0x7fff;
430 int ccr, eng, orig_eng, ccr_eng,
exp;
449 ccr = av_clipl_int32((int64_t) (ccr <<
exp) + (1 << 15)) >> 16;
453 ccr = ccr << temp >> 16;
457 eng = av_clipl_int32((int64_t) (orig_eng <<
temp) + (1 << 15)) >> 16;
467 if (
exp + 1 < max_exp)
471 if (
exp + 1 == max_exp)
475 ccr_eng = ccr * max_eng;
477 if (
diff > 0 && (
i - index < PITCH_MIN || diff > ccr_eng >> 2)) {
497 int ccr, eng, max_ccr, max_eng;
502 for (
i = 0, j = pitch_lag - 3; j <= pitch_lag + 3;
i++, j++) {
514 for (
i = 0;
i < 15;
i++)
518 for (
i = 0;
i < 15;
i++) {
519 energy[
i] = av_clipl_int32((int64_t)(energy[
i] <<
exp) +
528 for (
i = 0;
i <= 6;
i++) {
529 eng = energy[
i << 1];
530 ccr = energy[(
i << 1) + 1];
535 ccr = (ccr * ccr + (1 << 14)) >> 15;
536 diff = ccr * max_eng - eng * max_ccr;
544 if (hf->
index == -1) {
545 hf->
index = pitch_lag;
549 eng = energy[14] * max_eng;
550 eng = (eng >> 2) + (eng >> 3);
551 ccr = energy[(hf->
index << 1) + 1] * energy[(hf->
index << 1) + 1];
553 eng = energy[(hf->
index << 1) + 1];
558 hf->
gain = ((eng << 15) / max_eng * 0x2800 + (1 << 14)) >> 15;
560 hf->
index += pitch_lag - 3;
574 dest[
i] = av_clipl_int32((
src[
i] << 16) -
temp + (1 << 15)) >> 16;
583 dest[
i] = av_clipl_int32(((dest[
i] -
src[
i]) << 16) +
temp +
598 int16_t *perf_fir, int16_t *perf_iir,
599 const int16_t *
src, int16_t *dest,
int scale)
607 memcpy(buf_16, perf_fir,
sizeof(int16_t) *
LPC_ORDER);
613 temp -= qnt_lpc[j - 1] * bptr_16[
i - j];
616 bptr_16[
i] = av_clipl_int32(
buf[
i] + (1 << 15)) >> 16;
620 int64_t fir = 0, iir = 0;
622 fir -= perf_lpc[j - 1] * bptr_16[
i - j];
623 iir += perf_lpc[j +
LPC_ORDER - 1] * dest[
i - j];
625 dest[
i] = av_clipl_int32(((
buf[
i] + (fir << 3)) << scale) + (iir << 3) +
640 int16_t *impulse_resp,
const int16_t *
buf,
652 int odd_frame =
index & 1;
653 int iter = 3 + odd_frame;
667 for (
i = 0;
i < iter;
i++) {
672 for (k = 0; k <= j; k++)
679 flt_buf[j][0] = ((
residual[j] << 13) + (1 << 14)) >> 15;
681 temp = (flt_buf[j + 1][k - 1] << 15) +
683 flt_buf[j][k] = av_clipl_int32((
temp << 1) + (1 << 15)) >> 16;
690 ccr_buf[
count++] = av_clipl_int32(
temp << 1);
700 for (k = 0; k < j; k++) {
702 ccr_buf[
count++] = av_clipl_int32(
temp << 2);
709 for (
i = 0;
i < 20 * iter;
i++)
714 for (
i = 0;
i < 20 * iter;
i++)
715 ccr_buf[
i] = av_clipl_int32((int64_t) (ccr_buf[
i] <<
temp) +
719 for (
i = 0;
i < iter;
i++) {
727 for (j = 0, k = 0; j < tbl_size; j++, k += 20) {
729 for (l = 0; l < 20; l++)
730 temp += ccr_buf[20 *
i + l] * cb_tbl[k + l];
742 pitch_lag += acb_lag - 1;
764 for (j = 0; j <=
i; j++)
767 buf[
i] = av_clipl_int32((
temp << 2) + (1 << 15)) >> 16;
778 int16_t *
buf,
int pulse_cnt,
int pitch_lag)
787 int amp, err,
max, max_amp_index,
min, scale,
i, j, k, l;
792 memcpy(impulse_r, impulse_resp,
sizeof(int16_t) *
SUBFRAME_LEN);
800 temp_corr[
i] = impulse_r[
i] >> 1;
806 impulse_corr[0] = av_clipl_int32((
temp << scale) + (1 << 15)) >> 16;
811 impulse_corr[
i] = av_clipl_int32((
temp << scale) + (1 << 15)) >> 16;
819 ccr1[
i] =
temp >> -scale;
821 ccr1[
i] = av_clipl_int32(
temp << scale);
840 for (j = max_amp_index; j >= 2; j--) {
842 impulse_corr[0] << 1);
852 for (j = 1; j < 5; j++) {
863 for (k = 1; k < pulse_cnt; k++) {
869 temp = av_clipl_int32((int64_t)
temp *
887 for (k = 0; k < pulse_cnt; k++)
892 for (l = 0; l <= k; l++) {
893 int prod = av_clipl_int32((int64_t) temp_corr[l] *
894 impulse_r[k - l] << 1);
897 temp_corr[k] = temp << 2 >> 16;
904 prod = av_clipl_int32((int64_t)
buf[k] * temp_corr[k] << 1);
905 err = av_clipl_int32(err - prod);
906 prod = av_clipl_int32((int64_t) temp_corr[k] * temp_corr[k]);
907 err = av_clipl_int32(err + prod);
911 if (err < optim->min_err) {
917 for (k = 0; k < pulse_cnt; k++) {
933 int16_t *
buf,
int pulse_cnt)
942 for (
i = 0; i < SUBFRAME_LEN >> 1;
i++) {
984 for (
i = 0;
i < pulse_cnt;
i++)
1075 memcpy(
in,
frame->data[0],
frame->nb_samples *
sizeof(int16_t));
1133 memset(vector, 0,
sizeof(int16_t) *
PITCH_MAX);
1136 flt_in[0] = 1 << 13;
1147 fir, iir, flt_in, vector +
PITCH_MAX, 0);
1167 in[j] = av_clip_int16((
in[j] << 1) + impulse_resp[j]);
1189 *got_packet_ptr = 1;
int frame_size
Number of samples per channel in an audio frame.
static av_cold int g723_1_encode_init(AVCodecContext *avctx)
int16_t prev_data[HALF_FRAME_LEN]
static av_cold int init(AVCodecContext *avctx)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
int sample_rate
samples per second
static enum AVSampleFormat sample_fmts[]
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
static const int16_t binomial_window[LPC_ORDER]
Binomial window coefficients scaled by 2^15.
int ad_cb_lag
adaptive codebook lag
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter's input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
#define get_index(num, offset, size)
Quantize the current LSP subvector.
void ff_g723_1_inverse_quant(int16_t *cur_lsp, int16_t *prev_lsp, uint8_t *lsp_index, int bad_frame)
Perform inverse quantization of LSP frequencies.
int16_t prev_excitation[PITCH_MAX]
static void harmonic_noise_sub(HFParam *hf, const int16_t *src, int16_t *dest)
static const int32_t combinatorial_table[PULSE_MAX][SUBFRAME_LEN/GRID_SIZE]
Used for the coding/decoding of the pulse positions for the MP-MLQ codebook.
int ff_g723_1_normalize_bits(int num, int width)
Calculate the number of left-shifts required for normalizing the input.
static const int16_t adaptive_cb_gain85[85 *20]
static void perceptual_filter(G723_1_ChannelContext *p, int16_t *flt_coef, int16_t *unq_lpc, int16_t *buf)
Apply the formant perceptual weighting filter.
void ff_g723_1_gen_dirac_train(int16_t *buf, int pitch_lag)
Generate a train of dirac functions with period as pitch lag.
static const int16_t percept_flt_tbl[2][LPC_ORDER]
0.5^i scaled by 2^15
int16_t hpf_fir_mem
highpass filter fir
int16_t prev_weight_sig[PITCH_MAX]
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
static const int16_t adaptive_cb_gain170[170 *20]
static void comp_harmonic_coeff(int16_t *buf, int16_t pitch_lag, HFParam *hf)
Compute harmonic noise filter parameters.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Harmonic filter parameters.
void ff_g723_1_gen_acb_excitation(int16_t *vector, int16_t *prev_excitation, int pitch_lag, G723_1_Subframe *subfrm, enum Rate cur_rate)
Generate adaptive codebook excitation.
int16_t perf_fir_mem[LPC_ORDER]
perceptual filter fir
static int pack_bitstream(G723_1_ChannelContext *p, AVPacket *avpkt)
Pack the frame parameters into output bitstream.
static void skip_put_bits(PutBitContext *s, int n)
Skip the given number of bits.
int16_t fir_mem[LPC_ORDER]
static void comp_lpc_coeff(int16_t *buf, int16_t *lpc)
Calculate LPC coefficients for the current frame.
AVCodec ff_g723_1_encoder
Optimized fixed codebook excitation parameters.
static void fcb_search(G723_1_ChannelContext *p, int16_t *impulse_resp, int16_t *buf, int index)
Compute the fixed codebook excitation.
static void acb_search(G723_1_ChannelContext *p, int16_t *residual, int16_t *impulse_resp, const int16_t *buf, int index)
Compute the adaptive codebook contribution.
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
void ff_g723_1_lsp_interpolate(int16_t *lpc, int16_t *cur_lsp, int16_t *prev_lsp)
Quantize LSP frequencies by interpolation and convert them to the corresponding LPC coefficients.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
int64_t bit_rate
the average bitrate
void ff_g723_1_get_residual(int16_t *residual, int16_t *prev_excitation, int lag)
Get delayed contribution from the previous excitation vector.
static void lsp_quantize(uint8_t *lsp_index, int16_t *lsp, int16_t *prev_lsp)
Vector quantize the LSP frequencies.
int ff_g723_1_dot_product(const int16_t *a, const int16_t *b, int length)
G723_1_Subframe subframe[4]
static int weight(int i, int blen, int offset)
static void error(const char *err)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int pulse_sign[PULSE_MAX]
uint8_t lsp_index[LSP_BANDS]
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
const char const char void * val
static void levinson_durbin(int16_t *lpc, int16_t *autocorr, int16_t error)
Use Levinson-Durbin recursion to compute LPC coefficients from autocorrelation values.
G723.1 unpacked data subframe.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this just let it vf offset
static const int16_t fixed_cb_gain[GAIN_LEVELS]
int channels
number of audio channels
int16_t perf_iir_mem[LPC_ORDER]
and iir memories
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
static void pack_fcb_param(G723_1_Subframe *subfrm, FCBParam *optim, int16_t *buf, int pulse_cnt)
Encode the pulse position and gain of the current subframe.
static void comp_autocorr(int16_t *buf, int16_t *autocorr)
Estimate autocorrelation of the input vector.
#define i(width, name, range_min, range_max)
int64_t ff_dot_product(const int16_t *a, const int16_t *b, int length)
Calculate the dot product of 2 int16_t vectors.
int ff_g723_1_scale_vector(int16_t *dst, const int16_t *vector, int length)
Scale vector contents based on the largest of their absolutes.
AVSampleFormat
Audio sample formats.
static int g723_1_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
@ AV_SAMPLE_FMT_S16
signed 16 bits
const char * name
Name of the codec implementation.
static const int16_t hamming_window[LPC_FRAME]
Hamming window coefficients scaled by 2^15.
static const int16_t bandwidth_expand[LPC_ORDER]
0.994^i scaled by 2^15
static void synth_percept_filter(int16_t *qnt_lpc, int16_t *perf_lpc, int16_t *perf_fir, int16_t *perf_iir, const int16_t *src, int16_t *dest, int scale)
Combined synthesis and formant perceptual weighting filter.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter's request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
int16_t prev_lsp[LPC_ORDER]
static void highpass_filter(int16_t *buf, int16_t *fir, int *iir)
Remove DC component from the input signal.
static const int16_t dc_lsp[LPC_ORDER]
LSP DC component.
main external API structure.
static int estimate_pitch(int16_t *buf, int start)
Estimate the open loop pitch period.
static void iir_filter(int16_t *fir_coef, int16_t *iir_coef, int16_t *src, int16_t *dest)
Perform IIR filtering.
static void sub_acb_contrib(const int16_t *residual, const int16_t *impulse_resp, int16_t *buf)
Subtract the adaptive codebook contribution from the input to obtain the residual.
static int shift(int a, int b)
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static av_always_inline int diff(const uint32_t a, const uint32_t b)
static void lpc2lsp(int16_t *lpc, int16_t *prev_lsp, int16_t *lsp)
This structure stores compressed data.
int16_t harmonic_mem[PITCH_MAX]
#define MULL2(a, b)
Bitexact implementation of 2ab scaled by 1/2^16.
int hpf_iir_mem
and iir memories
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
static float cos_tab[256]
static const int8_t pulses[4]
Number of non-zero pulses in the MP-MLQ excitation.
static void get_fcb_param(FCBParam *optim, int16_t *impulse_resp, int16_t *buf, int pulse_cnt, int pitch_lag)
Quantize the residual signal using the fixed codebook (MP-MLQ).
static const AVCodecDefault defaults[]
static void harmonic_filter(HFParam *hf, const int16_t *src, int16_t *dest)
Apply the harmonic noise shaping filter.