#define AMR_BLOCK_SIZE          160
#define AMR_SAMPLE_BOUND        32768.0
#define AMR_SAMPLE_SCALE        (2.0 / 32768.0)
#define PRED_FAC_MODE_12k2      0.65
#define LSF_R_FAC               (8000.0 / 32768.0)
#define MIN_LSF_SPACING         (50.0488 / 8000.0)
#define PITCH_LAG_MIN_MODE_12k2 18
#define MIN_ENERGY              -14.0
#define SHARP_MAX               0.79449462890625
#define AMR_TILT_RESPONSE       22
#define AMR_TILT_GAMMA_T        0.8
#define AMR_AGC_ALPHA           0.9
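As a standalone illustration of how the two sample macros interact (this program is not part of amrnbdec.c; the 16384.0 input is an arbitrary example value):

#include <stdio.h>

#define AMR_SAMPLE_BOUND 32768.0          /* threshold for synthesis overflow */
#define AMR_SAMPLE_SCALE (2.0 / 32768.0)  /* scale from constructed speech to [-1,1] */

int main(void)
{
    float speech = 16384.0f;   /* arbitrary example of a synthesized sample */

    /* samples at or beyond AMR_SAMPLE_BOUND count as synthesis overflow */
    if (speech >= AMR_SAMPLE_BOUND || speech <= -AMR_SAMPLE_BOUND)
        printf("synthesis overflow\n");

    printf("scaled output sample: %f\n", speech * AMR_SAMPLE_SCALE);  /* 1.0 */
    return 0;
}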
static void weighted_vector_sumd(double *out, const double *in_a,
                                 const double *in_b, double weight_coeff_a,
                                 double weight_coeff_b, int length)
{
    int i;

    for (i = 0; i < length; i++)
        out[i] = weight_coeff_a * in_a[i]
               + weight_coeff_b * in_b[i];
}
    for (i = 0; i < 4; i++)
    /* interpolate_lsf(): interpolate the LSF vector, used for fixed gain smoothing */
    for (i = 0; i < 4; i++)
        ctx->weighted_vector_sumf(lsf_q[i], lsf_q[3], lsf_new,
                                  0.25 * (3 - i), 0.25 * (i + 1),
                                  LP_FILTER_ORDER);
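A small standalone sketch (not decoder code) of the weights used above; they always sum to 1, moving from mostly the previously stored LSF vector (lsf_q[3]) toward the newly decoded one:

#include <stdio.h>

int main(void)
{
    for (int i = 0; i < 4; i++)
        printf("subframe %d: %.2f * previous + %.2f * new\n",
               i, 0.25 * (3 - i), 0.25 * (i + 1));
    return 0;
}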
/* lsf2lsp_for_mode12k2(): decode a set of 5 split-matrix quantized lsf indexes
 * into an lsp vector */
static void lsf2lsp_for_mode12k2(AMRContext *p, double lsp[LP_FILTER_ORDER],
                                 const float lsf_no_r[LP_FILTER_ORDER],
                                 const int16_t *lsf_quantizer[5],
                                 const int quantizer_offset,
                                 const int sign, const int update)

    for (i = 0; i < LP_FILTER_ORDER >> 1; i++)
        memcpy(&lsf_r[i << 1], &lsf_quantizer[i][quantizer_offset],
               2 * sizeof(*lsf_r));

        lsf_q[i] = lsf_r[i] * (LSF_R_FAC / 8000.0) + lsf_no_r[i] * (1.0 / 8000.0);
    /* lsf2lsp_5(): decode a set of 5 split-matrix quantized lsf indexes into 2 lsp vectors */
    const uint16_t *lsf_param = p->frame.lsf;
    const int16_t *lsf_quantizer[5];

    lsf_quantizer[0] = lsf_5_1[lsf_param[0]];
    lsf_quantizer[1] = lsf_5_2[lsf_param[1]];
    lsf_quantizer[2] = lsf_5_3[lsf_param[2] >> 1];
    lsf_quantizer[3] = lsf_5_4[lsf_param[3]];
    lsf_quantizer[4] = lsf_5_5[lsf_param[4]];
    /* lsf2lsp_3(): decode a set of 3 split-matrix quantized lsf indexes into an lsp vector */
    const uint16_t *lsf_param = p->frame.lsf;
    const int16_t *lsf_quantizer;

    memcpy(lsf_r,     lsf_quantizer, 3 * sizeof(*lsf_r));

    memcpy(lsf_r + 3, lsf_quantizer, 3 * sizeof(*lsf_r));

    memcpy(lsf_r + 6, lsf_quantizer, 4 * sizeof(*lsf_r));

    for (i = 1; i <= 3; i++)
/* decode_pitch_lag_1_6(): like ff_decode_pitch_lag(), but with 1/6 resolution */
static void decode_pitch_lag_1_6(int *lag_int, int *lag_frac, int pitch_index,
                                 const int prev_lag_int, const int subframe)
{
    if (subframe == 0 || subframe == 2) {
        if (pitch_index < 463) {
            *lag_int  = (pitch_index + 107) * 10923 >> 16;
            *lag_frac = pitch_index - *lag_int * 6 + 105;
        } else {
            *lag_int  = pitch_index - 368;
            *lag_frac = 0;
        }
    } else {
        *lag_int  = ((pitch_index + 5) * 10923 >> 16) - 1;
        *lag_frac = pitch_index - *lag_int * 6 - 3;
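The multiply-shift above avoids an integer division: 6 * 10923 = 65538, so (pitch_index + 107) * 10923 >> 16 over-estimates (pitch_index + 107) / 6 by less than 0.003 over this index range and the floor matches division by 6 exactly. A standalone check (not decoder code):

#include <stdio.h>

int main(void)
{
    for (int pitch_index = 0; pitch_index < 463; pitch_index++) {
        int lag_int = (pitch_index + 107) * 10923 >> 16;
        if (lag_int != (pitch_index + 107) / 6)
            printf("mismatch at pitch_index %d\n", pitch_index);
    }
    printf("check done\n");
    return 0;
}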
    /* decode_pitch_vector() */
    int pitch_lag_int, pitch_lag_frac;

    pitch_lag_int += pitch_lag_frac > 0;

    /* fractional position passed to acelpf_ctx.acelp_interpolatef() when
     * interpolating the past excitation with the b60 windowed sinc filter */
                                     pitch_lag_frac + 6 - 6 * (pitch_lag_frac > 0),
/* decode_10bit_pulse(): decode a 10-bit algebraic codebook index from a 10.2 kbit/s frame */
static void decode_10bit_pulse(int code, int pulse_position[8],
                               int i1, int i2, int i3)

    pulse_position[i2] = (positions[1] << 1) + ((code >> 1) & 1);
    pulse_position[i3] = (positions[0] << 1) + ((code >> 2) & 1);
/* decode_8_pulses_31bits(): decode the algebraic codebook index to pulse
 * positions and signs and construct the algebraic codebook vector */
    int pulse_position[8];

    temp = ((fixed_index[6] >> 2) * 25 + 12) >> 5;
    pulse_position[3] = temp % 5;
    pulse_position[7] = temp / 5;
    if (pulse_position[7] & 1)
        pulse_position[3] = 4 - pulse_position[3];
    pulse_position[3] = (pulse_position[3] << 1) + ( fixed_index[6]       & 1);
    pulse_position[7] = (pulse_position[7] << 1) + ((fixed_index[6] >> 1) & 1);

    for (i = 0; i < 4; i++) {
        const int pos1   = (pulse_position[i]     << 2) + i;
        const int pos2   = (pulse_position[i + 4] << 2) + i;
        const float sign = fixed_index[i] ? -1.0 : 1.0;
        fixed_sparse->x[i    ] = pos1;
        fixed_sparse->x[i + 4] = pos2;
        fixed_sparse->y[i    ] = sign;
        fixed_sparse->y[i + 4] = pos2 < pos1 ? -sign : sign;
    }
/* decode_fixed_sparse(): decode the algebraic codebook index to pulse positions
 * and signs, then construct the algebraic codebook vector */
static void decode_fixed_sparse(AMRFixed *fixed_sparse, const uint16_t *pulses,
                                const enum Mode mode, const int subframe)

    int *pulse_position = fixed_sparse->x;

    const int fixed_index = pulses[0];

    pulse_subset      = ((fixed_index >> 3) & 8) + (subframe << 1);
    pulse_position[0] = ( fixed_index       & 7) * 5 + track_position[pulse_subset];
    pulse_position[1] = ((fixed_index >> 3) & 7) * 5 + track_position[pulse_subset + 1];

    pulse_subset      = ((fixed_index & 1) << 1) + 1;
    pulse_position[0] = ((fixed_index >> 1) & 7) * 5 + pulse_subset;
    pulse_subset      = (fixed_index >> 4) & 3;
    pulse_position[1] = ((fixed_index >> 6) & 7) * 5 + pulse_subset +
                        (pulse_subset == 3 ? 1 : 0);
    fixed_sparse->n   = pulse_position[0] == pulse_position[1] ? 1 : 2;

    pulse_position[0] = ( fixed_index       & 7) * 5;
    pulse_subset      = (fixed_index >> 2) & 2;
    pulse_position[1] = ((fixed_index >> 4) & 7) * 5 + pulse_subset + 1;
    pulse_subset      = (fixed_index >> 6) & 2;
    pulse_position[2] = ((fixed_index >> 8) & 7) * 5 + pulse_subset + 2;

    pulse_position[1] = gray_decode[(fixed_index >> 3)  & 7] + 1;
    pulse_position[2] = gray_decode[(fixed_index >> 6)  & 7] + 2;
    pulse_subset      = (fixed_index >> 9) & 1;
    pulse_position[3] = gray_decode[(fixed_index >> 10) & 7] + pulse_subset + 3;

    for (i = 0; i < fixed_sparse->n; i++)
        fixed_sparse->y[i] = (pulses[1] >> i) & 1 ? 1.0 : -1.0;
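A sketch of 3-bit Gray-to-binary decoding, assuming the gray_decode[8] table referenced above follows the standard reflected Gray code (the authoritative values live in amrnbdata.h, so treat this only as an illustration of the technique):

#include <stdio.h>

int main(void)
{
    for (unsigned g = 0; g < 8; g++) {
        unsigned binary = g ^ (g >> 1) ^ (g >> 2);   /* Gray -> binary for 3 bits */
        printf("gray code %u -> binary %u\n", g, binary);
    }
    return 0;
}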
/* fixed_gain_smooth() */
static float fixed_gain_smooth(AMRContext *p, const float *lsf,
                               const float *lsf_avg, const enum Mode mode)

        diff += fabs(lsf_avg[i] - lsf[i]) / lsf_avg[i];

    const float smoothing_factor = av_clipf(4.0 * diff - 1.6, 0.0, 1.0);

                     (1.0 - smoothing_factor) * fixed_gain_mean;
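Standalone evaluation (not decoder code) of the smoothing factor above: it stays 0 while the relative LSF deviation diff is at most 0.4 and saturates at 1 from 0.65 upward, the same 0.65 threshold tracked by diff_count:

#include <stdio.h>

static float clipf(float x, float lo, float hi)
{
    return x < lo ? lo : x > hi ? hi : x;
}

int main(void)
{
    const float diffs[] = { 0.2f, 0.4f, 0.5f, 0.65f, 0.9f };

    for (int i = 0; i < 5; i++)
        printf("diff = %.2f -> smoothing factor %.2f\n",
               diffs[i], clipf(4.0f * diffs[i] - 1.6f, 0.0f, 1.0f));
    return 0;
}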
/* decode_gains(): decode pitch gain and fixed gain factor (part of section 6.1.3) */
static void decode_gains(AMRContext *p, const AMRNBSubframe *amr_subframe,
                         const enum Mode mode, const int subframe,
                         float *fixed_gain_factor)

    const uint16_t *gains;

    p->pitch_gain[4]   = gains[0] * (1.0 / 16384.0);
    *fixed_gain_factor = gains[1] * (1.0 /  4096.0);
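The 1/16384 and 1/4096 factors above convert the table entries from fixed point (Q14 for the pitch gain, Q12 for the fixed-gain correction factor) to float. A standalone sketch with made-up raw values, not actual table entries:

#include <stdio.h>

int main(void)
{
    unsigned raw_pitch_gain        = 13926;   /* hypothetical Q14 table entry */
    unsigned raw_fixed_gain_factor = 12288;   /* hypothetical Q12 table entry */

    printf("pitch gain        = %f\n", raw_pitch_gain        * (1.0 / 16384.0)); /* ~0.85 */
    printf("fixed gain factor = %f\n", raw_fixed_gain_factor * (1.0 /  4096.0)); /* 3.00  */
    return 0;
}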
    /* apply_ir_filter(): circularly convolve the sparse fixed vector with a
     * phase dispersion impulse response filter */
    int   lag = in->pitch_lag;
    float fac = in->pitch_fac;

    if (lag < AMR_SUBFRAME_SIZE >> 1)

    for (i = 0; i < in->n; i++) {

        const float *filterp;
/* anti_sparseness(): reduce fixed vector sparseness by smoothing with one of
 * three impulse response filters */
static const float *anti_sparseness(AMRContext *p, AMRFixed *fixed_sparse,
                                    const float *fixed_vector,
                                    float fixed_gain, float *out)

    for (i = 0; i < 5; i++)

    } else if (ir_filter_nr < 2)

    if (fixed_gain < 5.0)

        && ir_filter_nr < 2) {
/* synthesis(): conduct 10th order linear predictive coding synthesis */
static int synthesis(AMRContext *p, float *lpc,
                     float fixed_gain, const float *fixed_vector,
                     float *samples, uint8_t overflow)
    /* postfilter(): weight the LPC coefficients for the formant filter */
    const float *gamma_n, *gamma_d;

        lpc_n[i] = lpc[i] * gamma_n[i];
        lpc_d[i] = lpc[i] * gamma_d[i];
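The gamma_n/gamma_d weighting above amounts to bandwidth expansion: the coefficient of z^-k in A(z) is multiplied by gamma^k to obtain A(z/gamma). A generic standalone sketch (the coefficients are made up; 0.7 is chosen to match the ff_pow_0_7 table referenced below, but this is not decoder code):

#include <stdio.h>

int main(void)
{
    const double lpc[4] = { 1.2, -0.8, 0.3, -0.1 };  /* example a_1 .. a_4 */
    double g = 1.0;

    for (int k = 0; k < 4; k++) {
        g *= 0.7;                                    /* g = 0.7^(k+1) */
        printf("weighted a_%d = %f\n", k + 1, lpc[k] * g);
    }
    return 0;
}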
/* amrnb_decode_frame() */
static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)

    int buf_size = avpkt->size;

    int i, subframe, ret;
    float fixed_gain_factor;

    float synth_fixed_gain;
    const float *synth_fixed_vector;

    buf_out = (float *)frame->data[0];

    for (i = 0; i < 4; i++)

    for (subframe = 0; subframe < 4; subframe++) {

                     &fixed_gain_factor);

        av_log(avctx, AV_LOG_ERROR,
               "The file is corrupted, pitch_lag = 0 is not allowed\n");

                  synth_fixed_gain, spare_vector);
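For completeness, a minimal sketch of driving this decoder through the public libavcodec send/receive API (application-side code, not part of amrnbdec.c; assumes an already-filled AVPacket holding one RFC 4867 AMR-NB frame, with error handling trimmed):

#include <libavcodec/avcodec.h>

/* Decode one AMR-NB packet into a frame of 160 float samples. */
static int decode_one_amrnb_packet(const AVPacket *pkt, AVFrame *frame)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_AMR_NB);
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    if (!(ctx = avcodec_alloc_context3(codec)))
        return AVERROR(ENOMEM);

    /* AMR-NB is always 8 kHz mono; the decoder outputs AV_SAMPLE_FMT_FLT. */
    if ((ret = avcodec_open2(ctx, codec, NULL)) >= 0 &&
        (ret = avcodec_send_packet(ctx, pkt))   >= 0)
        ret = avcodec_receive_frame(ctx, frame);

    avcodec_free_context(&ctx);
    return ret < 0 ? ret : 0;
}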
uint16_t p_gain
index to decode the pitch gain
static const int16_t lsf_3_1_MODE_7k95[512][3]
float excitation_buf[PITCH_DELAY_MAX+LP_FILTER_ORDER+1+AMR_SUBFRAME_SIZE]
current excitation and all necessary excitation history
uint16_t pulses[10]
pulses: 10 for MODE_12k2, 7 for MODE_10k2, and index and sign for others
static av_cold int init(AVCodecContext *avctx)
static const uint8_t track_position[16]
track start positions for algebraic code book routines
static const float * anti_sparseness(AMRContext *p, AMRFixed *fixed_sparse, const float *fixed_vector, float fixed_gain, float *out)
Reduce fixed vector sparseness by smoothing with one of three IR filters.
uint64_t channel_layout
Audio channel layout.
float ff_amr_set_fixed_gain(float fixed_gain_factor, float fixed_mean_energy, float *prediction_error, float energy_mean, const float *pred_table)
Calculate fixed gain (part of section 6.1.3 of AMR spec)
static const uint8_t gray_decode[8]
3-bit Gray code to binary lookup table
static const int16_t lsf_5_3[256][4]
double prev_lsp_sub4[LP_FILTER_ORDER]
lsp vector for the 4th subframe of the previous frame
void ff_decode_pitch_lag(int *lag_int, int *lag_frac, int pitch_index, const int prev_lag_int, const int subframe, int third_as_first, int resolution)
Decode the adaptive codebook index to the integer and fractional parts of the pitch lag for one subframe.
int sample_rate
samples per second
static enum AVSampleFormat sample_fmts[]
#define AV_CH_LAYOUT_MONO
ACELPFContext acelpf_ctx
context for filters for ACELP-based codecs
static const int16_t lsf_3_2[512][3]
const float ff_b60_sinc[61]
b60 hamming windowed sinc function coefficients
static av_always_inline av_const float truncf(float x)
This structure describes decoded (raw) audio or video data.
#define PITCH_LAG_MIN_MODE_12k2
Lower bound on decoded lag search in 12.2kbit/s mode.
void ff_acelp_filter_init(ACELPFContext *c)
Initialize ACELPFContext.
static const uint8_t frame_sizes_nb[N_MODES]
number of bytes for each mode
#define AMR_SAMPLE_SCALE
Scale from constructed speech to [-1,1].
static const uint16_t qua_gain_code[32]
scalar quantized fixed gain table for 7.95 and 12.2 kbps modes
void(* celp_lp_synthesis_filterf)(float *out, const float *filter_coeffs, const float *in, int buffer_length, int filter_length)
LP synthesis filter.
#define LP_FILTER_ORDER
linear predictive coding filter order
#define SHARP_MAX
Maximum sharpening factor.
uint8_t bad_frame_indicator
bad frame ? 1 : 0
uint8_t prev_ir_filter_nr
previous impulse response filter "impNr": 0 - strong, 1 - medium, 2 - none
const float ff_pow_0_55[10]
Table of pow(0.55,n)
void ff_clear_fixed_vector(float *out, const AMRFixed *in, int size)
Clear array values set by set_fixed_vector.
static int amrnb_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
void ff_adaptive_gain_control(float *out, const float *in, float speech_energ, int size, float alpha, float *gain_mem)
Adaptive gain control (as used in AMR postfiltering)
static void lsf2lsp_5(AMRContext *p)
Decode a set of 5 split-matrix quantized lsf indexes into 2 lsp vectors.
int16_t prev_lsf_r[LP_FILTER_ORDER]
residual LSF vector from previous subframe
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void decode_pitch_lag_1_6(int *lag_int, int *lag_frac, int pitch_index, const int prev_lag_int, const int subframe)
Like ff_decode_pitch_lag(), but with 1/6 resolution.
float postfilter_agc
previous factor used for adaptive gain control
Mode
Frame type (Table 1a in 3GPP TS 26.101)
void(* acelp_interpolatef)(float *out, const float *in, const float *filter_coeffs, int precision, int frac_pos, int filter_length, int length)
Floating point version of ff_acelp_interpolate()
static void lsf2lsp_for_mode12k2(AMRContext *p, double lsp[LP_FILTER_ORDER], const float lsf_no_r[LP_FILTER_ORDER], const int16_t *lsf_quantizer[5], const int quantizer_offset, const int sign, const int update)
Decode a set of 5 split-matrix quantized lsf indexes into an lsp vector.
static const int16_t lsf_3_3_MODE_5k15[128][4]
AMRNB unpacked data frame.
uint8_t ir_filter_onset
flag for impulse response filter strength
static void postfilter(AMRContext *p, float *lpc, float *buf_out)
Perform adaptive post-filtering to enhance the quality of the speech.
static const float energy_pred_fac[4]
4-tap moving average prediction coefficients in reverse order
Sparse representation for the algebraic codebook (fixed) vector.
double lsp[4][LP_FILTER_ORDER]
lsp vectors from current frame
void ff_acelp_lsf2lspd(double *lsp, const float *lsf, int lp_order)
Floating point version of ff_acelp_lsf2lsp()
#define AMR_AGC_ALPHA
Adaptive gain control factor used in post-filter.
float * excitation
pointer to the current excitation vector in excitation_buf
static void weighted_vector_sumd(double *out, const double *in_a, const double *in_b, double weight_coeff_a, double weight_coeff_b, int length)
Double version of ff_weighted_vector_sumf()
AMRNBSubframe subframe[4]
unpacked data for each subframe
#define MIN_LSF_SPACING
Ensures stability of LPC filter.
#define AMR_SUBFRAME_SIZE
samples per subframe
#define PRED_FAC_MODE_12k2
Prediction factor for 12.2kbit/s mode.
static const int16_t lsf_5_5[64][4]
AMRNBFrame frame
decoded AMR parameters (lsf coefficients, codebook indexes, etc)
void(* celp_lp_zero_synthesis_filterf)(float *out, const float *filter_coeffs, const float *in, int buffer_length, int filter_length)
LP zero synthesis filter.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define AMR_TILT_GAMMA_T
Tilt factor = 1st reflection coefficient * gamma_t.
static const float *const ir_filters_lookup[2]
static void apply_ir_filter(float *out, const AMRFixed *in, const float *filter)
Circularly convolve a sparse fixed vector with a phase dispersion impulse response filter (D....
uint16_t lsf[5]
lsf parameters: 5 parameters for MODE_12k2, only 3 for other modes
void ff_decode_10_pulses_35bits(const int16_t *fixed_index, AMRFixed *fixed_sparse, const uint8_t *gray_decode, int half_pulse_count, int bits)
Decode the algebraic codebook index to pulse positions and signs and construct the algebraic codebook vector.
static const float highpass_zeros[2]
float fixed_vector[AMR_SUBFRAME_SIZE]
algebraic codebook (fixed) vector (must be kept zero between frames)
static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf, int buf_size)
Unpack an RFC4867 speech frame into the AMR frame mode and parameters.
float pitch_gain[5]
quantified pitch gains for the current and previous four subframes
#define AMR_TILT_RESPONSE
Number of impulse response coefficients used for tilt factor.
static void decode_pitch_vector(AMRContext *p, const AMRNBSubframe *amr_subframe, const int subframe)
static const uint16_t qua_gain_pit[16]
scalar quantized pitch gain table for 7.95 and 12.2 kbps modes
static void update_state(AMRContext *p)
Update buffers and history at the end of decoding a subframe.
uint16_t p_lag
index to decode the pitch lag
void ff_acelp_vectors_init(ACELPVContext *c)
Initialize ACELPVContext.
float lsf_q[4][LP_FILTER_ORDER]
Interpolated LSF vector for fixed gain smoothing.
float tilt_mem
previous input to tilt compensation filter
void ff_celp_math_init(CELPMContext *c)
Initialize CELPMContext.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static const uint16_t gains_high[128][2]
gain table for 6.70, 7.40 and 10.2 kbps modes
float high_pass_mem[2]
previous intermediate values in the high-pass filter
enum AVSampleFormat sample_fmt
audio sample format
static const float highpass_poles[2]
void(* acelp_apply_order_2_transfer_function)(float *out, const float *in, const float zero_coeffs[2], const float pole_coeffs[2], float gain, float mem[2], int n)
Apply an order 2 rational transfer function in-place.
float lpc[4][LP_FILTER_ORDER]
lpc coefficient vectors for 4 subframes
static void pitch_sharpening(AMRContext *p, int subframe, enum Mode mode, AMRFixed *fixed_sparse)
Apply pitch lag to obtain the sharpened fixed vector (section 6.1.2)
const float ff_pow_0_7[10]
Table of pow(0.7,n)
void avpriv_report_missing_feature(void *avc, const char *msg, ...)
Log a generic warning message about a missing feature.
static const float highpass_gain
float fixed_gain[5]
quantified fixed gains for the current and previous four subframes
static const uint16_t gains_MODE_4k75[512][2]
gain table for 4.75 kbps mode
float beta
previous pitch_gain, bounded by [0.0,SHARP_MAX]
float samples_in[LP_FILTER_ORDER+AMR_SUBFRAME_SIZE]
floating point samples
void ff_celp_filter_init(CELPFContext *c)
Initialize CELPFContext.
void ff_tilt_compensation(float *mem, float tilt, float *samples, int size)
Apply tilt compensation filter, 1 - tilt * z^-1.
#define AV_LOG_INFO
Standard information.
static const int16_t lsf_3_3[512][4]
uint16_t fixed_gain
index to decode the fixed gain factor, for MODE_12k2 and MODE_7k95
int channels
number of audio channels
float pitch_vector[AMR_SUBFRAME_SIZE]
adaptive code book (pitch) vector
static const int16_t lsf_5_4[256][4]
static void interpolate_lsf(ACELPVContext *ctx, float lsf_q[4][LP_FILTER_ORDER], float *lsf_new)
Interpolate the LSF vector (used for fixed gain smoothing).
static const float lsf_3_mean[LP_FILTER_ORDER]
static float fixed_gain_smooth(AMRContext *p, const float *lsf, const float *lsf_avg, const enum Mode mode)
fixed gain smoothing Note that where the spec specifies the "spectrum in the q domain" in section 6....
static int synthesis(AMRContext *p, float *lpc, float fixed_gain, const float *fixed_vector, float *samples, uint8_t overflow)
Conduct 10th order linear predictive coding synthesis.
static void decode_fixed_sparse(AMRFixed *fixed_sparse, const uint16_t *pulses, const enum Mode mode, const int subframe)
Decode the algebraic codebook index to pulse positions and signs, then construct the algebraic codebook vector.
static const int16_t lsf_5_2[256][4]
#define LSF_R_FAC
LSF residual tables to Hertz.
uint8_t pitch_lag_int
integer part of pitch lag from current subframe
static const int16_t lsp_avg_init[LP_FILTER_ORDER]
Mean lsp values.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
AVSampleFormat
Audio sample formats.
static const float pred_fac[LP_FILTER_ORDER]
Prediction factor table for modes other than 12.2kbit/s.
const float ff_pow_0_75[10]
Table of pow(0.75,n)
static const int16_t lsf_5_1[128][4]
const char * name
Name of the codec implementation.
uint8_t hang_count
the number of subframes since a hangover period started
static void lsf2lsp_3(AMRContext *p)
Decode a set of 3 split-matrix quantized lsf indexes into an lsp vector.
static const float lsf_5_mean[LP_FILTER_ORDER]
float prediction_error[4]
quantified prediction errors {20log10(^gamma_gc)} for previous four subframes
static const float *const ir_filters_lookup_MODE_7k95[2]
void ff_celp_circ_addf(float *out, const float *in, const float *lagged, int lag, float fac, int n)
Add an array to a rotated array.
static const float energy_mean[8]
desired mean innovation energy, indexed by active mode
main external API structure.
#define AMR_BLOCK_SIZE
samples per frame
uint8_t diff_count
the number of subframes for which diff has been above 0.65
static const int16_t lsf_3_1[256][3]
void ff_set_fixed_vector(float *out, const AMRFixed *in, float scale, int size)
Add fixed vector to an array from a sparse representation.
static void ff_amr_bit_reorder(uint16_t *out, int size, const uint8_t *data, const R_TABLE_TYPE *ord_table)
Fill the frame structure variables from bitstream by parsing the given reordering table that uses the...
static const uint8_t base_five_table[128][3]
Base-5 representation for values 0-124.
ACELPVContext acelpv_ctx
context for vector operations for ACELP-based codecs
float(* dot_productf)(const float *a, const float *b, int length)
Return the dot product.
CELPFContext celpf_ctx
context for filters for CELP-based codecs
float prev_sparse_fixed_gain
previous fixed gain; used by anti-sparseness processing to determine "onset"
static const uint16_t gains_low[64][2]
gain table for 5.15 and 5.90 kbps modes
This structure stores compressed data.
static void decode_8_pulses_31bits(const int16_t *fixed_index, AMRFixed *fixed_sparse)
Decode the algebraic codebook index to pulse positions and signs and construct the algebraic codebook vector.
static float tilt_factor(AMRContext *p, float *lpc_n, float *lpc_d)
Get the tilt factor of a formant filter from its transfer function.
static void decode_10bit_pulse(int code, int pulse_position[8], int i1, int i2, int i3)
Decode a 10-bit algebraic codebook index from a 10.2 kbit/s frame.
void ff_scale_vector_to_given_sum_of_squares(float *out, const float *in, float sum_of_squares, const int n)
Set the sum of squares of a signal by scaling.
float lsf_avg[LP_FILTER_ORDER]
averaged lsf vector
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define MIN_ENERGY
Initial energy in dB.
float postfilter_mem[10]
previous intermediate values in the formant filter
void ff_acelp_lspd2lpc(const double *lsp, float *lpc, int lp_half_order)
Reconstruct LPC coefficients from the line spectral pair frequencies.
void(* weighted_vector_sumf)(float *out, const float *in_a, const float *in_b, float weight_coeff_a, float weight_coeff_b, int length)
float implementation of weighted sum of two vectors.
AMRNB unpacked data subframe.
#define AMR_SAMPLE_BOUND
threshold for synthesis overflow
static const uint8_t *const amr_unpacking_bitmaps_per_mode[N_MODES]
position of the bitmapping data for each packet type in the AMRNBFrame
static av_cold int amrnb_decode_init(AVCodecContext *avctx)
void ff_set_min_dist_lsf(float *lsf, double min_spacing, int size)
Adjust the quantized LSFs so they are increasing and not too close.
static void decode_gains(AMRContext *p, const AMRNBSubframe *amr_subframe, const enum Mode mode, const int subframe, float *fixed_gain_factor)
Decode pitch gain and fixed gain factor (part of section 6.1.3).
CELPMContext celpm_ctx
context for fixed point math operations
static const int8_t lsp_sub4_init[LP_FILTER_ORDER]
Initialization values for the lsp vector from the 4th subframe of the previous frame.