/* libavfilter/af_afir.c — frequency-domain FIR filter (reconstructed fragments) */

static void fcmul_add_c(float *sum, const float *t, const float *c,
                        ptrdiff_t len)
{
    int n;

    for (n = 0; n < len; n++) {
        const float cre = c[2 * n    ];
        const float cim = c[2 * n + 1];
        const float tre = t[2 * n    ];
        const float tim = t[2 * n + 1];

        sum[2 * n    ] += tre * cre - tim * cim;
        sum[2 * n + 1] += tre * cim + tim * cre;
    }

    /* one extra real-only coefficient is stored past the interleaved pairs */
    sum[2 * n] += t[2 * n] * c[2 * n];
}
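A quick way to see the layout this kernel assumes: interleaved re/im pairs for bins 0..len-1, plus one extra real-only coefficient at index 2*len. A minimal, hypothetical self-test (not part of the filter; assumes fcmul_add_c() above is in the same translation unit):

#include <stdio.h>

int main(void)
{
    enum { LEN = 2 };
    float sum[2 * LEN + 2] = { 0 };
    /* t and c hold (1+2i, 3+4i) and (2+0i, 0+1i), plus real tails 5 and 3 */
    const float t[2 * LEN + 2] = { 1, 2, 3, 4, 5, 0 };
    const float c[2 * LEN + 2] = { 2, 0, 0, 1, 3, 0 };

    fcmul_add_c(sum, t, c, LEN);

    /* expect: 2 4 (bin 0), -4 3 (bin 1), 15 (real tail) */
    for (int i = 0; i < 2 * LEN + 1; i++)
        printf("%g ", sum[i]);
    printf("\n");
    return 0;
}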
/* fir_quantum(): process one quantum of min_part_size samples for channel ch */
const float *in = (const float *)s->in[0]->extended_data[ch] + offset;
/* ... */
const int nb_samples = FFMIN(s->min_part_size, out->nb_samples - offset);
/* ... */
for (n = 0; n < nb_samples; n++) {
/* ... */
memset(sum, 0, sizeof(*sum) * seg->fft_length);
/* ... */
memmove(src, src + s->min_part_size,
        (seg->input_size - s->min_part_size) * sizeof(*src));
/* ... */
for (n = 0; n < nb_samples; n++) {
/* ... */
s->fdsp->vector_fmul_scalar(ptr, ptr, s->wet_gain, FFALIGN(nb_samples, 4));
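The memmove above is the sliding input history of the partitioned convolution: the oldest min_part_size samples are dropped so the newest block can be appended at the tail. With hypothetical sizes input_size = 4096 and min_part_size = 1024, each quantum keeps the latest 3072 samples and shifts them to the front before the next 1024 arrive.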
/* fir_channels(): split channels across slice-threading jobs */
const int start = (out->channels * jobnr) / nb_jobs;
const int end   = (out->channels * (jobnr + 1)) / nb_jobs;
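For example, six channels over four jobs yields the ranges [0,1), [1,3), [3,4), [4,6): every channel is processed exactly once and job sizes differ by at most one, which is what makes this integer split safe for slice threading.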
/* drawtext(): render txt with the bitmap CGA font */
for (i = 0; txt[i]; i++) {
    /* ... */
    for (char_y = 0; char_y < font_height; char_y++) {
        /* ... */
        if (font[txt[i] * font_height + char_y] & mask)
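Each glyph in avpriv_cga_font occupies font_height consecutive bytes, one byte per row (2048 bytes / 256 glyphs = 8 rows per glyph), so font[txt[i] * font_height + char_y] selects row char_y of the current character and mask tests one column bit of that row.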
/* draw_line(): integer Bresenham setup */
int dx = FFABS(x1 - x0), sx = x0 < x1 ? 1 : -1;
int dy = FFABS(y1 - y0), sy = y0 < y1 ? 1 : -1;
int err = (dx > dy ? dx : -dy) / 2, e2;
/* ... */
if (x0 == x1 && y0 == y1)
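These declarations set up the classic integer Bresenham walk. A minimal sketch of the loop they feed (plot() stands in for the actual pixel write, which in this filter pokes a 32-bit color into out->data[0]):

for (;;) {
    plot(x0, y0, color);
    if (x0 == x1 && y0 == y1)
        break;
    e2 = err;
    if (e2 > -dx) { err -= dy; x0 += sx; }
    if (e2 <  dy) { err += dx; y0 += sy; }
}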
/* draw_response(): compute magnitude/phase/group delay of the IR and plot them */
float *mag, *phase, *delay, min = FLT_MAX, max = FLT_MIN;
/* note: FLT_MIN is the smallest positive float; this works as a "maximum"
 * seed only because the magnitudes compared against it are non-negative */
float min_delay = FLT_MAX, max_delay = FLT_MIN;
int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
/* ... */
memset(out->data[0], 0, s->h * out->linesize[0]);
/* ... */
if (!mag || !phase || !delay)
/* ... */
channel = av_clip(s->ir_channel, 0, s->in[1]->channels - 1);
for (i = 0; i < s->w; i++) {
    const float *src = (const float *)s->in[1]->extended_data[channel];
    double w = i * M_PI / (s->w - 1);
    double div, real_num = 0., imag_num = 0., real = 0., imag = 0.;

    for (x = 0; x < s->nb_taps; x++) {
        real     += cos(-x * w) * src[x];
        imag     += sin(-x * w) * src[x];
        real_num += cos(-x * w) * src[x] * x;
        imag_num += sin(-x * w) * src[x] * x;
    }

    mag[i]   = hypot(real, imag);
    phase[i] = atan2(imag, real);
    div      = real * real + imag * imag;
    delay[i] = (real_num * real + imag_num * imag) / div;

    min_delay = fminf(min_delay, delay[i]);
    max_delay = fmaxf(max_delay, delay[i]);
}

for (i = 0; i < s->w; i++) {
    int ymag   = mag[i] / max * (s->h - 1);
    int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
    int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1);

    ymag   = s->h - 1 - av_clip(ymag,   0, s->h - 1);
    yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
    ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);

    /* first column: seed the prev_* values (guards elided in this listing) */
    prev_yphase = yphase;
    prev_ydelay = ydelay;
    /* ... draw_line() calls connect column i to column i-1 ... */
    prev_yphase = yphase;
    prev_ydelay = ydelay;
}

if (s->w > 400 && s->h > 100) {
    /* ... */
    drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
    /* ... */
    snprintf(text, sizeof(text), "%.2f", max_delay);
    /* ... */
    snprintf(text, sizeof(text), "%.2f", min_delay);
static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg,
                        int offset, int nb_partitions, int part_size)
/* ... */
for (int ch = 0; ch < ctx->inputs[0]->channels; ch++) {
/* convert_coeffs(): choose the partition layout */
int left, offset = 0, part_size, max_part_size;
/* ... */
if (s->minp > s->maxp) {
/* ... */
max_part_size = 1 << av_log2(s->maxp);
/* ... */
s->min_part_size = part_size;

for (i = 0; left > 0; i++) {
    int step = part_size == max_part_size ? INT_MAX : 1 + (i == 0);
    int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);
    /* ... */
    s->nb_segments = i + 1;
    /* ... */
    offset += nb_partitions * part_size;
    left   -= nb_partitions * part_size;
    /* ... */
    part_size = FFMIN(part_size, max_part_size);
}
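Assuming the elided statement just before the clamp doubles part_size (the FFMIN against max_part_size only makes sense after growth), this is the usual non-uniform partitioning schedule: with, say, min 1024, max 4096 and a 16384-tap IR it would produce segments of 2x1024, 1x2048, 1x4096 and finally 2x4096 partitions, trading latency (set by the smallest partition) against the cost of many small FFTs.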
/* convert_coeffs(): normalize the IR according to the selected gain type */
for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
    float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];

    for (i = 0; i < s->nb_taps; i++)
        /* ... */
}
s->gain = ctx->inputs[1]->channels / power;
/* ... */
for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
    float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];

    for (i = 0; i < s->nb_taps; i++)
        /* ... */
}
s->gain = ctx->inputs[1]->channels / power;
/* ... */
for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
    float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];

    for (i = 0; i < s->nb_taps; i++)
        power += time[i] * time[i];
}
s->gain = sqrtf(ch / power);
/* ... */
s->gain = FFMIN(s->gain * s->ir_gain, 1.f);

for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
    float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];

    s->fdsp->vector_fmul_scalar(time, time, s->gain, FFALIGN(s->nb_taps, 4));
}
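The three branches correspond to the filter's gain-normalization modes. The first two accumulation bodies are elided in this listing, but by symmetry with the third they presumably sum |h[i]| and h[i] per channel, giving gain = C/sum|h|, C/sum h, or sqrt(C/sum h^2) for C channels; the result is then scaled by ir_gain and clamped to at most 1 before being multiplied into the taps.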
/* convert_coeffs(): split the IR into partitions and transform each one */
for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
    float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
    /* ... */
    const float scale = 1.f / seg->part_size;
    /* ... */
    const int remaining = s->nb_taps - toffset;
    /* ... */
    coeff[coffset].im = 0;
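The scale = 1.f / seg->part_size factor folded into the transformed coefficients is most plausibly the normalization for the unnormalized RDFT round trip, paid once at coefficient load time rather than on every processed block (an inference from the code shown, not stated in the listing).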
/* check_ir(): reject impulse responses longer than max_ir_len seconds */
int nb_taps, max_nb_taps;
/* ... */
max_nb_taps = s->max_ir_len * ctx->outputs[0]->sample_rate;
if (nb_taps > max_nb_taps) {
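For example, with a max_ir_len of 30 seconds and a 48000 Hz output sample rate, max_nb_taps is 1440000; any longer IR is rejected here.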
/* activate() */
if (!s->eof_coeffs) {
/* ... */
if (!s->eof_coeffs) {
/* ... */
if (!s->have_coeffs && s->eof_coeffs) {
/* ... */
wanted = FFMAX(s->min_part_size,
               (available / s->min_part_size) * s->min_part_size);
/* ... */
if (s->response && s->have_coeffs) {
    int64_t old_pts = s->video->pts;
    int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base,
                                   ctx->outputs[1]->time_base);
    /* ... */
    s->video->pts = new_pts;
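av_rescale_q() converts the audio pts into the video output's time base: for instance, a pts of 44100 in a 1/44100 time base (one second) becomes 25 in a 1/25 time base.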
/* config_output() */
s->one2many = ctx->inputs[1]->channels == 1; /* a single-channel IR is applied to every input channel */
/* ... */
s->nb_coef_channels = ctx->inputs[1]->channels;
/* cleanup: per-channel, per-segment and per-output teardown (uninit()) */
for (int ch = 0; ch < s->nb_channels; ch++) {
    /* ... */
}
for (int ch = 0; ch < s->nb_channels; ch++) {
    /* ... */
}
for (int i = 0; i < s->nb_segments; i++) {
    /* ... */
}
for (int i = 0; i < ctx->nb_outputs; i++)
    /* ... */
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioFIRContext, x)
/* ... */
{ "channel", "set IR channel to display frequency response", OFFSET(ir_channel),
  AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
.description   = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in 2nd stream."),
/* ... */
.priv_class    = &afir_class,
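As an illustrative invocation (not part of this listing), the audio to be filtered goes in on the filter's first input and the impulse response on its second, e.g.:

ffmpeg -i input.wav -i ir.wav -filter_complex afir output.wav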