FFmpeg
vf_perspective.c
/*
 * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/eval.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "filters.h"
#include "video.h"

#define SUB_PIXEL_BITS  8
#define SUB_PIXELS      (1 << SUB_PIXEL_BITS)
#define COEFF_BITS      11

#define LINEAR 0
#define CUBIC  1

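/*
 * Fixed-point layout: remapped source coordinates are stored with
 * SUB_PIXEL_BITS (8) fractional bits, i.e. SUB_PIXELS (256) phases per
 * pixel, and interpolation weights are quantized to COEFF_BITS (11)
 * bits, which keeps the 4x4 cubic weighted sum within 32-bit int range.
 */
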
typedef struct PerspectiveContext {
    const AVClass *class;
    char *expr_str[4][2];
    double ref[4][2];
    int32_t (*pv)[2];
    int32_t coeff[SUB_PIXELS][4];
    int interpolation;
    int linesize[4];
    int height[4];
    int hsub, vsub;
    int nb_planes;
    int sense;
    int eval_mode;

    int (*perspective)(AVFilterContext *ctx,
                       void *arg, int job, int nb_jobs);
} PerspectiveContext;

#define OFFSET(x) offsetof(PerspectiveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

enum PERSPECTIVESense {
    PERSPECTIVE_SENSE_SOURCE      = 0, ///< coordinates give locations in source of corners of destination.
    PERSPECTIVE_SENSE_DESTINATION = 1, ///< coordinates give locations in destination of corners of source.
};

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

static const AVOption perspective_options[] = {
    { "x0", "set top left x coordinate",     OFFSET(expr_str[0][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "y0", "set top left y coordinate",     OFFSET(expr_str[0][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "x1", "set top right x coordinate",    OFFSET(expr_str[1][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
    { "y1", "set top right y coordinate",    OFFSET(expr_str[1][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "x2", "set bottom left x coordinate",  OFFSET(expr_str[2][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "y2", "set bottom left y coordinate",  OFFSET(expr_str[2][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
    { "x3", "set bottom right x coordinate", OFFSET(expr_str[3][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
    { "y3", "set bottom right y coordinate", OFFSET(expr_str[3][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
    { "interpolation", "set interpolation", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=LINEAR}, 0, 1, FLAGS, .unit = "interpolation" },
    {      "linear", "", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, .unit = "interpolation" },
    {      "cubic",  "", 0, AV_OPT_TYPE_CONST, {.i64=CUBIC},  0, 0, FLAGS, .unit = "interpolation" },
    { "sense", "specify the sense of the coordinates", OFFSET(sense), AV_OPT_TYPE_INT, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 1, FLAGS, .unit = "sense"},
    {       "source", "specify locations in source to send to corners in destination",
                0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 0, FLAGS, .unit = "sense"},
    {       "destination", "specify locations in destination to send corners of source",
                0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_DESTINATION}, 0, 0, FLAGS, .unit = "sense"},
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, .unit = "eval" },
         { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
         { "frame", "eval expressions per-frame",                  0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },

    { NULL }
};

AVFILTER_DEFINE_CLASS(perspective);

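/*
 * Illustrative usage (a sketch, not part of this file): pinch the top
 * edge of a clip inwards, mapping the source's top corners to points
 * 10% inside the destination, with cubic resampling:
 *
 *   ffmpeg -i in.mp4 -vf \
 *     "perspective=x0=W/10:y0=0:x1=9*W/10:y1=0:sense=destination:interpolation=cubic" \
 *     out.mp4
 *
 * The unset corners keep their defaults (x2=0:y2=H, x3=W:y3=H).
 */
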
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
    AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV440P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV411P,
    AV_PIX_FMT_YUV410P,  AV_PIX_FMT_GBRP,     AV_PIX_FMT_GBRAP,    AV_PIX_FMT_GRAY8,    AV_PIX_FMT_NONE
};

static inline double get_coeff(double d)
{
    double coeff, A = -0.60;

    d = fabs(d);

    if (d < 1.0)
        coeff = (1.0 - (A + 3.0) * d * d + (A + 2.0) * d * d * d);
    else if (d < 2.0)
        coeff = (-4.0 * A + 8.0 * A * d - 5.0 * A * d * d + A * d * d * d);
    else
        coeff = 0.0;

    return coeff;
}

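/*
 * get_coeff() is the Keys cubic convolution kernel with A = -0.60: it is
 * 1 at d == 0 and 0 at all other integer offsets, and the four taps
 * k(-1-d), k(-d), k(1-d), k(2-d) sampled in config_input() sum to 1 for
 * any phase d, so an unshifted image passes through unchanged.  A minimal
 * stand-alone sanity check (illustrative sketch only, never compiled in):
 */
#if 0
#include <stdio.h>
static void check_kernel(void)
{
    for (double d = 0.0; d < 1.0; d += 0.125) {
        /* the same four taps that config_input() quantizes per phase */
        double sum = get_coeff(-1.0 - d) + get_coeff(-d) +
                     get_coeff(1.0 - d) + get_coeff(2.0 - d);
        printf("phase %.3f: tap sum %.17g\n", d, sum); /* prints ~1.0 */
    }
}
#endif
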
static const char *const var_names[] = {   "W",   "H",   "in",   "on",        NULL };
enum                                   { VAR_W, VAR_H, VAR_IN, VAR_ON, VAR_VARS_NB };

static int calc_persp_luts(AVFilterContext *ctx, AVFilterLink *inlink)
{
    FilterLink *inl       = ff_filter_link(inlink);
    PerspectiveContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    FilterLink *outl      = ff_filter_link(outlink);
    double (*ref)[2]      = s->ref;

    double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h,
                                   [VAR_IN] = inl->frame_count_out + 1,
                                   [VAR_ON] = outl->frame_count_in + 1 };
    const int h = values[VAR_H];
    const int w = values[VAR_W];
    double x0, x1, x2, x3, x4, x5, x6, x7, x8, q;
    double t0, t1, t2, t3;
    int x, y, i, j, ret;

    for (i = 0; i < 4; i++) {
        for (j = 0; j < 2; j++) {
            if (!s->expr_str[i][j])
                return AVERROR(EINVAL);
            ret = av_expr_parse_and_eval(&s->ref[i][j], s->expr_str[i][j],
                                         var_names, &values[0],
                                         NULL, NULL, NULL, NULL,
                                         0, 0, ctx);
            if (ret < 0)
                return ret;
        }
    }

    switch (s->sense) {
    case PERSPECTIVE_SENSE_SOURCE:
        x6 = ((ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
              (ref[2][1] - ref[3][1]) -
             ( ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
              (ref[2][0] - ref[3][0])) * h;
        x7 = ((ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
              (ref[1][0] - ref[3][0]) -
             ( ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
              (ref[1][1] - ref[3][1])) * w;
        q =  ( ref[1][0] - ref[3][0]) * (ref[2][1] - ref[3][1]) -
             ( ref[2][0] - ref[3][0]) * (ref[1][1] - ref[3][1]);

        x0 = q * (ref[1][0] - ref[0][0]) * h + x6 * ref[1][0];
        x1 = q * (ref[2][0] - ref[0][0]) * w + x7 * ref[2][0];
        x2 = q *  ref[0][0] * w * h;
        x3 = q * (ref[1][1] - ref[0][1]) * h + x6 * ref[1][1];
        x4 = q * (ref[2][1] - ref[0][1]) * w + x7 * ref[2][1];
        x5 = q *  ref[0][1] * w * h;
        x8 = q * w * h;
        break;
    case PERSPECTIVE_SENSE_DESTINATION:
        t0 = ref[0][0] * (ref[3][1] - ref[1][1]) +
             ref[1][0] * (ref[0][1] - ref[3][1]) +
             ref[3][0] * (ref[1][1] - ref[0][1]);
        t1 = ref[1][0] * (ref[2][1] - ref[3][1]) +
             ref[2][0] * (ref[3][1] - ref[1][1]) +
             ref[3][0] * (ref[1][1] - ref[2][1]);
        t2 = ref[0][0] * (ref[3][1] - ref[2][1]) +
             ref[2][0] * (ref[0][1] - ref[3][1]) +
             ref[3][0] * (ref[2][1] - ref[0][1]);
        t3 = ref[0][0] * (ref[1][1] - ref[2][1]) +
             ref[1][0] * (ref[2][1] - ref[0][1]) +
             ref[2][0] * (ref[0][1] - ref[1][1]);

        x0 = t0 * t1 * w * (ref[2][1] - ref[0][1]);
        x1 = t0 * t1 * w * (ref[0][0] - ref[2][0]);
        x2 = t0 * t1 * w * (ref[0][1] * ref[2][0] - ref[0][0] * ref[2][1]);
        x3 = t1 * t2 * h * (ref[1][1] - ref[0][1]);
        x4 = t1 * t2 * h * (ref[0][0] - ref[1][0]);
        x5 = t1 * t2 * h * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]);
        x6 = t1 * t2 * (ref[1][1] - ref[0][1]) +
             t0 * t3 * (ref[2][1] - ref[3][1]);
        x7 = t1 * t2 * (ref[0][0] - ref[1][0]) +
             t0 * t3 * (ref[3][0] - ref[2][0]);
        x8 = t1 * t2 * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]) +
             t0 * t3 * (ref[2][0] * ref[3][1] - ref[2][1] * ref[3][0]);
        break;
    default:
        av_assert0(0);
    }

    for (y = 0; y < h; y++){
        for (x = 0; x < w; x++){
            int u, v;

            u = lrint(SUB_PIXELS * (x0 * x + x1 * y + x2) /
                                   (x6 * x + x7 * y + x8));
            v = lrint(SUB_PIXELS * (x3 * x + x4 * y + x5) /
                                   (x6 * x + x7 * y + x8));

            s->pv[x + y * w][0] = u;
            s->pv[x + y * w][1] = v;
        }
    }

    return 0;
}
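/*
 * The double loop above bakes the projective transform into the pv[]
 * LUT: for every destination pixel (x, y) it stores the source position
 *
 *     u = SUB_PIXELS * (x0*x + x1*y + x2) / (x6*x + x7*y + x8)
 *     v = SUB_PIXELS * (x3*x + x4*y + x5) / (x6*x + x7*y + x8)
 *
 * so the per-pixel resamplers below never re-evaluate the homography.
 */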

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    PerspectiveContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int h = inlink->h;
    int w = inlink->w;
    int i, j, ret;
    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
    if (!s->pv)
        return AVERROR(ENOMEM);

    if (s->eval_mode == EVAL_MODE_INIT) {
        if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
            return ret;
        }
    }

    for (i = 0; i < SUB_PIXELS; i++){
        double d = i / (double)SUB_PIXELS;
        double temp[4];
        double sum = 0;

        for (j = 0; j < 4; j++)
            temp[j] = get_coeff(j - d - 1);

        for (j = 0; j < 4; j++)
            sum += temp[j];

        for (j = 0; j < 4; j++)
            s->coeff[i][j] = lrint((1 << COEFF_BITS) * temp[j] / sum);
    }

    return 0;
}
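/*
 * The loop above quantizes the cubic kernel once per sub-pixel phase:
 * the four floating-point taps are renormalized by their sum (which is
 * 1 up to rounding) and scaled so each s->coeff[i][] row adds up to
 * roughly 1 << COEFF_BITS.
 */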

typedef struct ThreadData {
    uint8_t *dst;
    int dst_linesize;
    uint8_t *src;
    int src_linesize;
    int w, h;
    int hsub, vsub;
} ThreadData;
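/*
 * ThreadData above describes one plane of one frame; ff_filter_execute()
 * hands it to every slice job, and each job derives its horizontal band
 * from the (job, nb_jobs) pair.
 */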

static int resample_cubic(AVFilterContext *ctx, void *arg,
                          int job, int nb_jobs)
{
    PerspectiveContext *s = ctx->priv;
    ThreadData *td = arg;
    uint8_t *dst = td->dst;
    int dst_linesize = td->dst_linesize;
    uint8_t *src = td->src;
    int src_linesize = td->src_linesize;
    int w = td->w;
    int h = td->h;
    int hsub = td->hsub;
    int vsub = td->vsub;
    int start = (h * job) / nb_jobs;
    int end   = (h * (job+1)) / nb_jobs;
    const int linesize = s->linesize[0];
    int x, y;

    for (y = start; y < end; y++) {
        int sy = y << vsub;
        for (x = 0; x < w; x++) {
            int u, v, subU, subV, sum, sx;

            sx   = x << hsub;
            u    = s->pv[sx + sy * linesize][0] >> hsub;
            v    = s->pv[sx + sy * linesize][1] >> vsub;
            subU = u & (SUB_PIXELS - 1);
            subV = v & (SUB_PIXELS - 1);
            u  >>= SUB_PIXEL_BITS;
            v  >>= SUB_PIXEL_BITS;

            if (u > 0 && v > 0 && u < w - 2 && v < h - 2){
                const int index = u + v * src_linesize;
                const int a = s->coeff[subU][0];
                const int b = s->coeff[subU][1];
                const int c = s->coeff[subU][2];
                const int d = s->coeff[subU][3];

                sum = s->coeff[subV][0] * (a * src[index - 1 - src_linesize] + b * src[index - 0 - src_linesize] +
                                           c * src[index + 1 - src_linesize] + d * src[index + 2 - src_linesize]) +
                      s->coeff[subV][1] * (a * src[index - 1] + b * src[index - 0] +
                                           c * src[index + 1] + d * src[index + 2]) +
                      s->coeff[subV][2] * (a * src[index - 1 + src_linesize] + b * src[index - 0 + src_linesize] +
                                           c * src[index + 1 + src_linesize] + d * src[index + 2 + src_linesize]) +
                      s->coeff[subV][3] * (a * src[index - 1 + 2 * src_linesize] + b * src[index - 0 + 2 * src_linesize] +
                                           c * src[index + 1 + 2 * src_linesize] + d * src[index + 2 + 2 * src_linesize]);
            } else {
                int dx, dy;

                sum = 0;

                for (dy = 0; dy < 4; dy++) {
                    int iy = v + dy - 1;

                    if (iy < 0)
                        iy = 0;
                    else if (iy >= h)
                        iy = h - 1;
                    for (dx = 0; dx < 4; dx++) {
                        int ix = u + dx - 1;

                        if (ix < 0)
                            ix = 0;
                        else if (ix >= w)
                            ix = w - 1;

                        sum += s->coeff[subU][dx] * s->coeff[subV][dy] * src[ix + iy * src_linesize];
                    }
                }
            }

            sum = (sum + (1 << (COEFF_BITS * 2 - 1))) >> (COEFF_BITS * 2);
            sum = av_clip_uint8(sum);
            dst[x + y * dst_linesize] = sum;
        }
    }
    return 0;
}
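/*
 * Two paths above: pixels whose full 4x4 support lies inside the source
 * take the unclamped fast path; border pixels clamp every tap to the
 * nearest valid sample (replicated edges) before the weighted sum.
 */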

static int resample_linear(AVFilterContext *ctx, void *arg,
                           int job, int nb_jobs)
{
    PerspectiveContext *s = ctx->priv;
    ThreadData *td = arg;
    uint8_t *dst = td->dst;
    int dst_linesize = td->dst_linesize;
    uint8_t *src = td->src;
    int src_linesize = td->src_linesize;
    int w = td->w;
    int h = td->h;
    int hsub = td->hsub;
    int vsub = td->vsub;
    int start = (h * job) / nb_jobs;
    int end   = (h * (job+1)) / nb_jobs;
    const int linesize = s->linesize[0];
    int x, y;

    for (y = start; y < end; y++){
        int sy = y << vsub;
        for (x = 0; x < w; x++){
            int u, v, subU, subV, sum, sx, index, subUI, subVI;

            sx   = x << hsub;
            u    = s->pv[sx + sy * linesize][0] >> hsub;
            v    = s->pv[sx + sy * linesize][1] >> vsub;
            subU = u & (SUB_PIXELS - 1);
            subV = v & (SUB_PIXELS - 1);
            u  >>= SUB_PIXEL_BITS;
            v  >>= SUB_PIXEL_BITS;

            index = u + v * src_linesize;
            subUI = SUB_PIXELS - subU;
            subVI = SUB_PIXELS - subV;

            if ((unsigned)u < (unsigned)(w - 1)){
                if ((unsigned)v < (unsigned)(h - 1)){
                    sum = subVI * (subUI * src[index] + subU * src[index + 1]) +
                          subV  * (subUI * src[index + src_linesize] + subU * src[index + src_linesize + 1]);
                    sum = (sum + (1 << (SUB_PIXEL_BITS * 2 - 1))) >> (SUB_PIXEL_BITS * 2);
                } else {
                    if (v < 0)
                        v = 0;
                    else
                        v = h - 1;
                    index = u + v * src_linesize;
                    sum   = subUI * src[index] + subU * src[index + 1];
                    sum   = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
                }
            } else {
                if (u < 0)
                    u = 0;
                else
                    u = w - 1;
                if ((unsigned)v < (unsigned)(h - 1)){
                    index = u + v * src_linesize;
                    sum   = subVI * src[index] + subV * src[index + src_linesize];
                    sum   = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
                } else {
                    if (v < 0)
                        v = 0;
                    else
                        v = h - 1;
                    index = u + v * src_linesize;
                    sum   = src[index];
                }
            }

            sum = av_clip_uint8(sum);
            dst[x + y * dst_linesize] = sum;
        }
    }
    return 0;
}
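/*
 * The bilinear resampler mirrors the cubic one: interior pixels blend
 * four neighbours with SUB_PIXEL_BITS precision per axis, edge pixels
 * degrade to 1-D interpolation along the remaining axis, and corners
 * reduce to nearest-neighbour.
 */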

static av_cold int init(AVFilterContext *ctx)
{
    PerspectiveContext *s = ctx->priv;

    switch (s->interpolation) {
    case LINEAR: s->perspective = resample_linear; break;
    case CUBIC:  s->perspective = resample_cubic;  break;
    }

    return 0;
}
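/*
 * init() only picks the resampler; all size-dependent state (the pv[]
 * LUT and the coefficient table) is built in config_input() once the
 * input dimensions and pixel format are known.
 */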

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    PerspectiveContext *s = ctx->priv;
    AVFrame *out;
    int plane;
    int ret;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, frame);

    if (s->eval_mode == EVAL_MODE_FRAME) {
        if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
            av_frame_free(&out);
            return ret;
        }
    }

    for (plane = 0; plane < s->nb_planes; plane++) {
        int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
        ThreadData td = {.dst = out->data[plane],
                         .dst_linesize = out->linesize[plane],
                         .src = frame->data[plane],
                         .src_linesize = frame->linesize[plane],
                         .w = s->linesize[plane],
                         .h = s->height[plane],
                         .hsub = hsub,
                         .vsub = vsub };
        ff_filter_execute(ctx, s->perspective, &td, NULL,
                          FFMIN(td.h, ff_filter_get_nb_threads(ctx)));
    }

    av_frame_free(&frame);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    PerspectiveContext *s = ctx->priv;

    av_freep(&s->pv);
}

static const AVFilterPad perspective_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

const AVFilter ff_vf_perspective = {
    .name          = "perspective",
    .description   = NULL_IF_CONFIG_SMALL("Correct the perspective of video."),
    .priv_size     = sizeof(PerspectiveContext),
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(perspective_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .priv_class    = &perspective_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC |
                     AVFILTER_FLAG_SLICE_THREADS,
};