FFmpeg
vf_colorspace.c
1 /*
2  * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * @file
23  * Convert between colorspaces.
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/csp.h"
28 #include "libavutil/frame.h"
29 #include "libavutil/mem.h"
30 #include "libavutil/mem_internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/pixfmt.h"
34 
35 #include "avfilter.h"
36 #include "colorspacedsp.h"
37 #include "filters.h"
38 #include "formats.h"
39 #include "video.h"
40 #include "colorspace.h"
41 
42 enum DitherMode {
43  DITHER_NONE,
44  DITHER_FSB,
45  DITHER_NB,
46 };
47 
48 enum Colorspace {
49  CS_UNSPECIFIED,
50  CS_BT470M,
51  CS_BT470BG,
52  CS_BT601_6_525,
53  CS_BT601_6_625,
54  CS_BT709,
55  CS_SMPTE170M,
56  CS_SMPTE240M,
57  CS_BT2020,
58  CS_NB,
59 };
60 
61 enum WhitepointAdaptation {
62  WP_ADAPT_BRADFORD,
63  WP_ADAPT_VON_KRIES,
64  NB_WP_ADAPT_NON_IDENTITY,
65  WP_ADAPT_IDENTITY = NB_WP_ADAPT_NON_IDENTITY,
66  NB_WP_ADAPT,
67 };
68 
69 enum ClipGamutMode {
70  CLIP_GAMUT_NONE,
71  CLIP_GAMUT_RGB,
72  NB_CLIP_GAMUT,
73 };
74 
75 static const enum AVColorTransferCharacteristic default_trc[CS_NB + 1] = {
76  [CS_UNSPECIFIED] = AVCOL_TRC_UNSPECIFIED,
77  [CS_BT470M] = AVCOL_TRC_GAMMA22,
78  [CS_BT470BG] = AVCOL_TRC_GAMMA28,
79  [CS_BT601_6_525] = AVCOL_TRC_SMPTE170M,
80  [CS_BT601_6_625] = AVCOL_TRC_SMPTE170M,
81  [CS_BT709] = AVCOL_TRC_BT709,
82  [CS_SMPTE170M] = AVCOL_TRC_SMPTE170M,
83  [CS_SMPTE240M] = AVCOL_TRC_SMPTE240M,
84  [CS_BT2020] = AVCOL_TRC_BT2020_10,
85  [CS_NB] = AVCOL_TRC_UNSPECIFIED,
86 };
87 
88 static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
89  [CS_UNSPECIFIED] = AVCOL_PRI_UNSPECIFIED,
90  [CS_BT470M] = AVCOL_PRI_BT470M,
91  [CS_BT470BG] = AVCOL_PRI_BT470BG,
92  [CS_BT601_6_525] = AVCOL_PRI_SMPTE170M,
93  [CS_BT601_6_625] = AVCOL_PRI_BT470BG,
94  [CS_BT709] = AVCOL_PRI_BT709,
95  [CS_SMPTE170M] = AVCOL_PRI_SMPTE170M,
96  [CS_SMPTE240M] = AVCOL_PRI_SMPTE240M,
97  [CS_BT2020] = AVCOL_PRI_BT2020,
98  [CS_NB] = AVCOL_PRI_UNSPECIFIED,
99 };
100 
101 static const enum AVColorSpace default_csp[CS_NB + 1] = {
102  [CS_UNSPECIFIED] = AVCOL_SPC_UNSPECIFIED,
103  [CS_BT470M] = AVCOL_SPC_SMPTE170M,
104  [CS_BT470BG] = AVCOL_SPC_BT470BG,
105  [CS_BT601_6_525] = AVCOL_SPC_SMPTE170M,
106  [CS_BT601_6_625] = AVCOL_SPC_BT470BG,
107  [CS_BT709] = AVCOL_SPC_BT709,
108  [CS_SMPTE170M] = AVCOL_SPC_SMPTE170M,
109  [CS_SMPTE240M] = AVCOL_SPC_SMPTE240M,
110  [CS_BT2020] = AVCOL_SPC_BT2020_NCL,
111  [CS_NB] = AVCOL_SPC_UNSPECIFIED,
112 };
113 
114 struct TransferCharacteristics {
115  double alpha, beta, gamma, delta;
116 };
117 
118 typedef struct ColorSpaceContext {
119  const AVClass *class;
120 
121  ColorSpaceDSPContext dsp;
122 
123  enum Colorspace user_all, user_iall;
124  enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
125  enum AVColorRange in_rng, out_rng, user_rng, user_irng;
126  enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
127  enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
128  enum AVPixelFormat in_format, user_format;
129  int fast_mode;
130  enum DitherMode dither;
131  enum WhitepointAdaptation wp_adapt;
132  enum ClipGamutMode clip_gamut;
133 
134  int16_t *rgb[3];
135  ptrdiff_t rgb_stride;
136  unsigned rgb_sz;
137  int *dither_scratch[3][2], *dither_scratch_base[3][2];
138 
139  const AVColorPrimariesDesc *in_primaries, *out_primaries;
140  int lrgb2lrgb_passthrough;
141  DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];
142 
143  const struct TransferCharacteristics *in_txchr, *out_txchr;
144  int rgb2rgb_passthrough;
145  int16_t *lin_lut, *delin_lut;
146 
147  const AVLumaCoefficients *in_lumacoef, *out_lumacoef;
148  int yuv2yuv_passthrough, yuv2yuv_fastmode;
149  DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
150  DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
151  DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
152  DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
153  yuv2rgb_fn yuv2rgb;
154  rgb2yuv_fn rgb2yuv;
155  rgb2yuv_fsb_fn rgb2yuv_fsb;
156  yuv2yuv_fn yuv2yuv;
157  double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
158  int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;
159 
160  int did_warn_range;
161 } ColorSpaceContext;
162 
163 // FIXME deal with odd width/heights
164 // FIXME faster linearize/delinearize implementation (integer pow)
165 // FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
166 // FIXME test that the values in (de)lin_lut don't exceed their container storage
167 // type size (only useful if we keep the LUT and don't move to fast integer pow)
168 // FIXME dithering if bitdepth goes down?
169 // FIXME bitexact for fate integration?
170 
171 // FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
172 // find any actual tables that document their real values...
173 // See the first graph at http://www.13thmonkey.org/~boris/gammacorrection/ for why it matters
174 static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
175  [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
176  [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
177  [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
178  [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
179  [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
180  [AVCOL_TRC_LINEAR] = { 1.0, 0.0, 1.0, 0.0 },
181  [AVCOL_TRC_IEC61966_2_1] = { 1.055, 0.0031308, 1.0 / 2.4, 12.92 },
182  [AVCOL_TRC_IEC61966_2_4] = { 1.099, 0.018, 0.45, 4.5 },
183  [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
184  [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
185 };
186 
187 static const struct TransferCharacteristics *
188 get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
189 {
190  const struct TransferCharacteristics *coeffs;
191 
192  if ((unsigned)trc >= FF_ARRAY_ELEMS(transfer_characteristics))
193  return NULL;
194  coeffs = &transfer_characteristics[trc];
195  if (!coeffs->alpha)
196  return NULL;
197 
198  return coeffs;
199 }
200 
201 static int fill_gamma_table(ColorSpaceContext *s)
202 {
203  int n;
204  double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
205  double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
206  double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
207  double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
208  double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
209  int clip_gamut = s->clip_gamut == CLIP_GAMUT_RGB;
210 
211  s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
212  if (!s->lin_lut)
213  return AVERROR(ENOMEM);
214  s->delin_lut = &s->lin_lut[32768];
215  for (n = 0; n < 32768; n++) {
216  double v = (n - 2048.0) / 28672.0, d, l;
217 
218  // delinearize
219  if (v <= -out_beta) {
220  d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
221  } else if (v < out_beta) {
222  d = out_delta * v;
223  } else {
224  d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
225  }
226  int d_rounded = lrint(d * 28672.0);
227  s->delin_lut[n] = clip_gamut ? av_clip(d_rounded, 0, 28672)
228  : av_clip_int16(d_rounded);
229 
230  // linearize
231  if (v <= -in_beta * in_delta) {
232  l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
233  } else if (v < in_beta * in_delta) {
234  l = v * in_idelta;
235  } else {
236  l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
237  }
238  int l_rounded = lrint(l * 28672.0);
239  s->lin_lut[n] = clip_gamut ? av_clip(l_rounded, 0, 28672)
240  : av_clip_int16(l_rounded);
241  }
242 
243  return 0;
244 }
245 
246 /*
247  * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
248  * This function uses the Bradford mechanism.
249  */
250 static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
251  const AVWhitepointCoefficients *wp_src,
252  const AVWhitepointCoefficients *wp_dst)
253 {
254  static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
255  [WP_ADAPT_BRADFORD] = {
256  { 0.8951, 0.2664, -0.1614 },
257  { -0.7502, 1.7135, 0.0367 },
258  { 0.0389, -0.0685, 1.0296 },
259  }, [WP_ADAPT_VON_KRIES] = {
260  { 0.40024, 0.70760, -0.08081 },
261  { -0.22630, 1.16532, 0.04570 },
262  { 0.00000, 0.00000, 0.91822 },
263  },
264  };
265  const double (*ma)[3] = ma_tbl[wp_adapt];
266  double xw_src = av_q2d(wp_src->x), yw_src = av_q2d(wp_src->y);
267  double xw_dst = av_q2d(wp_dst->x), yw_dst = av_q2d(wp_dst->y);
268  double zw_src = 1.0 - xw_src - yw_src;
269  double zw_dst = 1.0 - xw_dst - yw_dst;
270  double mai[3][3], fac[3][3], tmp[3][3];
271  double rs, gs, bs, rd, gd, bd;
272 
273  ff_matrix_invert_3x3(ma, mai);
274  rs = ma[0][0] * xw_src + ma[0][1] * yw_src + ma[0][2] * zw_src;
275  gs = ma[1][0] * xw_src + ma[1][1] * yw_src + ma[1][2] * zw_src;
276  bs = ma[2][0] * xw_src + ma[2][1] * yw_src + ma[2][2] * zw_src;
277  rd = ma[0][0] * xw_dst + ma[0][1] * yw_dst + ma[0][2] * zw_dst;
278  gd = ma[1][0] * xw_dst + ma[1][1] * yw_dst + ma[1][2] * zw_dst;
279  bd = ma[2][0] * xw_dst + ma[2][1] * yw_dst + ma[2][2] * zw_dst;
280  fac[0][0] = rd / rs;
281  fac[1][1] = gd / gs;
282  fac[2][2] = bd / bs;
283  fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
284  ff_matrix_mul_3x3(tmp, ma, fac);
285  ff_matrix_mul_3x3(out, tmp, mai);
286 }
287 
288 static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
289  int w, int h, const int16_t *lut)
290 {
291  int y, x, n;
292 
293  for (n = 0; n < 3; n++) {
294  int16_t *data = buf[n];
295 
296  for (y = 0; y < h; y++) {
297  for (x = 0; x < w; x++)
298  data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];
299 
300  data += stride;
301  }
302  }
303 }
304 
305 typedef struct ThreadData {
306  AVFrame *in, *out;
307  ptrdiff_t in_linesize[3], out_linesize[3];
308  int in_ss_h, out_ss_h;
309 } ThreadData;
310 
311 static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
312 {
313  const ThreadData *td = data;
314  ColorSpaceContext *s = ctx->priv;
315  uint8_t *in_data[3], *out_data[3];
316  int16_t *rgb[3];
317  int h_in = (td->in->height + 1) >> 1;
318  int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
319  int w = td->in->width, h = h2 - h1;
320 
321  in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
322  in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
323  in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
324  out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
325  out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
326  out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
327  rgb[0] = s->rgb[0] + s->rgb_stride * h1;
328  rgb[1] = s->rgb[1] + s->rgb_stride * h1;
329  rgb[2] = s->rgb[2] + s->rgb_stride * h1;
330 
331  // FIXME for simd, also make sure we do pictures with negative stride
332  // top-down so we don't overwrite lines with padding of data before it
333  // in the same buffer (same as swscale)
334 
335  if (s->yuv2yuv_fastmode) {
336  // FIXME possibly use a fast mode in case only the y range changes?
337  // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
338  // are non-zero
339  s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
340  s->yuv2yuv_coeffs, s->yuv_offset);
341  } else {
342  // FIXME maybe (for caching efficiency) do pipeline per-line instead of
343  // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
344  // 2 lines, for yuv420.)
345  /*
346  * General design:
347  * - yuv2rgb converts from whatever range the input was ([16-235/240] or
348  * [0,255] or the 10/12bpp equivalents thereof) to an integer version
349  * of RGB in pseudo-restricted 15+sign bits. That means that the float
350  * range [0.0,1.0] is in [0,28672], and the remainder of the int16_t
351  * range is used for overflow/underflow outside the representable
352  * range of this RGB type. rgb2yuv is the exact opposite.
353  * - gamma correction is done using a LUT since that appears to work
354  * fairly fast.
355  * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
356  * (or rgb2yuv conversion) uses nearest-neighbour sampling to read
357  * chroma pixels at luma resolution. If you want a fancier
358  * filter, you can use swscale to convert to yuv444p.
359  * - all coefficients are 14bit (so in the [-2.0,2.0] range).
360  */
361  s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
362  s->yuv2rgb_coeffs, s->yuv_offset[0]);
363  if (!s->rgb2rgb_passthrough) {
364  apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
365  if (!s->lrgb2lrgb_passthrough)
366  s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
367  apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
368  }
369  if (s->dither == DITHER_FSB) {
370  s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
371  s->rgb2yuv_coeffs, s->yuv_offset[1], s->dither_scratch);
372  } else {
373  s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
374  s->rgb2yuv_coeffs, s->yuv_offset[1]);
375  }
376  }
377 
378  return 0;
379 }
380 
381 static int get_range_off(AVFilterContext *ctx, int *off,
382  int *y_rng, int *uv_rng,
383  enum AVColorRange rng, int depth)
384 {
385  switch (rng) {
386  case AVCOL_RANGE_UNSPECIFIED: {
387  ColorSpaceContext *s = ctx->priv;
388 
389  if (!s->did_warn_range) {
390  av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
391  s->did_warn_range = 1;
392  }
393  }
394  // fall-through
395  case AVCOL_RANGE_MPEG:
396  *off = 16 << (depth - 8);
397  *y_rng = 219 << (depth - 8);
398  *uv_rng = 224 << (depth - 8);
399  break;
400  case AVCOL_RANGE_JPEG:
401  *off = 0;
402  *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
403  break;
404  default:
405  return AVERROR(EINVAL);
406  }
407 
408  return 0;
409 }
410 
411 static int create_filtergraph(AVFilterContext *ctx,
412  const AVFrame *in, const AVFrame *out)
413 {
414  ColorSpaceContext *s = ctx->priv;
415  const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
416  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
417  int m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
418 
419 #define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
420 #define supported_subsampling(lcw, lch) \
421  (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
422 #define supported_format(d) \
423  ((d) != NULL && (d)->nb_components == 3 && \
424  !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
425  supported_depth((d)->comp[0].depth) && \
426  supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
427 
428  if (!supported_format(in_desc)) {
429  av_log(ctx, AV_LOG_ERROR,
430  "Unsupported input format %d (%s) or bitdepth (%d)\n",
431  in->format, av_get_pix_fmt_name(in->format),
432  in_desc ? in_desc->comp[0].depth : -1);
433  return AVERROR(EINVAL);
434  }
435  if (!supported_format(out_desc)) {
436  av_log(ctx, AV_LOG_ERROR,
437  "Unsupported output format %d (%s) or bitdepth (%d)\n",
438  out->format, av_get_pix_fmt_name(out->format),
439  out_desc ? out_desc->comp[0].depth : -1);
440  return AVERROR(EINVAL);
441  }
442 
443  if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
444  if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
445  if (in->color_trc != s->in_trc) s->in_txchr = NULL;
446  if (out->color_trc != s->out_trc) s->out_txchr = NULL;
447  if (in->colorspace != s->in_csp ||
448  in->color_range != s->in_rng) s->in_lumacoef = NULL;
449  if (out->color_range != s->out_rng) s->rgb2yuv = NULL;
450 
451  if (!s->out_primaries || !s->in_primaries) {
452  s->in_prm = in->color_primaries;
453  if (s->user_iall != CS_UNSPECIFIED)
454  s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
455  if (s->user_iprm != AVCOL_PRI_UNSPECIFIED)
456  s->in_prm = s->user_iprm;
457  s->in_primaries = av_csp_primaries_desc_from_id(s->in_prm);
458  if (!s->in_primaries) {
459  av_log(ctx, AV_LOG_ERROR,
460  "Unsupported input primaries %d (%s)\n",
461  s->in_prm, av_color_primaries_name(s->in_prm));
462  return AVERROR(EINVAL);
463  }
464  s->out_prm = out->color_primaries;
465  s->out_primaries = av_csp_primaries_desc_from_id(s->out_prm);
466  if (!s->out_primaries) {
467  if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
468  if (s->user_all == CS_UNSPECIFIED) {
469  av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
470  } else {
471  av_log(ctx, AV_LOG_ERROR,
472  "Unsupported output color property %d\n", s->user_all);
473  }
474  } else {
475  av_log(ctx, AV_LOG_ERROR,
476  "Unsupported output primaries %d (%s)\n",
477  s->out_prm, av_color_primaries_name(s->out_prm));
478  }
479  return AVERROR(EINVAL);
480  }
481  s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
482  sizeof(*s->in_primaries));
483  if (!s->lrgb2lrgb_passthrough) {
484  double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
485  const AVWhitepointCoefficients *wp_out, *wp_in;
486 
487  wp_out = &s->out_primaries->wp;
488  wp_in = &s->in_primaries->wp;
489  ff_fill_rgb2xyz_table(&s->out_primaries->prim, wp_out, rgb2xyz);
490  ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
491  ff_fill_rgb2xyz_table(&s->in_primaries->prim, wp_in, rgb2xyz);
492  if (memcmp(wp_in, wp_out, sizeof(*wp_in)) != 0 &&
493  s->wp_adapt != WP_ADAPT_IDENTITY) {
494  double wpconv[3][3], tmp[3][3];
495 
496  fill_whitepoint_conv_table(wpconv, s->wp_adapt, &s->in_primaries->wp,
497  &s->out_primaries->wp);
498  ff_matrix_mul_3x3(tmp, rgb2xyz, wpconv);
499  ff_matrix_mul_3x3(rgb2rgb, tmp, xyz2rgb);
500  } else {
501  ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
502  }
503  for (m = 0; m < 3; m++)
504  for (n = 0; n < 3; n++) {
505  s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
506  for (o = 1; o < 8; o++)
507  s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
508  }
509 
510  }
511  }
512 
513  if (!s->in_txchr) {
514  av_freep(&s->lin_lut);
515  s->in_trc = in->color_trc;
516  if (s->user_iall != CS_UNSPECIFIED)
517  s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
518  if (s->user_itrc != AVCOL_TRC_UNSPECIFIED)
519  s->in_trc = s->user_itrc;
520  s->in_txchr = get_transfer_characteristics(s->in_trc);
521  if (!s->in_txchr) {
522  av_log(ctx, AV_LOG_ERROR,
523  "Unsupported input transfer characteristics %d (%s)\n",
524  s->in_trc, av_color_transfer_name(s->in_trc));
525  return AVERROR(EINVAL);
526  }
527  }
528 
529  if (!s->out_txchr) {
530  av_freep(&s->lin_lut);
531  s->out_trc = out->color_trc;
532  s->out_txchr = get_transfer_characteristics(s->out_trc);
533  if (!s->out_txchr) {
534  if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
535  if (s->user_all == CS_UNSPECIFIED) {
536  av_log(ctx, AV_LOG_ERROR,
537  "Please specify output transfer characteristics\n");
538  } else {
539  av_log(ctx, AV_LOG_ERROR,
540  "Unsupported output color property %d\n", s->user_all);
541  }
542  } else {
543  av_log(ctx, AV_LOG_ERROR,
544  "Unsupported output transfer characteristics %d (%s)\n",
545  s->out_trc, av_color_transfer_name(s->out_trc));
546  }
547  return AVERROR(EINVAL);
548  }
549  }
550 
551  s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
552  !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
553  if (!s->rgb2rgb_passthrough && !s->lin_lut) {
554  res = fill_gamma_table(s);
555  if (res < 0)
556  return res;
557  }
558 
559  if (!s->in_lumacoef) {
560  s->in_csp = in->colorspace;
561  if (s->user_iall != CS_UNSPECIFIED)
562  s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
563  if (s->user_icsp != AVCOL_SPC_UNSPECIFIED)
564  s->in_csp = s->user_icsp;
565  s->in_rng = in->color_range;
566  if (s->user_irng != AVCOL_RANGE_UNSPECIFIED)
567  s->in_rng = s->user_irng;
568  s->in_lumacoef = av_csp_luma_coeffs_from_avcsp(s->in_csp);
569  if (!s->in_lumacoef) {
570  av_log(ctx, AV_LOG_ERROR,
571  "Unsupported input colorspace %d (%s)\n",
572  s->in_csp, av_color_space_name(s->in_csp));
573  return AVERROR(EINVAL);
574  }
575  redo_yuv2rgb = 1;
576  }
577 
578  if (!s->rgb2yuv) {
579  s->out_rng = out->color_range;
580  redo_rgb2yuv = 1;
581  }
582 
583  fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
584  in_desc->log2_chroma_w == out_desc->log2_chroma_w;
585  s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
586  s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
587  !memcmp(s->in_lumacoef, s->out_lumacoef,
588  sizeof(*s->in_lumacoef)) &&
589  in_desc->comp[0].depth == out_desc->comp[0].depth;
590  if (!s->yuv2yuv_passthrough) {
591  if (redo_yuv2rgb) {
592  double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
593  int off, bits, in_rng;
594 
595  res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
596  s->in_rng, in_desc->comp[0].depth);
597  if (res < 0) {
598  av_log(ctx, AV_LOG_ERROR,
599  "Unsupported input color range %d (%s)\n",
600  s->in_rng, av_color_range_name(s->in_rng));
601  return res;
602  }
603  for (n = 0; n < 8; n++)
604  s->yuv_offset[0][n] = off;
605  ff_fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
606  ff_matrix_invert_3x3(rgb2yuv, yuv2rgb);
607  bits = 1 << (in_desc->comp[0].depth - 1);
608  for (n = 0; n < 3; n++) {
609  for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
610  s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
611  for (o = 1; o < 8; o++)
612  s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
613  }
614  }
615  av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
616  av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
617  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
618  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
619  s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
620  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
621  }
622 
623  if (redo_rgb2yuv) {
624  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
625  int off, out_rng, bits;
626 
627  res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
628  s->out_rng, out_desc->comp[0].depth);
629  if (res < 0) {
630  av_log(ctx, AV_LOG_ERROR,
631  "Unsupported output color range %d (%s)\n",
632  s->out_rng, av_color_range_name(s->out_rng));
633  return res;
634  }
635  for (n = 0; n < 8; n++)
636  s->yuv_offset[1][n] = off;
637  ff_fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
638  bits = 1 << (29 - out_desc->comp[0].depth);
639  for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
640  for (m = 0; m < 3; m++) {
641  s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
642  for (o = 1; o < 8; o++)
643  s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
644  }
645  }
646  av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
647  s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
648  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
649  s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
650  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
651  }
652 
653  if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
654  int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
655  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
656  double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
657  double yuv2yuv[3][3];
658  int in_rng, out_rng;
659 
660  ff_matrix_mul_3x3(yuv2yuv, rgb2yuv, yuv2rgb);
661  for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
662  for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
663  s->yuv2yuv_coeffs[m][n][0] =
664  lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
665  (in_rng * (1 << odepth)));
666  for (o = 1; o < 8; o++)
667  s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
668  }
669  }
670  av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
671  av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
672  s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
673  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
674  }
675  }
676 
677  return 0;
678 }
679 
680 static av_cold int init(AVFilterContext *ctx)
681 {
682  ColorSpaceContext *s = ctx->priv;
683 
684  s->out_csp = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
685  default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
686  s->out_lumacoef = av_csp_luma_coeffs_from_avcsp(s->out_csp);
687  if (!s->out_lumacoef) {
688  if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
689  if (s->user_all == CS_UNSPECIFIED) {
690  av_log(ctx, AV_LOG_ERROR,
691  "Please specify output colorspace\n");
692  } else {
693  av_log(ctx, AV_LOG_ERROR,
694  "Unsupported output color property %d\n", s->user_all);
695  }
696  } else {
697  av_log(ctx, AV_LOG_ERROR,
698  "Unsupported output colorspace %d (%s)\n", s->out_csp,
699  av_color_space_name(s->out_csp));
700  }
701  return AVERROR(EINVAL);
702  }
703 
704  ff_colorspacedsp_init(&s->dsp);
705 
706  return 0;
707 }
708 
709 static void uninit(AVFilterContext *ctx)
710 {
711  ColorSpaceContext *s = ctx->priv;
712 
713  av_freep(&s->rgb[0]);
714  av_freep(&s->rgb[1]);
715  av_freep(&s->rgb[2]);
716  s->rgb_sz = 0;
717  av_freep(&s->dither_scratch_base[0][0]);
718  av_freep(&s->dither_scratch_base[0][1]);
719  av_freep(&s->dither_scratch_base[1][0]);
720  av_freep(&s->dither_scratch_base[1][1]);
721  av_freep(&s->dither_scratch_base[2][0]);
722  av_freep(&s->dither_scratch_base[2][1]);
723 
724  av_freep(&s->lin_lut);
725 }
726 
727 static int filter_frame(AVFilterLink *link, AVFrame *in)
728 {
729  AVFilterContext *ctx = link->dst;
730  AVFilterLink *outlink = ctx->outputs[0];
731  ColorSpaceContext *s = ctx->priv;
732  // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
733  // input one if it is writable *OR* the actual literal values of in_*
734  // and out_* are identical (not just their respective properties)
735  AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
736  int res;
737  ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
738  unsigned rgb_sz = rgb_stride * in->height;
739  ThreadData td;
740 
741  if (!out) {
742  av_frame_free(&in);
743  return AVERROR(ENOMEM);
744  }
745  res = av_frame_copy_props(out, in);
746  if (res < 0) {
747  av_frame_free(&in);
748  av_frame_free(&out);
749  return res;
750  }
751 
752  out->colorspace = s->out_csp;
753  out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
754  in->color_range : s->user_rng;
755  out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
756  default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
757  if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
758  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
759 
760  out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
761  if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
762  out->color_trc = AVCOL_TRC_BT2020_12;
763  } else {
764  out->color_trc = s->user_trc;
765  }
766 
767  if (out->color_primaries != in->color_primaries || out->color_trc != in->color_trc) {
768  av_frame_side_data_remove_by_props(&out->side_data, &out->nb_side_data,
769  AV_SIDE_DATA_PROP_COLOR_DEPENDENT);
770  }
771 
772  if (rgb_sz != s->rgb_sz) {
773  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(in->format);
774  int uvw = in->width >> desc->log2_chroma_w;
775 
776  av_freep(&s->rgb[0]);
777  av_freep(&s->rgb[1]);
778  av_freep(&s->rgb[2]);
779  s->rgb_sz = 0;
780  av_freep(&s->dither_scratch_base[0][0]);
781  av_freep(&s->dither_scratch_base[0][1]);
782  av_freep(&s->dither_scratch_base[1][0]);
783  av_freep(&s->dither_scratch_base[1][1]);
784  av_freep(&s->dither_scratch_base[2][0]);
785  av_freep(&s->dither_scratch_base[2][1]);
786 
787  s->rgb[0] = av_malloc(rgb_sz);
788  s->rgb[1] = av_malloc(rgb_sz);
789  s->rgb[2] = av_malloc(rgb_sz);
790  s->dither_scratch_base[0][0] =
791  av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
792  s->dither_scratch_base[0][1] =
793  av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
794  s->dither_scratch_base[1][0] =
795  av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
796  s->dither_scratch_base[1][1] =
797  av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
798  s->dither_scratch_base[2][0] =
799  av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
800  s->dither_scratch_base[2][1] =
801  av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
802  s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
803  s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
804  s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
805  s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
806  s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
807  s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
808  if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
809  !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
810  !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
811  !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
812  uninit(ctx);
813  av_frame_free(&in);
814  av_frame_free(&out);
815  return AVERROR(ENOMEM);
816  }
817  s->rgb_sz = rgb_sz;
818  }
819  res = create_filtergraph(ctx, in, out);
820  if (res < 0) {
821  av_frame_free(&in);
822  av_frame_free(&out);
823  return res;
824  }
825  s->rgb_stride = rgb_stride / sizeof(int16_t);
826  td.in = in;
827  td.out = out;
828  td.in_linesize[0] = in->linesize[0];
829  td.in_linesize[1] = in->linesize[1];
830  td.in_linesize[2] = in->linesize[2];
831  td.out_linesize[0] = out->linesize[0];
832  td.out_linesize[1] = out->linesize[1];
833  td.out_linesize[2] = out->linesize[2];
834  td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h;
835  td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h;
836  if (s->yuv2yuv_passthrough) {
837  res = av_frame_copy(out, in);
838  if (res < 0) {
839  av_frame_free(&in);
840  av_frame_free(&out);
841  return res;
842  }
843  } else {
844  ff_filter_execute(ctx, convert, &td, NULL,
845  FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
846  }
847  av_frame_free(&in);
848 
849  return ff_filter_frame(outlink, out);
850 }
851 
852 static int query_formats(const AVFilterContext *ctx,
853  AVFilterFormatsConfig **cfg_in,
854  AVFilterFormatsConfig **cfg_out)
855 {
856  static const enum AVPixelFormat pix_fmts[] = {
857  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
858  AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
859  AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
860  AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
861  AV_PIX_FMT_NONE
862  };
863  int res;
864  const ColorSpaceContext *s = ctx->priv;
865  AVFilterFormats *formats;
866 
867  res = ff_formats_ref(ff_make_formats_list_singleton(s->out_csp), &cfg_out[0]->color_spaces);
868  if (res < 0)
869  return res;
870  if (s->user_rng != AVCOL_RANGE_UNSPECIFIED) {
871  res = ff_formats_ref(ff_make_formats_list_singleton(s->user_rng), &cfg_out[0]->color_ranges);
872  if (res < 0)
873  return res;
874  }
875 
876  formats = ff_make_format_list(pix_fmts);
877  if (!formats)
878  return AVERROR(ENOMEM);
879  if (s->user_format == AV_PIX_FMT_NONE)
880  return ff_set_common_formats2(ctx, cfg_in, cfg_out, formats);
881 
882  res = ff_formats_ref(formats, &cfg_in[0]->formats);
883  if (res < 0)
884  return res;
885 
886  formats = NULL;
887  res = ff_add_format(&formats, s->user_format);
888  if (res < 0)
889  return res;
890 
891  return ff_formats_ref(formats, &cfg_out[0]->formats);
892 }
893 
894 static int config_props(AVFilterLink *outlink)
895 {
896  AVFilterContext *ctx = outlink->dst;
897  AVFilterLink *inlink = outlink->src->inputs[0];
898 
899  if (inlink->w % 2 || inlink->h % 2) {
900  av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
901  inlink->w, inlink->h);
902  return AVERROR_PATCHWELCOME;
903  }
904 
905  outlink->w = inlink->w;
906  outlink->h = inlink->h;
907  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
908  outlink->time_base = inlink->time_base;
909 
910  return 0;
911 }
912 
913 #define OFFSET(x) offsetof(ColorSpaceContext, x)
914 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
915 #define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, .unit = z }
916 
917 static const AVOption colorspace_options[] = {
918  { "all", "Set all color properties together",
919  OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
920  CS_UNSPECIFIED, CS_NB - 1, FLAGS, .unit = "all" },
921  ENUM("bt470m", CS_BT470M, "all"),
922  ENUM("bt470bg", CS_BT470BG, "all"),
923  ENUM("bt601-6-525", CS_BT601_6_525, "all"),
924  ENUM("bt601-6-625", CS_BT601_6_625, "all"),
925  ENUM("bt709", CS_BT709, "all"),
926  ENUM("smpte170m", CS_SMPTE170M, "all"),
927  ENUM("smpte240m", CS_SMPTE240M, "all"),
928  ENUM("bt2020", CS_BT2020, "all"),
929 
930  { "space", "Output colorspace",
931  OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
932  AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, .unit = "csp"},
933  ENUM("bt709", AVCOL_SPC_BT709, "csp"),
934  ENUM("fcc", AVCOL_SPC_FCC, "csp"),
935  ENUM("bt470bg", AVCOL_SPC_BT470BG, "csp"),
936  ENUM("smpte170m", AVCOL_SPC_SMPTE170M, "csp"),
937  ENUM("smpte240m", AVCOL_SPC_SMPTE240M, "csp"),
938  ENUM("ycgco", AVCOL_SPC_YCGCO, "csp"),
939  ENUM("gbr", AVCOL_SPC_RGB, "csp"),
940  ENUM("bt2020nc", AVCOL_SPC_BT2020_NCL, "csp"),
941  ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),
942 
943  { "range", "Output color range",
944  OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
945  AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, .unit = "rng" },
946  ENUM("tv", AVCOL_RANGE_MPEG, "rng"),
947  ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
948  ENUM("pc", AVCOL_RANGE_JPEG, "rng"),
949  ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),
950 
951  { "primaries", "Output color primaries",
952  OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
953  AVCOL_PRI_RESERVED0, AVCOL_PRI_EXT_NB - 1, FLAGS, .unit = "prm" },
954  ENUM("bt709", AVCOL_PRI_BT709, "prm"),
955  ENUM("bt470m", AVCOL_PRI_BT470M, "prm"),
956  ENUM("bt470bg", AVCOL_PRI_BT470BG, "prm"),
957  ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
958  ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
959  ENUM("smpte428", AVCOL_PRI_SMPTE428, "prm"),
960  ENUM("film", AVCOL_PRI_FILM, "prm"),
961  ENUM("smpte431", AVCOL_PRI_SMPTE431, "prm"),
962  ENUM("smpte432", AVCOL_PRI_SMPTE432, "prm"),
963  ENUM("bt2020", AVCOL_PRI_BT2020, "prm"),
964  ENUM("jedec-p22", AVCOL_PRI_JEDEC_P22, "prm"),
965  ENUM("ebu3213", AVCOL_PRI_EBU3213, "prm"),
966  ENUM("vgamut", AVCOL_PRI_V_GAMUT, "prm"),
967 
968  { "trc", "Output transfer characteristics",
969  OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
970  AVCOL_TRC_RESERVED0, AVCOL_TRC_EXT_NB - 1, FLAGS, .unit = "trc" },
971  ENUM("bt709", AVCOL_TRC_BT709, "trc"),
972  ENUM("bt470m", AVCOL_TRC_GAMMA22, "trc"),
973  ENUM("gamma22", AVCOL_TRC_GAMMA22, "trc"),
974  ENUM("bt470bg", AVCOL_TRC_GAMMA28, "trc"),
975  ENUM("gamma28", AVCOL_TRC_GAMMA28, "trc"),
976  ENUM("smpte170m", AVCOL_TRC_SMPTE170M, "trc"),
977  ENUM("smpte240m", AVCOL_TRC_SMPTE240M, "trc"),
978  ENUM("linear", AVCOL_TRC_LINEAR, "trc"),
979  ENUM("srgb", AVCOL_TRC_IEC61966_2_1, "trc"),
980  ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
981  ENUM("xvycc", AVCOL_TRC_IEC61966_2_4, "trc"),
982  ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
983  ENUM("bt2020-10", AVCOL_TRC_BT2020_10, "trc"),
984  ENUM("bt2020-12", AVCOL_TRC_BT2020_12, "trc"),
985  ENUM("vlog", AVCOL_TRC_V_LOG, "trc"),
986 
987  { "format", "Output pixel format",
988  OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
989  AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, .unit = "fmt" },
990  ENUM("yuv420p", AV_PIX_FMT_YUV420P, "fmt"),
991  ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
992  ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
993  ENUM("yuv422p", AV_PIX_FMT_YUV422P, "fmt"),
994  ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
995  ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
996  ENUM("yuv444p", AV_PIX_FMT_YUV444P, "fmt"),
997  ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
998  ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
999 
1000  { "fast", "Ignore primary chromaticity and gamma correction",
1001  OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
1002  0, 1, FLAGS },
1003 
1004  { "dither", "Dithering mode",
1005  OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
1006  DITHER_NONE, DITHER_NB - 1, FLAGS, .unit = "dither" },
1007  ENUM("none", DITHER_NONE, "dither"),
1008  ENUM("fsb", DITHER_FSB, "dither"),
1009 
1010  { "wpadapt", "Whitepoint adaptation method",
1011  OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
1012  WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, .unit = "wpadapt" },
1013  ENUM("bradford", WP_ADAPT_BRADFORD, "wpadapt"),
1014  ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
1015  ENUM("identity", WP_ADAPT_IDENTITY, "wpadapt"),
1016 
1017  { "clipgamut",
1018  "Controls how to clip out-of-gamut colors that arise as a result of colorspace conversion.",
1019  OFFSET(clip_gamut), AV_OPT_TYPE_INT, { .i64 = CLIP_GAMUT_NONE },
1020  CLIP_GAMUT_NONE, NB_CLIP_GAMUT - 1, FLAGS, .unit = "clipgamut" },
1021  ENUM("none", CLIP_GAMUT_NONE, "clipgamut"),
1022  ENUM("rgb", CLIP_GAMUT_RGB, "clipgamut"),
1023 
1024  { "iall", "Set all input color properties together",
1025  OFFSET(user_iall), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
1026  CS_UNSPECIFIED, CS_NB - 1, FLAGS, .unit = "all" },
1027  { "ispace", "Input colorspace",
1028  OFFSET(user_icsp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
1029  AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, .unit = "csp" },
1030  { "irange", "Input color range",
1031  OFFSET(user_irng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
1032  AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, .unit = "rng" },
1033  { "iprimaries", "Input color primaries",
1034  OFFSET(user_iprm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
1035  AVCOL_PRI_RESERVED0, AVCOL_PRI_EXT_NB - 1, FLAGS, .unit = "prm" },
1036  { "itrc", "Input transfer characteristics",
1037  OFFSET(user_itrc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
1038  AVCOL_TRC_RESERVED0, AVCOL_TRC_EXT_NB - 1, FLAGS, .unit = "trc" },
1039 
1040  { NULL }
1041 };
1042 
1043 AVFILTER_DEFINE_CLASS(colorspace);
1044 
1045 static const AVFilterPad inputs[] = {
1046  {
1047  .name = "default",
1048  .type = AVMEDIA_TYPE_VIDEO,
1049  .filter_frame = filter_frame,
1050  },
1051 };
1052 
1053 static const AVFilterPad outputs[] = {
1054  {
1055  .name = "default",
1056  .type = AVMEDIA_TYPE_VIDEO,
1057  .config_props = config_props,
1058  },
1059 };
1060 
1061 const FFFilter ff_vf_colorspace = {
1062  .p.name = "colorspace",
1063  .p.description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
1064  .p.priv_class = &colorspace_class,
1065  .p.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
1066  .init = init,
1067  .uninit = uninit,
1068  .priv_size = sizeof(ColorSpaceContext),
1069  FILTER_INPUTS(inputs),
1070  FILTER_OUTPUTS(outputs),
1071  FILTER_QUERY_FUNC2(query_formats),
1072 };
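
For reference, the AVOption table above maps directly onto libavfilter's option syntax, so the filter can be configured from a filtergraph string or programmatically. Below is a minimal usage sketch; it is not part of vf_colorspace.c, the helper name make_colorspace_filter and the chosen option string are illustrative only, while avfilter_get_by_name() and avfilter_graph_create_filter() are existing public libavfilter API.

/* Minimal sketch, assuming a normal libavfilter build with the colorspace
 * filter enabled. Option names ("all", "format", "dither") come from
 * colorspace_options above; the helper itself is hypothetical. */
#include <libavfilter/avfilter.h>
#include <libavutil/error.h>

static int make_colorspace_filter(AVFilterGraph *graph, AVFilterContext **out_ctx)
{
    const AVFilter *f = avfilter_get_by_name("colorspace");

    if (!f)
        return AVERROR_FILTER_NOT_FOUND;
    /* Convert everything to BT.709 with 8-bit 4:2:0 output and FSB dithering. */
    return avfilter_graph_create_filter(out_ctx, f, "csp",
                                        "all=bt709:format=yuv420p:dither=fsb",
                                        NULL, graph);
}

The same conversion on the command line would read -vf colorspace=all=bt709:format=yuv420p:dither=fsb.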
formats
formats
Definition: signature.h:47
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:117
AVFrame::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:682
ColorSpaceContext::fast_mode
int fast_mode
Definition: vf_colorspace.c:129
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:678
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
NB_CLIP_GAMUT
@ NB_CLIP_GAMUT
Definition: vf_colorspace.c:72
ColorSpaceContext::yuv2yuv_passthrough
int yuv2yuv_passthrough
Definition: vf_colorspace.c:148
AVCOL_PRI_EBU3213
@ AVCOL_PRI_EBU3213
EBU Tech. 3213-E (nothing there) / one of JEDEC P22 group phosphors.
Definition: pixfmt.h:652
av_clip
#define av_clip
Definition: common.h:100
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ColorSpaceContext::rgb2yuv_fsb
rgb2yuv_fsb_fn rgb2yuv_fsb
Definition: vf_colorspace.c:155
WP_ADAPT_VON_KRIES
@ WP_ADAPT_VON_KRIES
Definition: vf_colorspace.c:63
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:499
ColorSpaceContext::user_format
enum AVPixelFormat in_format user_format
Definition: vf_colorspace.c:128
ColorSpaceContext::delin_lut
int16_t * delin_lut
Definition: vf_colorspace.c:145
AVColorTransferCharacteristic
AVColorTransferCharacteristic
Color Transfer Characteristic.
Definition: pixfmt.h:666
mem_internal.h
out
FILE * out
Definition: movenc.c:55
AVColorPrimariesDesc
Struct that contains both white point location and primaries location, providing the complete descrip...
Definition: csp.h:78
NB_WP_ADAPT
@ NB_WP_ADAPT
Definition: vf_colorspace.c:66
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1067
ColorSpaceContext::dither_scratch_base
int * dither_scratch_base[3][2]
Definition: vf_colorspace.c:137
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
ff_matrix_invert_3x3
void ff_matrix_invert_3x3(const double in[3][3], double out[3][3])
Definition: colorspace.c:27
AVCOL_TRC_LINEAR
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
Definition: pixfmt.h:675
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
ColorSpaceContext::yuv2rgb
yuv2rgb_fn yuv2rgb
Definition: vf_colorspace.c:153
ff_set_common_formats2
int ff_set_common_formats2(const AVFilterContext *ctx, AVFilterFormatsConfig **cfg_in, AVFilterFormatsConfig **cfg_out, AVFilterFormats *formats)
Definition: formats.c:1124
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
ColorSpaceContext::out_txchr
const struct TransferCharacteristics * out_txchr
Definition: vf_colorspace.c:143
CS_SMPTE240M
@ CS_SMPTE240M
Definition: vf_colorspace.c:56
AVFrame::color_primaries
enum AVColorPrimaries color_primaries
Definition: frame.h:680
TransferCharacteristics::gamma
double gamma
Definition: vf_colorspace.c:115
WP_ADAPT_BRADFORD
@ WP_ADAPT_BRADFORD
Definition: vf_colorspace.c:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:689
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: filters.h:263
ColorSpaceContext::rgb_sz
unsigned rgb_sz
Definition: vf_colorspace.c:136
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(colorspace)
fill_whitepoint_conv_table
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt, const AVWhitepointCoefficients *wp_src, const AVWhitepointCoefficients *wp_dst)
Definition: vf_colorspace.c:250
pixdesc.h
AVFrame::width
int width
Definition: frame.h:499
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
av_csp_luma_coeffs_from_avcsp
const struct AVLumaCoefficients * av_csp_luma_coeffs_from_avcsp(enum AVColorSpace csp)
Retrieves the Luma coefficients necessary to construct a conversion matrix from an enum constant desc...
Definition: csp.c:58
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
AVOption
AVOption.
Definition: opt.h:429
AVCOL_SPC_NB
@ AVCOL_SPC_NB
Not part of ABI.
Definition: pixfmt.h:720
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:669
data
const char data[16]
Definition: mxf.c:149
rgb2yuv
static const char rgb2yuv[]
Definition: vf_scale_vulkan.c:83
ColorSpaceContext::rgb2yuv_dbl_coeffs
double rgb2yuv_dbl_coeffs[3][3]
Definition: vf_colorspace.c:157
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:539
AVCOL_PRI_JEDEC_P22
@ AVCOL_PRI_JEDEC_P22
Definition: pixfmt.h:653
ThreadData::out_ss_h
int out_ss_h
Definition: vf_colorspace.c:308
ColorSpaceContext::in_uv_rng
int in_uv_rng
Definition: vf_colorspace.c:158
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:701
AVCOL_TRC_BT2020_12
@ AVCOL_TRC_BT2020_12
ITU-R BT2020 for 12-bit system.
Definition: pixfmt.h:682
AVLumaCoefficients
Struct containing luma coefficients to be used for RGB to YUV/YCoCg, or similar calculations.
Definition: csp.h:48
CS_BT709
@ CS_BT709
Definition: vf_colorspace.c:54
WP_ADAPT_IDENTITY
@ WP_ADAPT_IDENTITY
Definition: vf_colorspace.c:65
ColorSpaceContext::lrgb2lrgb_coeffs
int16_t lrgb2lrgb_coeffs[3][3][8]
Definition: vf_colorspace.c:141
AVColorPrimaries
AVColorPrimaries
Chromaticity coordinates of the source primaries.
Definition: pixfmt.h:636
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:220
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:526
get_transfer_characteristics
static const struct TransferCharacteristics * get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
Definition: vf_colorspace.c:188
video.h
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:155
ColorSpaceContext::wp_adapt
enum WhitepointAdaptation wp_adapt
Definition: vf_colorspace.c:131
ff_make_formats_list_singleton
AVFilterFormats * ff_make_formats_list_singleton(int fmt)
Equivalent to ff_make_format_list({const int[]}{ fmt, -1 })
Definition: formats.c:593
colorspace_options
static const AVOption colorspace_options[]
Definition: vf_colorspace.c:917
Colorspace
Colorspace
Definition: vf_colorspace.c:48
ColorSpaceContext::rgb2rgb_passthrough
int rgb2rgb_passthrough
Definition: vf_colorspace.c:144
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
AV_PIX_FMT_GBRAP12LE
@ AV_PIX_FMT_GBRAP12LE
planar GBR 4:4:4:4 48bpp, little-endian
Definition: pixfmt.h:311
DITHER_FSB
@ DITHER_FSB
Definition: vf_colorspace.c:44
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
rgb
Definition: rpzaenc.c:60
AVCOL_TRC_IEC61966_2_1
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
Definition: pixfmt.h:680
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3856
ThreadData::out_linesize
ptrdiff_t out_linesize[3]
Definition: vf_colorspace.c:307
colorspace.h
AVCOL_RANGE_NB
@ AVCOL_RANGE_NB
Not part of ABI.
Definition: pixfmt.h:778
AVCOL_TRC_GAMMA28
@ AVCOL_TRC_GAMMA28
also ITU-R BT470BG
Definition: pixfmt.h:672
ColorSpaceContext
Definition: vf_colorspace.c:118
CS_BT2020
@ CS_BT2020
Definition: vf_colorspace.c:57
CS_BT601_6_525
@ CS_BT601_6_525
Definition: vf_colorspace.c:52
AVCOL_TRC_GAMMA22
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:671
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:39
ColorSpaceContext::yuv_offset
int16_t yuv_offset[2][8]
Definition: vf_colorspace.c:152
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:542
avassert.h
lrint
#define lrint
Definition: tablegen.h:53
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
transfer_characteristics
static const struct TransferCharacteristics transfer_characteristics[]
Definition: vf_colorspace.c:174
FFFilter
Definition: filters.h:266
AVCOL_PRI_RESERVED0
@ AVCOL_PRI_RESERVED0
Definition: pixfmt.h:637
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
s
#define s(width, name)
Definition: cbs_vp9.c:198
DITHER_NB
@ DITHER_NB
Definition: vf_colorspace.c:45
AVCOL_PRI_NB
@ AVCOL_PRI_NB
Not part of ABI.
Definition: pixfmt.h:654
av_csp_primaries_desc_from_id
const AVColorPrimariesDesc * av_csp_primaries_desc_from_id(enum AVColorPrimaries prm)
Retrieves a complete gamut description from an enum constant describing the color primaries.
Definition: csp.c:95
CS_BT470BG
@ CS_BT470BG
Definition: vf_colorspace.c:51
CS_UNSPECIFIED
@ CS_UNSPECIFIED
Definition: vf_colorspace.c:49
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:707
ColorSpaceContext::yuv2rgb_coeffs
int16_t yuv2rgb_coeffs[3][3][8]
Definition: vf_colorspace.c:149
get_range_off
static int get_range_off(AVFilterContext *ctx, int *off, int *y_rng, int *uv_rng, enum AVColorRange rng, int depth)
Definition: vf_colorspace.c:381
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:753
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ColorSpaceDSPContext
Definition: colorspacedsp.h:59
bits
uint8_t bits
Definition: vp3data.h:128
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_colorspace.c:727
filters.h
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
default_trc
static enum AVColorTransferCharacteristic default_trc[CS_NB+1]
Definition: vf_colorspace.c:75
ctx
AVFormatContext * ctx
Definition: movenc.c:49
AVCOL_PRI_SMPTE428
@ AVCOL_PRI_SMPTE428
SMPTE ST 428-1 (CIE 1931 XYZ)
Definition: pixfmt.h:648
AVFilterFormatsConfig::color_spaces
AVFilterFormats * color_spaces
Lists of supported YUV color metadata, only for YUV video.
Definition: avfilter.h:141
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCOL_PRI_SMPTE240M
@ AVCOL_PRI_SMPTE240M
identical to above, also called "SMPTE C" even though it uses D65
Definition: pixfmt.h:645
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: filters.h:264
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:639
ColorSpaceContext::yuv2yuv
yuv2yuv_fn yuv2yuv
Definition: vf_colorspace.c:156
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:643
ColorSpaceContext::rgb2yuv_coeffs
int16_t rgb2yuv_coeffs[3][3][8]
Definition: vf_colorspace.c:150
AVCOL_PRI_SMPTE170M
@ AVCOL_PRI_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:644
ColorSpaceContext::user_irng
enum AVColorRange in_rng out_rng user_rng user_irng
Definition: vf_colorspace.c:125
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_colorspace.c:680
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3772
ColorSpaceContext::yuv2yuv_coeffs
int16_t yuv2yuv_coeffs[3][3][8]
Definition: vf_colorspace.c:151
ff_matrix_mul_3x3
void ff_matrix_mul_3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
Definition: colorspace.c:54
config_props
static int config_props(AVFilterLink *outlink)
Definition: vf_colorspace.c:894
CS_NB
@ CS_NB
Definition: vf_colorspace.c:58
AVCOL_TRC_RESERVED0
@ AVCOL_TRC_RESERVED0
Definition: pixfmt.h:667
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
TransferCharacteristics::alpha
double alpha
Definition: vf_colorspace.c:115
av_clip_int16
#define av_clip_int16
Definition: common.h:115
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:599
CS_SMPTE170M
@ CS_SMPTE170M
Definition: vf_colorspace.c:55
ColorSpaceContext::user_itrc
enum AVColorTransferCharacteristic in_trc out_trc user_trc user_itrc
Definition: vf_colorspace.c:126
AVCOL_TRC_IEC61966_2_4
@ AVCOL_TRC_IEC61966_2_4
IEC 61966-2-4.
Definition: pixfmt.h:678
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVFilterContext::inputs
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:282
AVCOL_PRI_BT709
@ AVCOL_PRI_BT709
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
Definition: pixfmt.h:638
ff_add_format
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:568
fill_gamma_table
static int fill_gamma_table(ColorSpaceContext *s)
Definition: vf_colorspace.c:201
ColorSpaceContext::lin_lut
int16_t * lin_lut
Definition: vf_colorspace.c:145
CLIP_GAMUT_NONE
@ CLIP_GAMUT_NONE
Definition: vf_colorspace.c:70
av_color_primaries_name
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:3790
double
double
Definition: af_crystalizer.c:132
AVCOL_TRC_BT2020_10
@ AVCOL_TRC_BT2020_10
ITU-R BT2020 for 10-bit system.
Definition: pixfmt.h:681
AVCOL_SPC_YCGCO
@ AVCOL_SPC_YCGCO
used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
Definition: pixfmt.h:709
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:540
ColorSpaceContext::in_txchr
const struct TransferCharacteristics * in_txchr
Definition: vf_colorspace.c:143
AVCIExy
Struct containing chromaticity x and y values for the standard CIE 1931 chromaticity definition.
Definition: csp.h:56
ColorSpaceContext::user_iall
enum Colorspace user_all user_iall
Definition: vf_colorspace.c:123
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:743
AVFilterFormatsConfig
Lists of formats / etc.
Definition: avfilter.h:121
CLIP_GAMUT_RGB
@ CLIP_GAMUT_RGB
Definition: vf_colorspace.c:71
AVCOL_PRI_BT2020
@ AVCOL_PRI_BT2020
ITU-R BT2020.
Definition: pixfmt.h:647
uninit
static void uninit(AVFilterContext *ctx)
Definition: vf_colorspace.c:709
ColorSpaceContext::out_y_rng
int out_y_rng
Definition: vf_colorspace.c:158
AVCIExy::x
AVRational x
Definition: csp.h:57
ColorSpaceContext::lrgb2lrgb_passthrough
int lrgb2lrgb_passthrough
Definition: vf_colorspace.c:140
AVCOL_PRI_SMPTE431
@ AVCOL_PRI_SMPTE431
SMPTE ST 431-2 (2011) / DCI P3.
Definition: pixfmt.h:650
yuv2yuv_fn
void(* yuv2yuv_fn)(uint8_t *yuv_out[3], const ptrdiff_t yuv_out_stride[3], uint8_t *yuv_in[3], const ptrdiff_t yuv_in_stride[3], int w, int h, const int16_t yuv2yuv_coeffs[3][3][8], const int16_t yuv_offset[2][8])
Definition: colorspacedsp.h:40
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
AVCOL_TRC_SMPTE240M
@ AVCOL_TRC_SMPTE240M
Definition: pixfmt.h:674
AVCOL_PRI_FILM
@ AVCOL_PRI_FILM
colour filters using Illuminant C
Definition: pixfmt.h:646
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
av_frame_copy
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:711
ColorSpaceContext::yuv2yuv_fastmode
int yuv2yuv_fastmode
Definition: vf_colorspace.c:148
OFFSET
#define OFFSET(x)
Definition: vf_colorspace.c:913
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:544
xyz2rgb
static const float xyz2rgb[3][3]
Definition: tiff.c:1896
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:546
TransferCharacteristics
Definition: vf_colorspace.c:114
ColorSpaceContext::rgb2yuv
rgb2yuv_fn rgb2yuv
Definition: vf_colorspace.c:154
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
frame.h
ColorSpaceContext::clip_gamut
enum ClipGamutMode clip_gamut
Definition: vf_colorspace.c:132
ColorSpaceContext::in_y_rng
int in_y_rng
Definition: vf_colorspace.c:158
ColorSpaceContext::yuv2rgb_dbl_coeffs
double yuv2rgb_dbl_coeffs[3][3]
Definition: vf_colorspace.c:157
query_formats
static int query_formats(const AVFilterContext *ctx, AVFilterFormatsConfig **cfg_in, AVFilterFormatsConfig **cfg_out)
Definition: vf_colorspace.c:852
csp.h
AVCOL_TRC_EXT_NB
@ AVCOL_TRC_EXT_NB
Not part of ABI.
Definition: pixfmt.h:693
AVFilterFormatsConfig::color_ranges
AVFilterFormats * color_ranges
AVColorRange.
Definition: avfilter.h:142
av_frame_side_data_remove_by_props
void av_frame_side_data_remove_by_props(AVFrameSideData ***sd, int *nb_sd, int props)
Remove and free all side data instances that match any of the given side data properties.
Definition: side_data.c:117
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:668
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:197
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:708
convert
static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
Definition: vf_colorspace.c:311
yuv2yuv
static void fn() yuv2yuv(uint8_t *_dst[3], const ptrdiff_t dst_stride[3], uint8_t *_src[3], const ptrdiff_t src_stride[3], int w, int h, const int16_t c[3][3][8], const int16_t yuv_offset[2][8])
Definition: colorspacedsp_yuv2yuv_template.c:40
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
ff_fill_rgb2yuv_table
void ff_fill_rgb2yuv_table(const AVLumaCoefficients *coeffs, double rgb2yuv[3][3])
Definition: colorspace.c:125
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:711
ColorSpaceContext::dither_scratch
int * dither_scratch[3][2]
Definition: vf_colorspace.c:137
ColorSpaceContext::in_primaries
const AVColorPrimariesDesc * in_primaries
Definition: vf_colorspace.c:139
AVCOL_PRI_V_GAMUT
@ AVCOL_PRI_V_GAMUT
Definition: pixfmt.h:658
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:700
CS_BT601_6_625
@ CS_BT601_6_625
Definition: vf_colorspace.c:53
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:845
ColorSpaceContext::in_lumacoef
const AVLumaCoefficients * in_lumacoef
Definition: vf_colorspace.c:147
ThreadData
Used for passing data between threads.
Definition: dsddec.c:71
FILTER_QUERY_FUNC2
#define FILTER_QUERY_FUNC2(func)
Definition: filters.h:240
ColorSpaceContext::out_primaries
const AVColorPrimariesDesc * out_primaries
Definition: vf_colorspace.c:139
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
DitherMode
DitherMode
Definition: vf_colorspace.c:42
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:45
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:703
FLAGS
#define FLAGS
Definition: vf_colorspace.c:914
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
AV_SIDE_DATA_PROP_COLOR_DEPENDENT
@ AV_SIDE_DATA_PROP_COLOR_DEPENDENT
Side data depends on the video color space.
Definition: frame.h:316
ColorSpaceContext::dsp
ColorSpaceDSPContext dsp
Definition: vf_colorspace.c:121
NB_WP_ADAPT_NON_IDENTITY
@ NB_WP_ADAPT_NON_IDENTITY
Definition: vf_colorspace.c:64
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:641
ClipGamutMode
ClipGamutMode
Definition: vf_colorspace.c:69
pixfmt.h
outputs
static const AVFilterPad outputs[]
Definition: vf_colorspace.c:1053
ColorSpaceContext::out_lumacoef
const AVLumaCoefficients * out_lumacoef
Definition: vf_colorspace.c:147
AVCIExy::y
AVRational y
Definition: csp.h:57
ff_fill_rgb2xyz_table
void ff_fill_rgb2xyz_table(const AVPrimaryCoefficients *coeffs, const AVWhitepointCoefficients *wp, double rgb2xyz[3][3])
Definition: colorspace.c:79
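Note: ff_fill_rgb2xyz_table() is the shared helper that turns a set of primaries plus white point into an RGB-to-XYZ matrix. A hedged sketch, assuming pdesc was obtained from av_csp_primaries_desc_from_id():
    /* Sketch: build the RGB->XYZ matrix for one side of the conversion. */
    const AVColorPrimariesDesc *pdesc = av_csp_primaries_desc_from_id(prm);
    double rgb2xyz[3][3];
    if (pdesc)
        ff_fill_rgb2xyz_table(&pdesc->prim, &pdesc->wp, rgb2xyz);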
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:543
CS_BT470M
@ CS_BT470M
Definition: vf_colorspace.c:50
yuv2rgb_fn
void(* yuv2rgb_fn)(int16_t *rgb[3], ptrdiff_t rgb_stride, uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int w, int h, const int16_t yuv2rgb_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:27
ColorSpaceContext::user_icsp
enum AVColorSpace in_csp out_csp user_csp user_icsp
Definition: vf_colorspace.c:124
ColorSpaceContext::dither
enum DitherMode dither
Definition: vf_colorspace.c:130
AVFrame::height
int height
Definition: frame.h:499
AVCOL_PRI_EXT_NB
@ AVCOL_PRI_EXT_NB
Not part of ABI.
Definition: pixfmt.h:659
default_csp
static enum AVColorSpace default_csp[CS_NB+1]
Definition: vf_colorspace.c:101
ff_filter_execute
int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: avfilter.c:1693
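Note: ff_filter_execute() is the slice-threading driver; the conversion work is split into jobs that each run convert() on a band of the frame. A hypothetical call modelled on the usual libavfilter pattern (ThreadData contents are elided):
    /* Sketch: run convert() over the frame on up to nb_threads jobs. */
    ThreadData td = { 0 };
    /* ... td filled with the input/output frames, strides, subsampling ... */
    ff_filter_execute(ctx, convert, &td, NULL,
                      FFMIN(h, ff_filter_get_nb_threads(ctx)));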
default_prm
static enum AVColorPrimaries default_prm[CS_NB+1]
Definition: vf_colorspace.c:88
ff_vf_colorspace
const FFFilter ff_vf_colorspace
Definition: vf_colorspace.c:1061
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AVCOL_SPC_FCC
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:705
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
avfilter.h
colorspacedsp.h
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
rgb2yuv_fn
void(* rgb2yuv_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:31
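Note: rgb2yuv_fn is the counterpart of yuv2rgb_fn above; together they bracket the intermediate int16_t RGB stage where linearization and gamut mapping happen. A hedged sketch of the per-slice flow, assuming s is a ColorSpaceContext holding the cached function pointers and tables listed in this file:
    /* 1) expand the input YUV slice into the intermediate RGB buffers */
    s->yuv2rgb(s->rgb, s->rgb_stride, in_data, in_stride, w, h,
               s->yuv2rgb_coeffs, s->yuv_offset[0]);
    /* 2) ... linearize, convert primaries, delinearize on s->rgb ... */
    /* 3) fold the processed RGB back into the output YUV layout */
    s->rgb2yuv(out_data, out_stride, s->rgb, s->rgb_stride, w, h,
               s->rgb2yuv_coeffs, s->yuv_offset[1]);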
ColorSpaceContext::out_uv_rng
int out_uv_rng
Definition: vf_colorspace.c:158
AVCOL_TRC_SMPTE170M
@ AVCOL_TRC_SMPTE170M
also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
Definition: pixfmt.h:673
inputs
static const AVFilterPad inputs[]
Definition: vf_colorspace.c:1045
ThreadData::in_ss_h
int in_ss_h
Definition: vf_colorspace.c:308
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVFilterContext
An instance of a filter.
Definition: avfilter.h:274
ColorSpaceContext::did_warn_range
int did_warn_range
Definition: vf_colorspace.c:160
WhitepointAdaptation
WhitepointAdaptation
Definition: vf_colorspace.c:61
ColorSpaceContext::user_iprm
enum AVColorPrimaries in_prm out_prm user_prm user_iprm
Definition: vf_colorspace.c:127
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:167
desc
const char * desc
Definition: libsvtav1.c:78
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
FFFilter::p
AVFilter p
The public AVFilter.
Definition: filters.h:270
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
ENUM
#define ENUM(x, y, z)
Definition: vf_colorspace.c:915
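Note: OFFSET, FLAGS and ENUM are the usual shorthand for building this filter's AVOption table; a rough illustration of how such entries combine (not a verbatim excerpt from vf_colorspace.c):
    { "range", "Output color range", OFFSET(user_rng), AV_OPT_TYPE_INT,
      { .i64 = AVCOL_RANGE_UNSPECIFIED }, 0, AVCOL_RANGE_NB - 1, FLAGS, .unit = "rng" },
    ENUM("tv", AVCOL_RANGE_MPEG, "rng"),
    ENUM("pc", AVCOL_RANGE_JPEG, "rng"),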
mem.h
rgb2yuv_fsb_fn
void(* rgb2yuv_fsb_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8], int *rnd[3][2])
Definition: colorspacedsp.h:35
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
AVCOL_PRI_SMPTE432
@ AVCOL_PRI_SMPTE432
SMPTE ST 432-1 (2010) / P3 D65 / Display P3.
Definition: pixfmt.h:651
AVCOL_TRC_V_LOG
@ AVCOL_TRC_V_LOG
Definition: pixfmt.h:692
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
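Note: FFALIGN rounds a size up to a multiple of a power-of-two alignment, e.g. when padding line sizes so vector code can read and write whole registers. Illustrative only:
    /* Round an int16_t line size up to a 32-byte multiple. */
    ptrdiff_t line_size = FFALIGN(width * sizeof(int16_t), 32);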
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
DITHER_NONE
@ DITHER_NONE
Definition: vf_colorspace.c:43
TransferCharacteristics::beta
double beta
Definition: vf_colorspace.c:115
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which typically indicates the size in bytes of each pict...
Definition: frame.h:472
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ma
#define ma
Definition: vf_colormatrix.c:98
TransferCharacteristics::delta
double delta
Definition: vf_colorspace.c:115
h
h
Definition: vp9dsp_template.c:2070
stride
#define stride
Definition: h264pred_template.c:536
supported_format
#define supported_format(d)
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:702
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:742
create_filtergraph
static int create_filtergraph(AVFilterContext *ctx, const AVFrame *in, const AVFrame *out)
Definition: vf_colorspace.c:411
ThreadData::in_linesize
ptrdiff_t in_linesize[3]
Definition: vf_colorspace.c:307
yuv2rgb
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:263
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3823
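Note: av_color_transfer_name() maps an AVColorTransferCharacteristic to its printable name, which is how diagnostics for unsupported transfer functions are usually worded. A sketch of that pattern:
    /* Log an unsupported transfer characteristic by number and name. */
    av_log(ctx, AV_LOG_ERROR,
           "Unsupported transfer characteristics %d (%s)\n",
           trc, av_color_transfer_name(trc));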
ff_colorspacedsp_init
void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp)
Definition: colorspacedsp.c:102
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
ColorSpaceContext::rgb_stride
ptrdiff_t rgb_stride
Definition: vf_colorspace.c:135
apply_lut
static void apply_lut(int16_t *buf[3], ptrdiff_t stride, int w, int h, const int16_t *lut)
Definition: vf_colorspace.c:288
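Note: apply_lut() runs a lookup table over the three intermediate RGB planes in place; this is how the linearize/delinearize steps would typically be applied. A hedged sketch using the ColorSpaceContext fields listed in this file:
    /* Sketch: linearize the intermediate RGB planes with the precomputed LUT. */
    apply_lut(s->rgb, s->rgb_stride, w, h, s->lin_lut);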
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
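Note: av_get_pix_fmt_name() is the standard way to render a pixel format in log messages; for example (illustrative):
    /* e.g. logs "yuv420p" for AV_PIX_FMT_YUV420P */
    av_log(ctx, AV_LOG_VERBOSE, "input pixel format: %s\n",
           av_get_pix_fmt_name(frame->format));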
dither
static const uint8_t dither[8][8]
Definition: vf_fspp.c:62