FFmpeg
swscale.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <stdio.h>
23 #include <string.h>
24 
25 #include "libavutil/avassert.h"
26 #include "libavutil/bswap.h"
27 #include "libavutil/common.h"
28 #include "libavutil/cpu.h"
29 #include "libavutil/emms.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/mem_internal.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/hwcontext.h"
35 #include "config.h"
36 #include "swscale_internal.h"
37 #include "swscale.h"
38 #if CONFIG_VULKAN
39 #include "vulkan/ops.h"
40 #endif
41 
/* 8x8 ordered-dither matrix (values 0..126 around a bias of 64) used when
 * reducing bit depth.  A ninth row duplicating the first is appended so that
 * code indexing with [y & 7] may safely read one row past the current one. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_128)[9][8] = {
    {  36,  68,  60,  92,  34,  66,  58,  90, },
    { 100,   4, 124,  28,  98,   2, 122,  26, },
    {  52,  84,  44,  76,  50,  82,  42,  74, },
    { 116,  20, 108,  12, 114,  18, 106,  10, },
    {  32,  64,  56,  88,  38,  70,  62,  94, },
    {  96,   0, 120,  24, 102,   6, 126,  30, },
    {  48,  80,  40,  72,  54,  86,  46,  78, },
    { 112,  16, 104,   8, 118,  22, 110,  14, },
    {  36,  68,  60,  92,  34,  66,  58,  90, },
};
53 
/* Flat dither line of constant 64: assigned to lumDither8/chrDither8 in
 * ff_swscale() when no dithering is wanted (see the !should_dither path). */
DECLARE_ALIGNED(8, static const uint8_t, sws_pb_64)[8] = {
    64, 64, 64, 64, 64, 64, 64, 64
};
57 
58 static av_always_inline void fillPlane(uint8_t *plane, int stride, int width,
59  int height, int y, uint8_t val)
60 {
61  int i;
62  uint8_t *ptr = plane + stride * y;
63  for (i = 0; i < height; i++) {
64  memset(ptr, val, width);
65  ptr += stride;
66  }
67 }
68 
/**
 * Horizontally scale one line of high-bit-depth samples into 19-bit
 * intermediate values (stored as int32_t).
 *
 * For each output pixel, a filterSize-tap dot product of 16-bit input
 * samples and 14-bit filter coefficients is computed, then shifted down
 * by 'sh' and clipped to (1 << 19) - 1.  The shift depends on the source
 * depth; RGB/PAL8 and float sources get special-cased shifts (float input
 * is processed as if it were 16 bpc unsigned).
 */
static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
    int i;
    int32_t *dst = (int32_t *) _dst;              /* output really is 32 bit wide */
    const uint16_t *src = (const uint16_t *) _src;
    int bits = desc->comp[0].depth - 1;
    int sh = bits - 4;

    /* NOTE(review): sub-16-bit RGB/PAL8 input presumably arrives already
     * expanded to a fixed intermediate precision, hence the constant shift
     * of 9 — confirm against the packed-input readers. */
    if ((isAnyRGB(c->opts.src_format) || c->opts.src_format==AV_PIX_FMT_PAL8) && desc->comp[0].depth<16) {
        sh = 9;
    } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input are process like uint 16bpc */
        sh = 16 - 1 - 4;
    }

    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val = 0;

        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        // filter=14 bit, input=16 bit, output=30 bit, >> sh (11 for 16-bit input) makes 19 bit
        dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
    }
}
98 
/**
 * Horizontally scale one line of high-bit-depth samples into 15-bit
 * intermediate values.
 *
 * Same structure as hScale16To19_c(): a filterSize-tap dot product of
 * 16-bit samples and 14-bit coefficients, shifted down by 'sh' and clipped
 * to (1 << 15) - 1.  RGB/PAL8 sources below 16 bits use a constant shift
 * of 13; float input is processed as if it were 16 bpc unsigned.
 */
static void hScale16To15_c(SwsInternal *c, int16_t *dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
    int i;
    const uint16_t *src = (const uint16_t *) _src;
    int sh = desc->comp[0].depth - 1;

    if (sh<15) {
        sh = isAnyRGB(c->opts.src_format) || c->opts.src_format==AV_PIX_FMT_PAL8 ? 13 : (desc->comp[0].depth - 1);
    } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input are process like uint 16bpc */
        sh = 16 - 1;
    }

    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val = 0;

        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        // filter=14 bit, input=16 bit, output=30 bit, >> 15 makes 15 bit
        dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
    }
}
126 
127 // bilinear / bicubic scaling
128 static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW,
129  const uint8_t *src, const int16_t *filter,
130  const int32_t *filterPos, int filterSize)
131 {
132  int i;
133  for (i = 0; i < dstW; i++) {
134  int j;
135  int srcPos = filterPos[i];
136  int val = 0;
137  for (j = 0; j < filterSize; j++) {
138  val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
139  }
140  dst[i] = FFMIN(val >> 7, (1 << 15) - 1); // the cubic equation does overflow ...
141  }
142 }
143 
144 static void hScale8To19_c(SwsInternal *c, int16_t *_dst, int dstW,
145  const uint8_t *src, const int16_t *filter,
146  const int32_t *filterPos, int filterSize)
147 {
148  int i;
149  int32_t *dst = (int32_t *) _dst;
150  for (i = 0; i < dstW; i++) {
151  int j;
152  int srcPos = filterPos[i];
153  int val = 0;
154  for (j = 0; j < filterSize; j++) {
155  val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
156  }
157  dst[i] = FFMIN(val >> 3, (1 << 19) - 1); // the cubic equation does overflow ...
158  }
159 }
160 
161 // FIXME all pal and rgb srcFormats could do this conversion as well
162 // FIXME all scalers more complex than bilinear could do half of this transform
// FIXME all pal and rgb srcFormats could do this conversion as well
// FIXME all scalers more complex than bilinear could do half of this transform
/* Convert one line of limited-range chroma to full range (JPEG), in place.
 * Samples are 15-bit intermediates; 'coeff' is a Q14 scale factor and
 * 'offset' a pre-shifted bias.  Results are clipped to 15 bits. */
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width,
                             uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;

    for (int i = 0; i < width; i++) {
        int u = (dstU[i] * coeff + offset) >> 14;
        int v = (dstV[i] * coeff + offset) >> 14;

        dstU[i] = u < (1 << 15) - 1 ? u : (1 << 15) - 1;
        dstV[i] = v < (1 << 15) - 1 ? v : (1 << 15) - 1;
    }
}
176 
/* Convert one line of full-range (JPEG) chroma to limited range, in place.
 * Same fixed-point scheme as chrRangeToJpeg_c(); no clipping is needed
 * because the target range is narrower than the source range. */
static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width,
                               uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;
    int16_t *u = dstU, *v = dstV;

    for (const int16_t *end = dstU + width; u < end; u++, v++) {
        *u = (*u * coeff + offset) >> 14;
        *v = (*v * coeff + offset) >> 14;
    }
}
188 
/* Convert one line of limited-range luma to full range (JPEG), in place.
 * 15-bit samples, Q14 coefficient, result clipped to 15 bits. */
static void lumRangeToJpeg_c(int16_t *dst, int width,
                             uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;
    int i = 0;

    while (i < width) {
        int y = (dst[i] * coeff + offset) >> 14;

        dst[i++] = y < (1 << 15) - 1 ? y : (1 << 15) - 1;
    }
}
200 
/* Convert one line of full-range (JPEG) luma to limited range, in place.
 * No clipping: the limited range is a subset of the full range. */
static void lumRangeFromJpeg_c(int16_t *dst, int width,
                               uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;

    for (int16_t *end = dst + width; dst < end; dst++)
        *dst = (*dst * coeff + offset) >> 14;
}
210 
/* Limited- to full-range conversion for 19-bit chroma intermediates
 * (stored as int32_t behind the int16_t* interface).  The coefficient is
 * scaled by 2^18; results are clipped to 19 bits. */
static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width,
                               uint32_t coeff, int64_t offset)
{
    int32_t *dstU = (int32_t *) _dstU;
    int32_t *dstV = (int32_t *) _dstV;
    const int max19 = (1 << 19) - 1;

    for (int i = 0; i < width; i++) {
        int U = ((int64_t) dstU[i] * coeff + offset) >> 18;
        int V = ((int64_t) dstV[i] * coeff + offset) >> 18;

        dstU[i] = U < max19 ? U : max19;
        dstV[i] = V < max19 ? V : max19;
    }
}
224 
/* Full- to limited-range conversion for 19-bit chroma intermediates
 * (int32_t storage).  Coefficient scaled by 2^18; no clipping needed. */
static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width,
                                 uint32_t coeff, int64_t offset)
{
    int32_t *dstU = (int32_t *) _dstU;
    int32_t *dstV = (int32_t *) _dstV;
    int i = 0;

    while (i < width) {
        dstU[i] = ((int64_t) dstU[i] * coeff + offset) >> 18;
        dstV[i] = ((int64_t) dstV[i] * coeff + offset) >> 18;
        i++;
    }
}
236 
/* Limited- to full-range conversion for 19-bit luma intermediates
 * (int32_t storage).  Coefficient scaled by 2^18; clipped to 19 bits. */
static void lumRangeToJpeg16_c(int16_t *_dst, int width,
                               uint32_t coeff, int64_t offset)
{
    int32_t *dst = (int32_t *) _dst;
    const int max19 = (1 << 19) - 1;

    for (int i = 0; i < width; i++) {
        int Y = ((int64_t) dst[i] * coeff + offset) >> 18;

        dst[i] = Y < max19 ? Y : max19;
    }
}
247 
/* Full- to limited-range conversion for 19-bit luma intermediates
 * (int32_t storage).  Coefficient scaled by 2^18; no clipping needed. */
static void lumRangeFromJpeg16_c(int16_t *_dst, int width,
                                 uint32_t coeff, int64_t offset)
{
    int32_t *dst = (int32_t *) _dst;

    for (int32_t *end = dst + width; dst < end; dst++)
        *dst = ((int64_t) *dst * coeff + offset) >> 18;
}
256 
257 
/* Set DEBUG_SWSCALE_BUFFERS to 1 to trace slice/buffer management in
 * ff_swscale(); at 0 the av_log call is dead-code eliminated.
 * NOTE(review): the macro expands to a bare 'if' (no do-while wrapper), so
 * it must only be used as a standalone statement — which is how this file
 * uses it.  It also expects a variable 'c' in scope as the log context. */
#define DEBUG_SWSCALE_BUFFERS 0
#define DEBUG_BUFFERS(...) \
    if (DEBUG_SWSCALE_BUFFERS) \
        av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
262 
263 int ff_swscale(SwsInternal *c, const uint8_t *const src[], const int srcStride[],
264  int srcSliceY, int srcSliceH, uint8_t *const dst[],
265  const int dstStride[], int dstSliceY, int dstSliceH)
266 {
267  const int scale_dst = dstSliceY > 0 || dstSliceH < c->opts.dst_h;
268 
269  /* load a few things into local vars to make the code more readable?
270  * and faster */
271  const int dstW = c->opts.dst_w;
272  int dstH = c->opts.dst_h;
273 
274  const enum AVPixelFormat dstFormat = c->opts.dst_format;
275  const int flags = c->opts.flags;
276  int32_t *vLumFilterPos = c->vLumFilterPos;
277  int32_t *vChrFilterPos = c->vChrFilterPos;
278 
279  const int vLumFilterSize = c->vLumFilterSize;
280  const int vChrFilterSize = c->vChrFilterSize;
281 
282  yuv2planar1_fn yuv2plane1 = c->yuv2plane1;
283  yuv2planarX_fn yuv2planeX = c->yuv2planeX;
284  yuv2interleavedX_fn yuv2nv12cX = c->yuv2nv12cX;
285  yuv2packed1_fn yuv2packed1 = c->yuv2packed1;
286  yuv2packed2_fn yuv2packed2 = c->yuv2packed2;
287  yuv2packedX_fn yuv2packedX = c->yuv2packedX;
288  yuv2anyX_fn yuv2anyX = c->yuv2anyX;
289  const int chrSrcSliceY = srcSliceY >> c->chrSrcVSubSample;
290  const int chrSrcSliceH = AV_CEIL_RSHIFT(srcSliceH, c->chrSrcVSubSample);
291  int should_dither = isNBPS(c->opts.src_format) ||
292  is16BPS(c->opts.src_format);
293  int lastDstY;
294 
295  /* vars which will change and which we need to store back in the context */
296  int dstY = c->dstY;
297  int lastInLumBuf = c->lastInLumBuf;
298  int lastInChrBuf = c->lastInChrBuf;
299 
300  int lumStart = 0;
301  int lumEnd = c->descIndex[0];
302  int chrStart = lumEnd;
303  int chrEnd = c->descIndex[1];
304  int vStart = chrEnd;
305  int vEnd = c->numDesc;
306  SwsSlice *src_slice = &c->slice[lumStart];
307  SwsSlice *hout_slice = &c->slice[c->numSlice-2];
308  SwsSlice *vout_slice = &c->slice[c->numSlice-1];
309  SwsFilterDescriptor *desc = c->desc;
310 
311  int needAlpha = c->needAlpha;
312 
313  int hasLumHoles = 1;
314  int hasChrHoles = 1;
315 
316  const uint8_t *src2[4];
317  int srcStride2[4];
318 
319  if (isPacked(c->opts.src_format)) {
320  src2[0] =
321  src2[1] =
322  src2[2] =
323  src2[3] = src[0];
324  srcStride2[0] =
325  srcStride2[1] =
326  srcStride2[2] =
327  srcStride2[3] = srcStride[0];
328  } else {
329  memcpy(src2, src, sizeof(src2));
330  memcpy(srcStride2, srcStride, sizeof(srcStride2));
331  }
332 
333  srcStride2[1] *= 1 << c->vChrDrop;
334  srcStride2[2] *= 1 << c->vChrDrop;
335 
336  DEBUG_BUFFERS("swscale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
337  src2[0], srcStride2[0], src2[1], srcStride2[1],
338  src2[2], srcStride2[2], src2[3], srcStride2[3],
339  dst[0], dstStride[0], dst[1], dstStride[1],
340  dst[2], dstStride[2], dst[3], dstStride[3]);
341  DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
342  srcSliceY, srcSliceH, dstY, dstH);
343  DEBUG_BUFFERS("vLumFilterSize: %d vChrFilterSize: %d\n",
344  vLumFilterSize, vChrFilterSize);
345 
346  if (dstStride[0]&15 || dstStride[1]&15 ||
347  dstStride[2]&15 || dstStride[3]&15) {
348  SwsInternal *const ctx = c->parent ? sws_internal(c->parent) : c;
349  if (flags & SWS_PRINT_INFO &&
350  !atomic_exchange_explicit(&ctx->stride_unaligned_warned, 1, memory_order_relaxed)) {
352  "Warning: dstStride is not aligned!\n"
353  " ->cannot do aligned memory accesses anymore\n");
354  }
355  }
356 
357 #if ARCH_X86
358  if ( (uintptr_t) dst[0]&15 || (uintptr_t) dst[1]&15 || (uintptr_t) dst[2]&15
359  || (uintptr_t)src2[0]&15 || (uintptr_t)src2[1]&15 || (uintptr_t)src2[2]&15
360  || srcStride2[0]&15 || srcStride2[1]&15 || srcStride2[2]&15 || srcStride2[3]&15
361  ) {
362  SwsInternal *const ctx = c->parent ? sws_internal(c->parent) : c;
363  int cpu_flags = av_get_cpu_flags();
364  if (flags & SWS_PRINT_INFO && HAVE_MMXEXT && (cpu_flags & AV_CPU_FLAG_SSE2) &&
365  !atomic_exchange_explicit(&ctx->stride_unaligned_warned,1, memory_order_relaxed)) {
366  av_log(c, AV_LOG_WARNING, "Warning: data is not aligned! This can lead to a speed loss\n");
367  }
368  }
369 #endif
370 
371  if (scale_dst) {
372  dstY = dstSliceY;
373  dstH = dstY + dstSliceH;
374  lastInLumBuf = -1;
375  lastInChrBuf = -1;
376  } else if (srcSliceY == 0) {
377  /* Note the user might start scaling the picture in the middle so this
378  * will not get executed. This is not really intended but works
379  * currently, so people might do it. */
380  dstY = 0;
381  lastInLumBuf = -1;
382  lastInChrBuf = -1;
383  }
384 
385  if (!should_dither) {
386  c->chrDither8 = c->lumDither8 = sws_pb_64;
387  }
388  lastDstY = dstY;
389 
390  ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
391  yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, c->use_mmx_vfilter);
392 
393  ff_init_slice_from_src(src_slice, (uint8_t**)src2, srcStride2, c->opts.src_w,
394  srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1);
395 
396  ff_init_slice_from_src(vout_slice, (uint8_t**)dst, dstStride, c->opts.dst_w,
397  dstY, dstSliceH, dstY >> c->chrDstVSubSample,
398  AV_CEIL_RSHIFT(dstSliceH, c->chrDstVSubSample), scale_dst);
399  if (srcSliceY == 0) {
400  hout_slice->plane[0].sliceY = lastInLumBuf + 1;
401  hout_slice->plane[1].sliceY = lastInChrBuf + 1;
402  hout_slice->plane[2].sliceY = lastInChrBuf + 1;
403  hout_slice->plane[3].sliceY = lastInLumBuf + 1;
404 
405  hout_slice->plane[0].sliceH =
406  hout_slice->plane[1].sliceH =
407  hout_slice->plane[2].sliceH =
408  hout_slice->plane[3].sliceH = 0;
409  hout_slice->width = dstW;
410  }
411 
412  for (; dstY < dstH; dstY++) {
413  const int chrDstY = dstY >> c->chrDstVSubSample;
414  int use_mmx_vfilter= c->use_mmx_vfilter;
415 
416  // First line needed as input
417  const int firstLumSrcY = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
418  const int firstLumSrcY2 = FFMAX(1 - vLumFilterSize, vLumFilterPos[FFMIN(dstY | ((1 << c->chrDstVSubSample) - 1), c->opts.dst_h - 1)]);
419  // First line needed as input
420  const int firstChrSrcY = FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]);
421 
422  // Last line needed as input
423  int lastLumSrcY = FFMIN(c->opts.src_h, firstLumSrcY + vLumFilterSize) - 1;
424  int lastLumSrcY2 = FFMIN(c->opts.src_h, firstLumSrcY2 + vLumFilterSize) - 1;
425  int lastChrSrcY = FFMIN(c->chrSrcH, firstChrSrcY + vChrFilterSize) - 1;
426  int enough_lines;
427 
428  int i;
429  int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;
430 
431  // handle holes (FAST_BILINEAR & weird filters)
432  if (firstLumSrcY > lastInLumBuf) {
433 
434  hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
435  if (hasLumHoles) {
436  hout_slice->plane[0].sliceY = firstLumSrcY;
437  hout_slice->plane[3].sliceY = firstLumSrcY;
438  hout_slice->plane[0].sliceH =
439  hout_slice->plane[3].sliceH = 0;
440  }
441 
442  lastInLumBuf = firstLumSrcY - 1;
443  }
444  if (firstChrSrcY > lastInChrBuf) {
445 
446  hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
447  if (hasChrHoles) {
448  hout_slice->plane[1].sliceY = firstChrSrcY;
449  hout_slice->plane[2].sliceY = firstChrSrcY;
450  hout_slice->plane[1].sliceH =
451  hout_slice->plane[2].sliceH = 0;
452  }
453 
454  lastInChrBuf = firstChrSrcY - 1;
455  }
456 
457  DEBUG_BUFFERS("dstY: %d\n", dstY);
458  DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
459  firstLumSrcY, lastLumSrcY, lastInLumBuf);
460  DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
461  firstChrSrcY, lastChrSrcY, lastInChrBuf);
462 
463  // Do we have enough lines in this slice to output the dstY line
464  enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH &&
465  lastChrSrcY < AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample);
466 
467  if (!enough_lines) {
468  lastLumSrcY = srcSliceY + srcSliceH - 1;
469  lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
470  DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
471  lastLumSrcY, lastChrSrcY);
472  }
473 
474  av_assert0((lastLumSrcY - firstLumSrcY + 1) <= hout_slice->plane[0].available_lines);
475  av_assert0((lastChrSrcY - firstChrSrcY + 1) <= hout_slice->plane[1].available_lines);
476 
477 
478  posY = hout_slice->plane[0].sliceY + hout_slice->plane[0].sliceH;
479  if (posY <= lastLumSrcY && !hasLumHoles) {
480  firstPosY = FFMAX(firstLumSrcY, posY);
481  lastPosY = FFMIN(firstLumSrcY + hout_slice->plane[0].available_lines - 1, srcSliceY + srcSliceH - 1);
482  } else {
483  firstPosY = posY;
484  lastPosY = lastLumSrcY;
485  }
486 
487  cPosY = hout_slice->plane[1].sliceY + hout_slice->plane[1].sliceH;
488  if (cPosY <= lastChrSrcY && !hasChrHoles) {
489  firstCPosY = FFMAX(firstChrSrcY, cPosY);
490  lastCPosY = FFMIN(firstChrSrcY + hout_slice->plane[1].available_lines - 1, AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample) - 1);
491  } else {
492  firstCPosY = cPosY;
493  lastCPosY = lastChrSrcY;
494  }
495 
496  ff_rotate_slice(hout_slice, lastPosY, lastCPosY);
497 
498  if (posY < lastLumSrcY + 1) {
499  for (i = lumStart; i < lumEnd; ++i)
500  desc[i].process(c, &desc[i], firstPosY, lastPosY - firstPosY + 1);
501  }
502 
503  lastInLumBuf = lastLumSrcY;
504 
505  if (cPosY < lastChrSrcY + 1) {
506  for (i = chrStart; i < chrEnd; ++i)
507  desc[i].process(c, &desc[i], firstCPosY, lastCPosY - firstCPosY + 1);
508  }
509 
510  lastInChrBuf = lastChrSrcY;
511 
512  if (!enough_lines)
513  break; // we can't output a dstY line so let's try with the next slice
514 
515 #if HAVE_MMX_INLINE
517  c->dstW_mmx = c->opts.dst_w;
518 #endif
519  if (should_dither) {
520  c->chrDither8 = ff_dither_8x8_128[chrDstY & 7];
521  c->lumDither8 = ff_dither_8x8_128[dstY & 7];
522  }
523  if (dstY >= c->opts.dst_h - 2) {
524  /* hmm looks like we can't use MMX here without overwriting
525  * this array's tail */
526  ff_sws_init_output_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX,
527  &yuv2packed1, &yuv2packed2, &yuv2packedX, &yuv2anyX);
528  use_mmx_vfilter= 0;
529  ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
530  yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, use_mmx_vfilter);
531  }
532 
533  for (i = vStart; i < vEnd; ++i)
534  desc[i].process(c, &desc[i], dstY, 1);
535  }
536  if (isPlanar(dstFormat) && isALPHA(dstFormat) && !needAlpha) {
537  int offset = lastDstY - dstSliceY;
538  int length = dstW;
539  int height = dstY - lastDstY;
540 
541  if (is16BPS(dstFormat) || isNBPS(dstFormat)) {
542  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
543  fillPlane16(dst[3], dstStride[3], length, height, offset,
544  1, desc->comp[3].depth,
545  isBE(dstFormat));
546  } else if (is32BPS(dstFormat)) {
547  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
548  fillPlane32(dst[3], dstStride[3], length, height, offset,
549  1, desc->comp[3].depth,
550  isBE(dstFormat), desc->flags & AV_PIX_FMT_FLAG_FLOAT);
551  } else
552  fillPlane(dst[3], dstStride[3], length, height, offset, 255);
553  }
554 
555 #if HAVE_MMXEXT_INLINE
557  __asm__ volatile ("sfence" ::: "memory");
558 #endif
559  emms_c();
560 
561  /* store changed local vars back in the context */
562  c->dstY = dstY;
563  c->lastInLumBuf = lastInLumBuf;
564  c->lastInChrBuf = lastInChrBuf;
565 
566  return dstY - lastDstY;
567 }
568 
569 /*
570  * Solve for coeff and offset:
571  * dst = ((src << src_shift) * coeff + offset) >> (mult_shift + src_shift)
572  *
573  * If SwsInternal->dstBpc is > 14, coeff is uint16_t and offset is int32_t,
574  * otherwise (SwsInternal->dstBpc is <= 14) coeff is uint32_t and offset is
575  * int64_t.
576  */
/*
 * Solve for coeff and offset:
 *   dst = ((src << src_shift) * coeff + offset) >> (mult_shift + src_shift)
 *
 * If SwsInternal->dstBpc is > 14, coeff is uint16_t and offset is int32_t,
 * otherwise (SwsInternal->dstBpc is <= 14) coeff is uint32_t and offset is
 * int64_t.
 */
static void solve_range_convert(uint16_t src_min, uint16_t src_max,
                                uint16_t dst_min, uint16_t dst_max,
                                int src_bits, int src_shift, int mult_shift,
                                uint32_t *coeff, int64_t *offset)
{
    const uint16_t src_range   = src_max - src_min;
    const uint16_t dst_range   = dst_max - dst_min;
    const int      total_shift = mult_shift + src_shift;
    const uint64_t q = ((uint64_t) dst_range << total_shift) / src_range;

    /* round the quotient up when dropping the src_shift low bits */
    *coeff  = (q + (1ULL << src_shift) - 1) >> src_shift;
    /* anchor the mapping at src_max -> dst_max and add 0.5 ulp for rounding */
    *offset = ((int64_t) dst_max << total_shift) -
              ((int64_t) src_max << src_shift) * *coeff +
              (1U << (mult_shift - 1));
}
590 
592 {
593  const int bit_depth = c->dstBpc ? FFMIN(c->dstBpc, 16) : 8;
594  const int src_bits = bit_depth <= 14 ? 15 : 19;
595  const int src_shift = src_bits - bit_depth;
596  const int mult_shift = bit_depth <= 14 ? 14 : 18;
597  const uint16_t mpeg_min = 16U << (bit_depth - 8);
598  const uint16_t mpeg_max_lum = 235U << (bit_depth - 8);
599  const uint16_t mpeg_max_chr = 240U << (bit_depth - 8);
600  const uint16_t jpeg_max = (1U << bit_depth) - 1;
601  uint16_t src_min, src_max_lum, src_max_chr;
602  uint16_t dst_min, dst_max_lum, dst_max_chr;
603  if (c->opts.src_range) {
604  src_min = 0;
605  src_max_lum = jpeg_max;
606  src_max_chr = jpeg_max;
607  dst_min = mpeg_min;
608  dst_max_lum = mpeg_max_lum;
609  dst_max_chr = mpeg_max_chr;
610  } else {
611  src_min = mpeg_min;
612  src_max_lum = mpeg_max_lum;
613  src_max_chr = mpeg_max_chr;
614  dst_min = 0;
615  dst_max_lum = jpeg_max;
616  dst_max_chr = jpeg_max;
617  }
618  solve_range_convert(src_min, src_max_lum, dst_min, dst_max_lum,
619  src_bits, src_shift, mult_shift,
620  &c->lumConvertRange_coeff, &c->lumConvertRange_offset);
621  solve_range_convert(src_min, src_max_chr, dst_min, dst_max_chr,
622  src_bits, src_shift, mult_shift,
623  &c->chrConvertRange_coeff, &c->chrConvertRange_offset);
624 }
625 
627 {
628  c->lumConvertRange = NULL;
629  c->chrConvertRange = NULL;
630  if (c->opts.src_range != c->opts.dst_range && !isAnyRGB(c->opts.dst_format) && c->dstBpc < 32) {
632  if (c->dstBpc <= 14) {
633  if (c->opts.src_range) {
634  c->lumConvertRange = lumRangeFromJpeg_c;
635  c->chrConvertRange = chrRangeFromJpeg_c;
636  } else {
637  c->lumConvertRange = lumRangeToJpeg_c;
638  c->chrConvertRange = chrRangeToJpeg_c;
639  }
640  } else {
641  if (c->opts.src_range) {
642  c->lumConvertRange = lumRangeFromJpeg16_c;
643  c->chrConvertRange = chrRangeFromJpeg16_c;
644  } else {
645  c->lumConvertRange = lumRangeToJpeg16_c;
646  c->chrConvertRange = chrRangeToJpeg16_c;
647  }
648  }
649 
650 #if ARCH_AARCH64
652 #elif ARCH_LOONGARCH64
654 #elif ARCH_RISCV
656 #elif ARCH_X86
658 #endif
659  }
660 }
661 
663 {
664  enum AVPixelFormat srcFormat = c->opts.src_format;
665 
667 
668  ff_sws_init_output_funcs(c, &c->yuv2plane1, &c->yuv2planeX,
669  &c->yuv2nv12cX, &c->yuv2packed1,
670  &c->yuv2packed2, &c->yuv2packedX, &c->yuv2anyX);
671 
672  ff_sws_init_input_funcs(c, &c->lumToYV12, &c->alpToYV12, &c->chrToYV12,
673  &c->readLumPlanar, &c->readAlpPlanar, &c->readChrPlanar);
674 
675  if (c->srcBpc == 8) {
676  if (c->dstBpc <= 14) {
677  c->hyScale = c->hcScale = hScale8To15_c;
678  if (c->opts.flags & SWS_FAST_BILINEAR) {
679  c->hyscale_fast = ff_hyscale_fast_c;
680  c->hcscale_fast = ff_hcscale_fast_c;
681  }
682  } else {
683  c->hyScale = c->hcScale = hScale8To19_c;
684  }
685  } else {
686  c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_c
687  : hScale16To15_c;
688  }
689 
691 
692  if (!(isGray(srcFormat) || isGray(c->opts.dst_format) ||
693  srcFormat == AV_PIX_FMT_MONOBLACK || srcFormat == AV_PIX_FMT_MONOWHITE))
694  c->needs_hcscale = 1;
695 }
696 
698 {
700 
701 #if ARCH_PPC
703 #elif ARCH_X86
705 #elif ARCH_AARCH64
707 #elif ARCH_ARM
709 #elif ARCH_LOONGARCH64
711 #elif ARCH_RISCV
713 #endif
714 }
715 
716 static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
717 {
718  if (!isALPHA(format))
719  src[3] = NULL;
720  if (!isPlanar(format)) {
721  src[3] = src[2] = NULL;
722 
723  if (!usePal(format))
724  src[1] = NULL;
725  }
726 }
727 
728 static int check_image_pointers(const uint8_t * const data[4], enum AVPixelFormat pix_fmt,
729  const int linesizes[4])
730 {
732  int i;
733 
734  av_assert2(desc);
735 
736  for (i = 0; i < 4; i++) {
737  int plane = desc->comp[i].plane;
738  if (!data[plane] || !linesizes[plane])
739  return 0;
740  }
741 
742  return 1;
743 }
744 
/**
 * Convert a 12-bit-in-16 X'Y'Z' image to 16-bit RGB48.
 *
 * Per pixel: read 16-bit X'Y'Z' (endianness taken from the source format
 * descriptor, which is also used for the output), drop to 12 bits, apply
 * the input gamma LUT, multiply by the 3x3 XYZ->RGB matrix (fixed point,
 * shifted down by 12), clip, then apply the output gamma LUT and scale
 * back from 12 to 16 bits.
 */
static void xyz12Torgb48_c(const SwsInternal *c, uint8_t *dst, int dst_stride,
                           const uint8_t *src, int src_stride, int w, int h)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);

    for (int yp = 0; yp < h; yp++) {
        const uint16_t *src16 = (const uint16_t *) src;
        uint16_t *dst16 = (uint16_t *) dst;

        for (int xp = 0; xp < 3 * w; xp += 3) {
            int x, y, z, r, g, b;

            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                x = AV_RB16(src16 + xp + 0);
                y = AV_RB16(src16 + xp + 1);
                z = AV_RB16(src16 + xp + 2);
            } else {
                x = AV_RL16(src16 + xp + 0);
                y = AV_RL16(src16 + xp + 1);
                z = AV_RL16(src16 + xp + 2);
            }

            /* 16 -> 12 bit, then linearize through the input gamma LUT */
            x = c->xyz2rgb.gamma.in[x >> 4];
            y = c->xyz2rgb.gamma.in[y >> 4];
            z = c->xyz2rgb.gamma.in[z >> 4];

            // convert from XYZlinear to sRGBlinear
            /* note: '>>' binds looser than '+', so the shift applies to the
             * whole sum */
            r = c->xyz2rgb.mat[0][0] * x +
                c->xyz2rgb.mat[0][1] * y +
                c->xyz2rgb.mat[0][2] * z >> 12;
            g = c->xyz2rgb.mat[1][0] * x +
                c->xyz2rgb.mat[1][1] * y +
                c->xyz2rgb.mat[1][2] * z >> 12;
            b = c->xyz2rgb.mat[2][0] * x +
                c->xyz2rgb.mat[2][1] * y +
                c->xyz2rgb.mat[2][2] * z >> 12;

            // limit values to 16-bit depth
            r = av_clip_uint16(r);
            g = av_clip_uint16(g);
            b = av_clip_uint16(b);

            // convert from sRGBlinear to RGB and scale from 12bit to 16bit
            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                AV_WB16(dst16 + xp + 0, c->xyz2rgb.gamma.out[r] << 4);
                AV_WB16(dst16 + xp + 1, c->xyz2rgb.gamma.out[g] << 4);
                AV_WB16(dst16 + xp + 2, c->xyz2rgb.gamma.out[b] << 4);
            } else {
                AV_WL16(dst16 + xp + 0, c->xyz2rgb.gamma.out[r] << 4);
                AV_WL16(dst16 + xp + 1, c->xyz2rgb.gamma.out[g] << 4);
                AV_WL16(dst16 + xp + 2, c->xyz2rgb.gamma.out[b] << 4);
            }
        }

        src += src_stride;
        dst += dst_stride;
    }
}
803 
804 static void rgb48Toxyz12_c(const SwsInternal *c, uint8_t *dst, int dst_stride,
805  const uint8_t *src, int src_stride, int w, int h)
806 {
807  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.dst_format);
808 
809  for (int yp = 0; yp < h; yp++) {
810  uint16_t *src16 = (uint16_t *) src;
811  uint16_t *dst16 = (uint16_t *) dst;
812 
813  for (int xp = 0; xp < 3 * w; xp += 3) {
814  int x, y, z, r, g, b;
815 
816  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
817  r = AV_RB16(src16 + xp + 0);
818  g = AV_RB16(src16 + xp + 1);
819  b = AV_RB16(src16 + xp + 2);
820  } else {
821  r = AV_RL16(src16 + xp + 0);
822  g = AV_RL16(src16 + xp + 1);
823  b = AV_RL16(src16 + xp + 2);
824  }
825 
826  r = c->rgb2xyz.gamma.in[r >> 4];
827  g = c->rgb2xyz.gamma.in[g >> 4];
828  b = c->rgb2xyz.gamma.in[b >> 4];
829 
830  // convert from sRGBlinear to XYZlinear
831  x = c->rgb2xyz.mat[0][0] * r +
832  c->rgb2xyz.mat[0][1] * g +
833  c->rgb2xyz.mat[0][2] * b >> 12;
834  y = c->rgb2xyz.mat[1][0] * r +
835  c->rgb2xyz.mat[1][1] * g +
836  c->rgb2xyz.mat[1][2] * b >> 12;
837  z = c->rgb2xyz.mat[2][0] * r +
838  c->rgb2xyz.mat[2][1] * g +
839  c->rgb2xyz.mat[2][2] * b >> 12;
840 
841  // limit values to 16-bit depth
842  x = av_clip_uint16(x);
843  y = av_clip_uint16(y);
844  z = av_clip_uint16(z);
845 
846  // convert from XYZlinear to X'Y'Z' and scale from 12bit to 16bit
847  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
848  AV_WB16(dst16 + xp + 0, c->rgb2xyz.gamma.out[x] << 4);
849  AV_WB16(dst16 + xp + 1, c->rgb2xyz.gamma.out[y] << 4);
850  AV_WB16(dst16 + xp + 2, c->rgb2xyz.gamma.out[z] << 4);
851  } else {
852  AV_WL16(dst16 + xp + 0, c->rgb2xyz.gamma.out[x] << 4);
853  AV_WL16(dst16 + xp + 1, c->rgb2xyz.gamma.out[y] << 4);
854  AV_WL16(dst16 + xp + 2, c->rgb2xyz.gamma.out[z] << 4);
855  }
856  }
857 
858  src += src_stride;
859  dst += dst_stride;
860  }
861 }
862 
864 {
865  c->xyz12Torgb48 = xyz12Torgb48_c;
866  c->rgb48Toxyz12 = rgb48Toxyz12_c;
867 
868 #if ARCH_AARCH64
870 #endif
871 }
872 
/**
 * Rebuild the 256-entry palette lookup tables.
 *
 * For every palette index i, derive 8-bit R/G/B/A according to the source
 * format's packing, then fill:
 *   - c->pal_yuv[i]: the YUV(+A) equivalent, using the context's RGB->YUV
 *     coefficient table, and
 *   - c->pal_rgb[i]: the value repacked into the destination format's
 *     channel order.
 *
 * @param c   scaler context (source format selects how 'i' is decoded)
 * @param pal 256 ARGB palette entries; only read for AV_PIX_FMT_PAL8
 */
void ff_update_palette(SwsInternal *c, const uint32_t *pal)
{
    uint32_t *rgb2yuv = c->input_rgb2yuv_table;

    /* RGB->YUV coefficients, fixed point with RGB2YUV_SHIFT fractional bits */
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];

    for (int i = 0; i < 256; i++) {
        int r, g, b, y, u, v, a = 0xff;
        if (c->opts.src_format == AV_PIX_FMT_PAL8) {
            /* real palette: entries are packed ARGB */
            uint32_t p = pal[i];
            a = (p >> 24) & 0xFF;
            r = (p >> 16) & 0xFF;
            g = (p >>  8) & 0xFF;
            b =  p        & 0xFF;
        } else if (c->opts.src_format == AV_PIX_FMT_RGB8) {
            /* pseudo palette: i is 3:3:2 RGB */
            r = ( i >> 5     ) * 36;
            g = ((i >> 2) & 7) * 36;
            b = ( i       & 3) * 85;
        } else if (c->opts.src_format == AV_PIX_FMT_BGR8) {
            /* 2:3:3 BGR */
            b = ( i >> 6     ) * 85;
            g = ((i >> 3) & 7) * 36;
            r = ( i       & 7) * 36;
        } else if (c->opts.src_format == AV_PIX_FMT_RGB4_BYTE) {
            /* 1:2:1 RGB */
            r = ( i >> 3     ) * 255;
            g = ((i >> 1) & 3) * 85;
            b = ( i       & 1) * 255;
        } else if (c->opts.src_format == AV_PIX_FMT_GRAY8 || c->opts.src_format == AV_PIX_FMT_GRAY8A) {
            r = g = b = i;
        } else {
            /* 1:2:1 BGR */
            av_assert1(c->opts.src_format == AV_PIX_FMT_BGR4_BYTE);
            b = ( i >> 3     ) * 255;
            g = ((i >> 1) & 3) * 85;
            r = ( i       & 1) * 255;
        }

        /* rounding biases: 33/2 for luma (includes the +16 offset),
         * 257/2 for chroma (includes the +128 offset) */
        y = av_clip_uint8((ry * r + gy * g + by * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
        u = av_clip_uint8((ru * r + gu * g + bu * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
        v = av_clip_uint8((rv * r + gv * g + bv * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);

        c->pal_yuv[i]= y + (u<<8) + (v<<16) + ((unsigned)a<<24);

        /* repack R/G/B/A into the destination's byte order */
        switch (c->opts.dst_format) {
        case AV_PIX_FMT_BGR32:
#if !HAVE_BIGENDIAN
        case AV_PIX_FMT_RGB24:
#endif
            c->pal_rgb[i]= r + (g<<8) + (b<<16) + ((unsigned)a<<24);
            break;
        case AV_PIX_FMT_BGR32_1:
#if HAVE_BIGENDIAN
        case AV_PIX_FMT_BGR24:
#endif
            c->pal_rgb[i]= a + (r<<8) + (g<<16) + ((unsigned)b<<24);
            break;
        case AV_PIX_FMT_RGB32_1:
#if HAVE_BIGENDIAN
        case AV_PIX_FMT_RGB24:
#endif
            c->pal_rgb[i]= a + (b<<8) + (g<<16) + ((unsigned)r<<24);
            break;
        case AV_PIX_FMT_GBRP:
        case AV_PIX_FMT_GBRAP:
#if HAVE_BIGENDIAN
            c->pal_rgb[i]= a + (r<<8) + (b<<16) + ((unsigned)g<<24);
#else
            c->pal_rgb[i]= g + (b<<8) + (r<<16) + ((unsigned)a<<24);
#endif
            break;
        case AV_PIX_FMT_RGB32:
#if !HAVE_BIGENDIAN
        case AV_PIX_FMT_BGR24:
#endif
        default:
            c->pal_rgb[i]= b + (g<<8) + (r<<16) + ((unsigned)a<<24);
        }
    }
}
952 
953 static int scale_internal(SwsContext *sws,
954  const uint8_t * const srcSlice[], const int srcStride[],
955  int srcSliceY, int srcSliceH,
956  uint8_t *const dstSlice[], const int dstStride[],
957  int dstSliceY, int dstSliceH);
958 
960  const uint8_t * const srcSlice[], const int srcStride[],
961  int srcSliceY, int srcSliceH,
962  uint8_t * const dstSlice[], const int dstStride[],
963  int dstSliceY, int dstSliceH)
964 {
965  int ret = scale_internal(c->cascaded_context[0],
966  srcSlice, srcStride, srcSliceY, srcSliceH,
967  c->cascaded_tmp[0], c->cascaded_tmpStride[0], 0, c->opts.src_h);
968 
969  if (ret < 0)
970  return ret;
971 
972  if (c->cascaded_context[2])
973  ret = scale_internal(c->cascaded_context[1], (const uint8_t * const *)c->cascaded_tmp[0],
974  c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
975  c->cascaded_tmp[1], c->cascaded_tmpStride[1], 0, c->opts.dst_h);
976  else
977  ret = scale_internal(c->cascaded_context[1], (const uint8_t * const *)c->cascaded_tmp[0],
978  c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
979  dstSlice, dstStride, dstSliceY, dstSliceH);
980 
981  if (ret < 0)
982  return ret;
983 
984  if (c->cascaded_context[2]) {
985  const int dstY1 = sws_internal(c->cascaded_context[1])->dstY;
986  ret = scale_internal(c->cascaded_context[2], (const uint8_t * const *)c->cascaded_tmp[1],
987  c->cascaded_tmpStride[1], dstY1 - ret, dstY1,
988  dstSlice, dstStride, dstSliceY, dstSliceH);
989  }
990  return ret;
991 }
992 
994  const uint8_t * const srcSlice[], const int srcStride[],
995  int srcSliceY, int srcSliceH,
996  uint8_t * const dstSlice[], const int dstStride[],
997  int dstSliceY, int dstSliceH)
998 {
999  const int dstH0 = c->cascaded_context[0]->dst_h;
1000  int ret = scale_internal(c->cascaded_context[0],
1001  srcSlice, srcStride, srcSliceY, srcSliceH,
1002  c->cascaded_tmp[0], c->cascaded_tmpStride[0],
1003  0, dstH0);
1004  if (ret < 0)
1005  return ret;
1006  ret = scale_internal(c->cascaded_context[1],
1007  (const uint8_t * const * )c->cascaded_tmp[0], c->cascaded_tmpStride[0],
1008  0, dstH0, dstSlice, dstStride, dstSliceY, dstSliceH);
1009  return ret;
1010 }
1011 
/**
 * Core slice-scaling routine backing sws_scale(), sws_receive_slice() and
 * the slice-threading worker.
 *
 * Validates slice geometry, optionally routes the job through the gamma or
 * cascaded sub-context paths, prepares source/destination pointers (alpha
 * filling, XYZ->RGB conversion, bottom-to-top flipping) and finally invokes
 * either an unscaled converter or ff_swscale().
 *
 * @param srcSlice  input plane pointers for the slice
 * @param srcStride input strides in bytes (per plane)
 * @param srcSliceY first input row of this slice
 * @param srcSliceH number of input rows in this slice
 * @param dstSlice  output plane pointers
 * @param dstStride output strides in bytes (per plane)
 * @param dstSliceY first output row to produce (dst-slice scaling API)
 * @param dstSliceH number of output rows to produce
 * @return negative AVERROR on failure; on success, the value returned by the
 *         conversion backend (used below as an output line count — see the
 *         XYZ post-pass)
 */
static int scale_internal(SwsContext *sws,
                          const uint8_t * const srcSlice[], const int srcStride[],
                          int srcSliceY, int srcSliceH,
                          uint8_t *const dstSlice[], const int dstStride[],
                          int dstSliceY, int dstSliceH)
{
    SwsInternal *c = sws_internal(sws);
    /* scale_dst: caller requested only a sub-range of the output frame */
    const int scale_dst = dstSliceY > 0 || dstSliceH < sws->dst_h;
    const int frame_start = scale_dst || !c->sliceDir;
    int i, ret;
    const uint8_t *src2[4];
    uint8_t *dst2[4];
    /* slices must be aligned to the vertical macro-pixel height
     * (2 for Bayer formats, otherwise the chroma subsampling factor) */
    int macro_height_src = isBayer(sws->src_format) ? 2 : (1 << c->chrSrcVSubSample);
    int macro_height_dst = isBayer(sws->dst_format) ? 2 : (1 << c->chrDstVSubSample);
    // copy strides, so they can safely be modified
    int srcStride2[4];
    int dstStride2[4];
    int srcSliceY_internal = srcSliceY;

    if (!srcStride || !dstStride || !dstSlice || !srcSlice) {
        av_log(c, AV_LOG_ERROR, "One of the input parameters to sws_scale() is NULL, please check the calling code\n");
        return AVERROR(EINVAL);
    }

    /* input slice must start on a macro-row boundary, end either on one or
     * at the bottom of the frame, and stay inside the frame */
    if ((srcSliceY & (macro_height_src - 1)) ||
        ((srcSliceH & (macro_height_src - 1)) && srcSliceY + srcSliceH != sws->src_h) ||
        srcSliceY + srcSliceH > sws->src_h ||
        (isBayer(sws->src_format) && srcSliceH <= 1)) {
        av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", srcSliceY, srcSliceH);
        return AVERROR(EINVAL);
    }

    /* same constraints for the requested output slice */
    if ((dstSliceY & (macro_height_dst - 1)) ||
        ((dstSliceH & (macro_height_dst - 1)) && dstSliceY + dstSliceH != sws->dst_h) ||
        dstSliceY + dstSliceH > sws->dst_h) {
        av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", dstSliceY, dstSliceH);
        return AVERROR(EINVAL);
    }

    if (!check_image_pointers(srcSlice, sws->src_format, srcStride)) {
        av_log(c, AV_LOG_ERROR, "bad src image pointers\n");
        return AVERROR(EINVAL);
    }
    if (!check_image_pointers((const uint8_t* const*)dstSlice, sws->dst_format, dstStride)) {
        av_log(c, AV_LOG_ERROR, "bad dst image pointers\n");
        return AVERROR(EINVAL);
    }

    // do not mess up sliceDir if we have a "trailing" 0-size slice
    if (srcSliceH == 0)
        return 0;

    /* gamma-corrected scaling goes through its own cascade */
    if (sws->gamma_flag && c->cascaded_context[0])
        return scale_gamma(c, srcSlice, srcStride, srcSliceY, srcSliceH,
                           dstSlice, dstStride, dstSliceY, dstSliceH);

    /* a plain cascade only handles whole-frame input */
    if (c->cascaded_context[0] && srcSliceY == 0 && srcSliceH == c->cascaded_context[0]->src_h)
        return scale_cascaded(c, srcSlice, srcStride, srcSliceY, srcSliceH,
                              dstSlice, dstStride, dstSliceY, dstSliceH);

    /* bitexact error-diffusion dithering: clear accumulated error at the
     * start of each frame for reproducible output */
    if (!srcSliceY && (sws->flags & SWS_BITEXACT) && sws->dither == SWS_DITHER_ED && c->dither_error[0])
        for (i = 0; i < 4; i++)
            memset(c->dither_error[i], 0, sizeof(c->dither_error[0][0]) * (sws->dst_w+2));

    /* paletted input: plane 1 carries the palette */
    if (usePal(sws->src_format))
        ff_update_palette(c, (const uint32_t *)srcSlice[1]);

    memcpy(src2, srcSlice, sizeof(src2));
    memcpy(dst2, dstSlice, sizeof(dst2));
    memcpy(srcStride2, srcStride, sizeof(srcStride2));
    memcpy(dstStride2, dstStride, sizeof(dstStride2));

    if (frame_start && !scale_dst) {
        if (srcSliceY != 0 && srcSliceY + srcSliceH != sws->src_h) {
            av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
            return AVERROR(EINVAL);
        }

        /* first slice of a frame decides the slice direction */
        c->sliceDir = (srcSliceY == 0) ? 1 : -1;
    } else if (scale_dst)
        c->sliceDir = 1;

    /* RGB input with an unused alpha byte but alpha-carrying output:
     * copy into a scratch buffer and force the alpha channel to opaque */
    if (c->src0Alpha && !c->dst0Alpha && isALPHA(sws->dst_format)) {
        uint8_t *base;
        int x,y;

        av_fast_malloc(&c->rgb0_scratch, &c->rgb0_scratch_allocated,
                       FFABS(srcStride[0]) * srcSliceH + 32);
        if (!c->rgb0_scratch)
            return AVERROR(ENOMEM);

        /* negative stride: point base at the last row so row offsets work */
        base = srcStride[0] < 0 ? c->rgb0_scratch - srcStride[0] * (srcSliceH-1) :
                                  c->rgb0_scratch;
        for (y=0; y<srcSliceH; y++){
            memcpy(base + srcStride[0]*y, src2[0] + srcStride[0]*y, 4*sws->src_w);
            for (x=c->src0Alpha-1; x<4*sws->src_w; x+=4) {
                base[ srcStride[0]*y + x] = 0xFF;
            }
        }
        src2[0] = base;
    }

    /* XYZ input (unless a same-size XYZ->XYZ pass handles it in one go):
     * pre-convert to RGB48 into a scratch buffer */
    if (c->srcXYZ && !(c->dstXYZ && sws->src_w==sws->dst_w && sws->src_h==sws->dst_h)) {
        uint8_t *base;

        av_fast_malloc(&c->xyz_scratch, &c->xyz_scratch_allocated,
                       FFABS(srcStride[0]) * srcSliceH + 32);
        if (!c->xyz_scratch)
            return AVERROR(ENOMEM);

        base = srcStride[0] < 0 ? c->xyz_scratch - srcStride[0] * (srcSliceH-1) :
                                  c->xyz_scratch;

        c->xyz12Torgb48(c, base, srcStride[0], src2[0], srcStride[0], sws->src_w, srcSliceH);
        src2[0] = base;
    }

    if (c->sliceDir != 1) {
        // slices go from bottom to top => we flip the image internally
        for (i=0; i<4; i++) {
            srcStride2[i] *= -1;
            dstStride2[i] *= -1;
        }

        /* point the plane pointers at the last row of each plane */
        src2[0] += (srcSliceH - 1) * srcStride[0];
        if (!usePal(sws->src_format))
            src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
        src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
        src2[3] += (srcSliceH - 1) * srcStride[3];
        dst2[0] += ( sws->dst_h                         - 1) * dstStride[0];
        dst2[1] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[1];
        dst2[2] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[2];
        dst2[3] += ( sws->dst_h                         - 1) * dstStride[3];

        srcSliceY_internal = sws->src_h-srcSliceY-srcSliceH;
    }
    reset_ptr(src2, sws->src_format);
    reset_ptr((void*)dst2, sws->dst_format);

    if (c->convert_unscaled) {
        int offset  = srcSliceY_internal;
        int slice_h = srcSliceH;

        // for dst slice scaling, offset the pointers to match the unscaled API
        if (scale_dst) {
            av_assert0(offset == 0);
            for (i = 0; i < 4 && src2[i]; i++) {
                if (!src2[i] || (i > 0 && usePal(sws->src_format)))
                    break;
                src2[i] += (dstSliceY >> ((i == 1 || i == 2) ? c->chrSrcVSubSample : 0)) * srcStride2[i];
            }

            for (i = 0; i < 4 && dst2[i]; i++) {
                if (!dst2[i] || (i > 0 && usePal(sws->dst_format)))
                    break;
                dst2[i] -= (dstSliceY >> ((i == 1 || i == 2) ? c->chrDstVSubSample : 0)) * dstStride2[i];
            }
            offset  = dstSliceY;
            slice_h = dstSliceH;
        }

        ret = c->convert_unscaled(c, src2, srcStride2, offset, slice_h,
                                  dst2, dstStride2);
        /* undo the dst offset so the XYZ post-pass below sees the real start */
        if (scale_dst)
            dst2[0] += dstSliceY * dstStride2[0];
    } else {
        ret = ff_swscale(c, src2, srcStride2, srcSliceY_internal, srcSliceH,
                         dst2, dstStride2, dstSliceY, dstSliceH);
    }

    /* XYZ output: convert the freshly written RGB48 lines back to XYZ12 */
    if (c->dstXYZ && !(c->srcXYZ && sws->src_w==sws->dst_w && sws->src_h==sws->dst_h)) {
        uint8_t *dst;

        if (scale_dst) {
            dst = dst2[0];
        } else {
            /* ret output lines ending at dstY were just produced */
            int dstY = c->dstY ? c->dstY : srcSliceY + srcSliceH;

            av_assert0(dstY >= ret);
            av_assert0(ret >= 0);
            av_assert0(sws->dst_h >= dstY);
            dst = dst2[0] + (dstY - ret) * dstStride2[0];
        }

        /* replace on the same data */
        c->rgb48Toxyz12(c, dst, dstStride2[0], dst, dstStride2[0], sws->dst_w, ret);
    }

    /* reset slice direction at end of frame */
    if ((srcSliceY_internal + srcSliceH == sws->src_h) || scale_dst)
        c->sliceDir = 0;

    return ret;
}
1206 
1208 {
1209  SwsInternal *c = sws_internal(sws);
1210  if (!c->is_legacy_init)
1211  return;
1212  av_frame_unref(c->frame_src);
1213  av_frame_unref(c->frame_dst);
1214  c->src_ranges.nb_ranges = 0;
1215 }
1216 
1218 {
1219  SwsInternal *c = sws_internal(sws);
1220  FFFramePool *pool = &c->frame_pool;
1221 
1222  av_assert0(!frame->hw_frames_ctx);
1223  const int nb_planes = av_pix_fmt_count_planes(frame->format);
1224  for (int i = 0; i < nb_planes; i++) {
1225  frame->linesize[i] = pool->linesize[i];
1226  frame->buf[i] = av_buffer_pool_get(pool->pools[i]);
1227  if (!frame->buf[i])
1228  return AVERROR(ENOMEM);
1229  frame->data[i] = frame->buf[i]->data;
1230  }
1231 
1232  return 0;
1233 }
1234 
1236 {
1237  SwsInternal *c = sws_internal(sws);
1238  int ret, allocated = 0;
1239  if (!c->is_legacy_init)
1240  return AVERROR(EINVAL);
1241 
1242  ret = av_frame_ref(c->frame_src, src);
1243  if (ret < 0)
1244  return ret;
1245 
1246  if (!dst->buf[0]) {
1247  dst->width = sws->dst_w;
1248  dst->height = sws->dst_h;
1249  dst->format = sws->dst_format;
1250 
1251  ret = av_frame_get_buffer(dst, 0);
1252  if (ret < 0)
1253  return ret;
1254  allocated = 1;
1255  }
1256 
1257  ret = av_frame_ref(c->frame_dst, dst);
1258  if (ret < 0) {
1259  if (allocated)
1261 
1262  return ret;
1263  }
1264 
1265  return 0;
1266 }
1267 
1268 int sws_send_slice(SwsContext *sws, unsigned int slice_start,
1269  unsigned int slice_height)
1270 {
1271  SwsInternal *c = sws_internal(sws);
1272  int ret;
1273  if (!c->is_legacy_init)
1274  return AVERROR(EINVAL);
1275 
1276  ret = ff_range_add(&c->src_ranges, slice_start, slice_height);
1277  if (ret < 0)
1278  return ret;
1279 
1280  return 0;
1281 }
1282 
1283 unsigned int sws_receive_slice_alignment(const SwsContext *sws)
1284 {
1285  SwsInternal *c = sws_internal(sws);
1286  if (c->slice_ctx)
1287  return sws_internal(c->slice_ctx[0])->dst_slice_align;
1288 
1289  return c->dst_slice_align;
1290 }
1291 
1293  unsigned int slice_height)
1294 {
1295  SwsInternal *c = sws_internal(sws);
1296  unsigned int align = sws_receive_slice_alignment(sws);
1297  uint8_t *dst[4];
1298  if (!c->is_legacy_init)
1299  return AVERROR(EINVAL);
1300 
1301  /* wait until complete input has been received */
1302  if (!(c->src_ranges.nb_ranges == 1 &&
1303  c->src_ranges.ranges[0].start == 0 &&
1304  c->src_ranges.ranges[0].len == sws->src_h))
1305  return AVERROR(EAGAIN);
1306 
1307  if ((slice_start > 0 || slice_height < sws->dst_h) &&
1308  (slice_start % align || slice_height % align)) {
1310  "Incorrectly aligned output: %u/%u not multiples of %u\n",
1311  slice_start, slice_height, align);
1312  return AVERROR(EINVAL);
1313  }
1314 
1315  if (c->slicethread) {
1316  int nb_jobs = c->nb_slice_ctx;
1317  int ret = 0;
1318 
1319  if (c->slice_ctx[0]->dither == SWS_DITHER_ED)
1320  nb_jobs = 1;
1321 
1322  c->dst_slice_start = slice_start;
1323  c->dst_slice_height = slice_height;
1324 
1325  avpriv_slicethread_execute(c->slicethread, nb_jobs, 0);
1326 
1327  for (int i = 0; i < c->nb_slice_ctx; i++) {
1328  if (c->slice_err[i] < 0) {
1329  ret = c->slice_err[i];
1330  break;
1331  }
1332  }
1333 
1334  memset(c->slice_err, 0, c->nb_slice_ctx * sizeof(*c->slice_err));
1335 
1336  return ret;
1337  }
1338 
1339  for (int i = 0; i < FF_ARRAY_ELEMS(dst); i++) {
1340  ptrdiff_t offset = c->frame_dst->linesize[i] * (ptrdiff_t)(slice_start >> c->chrDstVSubSample);
1341  dst[i] = FF_PTR_ADD(c->frame_dst->data[i], offset);
1342  }
1343 
1344  return scale_internal(sws, (const uint8_t * const *)c->frame_src->data,
1345  c->frame_src->linesize, 0, sws->src_h,
1346  dst, c->frame_dst->linesize, slice_start, slice_height);
1347 }
1348 
1349 /* Subset of av_frame_ref() that only references (video) data buffers */
1350 static int frame_ref(AVFrame *dst, const AVFrame *src)
1351 {
1352  /* ref the buffers */
1353  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
1354  if (!src->buf[i])
1355  break;
1356  dst->buf[i] = av_buffer_ref(src->buf[i]);
1357  if (!dst->buf[i])
1358  return AVERROR(ENOMEM);
1359  }
1360 
1361  memcpy(dst->data, src->data, sizeof(src->data));
1362  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
1363  return 0;
1364 }
1365 
1367 {
1368  int ret, allocated = 0;
1369  SwsInternal *c = sws_internal(sws);
1370  if (!src || !dst)
1371  return AVERROR(EINVAL);
1372 
1373  if (c->is_legacy_init) {
1374  /* Context has been initialized with explicit values, fall back to
1375  * legacy API behavior. */
1376  ret = sws_frame_start(sws, dst, src);
1377  if (ret < 0)
1378  return ret;
1379 
1380  ret = sws_send_slice(sws, 0, src->height);
1381  if (ret >= 0)
1382  ret = sws_receive_slice(sws, 0, dst->height);
1383 
1384  sws_frame_end(sws);
1385 
1386  return ret;
1387  }
1388 
1389  ret = sws_frame_setup(sws, dst, src);
1390  if (ret < 0)
1391  return ret;
1392 
1393  if (!src->data[0])
1394  return 0;
1395 
1396  const SwsGraph *top = c->graph[FIELD_TOP];
1397  const SwsGraph *bot = c->graph[FIELD_BOTTOM];
1398  if (dst->data[0]) /* user-provided buffers */
1399  goto process_frame;
1400 
1401  /* Sanity */
1402  memset(dst->buf, 0, sizeof(dst->buf));
1403  memset(dst->data, 0, sizeof(dst->data));
1404  memset(dst->linesize, 0, sizeof(dst->linesize));
1405  dst->extended_data = dst->data;
1406 
1407  if (src->buf[0] && top->noop && (!bot || bot->noop))
1408  return frame_ref(dst, src);
1409 
1410  ret = frame_alloc_buffers(sws, dst);
1411  if (ret < 0)
1412  return ret;
1413  allocated = 1;
1414 
1416  for (int field = 0; field < (bot ? 2 : 1); field++) {
1417  ret = ff_sws_graph_run(c->graph[field], dst, src);
1418  if (ret < 0) {
1419  if (allocated)
1421  return ret;
1422  }
1423  }
1424 
1425  return 0;
1426 }
1427 
1429 {
1430 #define VALIDATE(field, min, max) \
1431  if (ctx->field < min || ctx->field > max) { \
1432  av_log(ctx, AV_LOG_ERROR, "'%s' (%d) out of range [%d, %d]\n", \
1433  #field, (int) ctx->field, min, max); \
1434  return AVERROR(EINVAL); \
1435  }
1436 
1437  VALIDATE(threads, 0, SWS_MAX_THREADS);
1438  VALIDATE(dither, 0, SWS_DITHER_NB - 1)
1439  VALIDATE(alpha_blend, 0, SWS_ALPHA_BLEND_NB - 1)
1440  return 0;
1441 }
1442 
1444 {
1446  const char *err_msg;
1447  int ret;
1448 
1449  if (!src || !dst)
1450  return AVERROR(EINVAL);
1451  if ((ret = validate_params(ctx)) < 0)
1452  return ret;
1453 
1454  /* For now, if a single frame has a context, then both need a context */
1455  if (!!src->hw_frames_ctx != !!dst->hw_frames_ctx) {
1456  return AVERROR(ENOTSUP);
1457  } else if (!!src->hw_frames_ctx) {
1458  /* Both hardware frames must already be allocated */
1459  if (!src->data[0] || !dst->data[0])
1460  return AVERROR(EINVAL);
1461 
1462  AVHWFramesContext *src_hwfc, *dst_hwfc;
1463  src_hwfc = (AVHWFramesContext *)src->hw_frames_ctx->data;
1464  dst_hwfc = (AVHWFramesContext *)dst->hw_frames_ctx->data;
1465 
1466  /* Both frames must live on the same device */
1467  if (src_hwfc->device_ref->data != dst_hwfc->device_ref->data)
1468  return AVERROR(EINVAL);
1469 
1470  /* Only Vulkan devices are supported */
1471  AVHWDeviceContext *dev_ctx;
1472  dev_ctx = (AVHWDeviceContext *)src_hwfc->device_ref->data;
1473  if (dev_ctx->type != AV_HWDEVICE_TYPE_VULKAN)
1474  return AVERROR(ENOTSUP);
1475 
1476 #if CONFIG_UNSTABLE && CONFIG_VULKAN
1477  ret = ff_sws_vk_init(ctx, src_hwfc->device_ref);
1478  if (ret < 0)
1479  return ret;
1480 #endif
1481  }
1482 
1483  int dst_width = dst->width;
1484  for (int field = 0; field < 2; field++) {
1485  SwsFormat src_fmt = ff_fmt_from_frame(src, field);
1486  SwsFormat dst_fmt = ff_fmt_from_frame(dst, field);
1487  int src_ok, dst_ok;
1488 
1489  if ((src->flags ^ dst->flags) & AV_FRAME_FLAG_INTERLACED) {
1490  err_msg = "Cannot convert interlaced to progressive frames or vice versa.\n";
1491  ret = AVERROR(EINVAL);
1492  goto fail;
1493  }
1494 
1495  src_ok = ff_test_fmt(&src_fmt, 0);
1496  dst_ok = ff_test_fmt(&dst_fmt, 1);
1497  if ((!src_ok || !dst_ok) && !ff_props_equal(&src_fmt, &dst_fmt)) {
1498  err_msg = src_ok ? "Unsupported output" : "Unsupported input";
1499  ret = AVERROR(ENOTSUP);
1500  goto fail;
1501  }
1502 
1503  ret = ff_sws_graph_reinit(ctx, &dst_fmt, &src_fmt, field, &s->graph[field]);
1504  if (ret < 0) {
1505  err_msg = "Failed initializing scaling graph";
1506  goto fail;
1507  }
1508 
1509  const SwsGraph *graph = s->graph[field];
1510  if (graph->incomplete && ctx->flags & SWS_STRICT) {
1511  err_msg = "Incomplete scaling graph";
1512  ret = AVERROR(EINVAL);
1513  goto fail;
1514  }
1515 
1516  if (!graph->noop) {
1517  av_assert0(graph->num_passes);
1518  const SwsPass *last_pass = graph->passes[graph->num_passes - 1];
1519  const int aligned_w = ff_sws_pass_aligned_width(last_pass, dst->width);
1520  dst_width = FFMAX(dst_width, aligned_w);
1521  }
1522 
1523  if (!src_fmt.interlaced) {
1524  ff_sws_graph_free(&s->graph[FIELD_BOTTOM]);
1525  break;
1526  }
1527 
1528  continue;
1529 
1530  fail:
1531  av_log(ctx, AV_LOG_ERROR, "%s (%s): fmt:%s csp:%s prim:%s trc:%s ->"
1532  " fmt:%s csp:%s prim:%s trc:%s\n",
1533  err_msg, av_err2str(ret),
1538 
1539  for (int i = 0; i < FF_ARRAY_ELEMS(s->graph); i++)
1540  ff_sws_graph_free(&s->graph[i]);
1541 
1542  return ret;
1543  }
1544 
1545  if (!dst->hw_frames_ctx) {
1546  ret = ff_frame_pool_video_reinit(&s->frame_pool, dst_width, dst->height,
1547  dst->format, av_cpu_max_align());
1548  if (ret < 0)
1549  return ret;
1550  }
1551 
1552  return 0;
1553 }
1554 
1555 /**
1556  * swscale wrapper, so we don't need to export the SwsContext.
1557  * Assumes planar YUV to be in YUV order instead of YVU.
1558  */
1560  const uint8_t * const srcSlice[],
1561  const int srcStride[], int srcSliceY,
1562  int srcSliceH, uint8_t *const dst[],
1563  const int dstStride[])
1564 {
1565  SwsInternal *c = sws_internal(sws);
1566  if (!c->is_legacy_init)
1567  return AVERROR(EINVAL);
1568 
1569  if (c->nb_slice_ctx) {
1570  sws = c->slice_ctx[0];
1571  c = sws_internal(sws);
1572  }
1573 
1574  return scale_internal(sws, srcSlice, srcStride, srcSliceY, srcSliceH,
1575  dst, dstStride, 0, sws->dst_h);
1576 }
1577 
1578 void ff_sws_slice_worker(void *priv, int jobnr, int threadnr,
1579  int nb_jobs, int nb_threads)
1580 {
1581  SwsInternal *parent = priv;
1582  SwsContext *sws = parent->slice_ctx[threadnr];
1583  SwsInternal *c = sws_internal(sws);
1584 
1585  const int slice_height = FFALIGN(FFMAX((parent->dst_slice_height + nb_jobs - 1) / nb_jobs, 1),
1586  c->dst_slice_align);
1587  const int slice_start = jobnr * slice_height;
1588  const int slice_end = FFMIN((jobnr + 1) * slice_height, parent->dst_slice_height);
1589  int err = 0;
1590 
1591  if (slice_end > slice_start) {
1592  uint8_t *dst[4] = { NULL };
1593 
1594  for (int i = 0; i < FF_ARRAY_ELEMS(dst) && parent->frame_dst->data[i]; i++) {
1595  const int vshift = (i == 1 || i == 2) ? c->chrDstVSubSample : 0;
1596  const ptrdiff_t offset = parent->frame_dst->linesize[i] *
1597  (ptrdiff_t)((slice_start + parent->dst_slice_start) >> vshift);
1598 
1599  dst[i] = parent->frame_dst->data[i] + offset;
1600  }
1601 
1602  err = scale_internal(sws, (const uint8_t * const *)parent->frame_src->data,
1603  parent->frame_src->linesize, 0, sws->src_h,
1604  dst, parent->frame_dst->linesize,
1606  }
1607 
1608  parent->slice_err[threadnr] = err;
1609 }
sws_init_swscale
static av_cold void sws_init_swscale(SwsInternal *c)
Definition: swscale.c:662
isBayer
static av_always_inline int isBayer(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:860
flags
const SwsFlags flags[]
Definition: swscale.c:72
ff_test_fmt
int ff_test_fmt(const SwsFormat *fmt, int output)
Definition: format.c:613
_dst
uint8_t * _dst
Definition: dsp.h:56
yuv2planar1_fn
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
Definition: swscale_internal.h:126
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
process
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:155
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
SwsPlane::sliceH
int sliceH
number of lines
Definition: swscale_internal.h:1109
SwsPass
Represents a single filter pass in the scaling graph.
Definition: graph.h:75
ff_props_equal
static int ff_props_equal(const SwsFormat *fmt1, const SwsFormat *fmt2)
Definition: format.h:124
isPacked
static av_always_inline int isPacked(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:905
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
SwsGraph::passes
SwsPass ** passes
Sorted sequence of filter passes to apply.
Definition: graph.h:131
mem_internal.h
ff_fmt_from_frame
SwsFormat ff_fmt_from_frame(const AVFrame *frame, int field)
This function also sanitizes and strips the input data, removing irrelevant fields for certain format...
Definition: format.c:348
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:513
SwsFormat::interlaced
int interlaced
Definition: format.h:79
lumRangeToJpeg16_c
static void lumRangeToJpeg16_c(int16_t *_dst, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:237
lumRangeToJpeg_c
static void lumRangeToJpeg_c(int16_t *dst, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:189
ff_sws_init_range_convert_aarch64
av_cold void ff_sws_init_range_convert_aarch64(SwsInternal *c)
Definition: swscale.c:314
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1885
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
SwsContext::src_w
int src_w
Deprecated frame property overrides, for the legacy API only.
Definition: swscale.h:253
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
process_frame
static av_always_inline int process_frame(AVTextFormatContext *tfc, InputFile *ifile, AVFrame *frame, const AVPacket *pkt, int *packet_new)
Definition: ffprobe.c:1560
ff_sws_graph_reinit
int ff_sws_graph_reinit(SwsContext *ctx, const SwsFormat *dst, const SwsFormat *src, int field, SwsGraph **out_graph)
Wrapper around ff_sws_graph_create() that reuses the existing graph if the format is compatible.
Definition: graph.c:938
ff_rotate_slice
int ff_rotate_slice(SwsSlice *s, int lum, int chr)
Definition: slice.c:120
int64_t
long long int64_t
Definition: coverity.c:34
AV_PIX_FMT_FLAG_FLOAT
#define AV_PIX_FMT_FLAG_FLOAT
The pixel format contains IEEE-754 floating point values.
Definition: pixdesc.h:158
SwsSlice::plane
SwsPlane plane[MAX_SLICE_PLANES]
color planes
Definition: swscale_internal.h:1127
avpriv_slicethread_execute
void avpriv_slicethread_execute(AVSliceThread *ctx, int nb_jobs, int execute_main)
Execute slice threading.
Definition: slicethread.c:270
ff_sws_init_range_convert_loongarch
av_cold void ff_sws_init_range_convert_loongarch(SwsInternal *c)
Definition: swscale_init_loongarch.c:27
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:434
pixdesc.h
RV_IDX
#define RV_IDX
Definition: swscale_internal.h:473
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
isGray
static av_always_inline int isGray(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:805
RU_IDX
#define RU_IDX
Definition: swscale_internal.h:470
SWS_BITEXACT
@ SWS_BITEXACT
Definition: swscale.h:157
b
#define b
Definition: input.c:42
SwsFilterDescriptor
Struct which holds all necessary data for processing a slice.
Definition: swscale_internal.h:1134
yuv2planeX
static void FUNC() yuv2planeX(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: swscale_ppc_template.c:84
data
const char data[16]
Definition: mxf.c:149
GV_IDX
#define GV_IDX
Definition: swscale_internal.h:474
AV_PIX_FMT_MONOWHITE
@ AV_PIX_FMT_MONOWHITE
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:82
rgb2yuv
static const char rgb2yuv[]
Definition: vf_scale_vulkan.c:86
BV_IDX
#define BV_IDX
Definition: swscale_internal.h:475
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:512
SwsContext::flags
unsigned flags
Bitmask of SWS_*.
Definition: swscale.h:219
base
uint8_t base
Definition: vp3data.h:128
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
sws_receive_slice
int sws_receive_slice(SwsContext *sws, unsigned int slice_start, unsigned int slice_height)
Request a horizontal slice of the output data to be written into the frame previously provided to sws...
Definition: swscale.c:1292
FIELD_TOP
@ FIELD_TOP
Definition: format.h:56
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
ff_sws_init_swscale_riscv
av_cold void ff_sws_init_swscale_riscv(SwsInternal *c)
Definition: swscale.c:74
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_get_cpu_flags
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
Definition: cpu.c:109
DEBUG_BUFFERS
#define DEBUG_BUFFERS(...)
Definition: swscale.c:259
bit_depth
static void bit_depth(AudioStatsContext *s, const uint64_t *const mask, uint8_t *depth)
Definition: af_astats.c:246
cpu_flags
static atomic_int cpu_flags
Definition: cpu.c:56
hScale16To15_c
static void hScale16To15_c(SwsInternal *c, int16_t *dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:99
_src
uint8_t ptrdiff_t const uint8_t * _src
Definition: dsp.h:56
SwsInternal::frame_dst
AVFrame * frame_dst
Definition: swscale_internal.h:376
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:455
AV_HWDEVICE_TYPE_VULKAN
@ AV_HWDEVICE_TYPE_VULKAN
Definition: hwcontext.h:39
SWS_FAST_BILINEAR
@ SWS_FAST_BILINEAR
Scaler selection options.
Definition: swscale.h:176
is16BPS
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:745
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
ff_sws_init_input_funcs
void ff_sws_init_input_funcs(SwsInternal *c, planar1_YV12_fn *lumToYV12, planar1_YV12_fn *alpToYV12, planar2_YV12_fn *chrToYV12, planarX_YV12_fn *readLumPlanar, planarX_YV12_fn *readAlpPlanar, planarX2_YV12_fn *readChrPlanar)
validate_params
static int validate_params(SwsContext *ctx)
Definition: swscale.c:1428
chrRangeToJpeg16_c
static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:211
FFFramePool
Frame pool.
Definition: framepool.h:32
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
Definition: mpeg12dec.c:1693
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3856
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
chrRangeFromJpeg_c
static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:177
SWS_MAX_THREADS
#define SWS_MAX_THREADS
Definition: swscale_internal.h:57
fail
#define fail()
Definition: checkasm.h:224
chrRangeFromJpeg16_c
static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:225
SwsInternal::frame_src
AVFrame * frame_src
Definition: swscale_internal.h:375
sws_frame_setup
int sws_frame_setup(SwsContext *ctx, const AVFrame *dst, const AVFrame *src)
Like sws_scale_frame, but without actually scaling.
Definition: swscale.c:1443
ff_sws_init_xyzdsp_aarch64
av_cold void ff_sws_init_xyzdsp_aarch64(SwsInternal *c)
Definition: swscale.c:339
val
static double val(void *priv, double ch)
Definition: aeval.c:77
SWS_ALPHA_BLEND_NB
@ SWS_ALPHA_BLEND_NB
Definition: swscale.h:92
isNBPS
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:759
init_range_convert_constants
static void init_range_convert_constants(SwsInternal *c)
Definition: swscale.c:591
SwsColor::trc
enum AVColorTransferCharacteristic trc
Definition: format.h:62
AVHWDeviceContext
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:63
AV_PIX_FMT_BGR8
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:90
avassert.h
hScale8To19_c
static void hScale8To19_c(SwsInternal *c, int16_t *_dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:144
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:111
yuv2packed2_fn
void(* yuv2packed2_fn)(SwsInternal *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scalin...
Definition: swscale_internal.h:232
SwsContext::dither
SwsDither dither
Dither mode.
Definition: swscale.h:235
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:390
SwsInternal::slice_err
int * slice_err
Definition: swscale_internal.h:344
emms_c
#define emms_c()
Definition: emms.h:89
ff_sws_vk_init
int ff_sws_vk_init(SwsContext *sws, AVBufferRef *dev_ref)
Definition: ops.c:44
intreadwrite.h
dither
static const uint16_t dither[8][8]
Definition: vf_gradfun.c:46
SwsInternal::slice_ctx
SwsContext ** slice_ctx
Definition: swscale_internal.h:343
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_update_palette
void ff_update_palette(SwsInternal *c, const uint32_t *pal)
Definition: swscale.c:873
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
ff_sws_init_swscale_arm
av_cold void ff_sws_init_swscale_arm(SwsInternal *c)
Definition: swscale.c:33
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1414
g
const char * g
Definition: vf_curves.c:128
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
ops.h
SwsSlice::width
int width
Slice line width.
Definition: swscale_internal.h:1121
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
VALIDATE
#define VALIDATE(field, min, max)
SwsGraph::num_passes
int num_passes
Definition: graph.h:132
ctx
static AVFormatContext * ctx
Definition: movenc.c:49
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
chrRangeToJpeg_c
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:163
ff_hcscale_fast_c
void ff_hcscale_fast_c(SwsInternal *c, int16_t *dst1, int16_t *dst2, int dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc)
Definition: hscale_fast_bilinear.c:38
frame_alloc_buffers
static int frame_alloc_buffers(SwsContext *sws, AVFrame *frame)
Definition: swscale.c:1217
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_sws_init_range_convert_riscv
av_cold void ff_sws_init_range_convert_riscv(SwsInternal *c)
Definition: swscale.c:29
FFFramePool::pools
AVBufferPool * pools[4]
Definition: framepool.h:52
GY_IDX
#define GY_IDX
Definition: swscale_internal.h:468
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:514
sws_frame_end
void sws_frame_end(SwsContext *sws)
Finish the scaling process for a pair of source/destination frames previously submitted with sws_fram...
Definition: swscale.c:1207
FIELD_BOTTOM
@ FIELD_BOTTOM
Definition: format.h:57
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_sws_init_range_convert_x86
av_cold void ff_sws_init_range_convert_x86(SwsInternal *c)
Definition: swscale.c:473
AV_PIX_FMT_GRAY8A
@ AV_PIX_FMT_GRAY8A
alias for AV_PIX_FMT_YA8
Definition: pixfmt.h:143
scale_internal
static int scale_internal(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:1012
fillPlane
static av_always_inline void fillPlane(uint8_t *plane, int stride, int width, int height, int y, uint8_t val)
Definition: swscale.c:58
NULL
#define NULL
Definition: coverity.c:32
rgb48Toxyz12_c
static void rgb48Toxyz12_c(const SwsInternal *c, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int w, int h)
Definition: swscale.c:804
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
SwsPlane::available_lines
int available_lines
max number of lines that can be hold by this plane
Definition: swscale_internal.h:1107
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
SwsContext::gamma_flag
int gamma_flag
Use gamma correct scaling.
Definition: swscale.h:245
AVHWFramesContext::device_ref
AVBufferRef * device_ref
A reference to the parent AVHWDeviceContext.
Definition: hwcontext.h:129
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
V
#define V
Definition: avdct.c:32
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
av_color_primaries_name
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:3790
av_cpu_max_align
size_t av_cpu_max_align(void)
Get the maximum data alignment that may be required by FFmpeg.
Definition: cpu.c:287
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
hScale8To15_c
static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:128
ff_sws_init_range_convert
av_cold void ff_sws_init_range_convert(SwsInternal *c)
Definition: swscale.c:626
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
hScale16To19_c
static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:69
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
SwsInternal::dstY
int dstY
Last destination vertical line output from last slice.
Definition: swscale_internal.h:458
ff_sws_init_xyzdsp
av_cold void ff_sws_init_xyzdsp(SwsInternal *c)
Definition: swscale.c:863
ff_sws_pass_aligned_width
int ff_sws_pass_aligned_width(const SwsPass *pass, int width)
Align width to the optimal size for pass.
Definition: graph.c:41
AV_PIX_FMT_BGR4_BYTE
@ AV_PIX_FMT_BGR4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:92
ff_range_add
int ff_range_add(RangeList *r, unsigned int start, unsigned int len)
Definition: utils.c:2432
attribute_align_arg
#define attribute_align_arg
Definition: internal.h:50
yuv2packedX_fn
void(* yuv2packedX_fn)(SwsInternal *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
Definition: swscale_internal.h:264
AV_CPU_FLAG_SSE2
#define AV_CPU_FLAG_SSE2
PIV SSE2 functions.
Definition: cpu.h:37
ff_sws_graph_free
void ff_sws_graph_free(SwsGraph **pgraph)
Uninitialize any state associate with this filter graph and free it.
Definition: graph.c:903
ff_sws_slice_worker
void ff_sws_slice_worker(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads)
Definition: swscale.c:1578
isBE
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:766
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
RY_IDX
#define RY_IDX
Definition: swscale_internal.h:467
ff_sws_init_swscale_loongarch
av_cold void ff_sws_init_swscale_loongarch(SwsInternal *c)
Definition: swscale_init_loongarch.c:62
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
fillPlane16
static void fillPlane16(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian)
Definition: swscale_internal.h:1067
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
usePal
static av_always_inline int usePal(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:936
cpu.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
isAnyRGB
static av_always_inline int isAnyRGB(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:874
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
SwsContext::src_h
int src_h
Width and height of the source frame.
Definition: swscale.h:253
SwsFormat
Definition: format.h:77
xyz12Torgb48_c
static void xyz12Torgb48_c(const SwsInternal *c, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int w, int h)
Definition: swscale.c:745
align
static const uint8_t *BS_FUNC() align(BSCTX *bc)
Skip bits to a byte boundary.
Definition: bitstream_template.h:419
is32BPS
static av_always_inline int is32BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:752
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:408
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
lumRangeFromJpeg_c
static void lumRangeFromJpeg_c(int16_t *dst, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:201
SWS_DITHER_NB
@ SWS_DITHER_NB
Definition: swscale.h:86
ff_sws_init_swscale_ppc
av_cold void ff_sws_init_swscale_ppc(SwsInternal *c)
Definition: swscale_altivec.c:233
SwsContext::dst_format
int dst_format
Destination pixel format.
Definition: swscale.h:256
fillPlane32
static void fillPlane32(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian, int is_float)
Definition: swscale_internal.h:1081
Y
#define Y
Definition: boxblur.h:37
yuv2anyX_fn
void(* yuv2anyX_fn)(SwsInternal *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
Definition: swscale_internal.h:298
BY_IDX
#define BY_IDX
Definition: swscale_internal.h:469
ff_sws_init_swscale_x86
av_cold void ff_sws_init_swscale_x86(SwsInternal *c)
Definition: swscale.c:489
scale_cascaded
static int scale_cascaded(SwsInternal *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:993
emms.h
SwsInternal::dst_slice_align
unsigned int dst_slice_align
Definition: swscale_internal.h:692
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
SwsFormat::format
enum AVPixelFormat format
Definition: format.h:80
sws_send_slice
int sws_send_slice(SwsContext *sws, unsigned int slice_start, unsigned int slice_height)
Indicate that a horizontal slice of input data is available in the source frame previously provided t...
Definition: swscale.c:1268
ff_sws_init_scale
void ff_sws_init_scale(SwsInternal *c)
Definition: swscale.c:697
src2
const pixel * src2
Definition: h264pred_template.c:421
common.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
check_image_pointers
static int check_image_pointers(const uint8_t *const data[4], enum AVPixelFormat pix_fmt, const int linesizes[4])
Definition: swscale.c:728
av_always_inline
#define av_always_inline
Definition: attributes.h:68
swscale_internal.h
yuv2interleavedX_fn
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
Definition: swscale_internal.h:162
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_FLAG_BE
#define AV_PIX_FMT_FLAG_BE
Pixel format is big-endian.
Definition: pixdesc.h:116
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
SwsContext::dst_h
int dst_h
Width and height of the destination frame.
Definition: swscale.h:254
ff_updateMMXDitherTables
void ff_updateMMXDitherTables(SwsInternal *c, int dstY)
AV_PIX_FMT_RGB4_BYTE
@ AV_PIX_FMT_RGB4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Definition: pixfmt.h:95
SwsSlice
Struct which defines a slice of an image to be scaled or an output for a scaled slice.
Definition: swscale_internal.h:1119
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:657
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: dec.c:844
ff_init_slice_from_src
int ff_init_slice_from_src(SwsSlice *s, uint8_t *const src[4], const int stride[4], int srcW, int lumY, int lumH, int chrY, int chrH, int relative)
Definition: slice.c:148
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:118
frame_ref
static int frame_ref(AVFrame *dst, const AVFrame *src)
Definition: swscale.c:1350
SWS_DITHER_ED
@ SWS_DITHER_ED
Definition: swscale.h:83
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
yuv2packed1_fn
void(* yuv2packed1_fn)(SwsInternal *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
Definition: swscale_internal.h:199
SwsInternal
Definition: swscale_internal.h:335
ret
ret
Definition: filter_design.txt:187
sws_receive_slice_alignment
unsigned int sws_receive_slice_alignment(const SwsContext *sws)
Get the alignment required for slices.
Definition: swscale.c:1283
__asm__
__asm__(".macro parse_r var r\n\t" "\\var = -1\n\t" _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3) _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7) _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11) _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15) _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19) _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23) _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27) _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31) ".iflt \\var\n\t" ".error \"Unable to parse register name \\r\"\n\t" ".endif\n\t" ".endm")
AVHWDeviceContext::type
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:75
bswap.h
sws_frame_start
int sws_frame_start(SwsContext *sws, AVFrame *dst, const AVFrame *src)
Initialize the scaling process for a given pair of source/destination frames.
Definition: swscale.c:1235
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
sws_pb_64
static const uint8_t sws_pb_64[8]
Definition: swscale.c:54
U
#define U(x)
Definition: vpx_arith.h:37
yuv2planarX_fn
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
Definition: swscale_internal.h:142
SwsGraph::noop
bool noop
Definition: graph.h:126
reset_ptr
static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
Definition: swscale.c:716
ff_init_vscale_pfn
void ff_init_vscale_pfn(SwsInternal *c, yuv2planar1_fn yuv2plane1, yuv2planarX_fn yuv2planeX, yuv2interleavedX_fn yuv2nv12cX, yuv2packed1_fn yuv2packed1, yuv2packed2_fn yuv2packed2, yuv2packedX_fn yuv2packedX, yuv2anyX_fn yuv2anyX, int use_mmx)
setup vertical scaler functions
Definition: vscale.c:258
sws_scale
int attribute_align_arg sws_scale(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1559
FFFramePool::linesize
int linesize[4]
Definition: framepool.h:51
SWS_PRINT_INFO
@ SWS_PRINT_INFO
Emit verbose log of scaling parameters.
Definition: swscale.h:120
SwsFormat::color
SwsColor color
Definition: format.h:86
lumRangeFromJpeg16_c
static void lumRangeFromJpeg16_c(int16_t *_dst, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:248
atomic_exchange_explicit
#define atomic_exchange_explicit(object, desired, order)
Definition: stdatomic.h:106
SWS_STRICT
@ SWS_STRICT
Return an error on underspecified conversions.
Definition: swscale.h:115
ff_dither_8x8_128
const uint8_t ff_dither_8x8_128[9][8]
Definition: swscale.c:42
AV_CPU_FLAG_MMXEXT
#define AV_CPU_FLAG_MMXEXT
SSE integer functions or AMD MMX ext.
Definition: cpu.h:33
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
ff_swscale
int ff_swscale(SwsInternal *c, const uint8_t *const src[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:263
SwsFormat::csp
enum AVColorSpace csp
Definition: format.h:83
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
av_clip_uint16
#define av_clip_uint16
Definition: common.h:112
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
desc
const char * desc
Definition: libsvtav1.c:82
scale_gamma
static int scale_gamma(SwsInternal *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:959
SwsGraph::incomplete
bool incomplete
Definition: graph.h:125
mem.h
BU_IDX
#define BU_IDX
Definition: swscale_internal.h:472
SwsPlane::sliceY
int sliceY
index of first line
Definition: swscale_internal.h:1108
SwsContext::dst_w
int dst_w
Definition: swscale.h:254
SwsInternal::dst_slice_height
int dst_slice_height
Definition: swscale_internal.h:352
SwsGraph
Filter graph, which represents a 'baked' pixel format conversion.
Definition: graph.h:121
SwsContext::src_format
int src_format
Source pixel format.
Definition: swscale.h:255
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
w
uint8_t w
Definition: llvidencdsp.c:39
ff_hyscale_fast_c
void ff_hyscale_fast_c(SwsInternal *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc)
Definition: hscale_fast_bilinear.c:23
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
ff_sws_init_output_funcs
av_cold void ff_sws_init_output_funcs(SwsInternal *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
Definition: output.c:3291
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
ff_sws_init_swscale_aarch64
av_cold void ff_sws_init_swscale_aarch64(SwsInternal *c)
Definition: swscale.c:350
ff_frame_pool_video_reinit
int ff_frame_pool_video_reinit(FFFramePool *pool, int width, int height, enum AVPixelFormat format, int align)
Recreate the video frame pool if its current configuration differs from the provided configuration.
Definition: framepool.c:223
SwsInternal::dst_slice_start
int dst_slice_start
Definition: swscale_internal.h:351
int32_t
int32_t
Definition: audioconvert.c:56
hwcontext.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:479
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sws_internal
static SwsInternal * sws_internal(const SwsContext *sws)
Definition: swscale_internal.h:79
sws_scale_frame
int sws_scale_frame(SwsContext *sws, AVFrame *dst, const AVFrame *src)
Scale source data from src and write the output to dst.
Definition: swscale.c:1366
h
h
Definition: vp9dsp_template.c:2070
stride
#define stride
Definition: h264pred_template.c:536
solve_range_convert
static void solve_range_convert(uint16_t src_min, uint16_t src_max, uint16_t dst_min, uint16_t dst_max, int src_bits, int src_shift, int mult_shift, uint32_t *coeff, int64_t *offset)
Definition: swscale.c:577
isPlanar
static av_always_inline int isPlanar(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:914
GU_IDX
#define GU_IDX
Definition: swscale_internal.h:471
width
#define width
Definition: dsp.h:89
SwsContext
Main external API structure.
Definition: swscale.h:206
ff_sws_graph_run
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image dimensions or settings change in any way splits interlaced images into separate and calls ff_sws_graph_run() on each. From the point of view of SwsGraph itself
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3823
src
#define src
Definition: vp8dsp.c:248
swscale.h
SwsColor::prim
enum AVColorPrimaries prim
Definition: format.h:61
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
AV_RB16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_RB16
Definition: bytestream.h:98
isALPHA
static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:896
RGB2YUV_SHIFT
#define RGB2YUV_SHIFT
Definition: swscale_internal.h:476