#define vzero vec_splat_s32(0)
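/*
 * Helpers for loading the next 8 luma samples. On big-endian AltiVec,
 * unaligned access is emulated with aligned vec_ld loads stitched
 * together by vec_perm; on little-endian POWER with VSX, vec_vsx_ld
 * performs the unaligned load directly.
 */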
#if HAVE_BIGENDIAN
#define GET_LS(a, b, c, s) {\
        vector signed short l2 = vec_ld(((b) << 1) + 16, s);\
        ls = vec_perm(a, l2, c);\
        a = l2;\
    }
#else
#define GET_LS(a, b, c, s) {\
        ls = a;\
        a = vec_vsx_ld(((b) << 1) + 16, s);\
    }
#endif
#define yuv2planeX_8(d1, d2, l1, src, x, perm, filter) do {\
        vector signed short ls;\
        GET_LS(l1, x, perm, src);\
        vector signed int i1 = vec_mule(filter, ls);\
        vector signed int i2 = vec_mulo(filter, ls);\
        vector signed int vf1, vf2;\
        vf1 = vec_mergeh(i1, i2);\
        vf2 = vec_mergel(i1, i2);\
        d1 = vec_add(d1, vf1);\
        d2 = vec_add(d2, vf2);\
    } while (0)
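/*
 * yuv2planeX_8 multiplies 8 source samples by one splatted filter
 * coefficient: vec_mule/vec_mulo yield the even/odd 32-bit products,
 * which vec_mergeh/vec_mergel re-interleave into source order before
 * accumulating into the two 32-bit accumulators d1 and d2.
 */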
#if HAVE_BIGENDIAN
#define LOAD_FILTER(vf, f) {\
        vector unsigned char perm0 = vec_lvsl(joffset, f);\
        vf = vec_ld(joffset, f);\
        vf = vec_perm(vf, vf, perm0);\
}
#define LOAD_L1(ll1, s, p){\
        p = vec_lvsl(xoffset, s);\
        ll1 = vec_ld(xoffset, s);\
}
#else
#define LOAD_FILTER(vf, f) {\
        vf = vec_vsx_ld(joffset, f);\
}
#define LOAD_L1(ll1, s, p){\
        ll1 = vec_vsx_ld(xoffset, s);\
}
#endif
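/*
 * Vertical scaler: computes 16 consecutive output pixels of one plane.
 * dest must be 16-byte aligned; the unaligned head and the tail of the
 * row are handled by the scalar yuv2planeX_u() below.
 */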
static void yuv2planeX_16_altivec(const int16_t *filter, int filterSize,
                                  const int16_t **src, uint8_t *dest,
                                  const uint8_t *dither, int offset, int x)
{
    register int i, j;
    LOCAL_ALIGNED(16, int, val, [16]);
    vector signed int vo1, vo2, vo3, vo4;
    vector unsigned short vs1, vs2;
    vector unsigned char vf;
    /* vec_splat_u32() takes a 5-bit signed immediate, so the shift
     * count of 19 is assembled as 10 + 9. */
    vector unsigned int altivec_vectorShiftInt19 =
        vec_add(vec_splat_u32(10), vec_splat_u32(9));
    for (i = 0; i < 16; i++)
        val[i] = dither[(x + i + offset) & 7] << 12;
    vo1 = vec_ld(0,  val);
    vo2 = vec_ld(16, val);
    vo3 = vec_ld(32, val);
    vo4 = vec_ld(48, val);
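    /*
     * The accumulators now hold the dither offsets at 12-bit precision.
     * Source samples are 15-bit and the vertical filter coefficients
     * are 12-bit fixed point (swscale's convention), so the final sums
     * are shifted right by 19 to return to the 8-bit output range,
     * matching the scalar path's av_clip_uint8(t >> 19).
     */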
    for (j = 0; j < filterSize; j++) {
        unsigned int joffset = j << 1;
        unsigned int xoffset = x << 1;
        vector unsigned char perm;
        vector signed short l1, vLumFilter;
        LOAD_FILTER(vLumFilter, filter);
        vLumFilter = vec_splat(vLumFilter, 0);
        LOAD_L1(l1, src[j], perm);
        yuv2planeX_8(vo1, vo2, l1, src[j], x,     perm, vLumFilter);
        yuv2planeX_8(vo3, vo4, l1, src[j], x + 8, perm, vLumFilter);
    }
    vo1 = vec_sra(vo1, altivec_vectorShiftInt19);
    vo2 = vec_sra(vo2, altivec_vectorShiftInt19);
    vo3 = vec_sra(vo3, altivec_vectorShiftInt19);
    vo4 = vec_sra(vo4, altivec_vectorShiftInt19);
    vs1 = vec_packsu(vo1, vo2);
    vs2 = vec_packsu(vo3, vo4);
    vf  = vec_packsu(vs1, vs2);
    /* dest is 16-byte aligned (see yuv2planeX_altivec), so an aligned
     * store is safe. */
    vec_st(vf, 0, dest);
}
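/*
 * Scalar fallback used for the unaligned head of the output row and for
 * the tail narrower than 16 pixels.
 */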
static inline void yuv2planeX_u(const int16_t *filter, int filterSize,
                                const int16_t **src, uint8_t *dest, int dstW,
                                const uint8_t *dither, int offset, int x)
{
    int i, j;
    for (i = x; i < dstW; i++) {
        int t = dither[(i + offset) & 7] << 12;
        for (j = 0; j < filterSize; j++)
            t += src[j][i] * filter[j];
        dest[i] = av_clip_uint8(t >> 19);
    }
}
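/*
 * Top-level vertical scaler: scalar head until dest reaches 16-byte
 * alignment, vectorized body in 16-pixel steps, scalar tail for the
 * remainder.
 */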
static void yuv2planeX_altivec(const int16_t *filter, int filterSize,
                               const int16_t **src, uint8_t *dest, int dstW,
                               const uint8_t *dither, int offset)
{
    /* Number of bytes until dest is 16-byte aligned. */
    int dst_u = -(uintptr_t)dest & 15;
    int i;
    yuv2planeX_u(filter, filterSize, src, dest, dst_u, dither, offset, 0);

    for (i = dst_u; i < dstW - 15; i += 16)
        yuv2planeX_16_altivec(filter, filterSize, src, dest + i, dither,
                              offset, i);

    yuv2planeX_u(filter, filterSize, src, dest, dstW, dither, offset, i);
}
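/*
 * Horizontal scaling helpers. The big-endian variants implement
 * unaligned access with the classic vec_ld / vec_lvsl / vec_perm idiom;
 * the little-endian VSX variants replace that with single vec_vsx_ld
 * unaligned loads, which leaves several of these helpers empty.
 */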
#if HAVE_BIGENDIAN
/* Load 4 filter coefficients (8 bytes) and expand them into the even
 * 16-bit slots, picking the high or low half of the loaded vector
 * depending on the 8-byte alignment of the access. */
#define GET_VF4(a, vf, f) {\
    vf = vec_ld(a << 3, f);\
    if ((a << 3) % 16)\
        vf = vec_mergel(vf, (vector signed short)vzero);\
    else\
        vf = vec_mergeh(vf, (vector signed short)vzero);\
}
#define FIRST_LOAD(sv, pos, s, per) {\
    sv  = vec_ld(pos, s);\
    per = vec_lvsl(pos, s);\
}
/* Carry the trailing loads of one iteration over to the next. */
#define UPDATE_PTR(s0, d0, s1, d1) {\
    d0 = s0;\
    d1 = s1;\
}
#define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
    v1 = vec_ld(pos + a + 16, s);\
    vf = vec_perm(v0, v1, per);\
}
/* 8-byte variant: the second aligned load is only needed when the
 * access straddles a 16-byte boundary. */
#define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) {\
    if ((((uintptr_t)s + pos) % 16) > 8) {\
        v1 = vec_ld(pos + a + 16, s);\
    }\
    vf = vec_perm(v0, v1, per);\
}
#define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
    vf1 = vec_ld((a * 2 * filterSize) + (b * 2) + 16 + off, f);\
    vf  = vec_perm(vf0, vf1, per);\
}
#else
#define GET_VF4(a, vf, f) {\
    vf = (vector signed short)vec_vsx_ld(a << 3, f);\
    vf = vec_mergeh(vf, (vector signed short)vzero);\
}
#define FIRST_LOAD(sv, pos, s, per) {}
#define UPDATE_PTR(s0, d0, s1, d1) {}
#define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
    vf = vec_vsx_ld(pos + a, s);\
}
#define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) LOAD_SRCV(pos, a, s, per, v0, v1, vf)
#define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
    vf = vec_vsx_ld((a * 2 * filterSize) + (b * 2) + off, f);\
}
#endif
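/*
 * Horizontal scaler: for each output pixel i, convolves filterSize
 * source bytes starting at filterPos[i] with filter row i. Sizes that
 * are not a multiple of 4 fall back to scalar code; 4, 8 and 16 have
 * dedicated vector paths, and larger sizes take the generic loop.
 */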
static void hScale_altivec_real(SwsContext *c, int16_t *dst, int dstW,
                                const uint8_t *src, const int16_t *filter,
                                const int32_t *filterPos, int filterSize)
{
    register int i;
    LOCAL_ALIGNED(16, int, tempo, [4]);
    if (filterSize % 4) {
        for (i = 0; i < dstW; i++) {
            register int j;
            register int srcPos = filterPos[i];
            register int val    = 0;
            for (j = 0; j < filterSize; j++)
                val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
            dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
        }
    } else
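        /*
         * All paths below accumulate src[srcPos + j] * filter[...] at
         * full precision, then shift right by 7 and clamp to
         * (1 << 15) - 1, the 15-bit range the vertical scaler expects.
         */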
        switch (filterSize) {
        case 4:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_vF = unaligned_load(srcPos, src);
                vector signed short src_v, filter_v;
                vector signed int val_vEven, val_s;
                /* Interleave zero bytes with the samples to zero-extend
                 * them to 16 bits... */
                src_v = (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                /* ...and move them into the even slots for vec_mule. */
                src_v = vec_mergeh(src_v, (vector signed short)vzero);
                GET_VF4(i, filter_v, filter);
                val_vEven = vec_mule(src_v, filter_v);
                val_s     = vec_sums(val_vEven, vzero);
                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;
        case 8:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];
                vector unsigned char src_vF, src_v0, src_v1;
                vector unsigned char permS;
                vector signed short src_v, filter_v;
                vector signed int val_v, val_s;
                FIRST_LOAD(src_v0, srcPos, src, permS);
                LOAD_SRCV8(srcPos, 0, src, permS, src_v0, src_v1, src_vF);
                /* Zero-extend the 8 samples to 16 bits. */
                src_v = (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                filter_v = vec_ld(i << 4, filter);
                val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
                val_s = vec_sums(val_v, vzero);
                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;
        case 16:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_vF = unaligned_load(srcPos, src);
                vector signed short src_vA =
                    (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                vector signed short src_vB =
                    (vector signed short)(VEC_MERGEL((vector unsigned char)vzero, src_vF));
                vector signed short filter_v0 = vec_ld(i << 5, filter);
                vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);

                vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
                vector signed int val_v   = vec_msums(src_vB, filter_v1, val_acc);

                vector signed int val_s = vec_sums(val_v, vzero);

                VEC_ST(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;
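        /*
         * Generic path for larger filter sizes: consume the taps in
         * 16-sample chunks, then one 8-sample step for the remainder.
         */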
        default:
            for (i = 0; i < dstW; i++) {
                register int j, offset = i * 2 * filterSize;
                register int srcPos = filterPos[i];

                vector signed int val_s, val_v = (vector signed int)vzero;
                vector signed short filter_v0R;
                vector unsigned char permF, src_v0, permS;
                FIRST_LOAD(filter_v0R, offset, filter, permF);
                FIRST_LOAD(src_v0, srcPos, src, permS);

                for (j = 0; j < filterSize - 15; j += 16) {
                    vector unsigned char src_v1, src_vF;
                    vector signed short filter_v1R, filter_v2R, filter_v0, filter_v1;
                    LOAD_SRCV(srcPos, j, src, permS, src_v0, src_v1, src_vF);
                    vector signed short src_vA =
                        (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                    vector signed short src_vB =
                        (vector signed short)(VEC_MERGEL((vector unsigned char)vzero, src_vF));
                    GET_VFD(i, j, filter, filter_v0R, filter_v1R, permF, filter_v0, 0);
                    GET_VFD(i, j, filter, filter_v1R, filter_v2R, permF, filter_v1, 16);

                    vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
                    val_v = vec_msums(src_vB, filter_v1, val_acc);
                    UPDATE_PTR(filter_v2R, filter_v0R, src_v1, src_v0);
                }

                if (j < filterSize - 7) {
                    vector unsigned char src_v1, src_vF;
                    vector signed short src_v, filter_v1R, filter_v;
                    LOAD_SRCV8(srcPos, j, src, permS, src_v0, src_v1, src_vF);
                    src_v = (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                    GET_VFD(i, j, filter, filter_v0R, filter_v1R, permF, filter_v, 0);
                    val_v = vec_msums(src_v, filter_v, val_v);
                }
                val_s = vec_sums(val_v, vzero);

                VEC_ST(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
        }
}
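/*
 * For context: a hedged sketch of how these kernels get selected. The
 * PPC initializer ff_sws_init_swscale_ppc() installs them on the
 * SwsContext once av_get_cpu_flags() reports AltiVec support. The
 * guards shown here are illustrative assumptions; the real initializer
 * applies additional bit-depth and pixel-format checks before each
 * assignment.
 */
av_cold void ff_sws_init_swscale_ppc(SwsContext *c)
{
    enum AVPixelFormat dstFormat = c->dstFormat;

    /* Only install the kernels when the CPU supports AltiVec. */
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    /* Horizontal scaling of 8-bit input (assumed guard: 8-bit source). */
    c->hyScale = c->hcScale = hScale_altivec_real;

    /* The vertical scaler writes 8-bit planes, so skip 9-16 bpc output
     * formats (assumed guard). */
    if (!is16BPS(dstFormat) && !isNBPS(dstFormat))
        c->yuv2planeX = yuv2planeX_altivec;
}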