112 typedef struct SwsInternalAltivec {
115 vector
signed short CY;
116 vector
signed short CRV;
117 vector
signed short CBU;
118 vector
signed short CGU;
119 vector
signed short CGV;
120 vector
signed short OY;
121 vector
unsigned short CSHIFT;
122 vector
signed short *vYCoeffsBank, *vCCoeffsBank;
123 } SwsInternalAltivec;
128 "SWSINTERNAL_ADDITIONAL_ASM_SIZE needs to be increased");
130 static inline SwsInternalAltivec *sws_internal_altivec(
SwsInternal *
c)
132 return (SwsInternalAltivec*)
c;
135 #undef PROFILE_THE_BEAST
/* Shorthand byte types used throughout the AltiVec conversion kernels. */
typedef unsigned char ubyte;
typedef signed char   sbyte;
176 static const vector
unsigned char
177 perm_rgb_0 = { 0x00, 0x01, 0x10, 0x02, 0x03, 0x11, 0x04, 0x05,
178 0x12, 0x06, 0x07, 0x13, 0x08, 0x09, 0x14, 0x0a },
179 perm_rgb_1 = { 0x0b, 0x15, 0x0c, 0x0d, 0x16, 0x0e, 0x0f, 0x17,
180 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
181 perm_rgb_2 = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
182 0x00, 0x01, 0x18, 0x02, 0x03, 0x19, 0x04, 0x05 },
183 perm_rgb_3 = { 0x1a, 0x06, 0x07, 0x1b, 0x08, 0x09, 0x1c, 0x0a,
184 0x0b, 0x1d, 0x0c, 0x0d, 0x1e, 0x0e, 0x0f, 0x1f };
/* Interleave three 16-byte planes x0/x1/x2 into the three packed
 * 3-byte-per-pixel vectors y0/y1/y2 using the perm_rgb tables above. */
#define vec_merge3(x2, x1, x0, y0, y1, y2)     \
do {                                           \
    __typeof__(x0) o0, o2, o3;                 \
    o0 = vec_mergeh(x0, x1);                   \
    y0 = vec_perm(o0, x2, perm_rgb_0);         \
    o2 = vec_perm(o0, x2, perm_rgb_1);         \
    o3 = vec_mergel(x0, x1);                   \
    y1 = vec_perm(o3, o2, perm_rgb_2);         \
    y2 = vec_perm(o3, o2, perm_rgb_3);         \
} while (0)
/* Merge B/G/R planes and store 48 bytes (16 BGR24 pixels) at *ptr,
 * advancing ptr by three vectors. */
#define vec_mstbgr24(x0, x1, x2, ptr)          \
do {                                           \
    __typeof__(x0) _0, _1, _2;                 \
    vec_merge3(x0, x1, x2, _0, _1, _2);        \
    vec_st(_0, 0, ptr++);                      \
    vec_st(_1, 0, ptr++);                      \
    vec_st(_2, 0, ptr++);                      \
} while (0)
/* Same as vec_mstbgr24 but with the plane order swapped, producing
 * 16 RGB24 pixels. */
#define vec_mstrgb24(x0, x1, x2, ptr)          \
do {                                           \
    __typeof__(x0) _0, _1, _2;                 \
    vec_merge3(x2, x1, x0, _0, _1, _2);        \
    vec_st(_0, 0, ptr++);                      \
    vec_st(_1, 0, ptr++);                      \
    vec_st(_2, 0, ptr++);                      \
} while (0)
/* Interleave four byte planes x0..x3 into 64 bytes of 4-byte pixels at
 * *ptr (byte order x0 x1 x2 x3) and advance ptr by four vectors.
 * NOTE(review): the "do {", local declarations and trailing "ptr += 4;"
 * were missing from this listing and have been reconstructed -- confirm
 * against upstream. */
#define vec_mstrgb32(T, x0, x1, x2, x3, ptr)                          \
do {                                                                  \
    T _0, _1, _2, _3;                                                 \
    _0 = vec_mergeh(x0, x1);                                          \
    _1 = vec_mergeh(x2, x3);                                          \
    _2 = (T) vec_mergeh((vector unsigned short) _0,                   \
                        (vector unsigned short) _1);                  \
    _3 = (T) vec_mergel((vector unsigned short) _0,                   \
                        (vector unsigned short) _1);                  \
    vec_st(_2, 0 * 16, (T *) ptr);                                    \
    vec_st(_3, 1 * 16, (T *) ptr);                                    \
    _0 = vec_mergel(x0, x1);                                          \
    _1 = vec_mergel(x2, x3);                                          \
    _2 = (T) vec_mergeh((vector unsigned short) _0,                   \
                        (vector unsigned short) _1);                  \
    _3 = (T) vec_mergel((vector unsigned short) _0,                   \
                        (vector unsigned short) _1);                  \
    vec_st(_2, 2 * 16, (T *) ptr);                                    \
    vec_st(_3, 3 * 16, (T *) ptr);                                    \
    ptr += 4;                                                         \
} while (0)
/* Zero-extend the high (vec_unh) / low (vec_unl) 8 bytes of a byte
 * vector into 8 signed 16-bit lanes.  The vec_perm forms splice a zero
 * byte (index 0x10 selects from the all-zero second operand) before
 * each data byte; the vec_merge forms are the alternative spelling.
 * NOTE(review): the #define headers and the preprocessor guards
 * separating the two variants were missing from this listing;
 * HAVE_BIGENDIAN is assumed -- confirm against upstream. */
#if HAVE_BIGENDIAN
#define vec_unh(x) \
    (vector signed short) \
        vec_perm(x, (__typeof__(x)) { 0 }, \
                 ((vector unsigned char) { \
                     0x10, 0x00, 0x10, 0x01, 0x10, 0x02, 0x10, 0x03, \
                     0x10, 0x04, 0x10, 0x05, 0x10, 0x06, 0x10, 0x07 }))

#define vec_unl(x) \
    (vector signed short) \
        vec_perm(x, (__typeof__(x)) { 0 }, \
                 ((vector unsigned char) { \
                     0x10, 0x08, 0x10, 0x09, 0x10, 0x0A, 0x10, 0x0B, \
                     0x10, 0x0C, 0x10, 0x0D, 0x10, 0x0E, 0x10, 0x0F }))
#else
#define vec_unh(x)(vector signed short) vec_mergeh(x,(__typeof__(x)) { 0 })
#define vec_unl(x)(vector signed short) vec_mergel(x,(__typeof__(x)) { 0 })
#endif
/* Clamp every signed 16-bit lane to the TV-range luma interval [16, 235]. */
#define vec_clip_s16(x) \
    vec_max(vec_min(x, ((vector signed short) { \
                        235, 235, 235, 235, 235, 235, 235, 235 })), \
            ((vector signed short) { 16, 16, 16, 16, 16, 16, 16, 16 }))
/* Clamp two signed-short vectors at zero, then saturate-pack them into
 * a single vector of 16 unsigned bytes. */
#define vec_packclp(x, y) \
    (vector unsigned char) \
        vec_packs((vector unsigned short) \
                      vec_max(x, ((vector signed short) { 0 })), \
                  (vector unsigned short) \
                      vec_max(y, ((vector signed short) { 0 })))
285 static inline void cvtyuvtoRGB(
SwsInternal *
c, vector
signed short Y,
286 vector
signed short U, vector
signed short V,
287 vector
signed short *
R, vector
signed short *
G,
288 vector
signed short *
B)
290 SwsInternalAltivec *
const a = sws_internal_altivec(
c);
291 vector
signed short vx, ux, uvx;
293 Y = vec_mradds(
Y,
a->CY,
a->OY);
294 U = vec_sub(
U, (vector
signed short)
295 vec_splat((vector
signed short) { 128 }, 0));
296 V = vec_sub(
V, (vector
signed short)
297 vec_splat((vector
signed short) { 128 }, 0));
300 ux = vec_sl(
U,
a->CSHIFT);
301 *
B = vec_mradds(ux,
a->CBU,
Y);
304 vx = vec_sl(
V,
a->CSHIFT);
305 *
R = vec_mradds(vx,
a->CRV,
Y);
308 uvx = vec_mradds(
U,
a->CGU,
Y);
309 *
G = vec_mradds(
V,
a->CGV, uvx);
319 static inline vector
unsigned char vec_xl(
signed long long offset,
const ubyte *addr)
321 const vector
unsigned char *v_addr = (
const vector
unsigned char *) (addr +
offset);
322 vector
unsigned char align_perm = vec_lvsl(
offset, addr);
324 return (vector
unsigned char) vec_perm(v_addr[0], v_addr[1], align_perm);
/* Template for the planar YUV 4:2:0 -> packed RGB AltiVec converters.
 * Each iteration handles a pair of source rows (which share one chroma
 * row) and the inner loop converts 16 pixels at a time; out_pixels is
 * one of the out_* store macros below.
 * NOTE(review): reconstructed from a garbled listing -- the luma unpack
 * block, per-iteration pointer increments and the return statement were
 * missing and have been restored from the surrounding logic; verify
 * against upstream. */
#define DEFCSP420_CVT(name, out_pixels)                                       \
static int altivec_ ## name(SwsInternal *c, const unsigned char *const *in,   \
                            const int *instrides, int srcSliceY, int srcSliceH, \
                            unsigned char *const *oplanes, const int *outstrides) \
{                                                                             \
    SwsInternalAltivec *const a = sws_internal_altivec(c);                    \
    int w = c->opts.src_w;                                                    \
    int h = srcSliceH;                                                        \
    int i, j;                                                                 \
    int instrides_scl[3];                                                     \
    vector unsigned char y0, y1;                                              \
                                                                              \
    vector signed char u, v;                                                  \
                                                                              \
    vector signed short Y0, Y1, Y2, Y3;                                       \
    vector signed short U, V;                                                 \
    vector signed short vx, ux, uvx;                                          \
    vector signed short vx0, ux0, uvx0;                                       \
    vector signed short vx1, ux1, uvx1;                                       \
    vector signed short R0, G0, B0;                                           \
    vector signed short R1, G1, B1;                                           \
    vector unsigned char R, G, B;                                             \
                                                                              \
    /* keep the per-context coefficients in locals for the hot loop */       \
    vector signed short lCY  = a->CY;                                         \
    vector signed short lOY  = a->OY;                                         \
    vector signed short lCRV = a->CRV;                                        \
    vector signed short lCBU = a->CBU;                                        \
    vector signed short lCGU = a->CGU;                                        \
    vector signed short lCGV = a->CGV;                                        \
    vector unsigned short lCSHIFT = a->CSHIFT;                                \
                                                                              \
    const ubyte *y1i = in[0];                                                 \
    const ubyte *y2i = in[0] + instrides[0];                                  \
    const ubyte *ui  = in[1];                                                 \
    const ubyte *vi  = in[2];                                                 \
                                                                              \
    vector unsigned char *oute, *outo;                                        \
                                                                              \
    /* loop moves y{1, 2}i by w */                                            \
    instrides_scl[0] = instrides[0] * 2 - w;                                  \
    /* loop moves ui by w / 2 */                                              \
    instrides_scl[1] = instrides[1] - w / 2;                                  \
    /* loop moves vi by w / 2 */                                              \
    instrides_scl[2] = instrides[2] - w / 2;                                  \
                                                                              \
    for (i = 0; i < h / 2; i++) {                                             \
        oute = (vector unsigned char *)(oplanes[0] + outstrides[0] *          \
                                        (srcSliceY + i * 2));                 \
        outo = oute + (outstrides[0] >> 4);                                   \
        /* data-stream store hints for the two destination rows */            \
        vec_dstst(outo, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 0);       \
        vec_dstst(oute, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 1);       \
                                                                              \
        for (j = 0; j < w / 16; j++) {                                        \
            y0 = vec_xl(0, y1i);                                              \
                                                                              \
            y1 = vec_xl(0, y2i);                                              \
                                                                              \
            u = (vector signed char) vec_xl(0, ui);                           \
                                                                              \
            v = (vector signed char) vec_xl(0, vi);                           \
                                                                              \
            /* re-centre chroma around zero */                                \
            u = (vector signed char)                                          \
                    vec_sub(u,                                                \
                            (vector signed char)                              \
                                vec_splat((vector signed char) { 128 }, 0));  \
            v = (vector signed char)                                          \
                    vec_sub(v,                                                \
                            (vector signed char)                              \
                                vec_splat((vector signed char) { 128 }, 0));  \
                                                                              \
            U = vec_unpackh(u);                                               \
            V = vec_unpackh(v);                                               \
                                                                              \
            Y0 = vec_unh(y0);                                                 \
            Y1 = vec_unl(y0);                                                 \
            Y2 = vec_unh(y1);                                                 \
            Y3 = vec_unl(y1);                                                 \
                                                                              \
            Y0 = vec_mradds(Y0, lCY, lOY);                                    \
            Y1 = vec_mradds(Y1, lCY, lOY);                                    \
            Y2 = vec_mradds(Y2, lCY, lOY);                                    \
            Y3 = vec_mradds(Y3, lCY, lOY);                                    \
                                                                              \
            /* ux = (CBU * (u << CSHIFT) + 0x4000) >> 15 */                   \
            ux  = vec_sl(U, lCSHIFT);                                         \
            ux  = vec_mradds(ux, lCBU, (vector signed short) { 0 });          \
            ux0 = vec_mergeh(ux, ux);                                         \
            ux1 = vec_mergel(ux, ux);                                         \
                                                                              \
            /* vx = (CRV * (v << CSHIFT) + 0x4000) >> 15 */                   \
            vx  = vec_sl(V, lCSHIFT);                                         \
            vx  = vec_mradds(vx, lCRV, (vector signed short) { 0 });          \
            vx0 = vec_mergeh(vx, vx);                                         \
            vx1 = vec_mergel(vx, vx);                                         \
                                                                              \
            /* uvx = ((CGU * u) + (CGV * v)) >> 15 */                         \
            uvx  = vec_mradds(U, lCGU, (vector signed short) { 0 });          \
            uvx  = vec_mradds(V, lCGV, uvx);                                  \
            uvx0 = vec_mergeh(uvx, uvx);                                      \
            uvx1 = vec_mergel(uvx, uvx);                                      \
                                                                              \
            R0 = vec_add(Y0, vx0);                                            \
            G0 = vec_add(Y0, uvx0);                                           \
            B0 = vec_add(Y0, ux0);                                            \
            R1 = vec_add(Y1, vx1);                                            \
            G1 = vec_add(Y1, uvx1);                                           \
            B1 = vec_add(Y1, ux1);                                            \
                                                                              \
            R = vec_packclp(R0, R1);                                          \
            G = vec_packclp(G0, G1);                                          \
            B = vec_packclp(B0, B1);                                          \
                                                                              \
            out_pixels(R, G, B, oute);                                        \
                                                                              \
            R0 = vec_add(Y2, vx0);                                            \
            G0 = vec_add(Y2, uvx0);                                           \
            B0 = vec_add(Y2, ux0);                                            \
            R1 = vec_add(Y3, vx1);                                            \
            G1 = vec_add(Y3, uvx1);                                           \
            B1 = vec_add(Y3, ux1);                                            \
            R  = vec_packclp(R0, R1);                                         \
            G  = vec_packclp(G0, G1);                                         \
            B  = vec_packclp(B0, B1);                                         \
                                                                              \
            out_pixels(R, G, B, outo);                                        \
                                                                              \
            y1i += 16;                                                        \
            y2i += 16;                                                        \
            ui  += 8;                                                         \
            vi  += 8;                                                         \
        }                                                                     \
                                                                              \
        ui  += instrides_scl[1];                                              \
        vi  += instrides_scl[2];                                              \
        y1i += instrides_scl[0];                                              \
        y2i += instrides_scl[0];                                              \
    }                                                                         \
                                                                              \
    return srcSliceH;                                                         \
}
/* Per-pixel-format store adapters: map (R, G, B, ptr) onto the generic
 * merge/store macros, inserting an opaque (255) alpha plane for the
 * 32-bit formats.  Argument order encodes the output byte order. */
#define out_abgr(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) vec_splat((__typeof__(a)){ 255 }, 0)), c, b, a, ptr)
#define out_bgra(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), c, b, a, ((__typeof__(a)) vec_splat((__typeof__(a)){ 255 }, 0)), ptr)
#define out_rgba(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), a, b, c, ((__typeof__(a)) vec_splat((__typeof__(a)){ 255 }, 0)), ptr)
#define out_argb(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) vec_splat((__typeof__(a)){ 255 }, 0)), a, b, c, ptr)
#define out_rgb24(a, b, c, ptr) vec_mstrgb24(a, b, c, ptr)
#define out_bgr24(a, b, c, ptr) vec_mstbgr24(a, b, c, ptr)
480 DEFCSP420_CVT(yuv2_abgr, out_abgr)
481 DEFCSP420_CVT(yuv2_bgra, out_bgra)
482 DEFCSP420_CVT(yuv2_rgba, out_rgba)
483 DEFCSP420_CVT(yuv2_argb, out_argb)
484 DEFCSP420_CVT(yuv2_rgb24, out_rgb24)
485 DEFCSP420_CVT(yuv2_bgr24, out_bgr24)
489 static const vector
unsigned char
490 demux_u = { 0x10, 0x00, 0x10, 0x00,
491 0x10, 0x04, 0x10, 0x04,
492 0x10, 0x08, 0x10, 0x08,
493 0x10, 0x0c, 0x10, 0x0c },
494 demux_v = { 0x10, 0x02, 0x10, 0x02,
495 0x10, 0x06, 0x10, 0x06,
496 0x10, 0x0A, 0x10, 0x0A,
497 0x10, 0x0E, 0x10, 0x0E },
498 demux_y = { 0x10, 0x01, 0x10, 0x03,
499 0x10, 0x05, 0x10, 0x07,
500 0x10, 0x09, 0x10, 0x0B,
501 0x10, 0x0D, 0x10, 0x0F };
506 static int altivec_uyvy_rgb32(
SwsInternal *
c,
const unsigned char *
const *in,
507 const int *instrides,
int srcSliceY,
int srcSliceH,
508 unsigned char *
const *oplanes,
const int *outstrides)
510 int w =
c->opts.src_w;
513 vector
unsigned char uyvy;
514 vector
signed short Y,
U,
V;
515 vector
signed short R0, G0,
B0,
R1, G1,
B1;
516 vector
unsigned char R,
G,
B;
517 vector
unsigned char *
out;
521 out = (vector
unsigned char *) (oplanes[0] + srcSliceY * outstrides[0]);
523 for (
i = 0;
i <
h;
i++)
524 for (j = 0; j <
w / 16; j++) {
525 uyvy = vec_ld(0,
img);
528 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_u);
530 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_v);
532 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_y);
534 cvtyuvtoRGB(
c,
Y,
U,
V, &
R0, &G0, &
B0);
536 uyvy = vec_ld(16,
img);
539 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_u);
541 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_v);
543 vec_perm(uyvy, (vector
unsigned char) { 0 }, demux_y);
545 cvtyuvtoRGB(
c,
Y,
U,
V, &
R1, &G1, &
B1);
547 R = vec_packclp(
R0,
R1);
548 G = vec_packclp(G0, G1);
549 B = vec_packclp(
B0,
B1);
580 if ((
c->opts.src_w & 0
xf) != 0)
583 switch (
c->opts.src_format) {
590 if ((
c->opts.src_h & 0x1) != 0)
600 switch (
c->opts.dst_format) {
603 return altivec_yuv2_rgb24;
606 return altivec_yuv2_bgr24;
609 return altivec_yuv2_argb;
612 return altivec_yuv2_abgr;
615 return altivec_yuv2_rgba;
618 return altivec_yuv2_bgra;
619 default:
return NULL;
625 switch (
c->opts.dst_format) {
628 return altivec_uyvy_rgb32;
629 default:
return NULL;
639 const int inv_table[4],
645 SwsInternalAltivec *
const a = sws_internal_altivec(
c);
649 vector
signed short vec;
655 buf.tmp[0] = ((0xffffLL) * contrast >> 8) >> 9;
656 buf.tmp[1] = -256 * brightness;
657 buf.tmp[2] = (inv_table[0] >> 3) * (contrast >> 16) * (
saturation >> 16);
658 buf.tmp[3] = (inv_table[1] >> 3) * (contrast >> 16) * (
saturation >> 16);
659 buf.tmp[4] = -((inv_table[2] >> 1) * (contrast >> 16) * (
saturation >> 16));
660 buf.tmp[5] = -((inv_table[3] >> 1) * (contrast >> 16) * (
saturation >> 16));
662 a->CSHIFT = (vector
unsigned short) vec_splat_u16(2);
663 a->CY = vec_splat((vector
signed short) buf.vec, 0);
664 a->OY = vec_splat((vector
signed short) buf.vec, 1);
665 a->CRV = vec_splat((vector
signed short) buf.vec, 2);
666 a->CBU = vec_splat((vector
signed short) buf.vec, 3);
667 a->CGU = vec_splat((vector
signed short) buf.vec, 4);
668 a->CGV = vec_splat((vector
signed short) buf.vec, 5);
676 const int16_t *lumFilter,
677 const int16_t **lumSrc,
679 const int16_t *chrFilter,
680 const int16_t **chrUSrc,
681 const int16_t **chrVSrc,
683 const int16_t **alpSrc,
688 SwsInternalAltivec *
const a = sws_internal_altivec(
c);
691 vector
signed short X, X0, X1, Y0, U0, V0, Y1, U1, V1,
U,
V;
692 vector
signed short R0, G0,
B0,
R1, G1,
B1;
694 vector
unsigned char R,
G,
B;
695 vector
unsigned char *
out, *nout;
697 vector
signed short RND = vec_splat_s16(1 << 3);
698 vector
unsigned short SCL = vec_splat_u16(4);
701 vector
signed short *YCoeffs, *CCoeffs;
703 YCoeffs =
a->vYCoeffsBank + dstY * lumFilterSize;
704 CCoeffs =
a->vCCoeffsBank + dstY * chrFilterSize;
706 out = (vector
unsigned char *) dest;
708 for (
i = 0;
i < dstW;
i += 16) {
712 for (j = 0; j < lumFilterSize; j++) {
713 X0 = vec_ld(0, &lumSrc[j][
i]);
714 X1 = vec_ld(16, &lumSrc[j][
i]);
715 Y0 = vec_mradds(X0, YCoeffs[j], Y0);
716 Y1 = vec_mradds(X1, YCoeffs[j], Y1);
722 for (j = 0; j < chrFilterSize; j++) {
723 X = vec_ld(0, &chrUSrc[j][
i / 2]);
724 U = vec_mradds(
X, CCoeffs[j],
U);
725 X = vec_ld(0, &chrVSrc[j][
i / 2]);
726 V = vec_mradds(
X, CCoeffs[j],
V);
730 Y0 = vec_sra(Y0, SCL);
731 Y1 = vec_sra(Y1, SCL);
735 Y0 = vec_clip_s16(Y0);
736 Y1 = vec_clip_s16(Y1);
749 U0 = vec_mergeh(
U,
U);
750 V0 = vec_mergeh(
V,
V);
752 U1 = vec_mergel(
U,
U);
753 V1 = vec_mergel(
V,
V);
755 cvtyuvtoRGB(
c, Y0, U0, V0, &
R0, &G0, &
B0);
756 cvtyuvtoRGB(
c, Y1, U1, V1, &
R1, &G1, &
B1);
758 R = vec_packclp(
R0,
R1);
759 G = vec_packclp(G0, G1);
760 B = vec_packclp(
B0,
B1);
785 static int printed_error_message;
786 if (!printed_error_message) {
788 "altivec_yuv2packedX doesn't support %s output\n",
790 printed_error_message = 1;
803 for (j = 0; j < lumFilterSize; j++) {
804 X0 = vec_ld(0, &lumSrc[j][
i]);
805 X1 = vec_ld(16, &lumSrc[j][
i]);
806 Y0 = vec_mradds(X0, YCoeffs[j], Y0);
807 Y1 = vec_mradds(X1, YCoeffs[j], Y1);
813 for (j = 0; j < chrFilterSize; j++) {
814 X = vec_ld(0, &chrUSrc[j][
i / 2]);
815 U = vec_mradds(
X, CCoeffs[j],
U);
816 X = vec_ld(0, &chrVSrc[j][
i / 2]);
817 V = vec_mradds(
X, CCoeffs[j],
V);
821 Y0 = vec_sra(Y0, SCL);
822 Y1 = vec_sra(Y1, SCL);
826 Y0 = vec_clip_s16(Y0);
827 Y1 = vec_clip_s16(Y1);
840 U0 = vec_mergeh(
U,
U);
841 V0 = vec_mergeh(
V,
V);
843 U1 = vec_mergel(
U,
U);
844 V1 = vec_mergel(
V,
V);
846 cvtyuvtoRGB(
c, Y0, U0, V0, &
R0, &G0, &
B0);
847 cvtyuvtoRGB(
c, Y1, U1, V1, &
R1, &G1, &
B1);
849 R = vec_packclp(
R0,
R1);
850 G = vec_packclp(G0, G1);
851 B = vec_packclp(
B0,
B1);
853 nout = (vector
unsigned char *) scratch;
856 out_abgr(
R,
G,
B, nout);
859 out_bgra(
R,
G,
B, nout);
862 out_rgba(
R,
G,
B, nout);
865 out_argb(
R,
G,
B, nout);
868 out_rgb24(
R,
G,
B, nout);
871 out_bgr24(
R,
G,
B, nout);
876 "altivec_yuv2packedX doesn't support %s output\n",
881 memcpy(&((uint32_t *) dest)[
i], scratch, (dstW -
i) / 4);
/* Generate one public entry point per packed output format; each simply
 * forwards to yuv2packedX_altivec with the pixel format baked in.
 * NOTE(review): the filter-size parameter lines were missing from this
 * listing and have been reconstructed -- verify against upstream. */
#define YUV2PACKEDX_WRAPPER(suffix, pixfmt)                             \
void ff_yuv2 ## suffix ## _X_altivec(SwsInternal *c,                    \
                                     const int16_t *lumFilter,          \
                                     const int16_t **lumSrc,            \
                                     int lumFilterSize,                 \
                                     const int16_t *chrFilter,          \
                                     const int16_t **chrUSrc,           \
                                     const int16_t **chrVSrc,           \
                                     int chrFilterSize,                 \
                                     const int16_t **alpSrc,            \
                                     uint8_t *dest, int dstW, int dstY) \
{                                                                       \
    yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,            \
                        chrFilter, chrUSrc, chrVSrc,                    \
                        chrFilterSize, alpSrc,                          \
                        dest, dstW, dstY, pixfmt);                      \
}
913 SwsInternalAltivec *
const a = sws_internal_altivec(
c);
916 a->vCCoeffsBank =
av_malloc_array(
c->chrDstH,
c->vChrFilterSize *
sizeof(*
a->vCCoeffsBank));
917 if (!
a->vYCoeffsBank || !
a->vCCoeffsBank)
920 for (
int i = 0;
i <
c->vLumFilterSize * sws->
dst_h; ++
i) {
921 short *
p = (
short *)&
a->vYCoeffsBank[
i];
922 for (
int j = 0; j < 8; ++j)
923 p[j] =
c->vLumFilter[
i];
926 for (
int i = 0;
i <
c->vChrFilterSize *
c->chrDstH; ++
i) {
927 short *
p = (
short *)&
a->vCCoeffsBank[
i];
928 for (
int j = 0; j < 8; ++j)
929 p[j] =
c->vChrFilter[
i];
937 SwsInternalAltivec *
const a = sws_internal_altivec(
c);