#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    /* 1st stage */                                               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1]/2 - Y[3] */   \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3]/2 */   \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */
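
/* Scalar equivalent of one VEC_1D_DCT butterfly, i.e. one row/column of the
 * 4x4 H.264 inverse transform (illustrative sketch only; the helper name is
 * made up for readability and is not part of this file): */
static inline void idct4_1d_ref(const int16_t b[4], int16_t x[4])
{
    const int z0 = b[0] + b[2];
    const int z1 = b[0] - b[2];
    const int z2 = (b[1] >> 1) - b[3];
    const int z3 = b[1] + (b[3] >> 1);

    x[0] = z0 + z3;
    x[1] = z1 + z2;
    x[2] = z1 - z2;
    x[3] = z0 - z3;
}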
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )
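
/* What the merge sequence above achieves, stated as a plain scalar 4x4
 * transpose of 16-bit elements (illustrative sketch; the macro operates on
 * the low halves of four vector registers): */
static inline void transpose4x4_ref(const int16_t a[4][4], int16_t b[4][4])
{
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            b[i][j] = a[j][i];
}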
#if HAVE_BIGENDIAN
#define vdst_load(d)              \
    vdst_orig = vec_ld(0, dst);   \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);
#else
#define vdst_load(d) vdst = vec_vsx_ld(0, dst)
#endif
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)                      \
    vdst_load();                                              \
    vdst_ss = (vec_s16) VEC_MERGEH(zero_u8v, vdst);           \
    va = vec_add(va, vdst_ss);                                \
    va_u8 = vec_packsu(va, zero_s16v);                        \
    va_u32 = vec_splat((vec_u32)va_u8, 0);                    \
    vec_ste(va_u32, element, (uint32_t*)dst);
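
/* Per row of four pixels, the macro above amounts to this scalar add-and-clip
 * (illustrative sketch; "residual" stands for the four coefficients held in
 * va): */
static inline void add_residual4_ref(uint8_t *dst, const int16_t residual[4])
{
    for (int x = 0; x < 4; x++) {
        const int v = dst[x] + residual[x];
        dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
    }
}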
static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);
    memset(block, 0, 16 * sizeof(int16_t));
 
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);
 
    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}
 
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* even half */                                            \
    vec_s16 a0v = vec_add(s0, s4);                             \
    vec_s16 a2v = vec_sub(s0, s4);                             \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6);              \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2);              \
    vec_s16 b0v = vec_add(a0v, a6v);                           \
    vec_s16 b2v = vec_add(a2v, a4v);                           \
    vec_s16 b4v = vec_sub(a2v, a4v);                           \
    vec_s16 b6v = vec_sub(a0v, a6v);                           \
    /* odd half */                                             \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v);           \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov));            \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v);           \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov));           \
    /* output butterfly */                                     \
    d0 = vec_add(b0v, b7v);                                    \
    d1 = vec_add(b2v, b5v);                                    \
    d2 = vec_add(b4v, b3v);                                    \
    d3 = vec_add(b6v, b1v);                                    \
    d4 = vec_sub(b6v, b1v);                                    \
    d5 = vec_sub(b4v, b3v);                                    \
    d6 = vec_sub(b2v, b5v);                                    \
    d7 = vec_sub(b0v, b7v);                                    \
}
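
/* Illustrative scalar equivalent of one IDCT8_1D_ALTIVEC column (the standard
 * H.264 8-point inverse-transform butterfly); the function name is
 * hypothetical and only meant as a readable reference: */
static inline void idct8_1d_ref(const int16_t s[8], int16_t d[8])
{
    const int a0 = s[0] + s[4];
    const int a2 = s[0] - s[4];
    const int a4 = (s[2] >> 1) - s[6];
    const int a6 = (s[6] >> 1) + s[2];
    const int b0 = a0 + a6, b2 = a2 + a4, b4 = a2 - a4, b6 = a0 - a6;

    const int a1 = s[5] - s[3] - s[7] - (s[7] >> 1);
    const int a3 = s[7] + s[1] - s[3] - (s[3] >> 1);
    const int a5 = s[7] - s[1] + s[5] + (s[5] >> 1);
    const int a7 = s[5] + s[3] + s[1] + (s[1] >> 1);
    const int b1 = (a7 >> 2) + a1, b3 = a3 + (a5 >> 2);
    const int b5 = (a3 >> 2) - a5, b7 = a7 - (a1 >> 2);

    d[0] = b0 + b7; d[1] = b2 + b5; d[2] = b4 + b3; d[3] = b6 + b1;
    d[4] = b6 - b1; d[5] = b4 - b3; d[6] = b2 - b5; d[7] = b0 - b7;
}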
#if HAVE_BIGENDIAN
#define GET_2PERM(ldv, stv, d)  \
    ldv = vec_lvsl(0, d);       \
    stv = vec_lvsr(8, d);
#define dstv_load(d)            \
    vec_u8 hv = vec_ld( 0, d ); \
    vec_u8 lv = vec_ld( 7, d);  \
    vec_u8 dstv   = vec_perm( hv, lv, (vec_u8)perm_ldv );
#define dest_unaligned_store(d)                                \
    vec_u8 edgehv;                                             \
    vec_u8 bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );  \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );       \
    lv    = vec_sel( lv, bodyv, edgelv );                      \
    vec_st( lv, 7, d );                                        \
    hv    = vec_ld( 0, d );                                    \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );              \
    hv    = vec_sel( hv, bodyv, edgehv );                      \
    vec_st( hv, 0, d )
#else
#define GET_2PERM(ldv, stv, d) {}
#define dstv_load(d) vec_u8 dstv = vec_vsx_ld(0, d)
#define dest_unaligned_store(d)                                \
    vec_u8 dst8 = vec_perm((vec_u8)idstsum8, dstv, vcprm(2,3,s2,s3));\
    vec_vsx_st(dst8, 0, d)
#endif /* HAVE_BIGENDIAN */
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                      \
    dstv_load(dest);                                          \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                  \
    vec_u16 dst16 = (vec_u16)VEC_MERGEH(zero_u8v, dstv);      \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);     \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);         \
    /* unaligned store */                                     \
    dest_unaligned_store(dest);                               \
}
static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
{
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv, perm_stv;
    GET_2PERM(perm_ldv, perm_stv, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;
 
    dct[0] += 32;   /* rounding for the >>6 at the end */

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);
    memset(dct, 0, 64 * sizeof(int16_t));
 
    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8(d0, d1, d2, d3, d4, d5, d6, d7);

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);
 
    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}
 
#if HAVE_BIGENDIAN
#define DST_LD vec_ld
#else
#define DST_LD vec_vsx_ld
#endif
static void h264_idct_dc_add_internal(uint8_t *dst, int16_t *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    vec_s32 v_dc32;
    int i;
    DECLARE_ALIGNED(16, int, dc);
    LOAD_ZERO;

    dc = (block[0] + 32) >> 6;
    block[0] = 0;
    v_dc32 = vec_lde(0, &dc);
    dc16 = VEC_SPLAT16((vec_s16)v_dc32, 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus  = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
 
        v0 = DST_LD(0, dst+0*stride);
        v1 = DST_LD(0, dst+1*stride);
        v2 = DST_LD(0, dst+2*stride);
        v3 = DST_LD(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        VEC_ST(v0, 0, dst+0*stride);
        VEC_ST(v1, 0, dst+1*stride);
        VEC_ST(v2, 0, dst+2*stride);
        VEC_ST(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}
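
/* The whole helper boils down to this per-pixel scalar operation over a
 * size x size block (illustrative sketch): */
static inline void idct_dc_add_ref(uint8_t *dst, int16_t *block, int stride, int size)
{
    const int dc = (block[0] + 32) >> 6;
    block[0] = 0;
    for (int y = 0; y < size; y++, dst += stride)
        for (int x = 0; x < size; x++) {
            const int v = dst[x] + dc;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}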
 
static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}
 
static void h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}
 
static void h264_idct_add16_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15 * 8])
{
    int i;
    for (i = 0; i < 16; i++) {
        int nnz = nnzc[ scan8[i] ];
        if (nnz) {
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}
 
static void h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset,
                                         int16_t *block, int stride,
                                         const uint8_t nnzc[15 * 8])
{
    int i;
    for (i = 0; i < 16; i++) {
        if(nnzc[ scan8[i] ]) h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}
 
static void h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15 * 8])
{
    int i;
    for (i = 0; i < 16; i += 4) {
        int nnz = nnzc[ scan8[i] ];
        if (nnz) {
            if(nnz==1 && block[i*16]) h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}
 
static void h264_idct_add8_altivec(uint8_t **dest, const int *block_offset,
                                   int16_t *block, int stride,
                                   const uint8_t nnzc[15 * 8])
{
    int i, j;
    for (j = 1; j < 3; j++) {
        for (i = j * 16; i < j * 16 + 4; i++) {
            if (nnzc[ scan8[i] ])
                h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if (block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
        }
    }
}
 
#define transpose4x16(r0, r1, r2, r3) { \
    register vec_u8 r4;                 \
    register vec_u8 r5;                 \
    register vec_u8 r6;                 \
    register vec_u8 r7;                 \
                                        \
    r4 = vec_mergeh(r0, r2);            \
    r5 = vec_mergel(r0, r2);            \
    r6 = vec_mergeh(r1, r3);            \
    r7 = vec_mergel(r1, r3);            \
                                        \
    r0 = vec_mergeh(r4, r6);            \
    r1 = vec_mergel(r4, r6);            \
    r2 = vec_mergeh(r5, r7);            \
    r3 = vec_mergel(r5, r7);            \
}
static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
 
    *dst_int                       = *src_int;
    *(dst_int +    int_dst_stride) = *(src_int + 1);
    *(dst_int +  2*int_dst_stride) = *(src_int + 2);
    *(dst_int +  3*int_dst_stride) = *(src_int + 3);
    *(dst_int +  4*int_dst_stride) = *(src_int + 4);
    *(dst_int +  5*int_dst_stride) = *(src_int + 5);
    *(dst_int +  6*int_dst_stride) = *(src_int + 6);
    *(dst_int +  7*int_dst_stride) = *(src_int + 7);
    *(dst_int +  8*int_dst_stride) = *(src_int + 8);
    *(dst_int +  9*int_dst_stride) = *(src_int + 9);
    *(dst_int + 10*int_dst_stride) = *(src_int + 10);
    *(dst_int + 11*int_dst_stride) = *(src_int + 11);
    *(dst_int + 12*int_dst_stride) = *(src_int + 12);
    *(dst_int + 13*int_dst_stride) = *(src_int + 13);
    *(dst_int + 14*int_dst_stride) = *(src_int + 14);
    *(dst_int + 15*int_dst_stride) = *(src_int + 15);
}
 
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);  \
    register vec_u8 r1  = unaligned_load(   src_stride, src);  \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);  \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);  \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);  \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);  \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);  \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);  \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);  \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);  \
                                                               \
    r8  = unaligned_load( 8*src_stride, src);                  \
    r9  = unaligned_load( 9*src_stride, src);                  \
    r10 = unaligned_load(10*src_stride, src);                  \
    r11 = unaligned_load(11*src_stride, src);                  \
    r12 = unaligned_load(12*src_stride, src);                  \
    r13 = unaligned_load(13*src_stride, src);                  \
                                                               \
    /* merge first pairs */                                    \
    r0 = vec_mergeh(r0, r8);                                   \
    r1 = vec_mergeh(r1, r9);                                   \
    r2 = vec_mergeh(r2, r10);                                  \
    r3 = vec_mergeh(r3, r11);                                  \
    r4 = vec_mergeh(r4, r12);                                  \
    r5 = vec_mergeh(r5, r13);                                  \
    r6 = vec_mergeh(r6, r14);                                  \
    r7 = vec_mergeh(r7, r15);                                  \
                                                               \
    /* merge second pairs */                                   \
    r8  = vec_mergeh(r0, r4);                                  \
    r9  = vec_mergel(r0, r4);                                  \
    r10 = vec_mergeh(r1, r5);                                  \
    r11 = vec_mergel(r1, r5);                                  \
    r12 = vec_mergeh(r2, r6);                                  \
    r13 = vec_mergel(r2, r6);                                  \
    r14 = vec_mergeh(r3, r7);                                  \
    r15 = vec_mergel(r3, r7);                                  \
                                                               \
    /* third merge; rows 3 and 7 are never needed */           \
    r0 = vec_mergeh(r8,  r12);                                 \
    r1 = vec_mergel(r8,  r12);                                 \
    r2 = vec_mergeh(r9,  r13);                                 \
    r4 = vec_mergeh(r10, r14);                                 \
    r5 = vec_mergel(r10, r14);                                 \
    r6 = vec_mergeh(r11, r15);                                 \
                                                               \
    /* final merge */                                          \
    r8  = vec_mergeh(r0, r4);                                  \
    r9  = vec_mergel(r0, r4);                                  \
    r10 = vec_mergeh(r1, r5);                                  \
    r11 = vec_mergel(r1, r5);                                  \
    r12 = vec_mergeh(r2, r6);                                  \
    r13 = vec_mergel(r2, r6);                                  \
}
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x - y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}
 
static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}
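
/* Per byte lane, the mask built above is the standard H.264 filter-edge
 * condition; as a scalar sketch (illustrative only; FFABS is FFmpeg's
 * absolute-value macro): */
static inline int deblock_mask_ref(int p0, int p1, int q0, int q1,
                                   int alpha, int beta)
{
    return FFABS(p0 - q0) < alpha &&
           FFABS(p1 - p0) < beta  &&
           FFABS(q1 - q0) < beta;
}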
 
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);      /* avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);          /* (p2 ^ avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /* (p2 + ((p0 + q0 + 1) >> 1)) >> 1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
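
/* Scalar view of h264_deblock_q1 (illustrative sketch): the xor/average trick
 * computes (p2 + ((p0 + q0 + 1) >> 1)) >> 1 without leaving 8-bit range, and
 * the result is clipped to [p1 - tc0, p1 + tc0]. */
static inline int deblock_q1_ref(int p0, int p1, int p2, int q0, int tc0)
{
    const int unclipped = (p2 + ((p0 + q0 + 1) >> 1)) >> 1;
    const int lo = p1 - tc0, hi = p1 + tc0;
    return unclipped < lo ? lo : unclipped > hi ? hi : unclipped;
}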
 
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                   \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); /* 160 */ \
                                                                          \
    register vec_u8 pq0bit = vec_xor(p0,q0);                              \
    register vec_u8 q1minus;                                              \
    register vec_u8 p0minus;                                              \
    register vec_u8 stage1;                                               \
    register vec_u8 stage2;                                               \
    register vec_u8 vec160;                                               \
    register vec_u8 delta;                                                \
    register vec_u8 deltaneg;                                             \
                                                                          \
    q1minus = vec_nor(q1, q1);                /* 255 - q1 */              \
    stage1 = vec_avg(p1, q1minus);            /* (p1 - q1 + 256) >> 1 */  \
    stage2 = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256) >> 2 */  \
    p0minus = vec_nor(p0, p0);                /* 255 - p0 */              \
    stage1 = vec_avg(q0, p0minus);            /* (q0 - p0 + 256) >> 1 */  \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                            \
    stage2 = vec_avg(stage2, pq0bit);                                     \
    stage2 = vec_adds(stage2, stage1);        /* 160 + delta */           \
    vec160 = vec_ld(0, &A0v);                                             \
    deltaneg = vec_subs(vec160, stage2);      /* -delta */                \
    delta = vec_subs(stage2, vec160);         /*  delta */                \
    deltaneg = vec_min(tc0masked, deltaneg);                              \
    delta = vec_min(tc0masked, delta);                                    \
    p0 = vec_subs(p0, deltaneg);                                          \
    q0 = vec_subs(q0, delta);                                             \
    p0 = vec_adds(p0, delta);                                             \
    q0 = vec_adds(q0, deltaneg);                                          \
}
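
/* The macro above uses unsigned saturating averages to approximate the
 * standard H.264 p0/q0 correction; as a scalar sketch (illustrative only):
 *     delta = Clip3(-tc0, tc0, ((q0 - p0) * 4 + (p1 - q1) + 4) >> 3)
 *     p0'   = clip_uint8(p0 + delta), q0' = clip_uint8(q0 - delta)        */
static inline void deblock_p0_q0_ref(uint8_t *p0, uint8_t *q0, int p1, int q1, int tc0)
{
    int delta = ((*q0 - *p0) * 4 + (p1 - q1) + 4) >> 3;
    if (delta < -tc0) delta = -tc0;
    if (delta >  tc0) delta =  tc0;
    const int np0 = *p0 + delta, nq0 = *q0 - delta;
    *p0 = np0 < 0 ? 0 : np0 > 255 ? 255 : np0;
    *q0 = nq0 < 0 ? 0 : nq0 > 255 ? 255 : nq0;
}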
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {  \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                  \
    register vec_u8 alphavec;                                                      \
    register vec_u8 betavec;                                                       \
    register vec_u8 mask;                                                          \
    register vec_u8 p1mask;                                                        \
    register vec_u8 q1mask;                                                        \
    register vector signed char tc0vec;                                            \
    register vec_u8 finaltc0;                                                      \
    register vec_u8 tc0masked;                                                     \
    register vec_u8 newp1;                                                         \
    register vec_u8 newq1;                                                         \
                                                                                   \
    temp[0] = alpha;                                                               \
    temp[1] = beta;                                                                \
    alphavec = vec_ld(0, temp);                                                    \
    betavec = vec_splat(alphavec, 0x1);                                            \
    alphavec = vec_splat(alphavec, 0x0);                                           \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /* if in block */ \
                                                                                   \
    AV_COPY32(temp, tc0);                                                          \
    tc0vec = vec_ld(0, (signed char*)temp);                                        \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                           \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                           \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */ \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                   /* tc = tc0 */     \
                                                                                   \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                     \
    p1mask = vec_and(p1mask, mask);                     /* if |p2 - p0| < beta */  \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                   \
    finaltc0 = vec_sub(finaltc0, p1mask);               /* tc++ */                 \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                            \
                                                                                   \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                     \
    q1mask = vec_and(q1mask, mask);                     /* if |q2 - q0| < beta */  \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                   \
    finaltc0 = vec_sub(finaltc0, q1mask);               /* tc++ */                 \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                            \
                                                                                   \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                  \
    p1 = newp1;                                                                    \
    q1 = newq1;                                                                    \
}
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride,
                                            int alpha, int beta, int8_t *tc0)
{
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
 
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
 
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}
 
static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride,
                                            int alpha, int beta, int8_t *tc0)
{
    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
 
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}
 
static void weight_h264_W_altivec(uint8_t *block, int stride, int height,
                                  int log2_denom, int weight, int offset, int w)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if (log2_denom) offset += 1 << (log2_denom - 1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
#if !HAVE_BIGENDIAN
    vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
#endif
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y = 0; y < height; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)VEC_MERGEH(zero_u8v, vblock);
        v1 = (vec_s16)VEC_MERGEL(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}
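
/* Scalar reference for the loop above (illustrative sketch): explicit-weight
 * prediction per pixel, equivalent to the pre-shifted offset computed above,
 * since (pix*weight + (offset<<log2_denom) + round) >> log2_denom equals
 * ((pix*weight + round) >> log2_denom) + offset: */
static inline int weight_pixel_ref(int pix, int log2_denom, int weight, int offset)
{
    const int round = log2_denom ? 1 << (log2_denom - 1) : 0;
    const int v = ((pix * weight + round) >> log2_denom) + offset;
    return v < 0 ? 0 : v > 255 ? 255 : v;
}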
 
static void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
                                    int log2_denom, int weightd, int weights, int offset, int w)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom + 1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
#if !HAVE_BIGENDIAN
    vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
#endif
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y = 0; y < height; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)VEC_MERGEH(zero_u8v, vdst);
        v1 = (vec_s16)VEC_MERGEL(zero_u8v, vdst);
        v2 = (vec_s16)VEC_MERGEH(zero_u8v, vsrc);
        v3 = (vec_s16)VEC_MERGEL(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}
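
/* Scalar reference for the biweighted loop above (illustrative sketch), using
 * the same rounding constant the function prepares up front:
 *     offset' = ((offset + 1) | 1) << log2_denom
 *     pix     = clip_uint8((dst*weightd + src*weights + offset') >> (log2_denom + 1)) */
static inline int biweight_pixel_ref(int dstpix, int srcpix, int log2_denom,
                                     int weightd, int weights, int offset)
{
    const int off = ((offset + 1) | 1) << log2_denom;
    const int v = (dstpix * weightd + srcpix * weights + off) >> (log2_denom + 1);
    return v < 0 ? 0 : v > 255 ? 255 : v;
}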
 
#define H264_WEIGHT(W) \
static void weight_h264_pixels ## W ## _altivec(uint8_t *block, ptrdiff_t stride, int height, \
                                                int log2_denom, int weight, int offset) \
{ \
    weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
}\
static void biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, \
                                                  int log2_denom, int weightd, int weights, int offset) \
{ \
    biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
}

H264_WEIGHT(16)
H264_WEIGHT( 8)
av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
                                 const int chroma_format_idc)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    if (bit_depth == 8) {
        c->h264_idct_add = h264_idct_add_altivec;
        if (chroma_format_idc <= 1)
            c->h264_idct_add8 = h264_idct_add8_altivec;
        c->h264_idct_add16      = h264_idct_add16_altivec;
        c->h264_idct_add16intra = h264_idct_add16intra_altivec;
        c->h264_idct_dc_add     = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add    = h264_idct8_dc_add_altivec;
        c->h264_idct8_add       = h264_idct8_add_altivec;
        c->h264_idct8_add4      = h264_idct8_add4_altivec;

        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

        c->weight_h264_pixels_tab[0]   = weight_h264_pixels16_altivec;
        c->weight_h264_pixels_tab[1]   = weight_h264_pixels8_altivec;
        c->biweight_h264_pixels_tab[0] = biweight_h264_pixels16_altivec;
        c->biweight_h264_pixels_tab[1] = biweight_h264_pixels8_altivec;
    }
#endif /* HAVE_ALTIVEC */
}
 