/* vertical prediction: replicate the row above the block into all 8 rows */
static void intra_predict_vert_8x8_msa(uint8_t *src, uint8_t *dst,
                                       int32_t dst_stride)
{
    uint32_t row;
    uint32_t src_data1, src_data2;

    src_data1 = LW(src);
    src_data2 = LW(src + 4);

    for (row = 8; row--;) {
        SW(src_data1, dst);
        SW(src_data2, (dst + 4));
        dst += dst_stride;
    }
}
static void intra_predict_vert_16x16_msa(uint8_t *src, uint8_t *dst,
                                         int32_t dst_stride)
{
    uint32_t row;
    v16u8 src0;

    src0 = LD_UB(src);

    for (row = 16; row--;) {
        ST_UB(src0, dst);
        dst += dst_stride;
    }
}
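
/*
 * Scalar equivalent of the two vertical predictors above (a sketch, not
 * part of the original file; the standalone helpers added below need only
 * <stdint.h>). LW/SW and LD_UB/ST_UB are FFmpeg's MSA load/store macros;
 * all the vector code does is copy the row above the block into every
 * output row.
 */
static void intra_predict_vert_ref(const uint8_t *top, uint8_t *dst,
                                   int32_t dst_stride, int32_t width,
                                   int32_t height)
{
    int32_t x, y;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            dst[x] = top[x];
        dst += dst_stride;
    }
}
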
 
static void intra_predict_horiz_8x8_msa(uint8_t *src, int32_t src_stride,
                                        uint8_t *dst, int32_t dst_stride)
{
    uint64_t out0, out1, out2, out3, out4, out5, out6, out7;

    out0 = src[0 * src_stride] * 0x0101010101010101;
    out1 = src[1 * src_stride] * 0x0101010101010101;
    out2 = src[2 * src_stride] * 0x0101010101010101;
    out3 = src[3 * src_stride] * 0x0101010101010101;
    out4 = src[4 * src_stride] * 0x0101010101010101;
    out5 = src[5 * src_stride] * 0x0101010101010101;
    out6 = src[6 * src_stride] * 0x0101010101010101;
    out7 = src[7 * src_stride] * 0x0101010101010101;

    SD4(out0, out1, out2, out3, dst, dst_stride);
    dst += (4 * dst_stride);
    SD4(out4, out5, out6, out7, dst, dst_stride);
}
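
/*
 * Why the 0x0101010101010101 multiplier works (illustrative, not part of
 * the original file): multiplying a byte b by a constant with one set bit
 * per byte lane replicates b into all eight lanes, so a single 64-bit
 * store paints a whole 8-pixel row, e.g. 0x5A -> 0x5A5A5A5A5A5A5A5A.
 */
static inline uint64_t splat_byte_u64(uint8_t b)
{
    return b * 0x0101010101010101ULL;
}
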
 
static void intra_predict_horiz_16x16_msa(uint8_t *src, int32_t src_stride,
                                          uint8_t *dst, int32_t dst_stride)
{
    uint32_t row;
    uint8_t inp0, inp1, inp2, inp3;
    v16u8 src0, src1, src2, src3;

    for (row = 4; row--;) {
        inp0 = src[0 * src_stride];
        inp1 = src[1 * src_stride];
        inp2 = src[2 * src_stride];
        inp3 = src[3 * src_stride];
        src += (4 * src_stride);

        src0 = (v16u8) __msa_fill_b(inp0);
        src1 = (v16u8) __msa_fill_b(inp1);
        src2 = (v16u8) __msa_fill_b(inp2);
        src3 = (v16u8) __msa_fill_b(inp3);

        ST_UB4(src0, src1, src2, src3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}
 
static void intra_predict_dc_8x8_msa(uint8_t *src_top, uint8_t *src_left,
                                     int32_t src_stride_left,
                                     uint8_t *dst, int32_t dst_stride,
                                     uint8_t is_above, uint8_t is_left)
{
    uint32_t row;
    uint32_t out, addition = 0;
    v16u8 src_above, store;
    v8u16 sum_above;
    v4u32 sum_top;
    v2u64 sum;

    if (is_left && is_above) {
        src_above = LD_UB(src_top);

        /* only the low doubleword (the 8 top pixels) feeds the sum */
        sum_above = __msa_hadd_u_h(src_above, src_above);
        sum_top = __msa_hadd_u_w(sum_above, sum_above);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        addition = __msa_copy_u_w((v4i32) sum, 0);

        for (row = 0; row < 8; row++) {
            addition += src_left[row * src_stride_left];
        }

        addition = (addition + 8) >> 4;
        store = (v16u8) __msa_fill_b(addition);
    } else if (is_left) {
        for (row = 0; row < 8; row++) {
            addition += src_left[row * src_stride_left];
        }

        addition = (addition + 4) >> 3;
        store = (v16u8) __msa_fill_b(addition);
    } else if (is_above) {
        src_above = LD_UB(src_top);

        sum_above = __msa_hadd_u_h(src_above, src_above);
        sum_top = __msa_hadd_u_w(sum_above, sum_above);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        sum = (v2u64) __msa_srari_d((v2i64) sum, 3);
        store = (v16u8) __msa_splati_b((v16i8) sum, 0);
    } else {
        store = (v16u8) __msa_ldi_b(128);
    }

    out = __msa_copy_u_w((v4i32) store, 0);

    for (row = 8; row--;) {
        SW(out, dst);
        SW(out, (dst + 4));
        dst += dst_stride;
    }
}
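
/*
 * Scalar reference for the availability-dependent DC above (a sketch, not
 * part of the original file): sum whatever neighbours exist, round by half
 * the sample count, and fall back to a flat 128 when neither edge exists.
 */
static uint8_t intra_predict_dc_8x8_ref(const uint8_t *top,
                                        const uint8_t *left,
                                        int32_t left_stride,
                                        uint8_t has_above, uint8_t has_left)
{
    uint32_t i, sum = 0;

    if (has_above)
        for (i = 0; i < 8; i++)
            sum += top[i];
    if (has_left)
        for (i = 0; i < 8; i++)
            sum += left[i * left_stride];

    if (has_above && has_left)
        return (sum + 8) >> 4;      /* 16 samples */
    if (has_above || has_left)
        return (sum + 4) >> 3;      /* 8 samples */
    return 128;
}
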
 
static void intra_predict_dc_16x16_msa(uint8_t *src_top, uint8_t *src_left,
                                       int32_t src_stride_left,
                                       uint8_t *dst, int32_t dst_stride,
                                       uint8_t is_above, uint8_t is_left)
{
    uint32_t row;
    uint32_t addition = 0;
    v16u8 src_above, store;
    v8u16 sum_above;
    v4u32 sum_top;
    v2u64 sum;

    if (is_left && is_above) {
        src_above = LD_UB(src_top);

        sum_above = __msa_hadd_u_h(src_above, src_above);
        sum_top = __msa_hadd_u_w(sum_above, sum_above);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        /* fold the two 64-bit lane sums into one 16-pixel total */
        sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        addition = __msa_copy_u_w((v4i32) sum, 0);

        for (row = 0; row < 16; row++) {
            addition += src_left[row * src_stride_left];
        }

        addition = (addition + 16) >> 5;
        store = (v16u8) __msa_fill_b(addition);
    } else if (is_left) {
        for (row = 0; row < 16; row++) {
            addition += src_left[row * src_stride_left];
        }

        addition = (addition + 8) >> 4;
        store = (v16u8) __msa_fill_b(addition);
    } else if (is_above) {
        src_above = LD_UB(src_top);

        sum_above = __msa_hadd_u_h(src_above, src_above);
        sum_top = __msa_hadd_u_w(sum_above, sum_above);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
        sum = __msa_hadd_u_d(sum_top, sum_top);
        sum = (v2u64) __msa_srari_d((v2i64) sum, 4);
        store = (v16u8) __msa_splati_b((v16i8) sum, 0);
    } else {
        store = (v16u8) __msa_ldi_b(128);
    }

    for (row = 16; row--;) {
        ST_UB(store, dst);
        dst += dst_stride;
    }
}
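
/*
 * Note on the pckev_w/hadd_u_d pair above: three hadd steps leave one
 * partial sum per 64-bit lane (pixels 0..7 and 8..15); packing the even
 * words and adding once more folds both lanes into the 16-pixel total.
 * Scalar equivalent of the whole chain (sketch):
 */
static inline uint32_t sum_row_16(const uint8_t *p)
{
    uint32_t i, sum = 0;

    for (i = 0; i < 16; i++)
        sum += p[i];
    return sum;
}
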
 
#define INTRA_PREDICT_VALDC_8X8_MSA(val)                         \
static void intra_predict_##val##dc_8x8_msa(uint8_t *dst,        \
                                            int32_t dst_stride)  \
{                                                                \
    uint32_t row, out;                                           \
    v16i8 store;                                                 \
                                                                 \
    store = __msa_ldi_b(val);                                    \
    out = __msa_copy_u_w((v4i32) store, 0);                      \
                                                                 \
    for (row = 8; row--;) {                                      \
        SW(out, dst);                                            \
        SW(out, (dst + 4));                                      \
        dst += dst_stride;                                       \
    }                                                            \
}

#define INTRA_PREDICT_VALDC_16X16_MSA(val)                         \
static void intra_predict_##val##dc_16x16_msa(uint8_t *dst,        \
                                              int32_t dst_stride)  \
{                                                                  \
    uint32_t row;                                                  \
    v16u8 store;                                                   \
                                                                   \
    store = (v16u8) __msa_ldi_b(val);                              \
                                                                   \
    for (row = 16; row--;) {                                       \
        ST_UB(store, dst);                                         \
        dst += dst_stride;                                         \
    }                                                              \
}
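
/*
 * VP8 defines the row above the frame as 127 and the column to its left
 * as 129, which is what these value-DC helpers serve. Judging from the
 * ff_vp8_* wrappers at the end of the file, the macros are presumably
 * instantiated as:
 */
INTRA_PREDICT_VALDC_8X8_MSA(127);
INTRA_PREDICT_VALDC_8X8_MSA(129);
INTRA_PREDICT_VALDC_16X16_MSA(127);
INTRA_PREDICT_VALDC_16X16_MSA(129);
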
static void intra_predict_plane_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lpcnt;
    int32_t res, res0, res1, res2, res3;
    uint64_t out0, out1;
    v16i8 shf_mask = { 3, 5, 2, 6, 1, 7, 0, 8, 3, 5, 2, 6, 1, 7, 0, 8 };
    v8i16 short_multiplier = { 1, 2, 3, 4, 1, 2, 3, 4 };
    v4i32 int_multiplier = { 0, 1, 2, 3 };
    v16u8 src_top;
    v8i16 vec9, vec10, vec11;
    v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8;
    v2i64 sum;

    src_top = LD_UB(src - (stride + 1));
    src_top = (v16u8) __msa_vshf_b(shf_mask, (v16i8) src_top, (v16i8) src_top);

    /* horizontal gradient: pairwise (right - left) differences, weights 1..4 */
    vec9 = __msa_hsub_u_h(src_top, src_top);
    vec9 *= short_multiplier;
    vec8 = __msa_hadd_s_w(vec9, vec9);
    sum = __msa_hadd_s_d(vec8, vec8);

    res0 = __msa_copy_s_w((v4i32) sum, 0);

    /* vertical gradient from the left column, weights 1..4 */
    res1 = (src[4 * stride - 1] - src[2 * stride - 1]) +
        2 * (src[5 * stride - 1] - src[stride - 1]) +
        3 * (src[6 * stride - 1] - src[-1]) +
        4 * (src[7 * stride - 1] - src[-stride - 1]);

    res0 *= 17;
    res1 *= 17;
    res0 = (res0 + 16) >> 5;
    res1 = (res1 + 16) >> 5;

    res3 = 3 * (res0 + res1);
    res2 = 16 * (src[7 * stride - 1] + src[-stride + 7] + 1);
    res = res2 - res3;

    vec8 = __msa_fill_w(res0);
    vec4 = __msa_fill_w(res);
    vec2 = __msa_fill_w(res1);
    vec5 = vec8 * int_multiplier;

    for (lpcnt = 4; lpcnt--;) {
        /* build two rows of (res + x*res0 + y*res1); lines elided here */
        SRA_4V(vec0, vec1, vec6, vec7, 5);
        /* pack to 16-bit and clip to [0,255] (elided), then pack to bytes */
        PCKEV_B2_SH(vec10, vec10, vec11, vec11, vec10, vec11);

        out0 = __msa_copy_s_d((v2i64) vec10, 0);
        out1 = __msa_copy_s_d((v2i64) vec11, 0);
        SD(out0, src);
        src += stride;
        SD(out1, src);
        src += stride;
    }
}
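
/*
 * Scalar reference for the plane mode above (H.264 8x8 chroma plane
 * prediction; a sketch, not part of the original file). h/v are the
 * weighted edge gradients the vector code accumulates into res0/res1,
 * and a + 16 - 3*(b + c) is the base term held in vec4.
 */
static void intra_predict_plane_8x8_ref(uint8_t *src, int32_t stride)
{
    int32_t x, y, i, p;
    int32_t h = 0, v = 0, a, b, c;

    for (i = 0; i < 4; i++) {
        h += (i + 1) * (src[-stride + 4 + i] - src[-stride + 2 - i]);
        v += (i + 1) * (src[(4 + i) * stride - 1] - src[(2 - i) * stride - 1]);
    }
    b = (17 * h + 16) >> 5;
    c = (17 * v + 16) >> 5;
    a = 16 * (src[7 * stride - 1] + src[-stride + 7]);

    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            p = (a + b * (x - 3) + c * (y - 3) + 16) >> 5;
            src[y * stride + x] = p < 0 ? 0 : (p > 255 ? 255 : p);
        }
    }
}
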
 
static void intra_predict_plane_16x16_msa(uint8_t *src, int32_t stride)
{
    uint8_t lpcnt;
    int32_t res0, res1, res2, res3;
    uint64_t load0, load1;
    v16i8 shf_mask = { 7, 8, 6, 9, 5, 10, 4, 11, 3, 12, 2, 13, 1, 14, 0, 15 };
    v8i16 short_multiplier = { 1, 2, 3, 4, 5, 6, 7, 8 };
    v4i32 int_multiplier = { 0, 1, 2, 3 };
    v16u8 src_top = { 0 };
    v8i16 vec9;
    v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, res_add;

    load0 = LD(src - (stride + 1));
    load1 = LD(src - (stride + 1) + 9);

    INSERT_D2_UB(load0, load1, src_top);
    src_top = (v16u8) __msa_vshf_b(shf_mask, (v16i8) src_top, (v16i8) src_top);

    vec9 = __msa_hsub_u_h(src_top, src_top);
    vec9 *= short_multiplier;
    vec8 = __msa_hadd_s_w(vec9, vec9);
    res_add = (v4i32) __msa_hadd_s_d(vec8, vec8);

    res0 = __msa_copy_s_w(res_add, 0) + __msa_copy_s_w(res_add, 2);

    res1 = (src[8 * stride - 1] - src[6 * stride - 1]) +
        2 * (src[9 * stride - 1] - src[5 * stride - 1]) +
        3 * (src[10 * stride - 1] - src[4 * stride - 1]) +
        4 * (src[11 * stride - 1] - src[3 * stride - 1]) +
        5 * (src[12 * stride - 1] - src[2 * stride - 1]) +
        6 * (src[13 * stride - 1] - src[stride - 1]) +
        7 * (src[14 * stride - 1] - src[-1]) +
        8 * (src[15 * stride - 1] - src[-1 * stride - 1]);

    res0 *= 5;
    res1 *= 5;
    res0 = (res0 + 32) >> 6;
    res1 = (res1 + 32) >> 6;

    res3 = 7 * (res0 + res1);
    res2 = 16 * (src[15 * stride - 1] + src[-stride + 15] + 1);
    res2 -= res3;   /* base term: a + 16 - 7*(b + c) */

    vec8 = __msa_fill_w(res0);
    vec4 = __msa_fill_w(res2);
    vec5 = __msa_fill_w(res1);
    vec7 = vec8 * int_multiplier;

    for (lpcnt = 16; lpcnt--;) {
        /* per-row linear term and remaining x offsets; lines elided here */
        SRA_4V(vec0, vec1, vec2, vec3, 5);
        /* pack, clip to [0,255] and store one 16-pixel row (elided) */
    }
}
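
/*
 * The 16x16 luma plane mode follows the same scheme as the 8x8 case with
 * 8 gradient taps, scale 5 instead of 17, and centre offset 7. Sketch of
 * the constant derivation only (not part of the original file):
 */
static void intra_predict_plane_16x16_consts(const uint8_t *src,
                                             int32_t stride,
                                             int32_t *b, int32_t *c,
                                             int32_t *a)
{
    int32_t i, h = 0, v = 0;

    for (i = 0; i < 8; i++) {
        h += (i + 1) * (src[-stride + 8 + i] - src[-stride + 6 - i]);
        v += (i + 1) * (src[(8 + i) * stride - 1] - src[(6 - i) * stride - 1]);
    }
    *b = (5 * h + 32) >> 6;
    *c = (5 * v + 32) >> 6;
    *a = 16 * (src[15 * stride - 1] + src[-stride + 15]);
    /* pred(x, y) = clip((*a + *b * (x - 7) + *c * (y - 7) + 16) >> 5) */
}
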
 
static void intra_predict_dc_4blk_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0, src1, src3, src2 = 0;
    uint32_t out0, out1, out2, out3;
    v16u8 src_top;
    v8u16 add;
    v4u32 sum;

    src_top = LD_UB(src - stride);
    add = __msa_hadd_u_h((v16u8) src_top, (v16u8) src_top);
    sum = __msa_hadd_u_w(add, add);
    src0 = __msa_copy_u_w((v4i32) sum, 0);
    src1 = __msa_copy_u_w((v4i32) sum, 1);

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src0 += src[lp_cnt * stride - 1];
        src2 += src[(4 + lp_cnt) * stride - 1];
    }

    src0 = (src0 + 4) >> 3;
    src3 = (src1 + src2 + 4) >> 3;
    src1 = (src1 + 2) >> 2;
    src2 = (src2 + 2) >> 2;
    out0 = src0 * 0x01010101;
    out1 = src1 * 0x01010101;
    out2 = src2 * 0x01010101;
    out3 = src3 * 0x01010101;

    for (lp_cnt = 4; lp_cnt--;) {
        SW(out0, src);
        SW(out1, (src + 4));
        SW(out2, (src + 4 * stride));
        SW(out3, (src + 4 * stride + 4));
        src += stride;
    }
}
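
/*
 * Scalar view of the four DC values computed above (H.264 pred8x8 DC; a
 * sketch, not part of the original file): each 4x4 quadrant gets its own
 * DC, and the corner blocks average both of their neighbouring edges.
 */
static void intra_predict_dc_4blk_ref(uint8_t *src, int32_t stride)
{
    int32_t i, x, y, tl, tr, bl, br;
    int32_t top0 = 0, top1 = 0, left0 = 0, left1 = 0;

    for (i = 0; i < 4; i++) {
        top0  += src[i - stride];
        top1  += src[4 + i - stride];
        left0 += src[i * stride - 1];
        left1 += src[(4 + i) * stride - 1];
    }
    tl = (top0 + left0 + 4) >> 3;
    tr = (top1 + 2) >> 2;
    bl = (left1 + 2) >> 2;
    br = (top1 + left1 + 4) >> 3;

    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            src[y * stride + x] = y < 4 ? (x < 4 ? tl : tr)
                                        : (x < 4 ? bl : br);
}
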
 
static void intra_predict_hor_dc_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0 = 0, src1 = 0;
    uint64_t out0, out1;

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src0 += src[lp_cnt * stride - 1];
        src1 += src[(4 + lp_cnt) * stride - 1];
    }

    src0 = (src0 + 2) >> 2;
    src1 = (src1 + 2) >> 2;
    out0 = src0 * 0x0101010101010101;
    out1 = src1 * 0x0101010101010101;

    for (lp_cnt = 4; lp_cnt--;) {
        SD(out0, src);
        SD(out1, (src + 4 * stride));
        src += stride;
    }
}
 
static void intra_predict_vert_dc_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t out0 = 0, out1 = 0;
    v16u8 src_top;
    v8u16 add;
    v4u32 sum;
    v4i32 res0, res1;

    src_top = LD_UB(src - stride);
    add = __msa_hadd_u_h(src_top, src_top);
    sum = __msa_hadd_u_w(add, add);
    sum = (v4u32) __msa_srari_w((v4i32) sum, 2);
    res0 = (v4i32) __msa_splati_b((v16i8) sum, 0);
    res1 = (v4i32) __msa_splati_b((v16i8) sum, 4);
    out0 = __msa_copy_u_w(res0, 0);
    out1 = __msa_copy_u_w(res1, 0);

    for (lp_cnt = 8; lp_cnt--;) {
        SW(out0, src);
        SW(out1, (src + 4));
        src += stride;
    }
}
 
static void intra_predict_mad_cow_dc_l0t_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0, src1, src2 = 0;
    uint32_t out0, out1, out2;
    v16u8 src_top;
    v8u16 add;
    v4u32 sum;

    src_top = LD_UB(src - stride);
    add = __msa_hadd_u_h(src_top, src_top);
    sum = __msa_hadd_u_w(add, add);
    src0 = __msa_copy_u_w((v4i32) sum, 0);
    src1 = __msa_copy_u_w((v4i32) sum, 1);

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src2 += src[lp_cnt * stride - 1];
    }

    src2 = (src0 + src2 + 4) >> 3;
    src0 = (src0 + 2) >> 2;
    src1 = (src1 + 2) >> 2;
    out0 = src0 * 0x01010101;
    out1 = src1 * 0x01010101;
    out2 = src2 * 0x01010101;

    for (lp_cnt = 4; lp_cnt--;) {
        SW(out2, src);
        SW(out1, (src + 4));
        SW(out0, src + stride * 4);
        SW(out1, src + stride * 4 + 4);
        src += stride;
    }
}
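
/*
 * The mad_cow variants appear to mirror the C fallbacks in h264pred.c:
 * a whole-block DC pass whose partially-available 4x4 quadrant is then
 * overwritten. For l0t, top DC is applied everywhere and the top-left
 * 4x4 is redone with left+top, which matches the out2/out1/out0 store
 * pattern above. Scalar sketch (not part of the original file):
 */
static void mad_cow_dc_l0t_ref(uint8_t *src, int32_t stride)
{
    int32_t i, x, y, tl, tr, rest;
    int32_t top0 = 0, top1 = 0, left0 = 0;

    for (i = 0; i < 4; i++) {
        top0  += src[i - stride];
        top1  += src[4 + i - stride];
        left0 += src[i * stride - 1];
    }
    tl = (top0 + left0 + 4) >> 3;   /* src2: full DC for the corner */
    tr = (top1 + 2) >> 2;           /* src1: top-only DC */
    rest = (top0 + 2) >> 2;         /* src0: top-only DC, bottom-left */

    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            src[y * stride + x] = y < 4 ? (x < 4 ? tl : tr)
                                        : (x < 4 ? rest : tr);
}
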
 
static void intra_predict_mad_cow_dc_0lt_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0, src1, src2 = 0, src3;
    uint32_t out0, out1, out2, out3;
    v16u8 src_top;
    v8u16 add;
    v4u32 sum;

    src_top = LD_UB(src - stride);
    add = __msa_hadd_u_h(src_top, src_top);
    sum = __msa_hadd_u_w(add, add);
    src0 = __msa_copy_u_w((v4i32) sum, 0);
    src1 = __msa_copy_u_w((v4i32) sum, 1);

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src2 += src[(4 + lp_cnt) * stride - 1];
    }

    src0 = (src0 + 2) >> 2;
    src3 = (src1 + src2 + 4) >> 3;
    src1 = (src1 + 2) >> 2;
    src2 = (src2 + 2) >> 2;

    out0 = src0 * 0x01010101;
    out1 = src1 * 0x01010101;
    out2 = src2 * 0x01010101;
    out3 = src3 * 0x01010101;

    for (lp_cnt = 4; lp_cnt--;) {
        SW(out0, src);
        SW(out1, (src + 4));
        SW(out2, src + stride * 4);
        SW(out3, src + stride * 4 + 4);
        src += stride;
    }
}
 
static void intra_predict_mad_cow_dc_l00_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0 = 0;
    uint64_t out0, out1;

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src0 += src[lp_cnt * stride - 1];
    }

    src0 = (src0 + 2) >> 2;
    out0 = src0 * 0x0101010101010101;
    out1 = 0x8080808080808080;

    for (lp_cnt = 4; lp_cnt--;) {
        SD(out0, src);
        SD(out1, src + stride * 4);
        src += stride;
    }
}
 
static void intra_predict_mad_cow_dc_0l0_8x8_msa(uint8_t *src, int32_t stride)
{
    uint8_t lp_cnt;
    uint32_t src0 = 0;
    uint64_t out0, out1;

    for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
        src0 += src[(4 + lp_cnt) * stride - 1];
    }

    src0 = (src0 + 2) >> 2;

    out0 = 0x8080808080808080;
    out1 = src0 * 0x0101010101010101;

    for (lp_cnt = 4; lp_cnt--;) {
        SD(out0, src);
        SD(out1, src + stride * 4);
        src += stride;
    }
}
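
/*
 * l00 and 0l0 cover the cases where only the top or only the bottom half
 * of the left edge is available: that half gets a left-DC splat and the
 * other half the neutral 128 (hence the 0x8080808080808080 constant);
 * 0l0 is simply the mirror image of l00. Scalar sketch of l00 (not part
 * of the original file):
 */
static void mad_cow_dc_l00_ref(uint8_t *src, int32_t stride)
{
    int32_t i, y, left0 = 0;
    uint8_t dc;

    for (i = 0; i < 4; i++)
        left0 += src[i * stride - 1];
    dc = (left0 + 2) >> 2;

    for (y = 0; y < 8; y++, src += stride)
        for (i = 0; i < 8; i++)
            src[i] = y < 4 ? dc : 128;
}
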
 
void ff_vp8_pred8x8_127_dc_8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_127dc_8x8_msa(src, stride);
}

void ff_vp8_pred8x8_129_dc_8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_129dc_8x8_msa(src, stride);
}

void ff_vp8_pred16x16_127_dc_8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_127dc_16x16_msa(src, stride);
}

void ff_vp8_pred16x16_129_dc_8_msa(uint8_t *src, ptrdiff_t stride)
{
    intra_predict_129dc_16x16_msa(src, stride);
}
 