#ifndef AVCODEC_DSPUTIL_H
#define AVCODEC_DSPUTIL_H

#include "libavutil/intreadwrite.h"
#include "avcodec.h"


typedef short DCTELEM;

void ff_fdct_ifast (DCTELEM *data);
void ff_fdct_ifast248 (DCTELEM *data);
void ff_jpeg_fdct_islow_8(DCTELEM *data);
void ff_jpeg_fdct_islow_10(DCTELEM *data);
void ff_fdct248_islow_8(DCTELEM *data);
void ff_fdct248_islow_10(DCTELEM *data);

void ff_j_rev_dct (DCTELEM *data);
void ff_j_rev_dct4 (DCTELEM *data);
void ff_j_rev_dct2 (DCTELEM *data);
void ff_j_rev_dct1 (DCTELEM *data);
void ff_wmv2_idct_c(DCTELEM *data);

void ff_fdct_mmx(DCTELEM *block);
void ff_fdct_mmx2(DCTELEM *block);
void ff_fdct_sse2(DCTELEM *block);

#define H264_IDCT(depth) \
void ff_h264_idct8_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\
void ff_h264_idct_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\
void ff_h264_idct8_dc_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\
void ff_h264_idct_dc_add_ ## depth ## _c(uint8_t *dst, DCTELEM *block, int stride);\
void ff_h264_idct_add16_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct_add16intra_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct8_add4_ ## depth ## _c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct_add8_422_ ## depth ## _c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_idct_add8_ ## depth ## _c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);\
void ff_h264_luma_dc_dequant_idct_ ## depth ## _c(DCTELEM *output, DCTELEM *input, int qmul);\
void ff_h264_chroma422_dc_dequant_idct_ ## depth ## _c(DCTELEM *block, int qmul);\
void ff_h264_chroma_dc_dequant_idct_ ## depth ## _c(DCTELEM *block, int qmul);

H264_IDCT( 8)
H264_IDCT( 9)
H264_IDCT(10)
H264_IDCT(12)
H264_IDCT(14)
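
/*
 * Illustration (added, not part of the original declarations): each
 * H264_IDCT(depth) line above expands, via token pasting, to one set of
 * per-bit-depth prototypes. H264_IDCT(8), for example, declares among others:
 *
 *     void ff_h264_idct_add_8_c(uint8_t *dst, DCTELEM *block, int stride);
 *     void ff_h264_idct8_add_8_c(uint8_t *dst, DCTELEM *block, int stride);
 */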

void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp);
void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);

extern const uint8_t ff_alternate_horizontal_scan[64];
extern const uint8_t ff_alternate_vertical_scan[64];
extern const uint8_t ff_zigzag_direct[64];
extern const uint8_t ff_zigzag248_direct[64];

#define MAX_NEG_CROP 1024

extern uint32_t ff_squareTbl[512];
extern uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP];

#define PUTAVG_PIXELS(depth)\
void ff_put_pixels8x8_ ## depth ## _c(uint8_t *dst, uint8_t *src, int stride);\
void ff_avg_pixels8x8_ ## depth ## _c(uint8_t *dst, uint8_t *src, int stride);\
void ff_put_pixels16x16_ ## depth ## _c(uint8_t *dst, uint8_t *src, int stride);\
void ff_avg_pixels16x16_ ## depth ## _c(uint8_t *dst, uint8_t *src, int stride);

PUTAVG_PIXELS( 8)
PUTAVG_PIXELS( 9)
PUTAVG_PIXELS(10)
PUTAVG_PIXELS(12)
PUTAVG_PIXELS(14)

#define ff_put_pixels8x8_c ff_put_pixels8x8_8_c
#define ff_avg_pixels8x8_c ff_avg_pixels8x8_8_c
#define ff_put_pixels16x16_c ff_put_pixels16x16_8_c
#define ff_avg_pixels16x16_c ff_avg_pixels16x16_8_c

void ff_put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride);
void ff_avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride);
void ff_put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride);
void ff_avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride);

void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
void ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
void ff_shrink88(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);

void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
              int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);

/* motion compensation function pointer types */
typedef void (*op_pixels_func)(uint8_t *block, const uint8_t *pixels, int line_size, int h);
typedef void (*tpel_mc_func)(uint8_t *block, const uint8_t *pixels, int line_size, int w, int h);
typedef void (*qpel_mc_func)(uint8_t *dst, uint8_t *src, int stride);
typedef void (*h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y);

typedef void (*op_fill_func)(uint8_t *block, uint8_t value, int line_size, int h);

#define DEF_OLD_QPEL(name)\
void ff_put_        ## name (uint8_t *dst, uint8_t *src, int stride);\
void ff_put_no_rnd_ ## name (uint8_t *dst, uint8_t *src, int stride);\
void ff_avg_        ## name (uint8_t *dst, uint8_t *src, int stride);

DEF_OLD_QPEL(qpel16_mc11_old_c)
DEF_OLD_QPEL(qpel16_mc31_old_c)
DEF_OLD_QPEL(qpel16_mc12_old_c)
DEF_OLD_QPEL(qpel16_mc32_old_c)
DEF_OLD_QPEL(qpel16_mc13_old_c)
DEF_OLD_QPEL(qpel16_mc33_old_c)
DEF_OLD_QPEL(qpel8_mc11_old_c)
DEF_OLD_QPEL(qpel8_mc31_old_c)
DEF_OLD_QPEL(qpel8_mc12_old_c)
DEF_OLD_QPEL(qpel8_mc32_old_c)
DEF_OLD_QPEL(qpel8_mc13_old_c)
DEF_OLD_QPEL(qpel8_mc33_old_c)

#define CALL_2X_PIXELS(a, b, n)\
static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    b(block  , pixels  , line_size, h);\
    b(block+n, pixels+n, line_size, h);\
}
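
/*
 * Usage sketch (illustrative; the helper names below are hypothetical):
 * CALL_2X_PIXELS builds a double-width pixel op from a narrower one by
 * running it twice, n bytes apart:
 *
 *     CALL_2X_PIXELS(put_pixels16_c, put_pixels8_c, 8)
 *
 * defines a static put_pixels16_c() with the op_pixels_func signature that
 * calls put_pixels8_c() on the left and right 8-pixel halves of the block.
 */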

/* motion estimation / rate distortion comparison function */
typedef int (*me_cmp_func)(void /* MpegEncContext */ *s, uint8_t *blk1, uint8_t *blk2, int line_size, int h);

typedef struct ScanTable{
    const uint8_t *scantable;
    uint8_t permutated[64];
    uint8_t raster_end[64];
} ScanTable;

void ff_init_scantable(uint8_t *, ScanTable *st, const uint8_t *src_scantable);
void ff_init_scantable_permutation(uint8_t *idct_permutation,
                                   int idct_permutation_type);
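
/*
 * Usage sketch (assumption, for illustration): a decoder typically passes
 * its IDCT permutation and a reference scan order, e.g.
 *
 *     ScanTable st;
 *     ff_init_scantable(dsp.idct_permutation, &st, ff_zigzag_direct);
 *
 * after which st.permutated holds the zigzag scan rewritten for the
 * selected IDCT's input order.
 */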

#define EMULATED_EDGE(depth) \
void ff_emulated_edge_mc_ ## depth (uint8_t *buf, const uint8_t *src, int linesize,\
                         int block_w, int block_h,\
                         int src_x, int src_y, int w, int h);

EMULATED_EDGE(8)
EMULATED_EDGE(9)
EMULATED_EDGE(10)
EMULATED_EDGE(12)
EMULATED_EDGE(14)
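
/*
 * Usage sketch (assumption): the generated ff_emulated_edge_mc_8() copies a
 * block_w x block_h block starting at src into buf, replicating border
 * samples wherever the block at (src_x, src_y) extends beyond the w x h
 * picture, so that motion compensation near a frame edge can run on the
 * padded copy:
 *
 *     ff_emulated_edge_mc_8(edge_buf, block_ptr, linesize,
 *                           17, 17, src_x, src_y, pic_width, pic_height);
 *     block_ptr = edge_buf;   // hypothetical variable names
 */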

/**
 * DSPContext.
 */
typedef struct DSPContext {
    /* size in bits of a DCT coefficient (16 or 32, depending on bit depth) */
    int dct_bits;

    /* pixel ops : interface with DCT */
    void (*get_pixels)(DCTELEM *block, const uint8_t *pixels, int line_size);
    void (*diff_pixels)(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride);
    void (*put_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
    void (*put_signed_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
    void (*add_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
    void (*add_pixels8)(uint8_t *pixels, DCTELEM *block, int line_size);
    void (*add_pixels4)(uint8_t *pixels, DCTELEM *block, int line_size);
    int (*sum_abs_dctelem)(DCTELEM *block);
    void (*emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize,
                             int block_w, int block_h,
                             int src_x, int src_y, int w, int h);
    void (*gmc1)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x16, int y16, int rounder);
    void (*gmc )(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);
    void (*clear_block)(DCTELEM *block);
    void (*clear_blocks)(DCTELEM *blocks);
    int (*pix_sum)(uint8_t * pix, int line_size);
    int (*pix_norm1)(uint8_t * pix, int line_size);

    /* comparison functions used for motion estimation and mode decision,
     * one entry per block size */
    me_cmp_func sad[6];
    me_cmp_func sse[6];
    me_cmp_func hadamard8_diff[6];
    me_cmp_func dct_sad[6];
    me_cmp_func quant_psnr[6];
    me_cmp_func bit[6];
    me_cmp_func rd[6];
    me_cmp_func vsad[6];
    me_cmp_func vsse[6];
    me_cmp_func nsse[6];
    me_cmp_func w53[6];
    me_cmp_func w97[6];
    me_cmp_func dct_max[6];
    me_cmp_func dct264_sad[6];

    me_cmp_func me_pre_cmp[6];
    me_cmp_func me_cmp[6];
    me_cmp_func me_sub_cmp[6];
    me_cmp_func mb_cmp[6];
    me_cmp_func ildct_cmp[6];
    me_cmp_func frame_skip_cmp[6];

    int (*ssd_int8_vs_int16)(const int8_t *pix1, const int16_t *pix2,
                             int size);

    /* halfpel motion compensation tables: first index selects the block
     * width (0 = 16 pixels wide, 1 = 8, ...), second index the halfpel
     * position (x halfpel + 2 * y halfpel) */
    op_pixels_func put_pixels_tab[4][4];

    op_pixels_func avg_pixels_tab[4][4];

    op_pixels_func put_no_rnd_pixels_tab[4][4];

    op_pixels_func avg_no_rnd_pixels_tab[4][4];

    void (*put_no_rnd_pixels_l2[2])(uint8_t *block, const uint8_t *a, const uint8_t *b, int line_size, int h);

    /* thirdpel motion compensation (used by SVQ3) */
    tpel_mc_func put_tpel_pixels_tab[11];
    tpel_mc_func avg_tpel_pixels_tab[11];

    qpel_mc_func put_qpel_pixels_tab[2][16];
    qpel_mc_func avg_qpel_pixels_tab[2][16];
    qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16];
    qpel_mc_func avg_no_rnd_qpel_pixels_tab[2][16];
    qpel_mc_func put_mspel_pixels_tab[8];

    h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
    h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];

    qpel_mc_func put_h264_qpel_pixels_tab[4][16];
    qpel_mc_func avg_h264_qpel_pixels_tab[4][16];

    qpel_mc_func put_2tap_qpel_pixels_tab[4][16];
    qpel_mc_func avg_2tap_qpel_pixels_tab[4][16];

    me_cmp_func pix_abs[2][4];

    /* huffyuv specific */
    void (*add_bytes)(uint8_t *dst, uint8_t *src, int w);
    void (*diff_bytes)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,int w);
    void (*sub_hfyu_median_prediction)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top);
    void (*add_hfyu_median_prediction)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
    int  (*add_hfyu_left_prediction)(uint8_t *dst, const uint8_t *src, int w, int left);
    void (*add_hfyu_left_prediction_bgr32)(uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha);

    void (*bswap_buf)(uint32_t *dst, const uint32_t *src, int w);
    void (*bswap16_buf)(uint16_t *dst, const uint16_t *src, int len);

    void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale);
    void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale);

    void (*h261_loop_filter)(uint8_t *src, int stride);

    /* float vector ops (assorted audio codecs) */
    void (*vorbis_inverse_coupling)(float *mag, float *ang, int blocksize);
    void (*vector_fmul_reverse)(float *dst, const float *src0, const float *src1, int len);
    void (*vector_fmul_add)(float *dst, const float *src0, const float *src1, const float *src2, int len);
    void (*vector_fmul_window)(float *dst, const float *src0, const float *src1, const float *win, int len);
    void (*vector_clipf)(float *dst, const float *src, float min, float max, int len);
    void (*vector_fmul_scalar)(float *dst, const float *src, float mul,
                               int len);
    float (*scalarproduct_float)(const float *v1, const float *v2, int len);
    void (*butterflies_float)(float *av_restrict v1, float *av_restrict v2, int len);

    void (*butterflies_float_interleave)(float *dst, const float *src0,
                                         const float *src1, int len);

    /* forward DCTs */
    void (*fdct)(DCTELEM *block);
    void (*fdct248)(DCTELEM *block);

    /* inverse DCT; block is overwritten by the transform */
    void (*idct)(DCTELEM *block);

    /* IDCT, result clipped to unsigned 8 bit and stored to dest */
    void (*idct_put)(uint8_t *dest, int line_size, DCTELEM *block);

    /* IDCT, result added to dest and clipped to unsigned 8 bit */
    void (*idct_add)(uint8_t *dest, int line_size, DCTELEM *block);

    /* IDCT input permutation: several optimized IDCTs expect their input
     * coefficients in a permuted order; idct_permutation_type identifies
     * the permutation (see the FF_*_IDCT_PERM values below) */
    uint8_t idct_permutation[64];
    int idct_permutation_type;
#define FF_NO_IDCT_PERM 1
#define FF_LIBMPEG2_IDCT_PERM 2
#define FF_SIMPLE_IDCT_PERM 3
#define FF_TRANSPOSE_IDCT_PERM 4
#define FF_PARTTRANS_IDCT_PERM 5
#define FF_SSE2_IDCT_PERM 6

    int (*try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale);
    void (*add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale);
#define BASIS_SHIFT 16
#define RECON_SHIFT 6

    void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides);
#define EDGE_WIDTH 16
#define EDGE_TOP    1
#define EDGE_BOTTOM 2

    void (*prefetch)(void *mem, int stride, int h);

    void (*shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);

    /* MLP/TrueHD filtering */
    void (*mlp_filter_channel)(int32_t *state, const int32_t *coeff,
                               int firorder, int iirorder,
                               unsigned int filter_shift, int32_t mask, int blocksize,
                               int32_t *sample_buffer);

    int32_t (*scalarproduct_int16)(const int16_t *v1, const int16_t *v2, int len);
    int32_t (*scalarproduct_and_madd_int16)(int16_t *v1, const int16_t *v2, const int16_t *v3, int len, int mul);

    void (*apply_window_int16)(int16_t *output, const int16_t *input,
                               const int16_t *window, unsigned int len);

    void (*vector_clip_int32)(int32_t *dst, const int32_t *src, int32_t min,
                              int32_t max, unsigned int len);

    op_fill_func fill_block_tab[2];
} DSPContext;

void ff_dsputil_static_init(void);
void ff_dsputil_init(DSPContext* p, AVCodecContext *avctx);
attribute_deprecated void dsputil_init(DSPContext* c, AVCodecContext *avctx);
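
/*
 * Usage sketch (illustrative, not a prescribed API flow): a codec keeps a
 * DSPContext, initializes it once, and calls through the function pointers
 * so that architecture-specific optimized versions are picked up
 * transparently:
 *
 *     DSPContext dsp;
 *     ff_dsputil_init(&dsp, avctx);
 *     dsp.clear_block(block);
 *     // ... fill block with coefficients ...
 *     dsp.idct_put(dest, line_size, block);
 */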

int ff_check_alignment(void);

float ff_scalarproduct_float_c(const float *v1, const float *v2, int len);

void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last);

void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type);

#define         BYTE_VEC32(c)   ((c)*0x01010101UL)
#define         BYTE_VEC64(c)   ((c)*0x0001000100010001UL)

static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
{
    return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}

static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
{
    return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}

static inline uint64_t rnd_avg64(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & ~BYTE_VEC64(0x01)) >> 1);
}

static inline uint64_t no_rnd_avg64(uint64_t a, uint64_t b)
{
    return (a & b) + (((a ^ b) & ~BYTE_VEC64(0x01)) >> 1);
}
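
/*
 * Added note: the helpers above average the bytes packed in each word
 * without letting carries cross byte boundaries, using
 *
 *     (a + b + 1) >> 1 == (a | b) - ((a ^ b) >> 1)      // rounded
 *     (a + b)     >> 1 == (a & b) + ((a ^ b) >> 1)      // truncated
 *
 * Masking the xor with ~BYTE_VEC32(0x01) clears each byte's lowest bit
 * before the shift, so nothing leaks into the neighbouring byte. Per-byte
 * scalar reference for comparison (hypothetical helper, not used here):
 *
 *     static inline int rnd_avg_byte(int a, int b) { return (a + b + 1) >> 1; }
 */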

static inline int get_penalty_factor(int lambda, int lambda2, int type){
    switch(type&0xFF){
    default:
    case FF_CMP_SAD:
        return lambda>>FF_LAMBDA_SHIFT;
    case FF_CMP_DCT:
        return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
    case FF_CMP_W53:
        return (4*lambda)>>(FF_LAMBDA_SHIFT);
    case FF_CMP_W97:
        return (2*lambda)>>(FF_LAMBDA_SHIFT);
    case FF_CMP_SATD:
    case FF_CMP_DCT264:
        return (2*lambda)>>FF_LAMBDA_SHIFT;
    case FF_CMP_RD:
    case FF_CMP_PSNR:
    case FF_CMP_SSE:
    case FF_CMP_NSSE:
        return lambda2>>FF_LAMBDA_SHIFT;
    case FF_CMP_BIT:
        return 1;
    }
}

void ff_dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_arm(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_vis(DSPContext* c, AVCodecContext *avctx);
void ff_dsputil_init_mips(DSPContext* c, AVCodecContext *avctx);

void ff_dsputil_init_dwt(DSPContext *c);
void ff_mlp_init(DSPContext* c, AVCodecContext *avctx);
void ff_mlp_init_x86(DSPContext* c, AVCodecContext *avctx);

#if (ARCH_ARM && HAVE_NEON) || ARCH_PPC || HAVE_MMI || HAVE_MMX
#   define STRIDE_ALIGN 16
#else
#   define STRIDE_ALIGN 8
#endif

#define E(x) x

#define LOCAL_ALIGNED_A(a, t, v, s, o, ...)             \
    uint8_t la_##v[sizeof(t s o) + (a)];                \
    t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)

#define LOCAL_ALIGNED_D(a, t, v, s, o, ...) DECLARE_ALIGNED(a, t, v) s o

#define LOCAL_ALIGNED(a, t, v, ...) E(LOCAL_ALIGNED_A(a, t, v, __VA_ARGS__,,))

#if HAVE_LOCAL_ALIGNED_8
#   define LOCAL_ALIGNED_8(t, v, ...) E(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
#else
#   define LOCAL_ALIGNED_8(t, v, ...) LOCAL_ALIGNED(8, t, v, __VA_ARGS__)
#endif

#if HAVE_LOCAL_ALIGNED_16
#   define LOCAL_ALIGNED_16(t, v, ...) E(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
#else
#   define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED(16, t, v, __VA_ARGS__)
#endif
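
/*
 * Usage sketch (assumption): LOCAL_ALIGNED_16 declares a local array whose
 * start is 16-byte aligned, either directly via DECLARE_ALIGNED or through
 * the over-allocate-and-round-up fallback:
 *
 *     LOCAL_ALIGNED_16(DCTELEM, block, [64]);
 *     memset(block, 0, 64 * sizeof(*block));
 */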

#define WRAPPER8_16(name8, name16)\
static int name16(void *s, uint8_t *dst, uint8_t *src, int stride, int h){\
    return name8(s, dst           , src           , stride, h)\
          +name8(s, dst+8         , src+8         , stride, h);\
}

#define WRAPPER8_16_SQ(name8, name16)\
static int name16(void *s, uint8_t *dst, uint8_t *src, int stride, int h){\
    int score=0;\
    score +=name8(s, dst           , src           , stride, 8);\
    score +=name8(s, dst+8         , src+8         , stride, 8);\
    if(h==16){\
        dst += 8*stride;\
        src += 8*stride;\
        score +=name8(s, dst           , src           , stride, 8);\
        score +=name8(s, dst+8         , src+8         , stride, 8);\
    }\
    return score;\
}
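
/*
 * Usage sketch (names are hypothetical, for illustration): WRAPPER8_16_SQ
 * builds a 16x16 comparison function from an 8x8 one by summing the scores
 * of the four 8x8 quadrants:
 *
 *     WRAPPER8_16_SQ(my_cmp8x8_c, my_cmp16_c)
 *
 * defines a static my_cmp16_c() with an me_cmp_func-compatible signature
 * that calls my_cmp8x8_c() on each quadrant (only the top two 8x8 blocks
 * when h != 16).
 */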

static inline void copy_block2(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN16(dst   , AV_RN16(src   ));
        dst+=dstStride;
        src+=srcStride;
    }
}

static inline void copy_block4(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN32(dst   , AV_RN32(src   ));
        dst+=dstStride;
        src+=srcStride;
    }
}

static inline void copy_block8(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN32(dst   , AV_RN32(src   ));
        AV_WN32(dst+4 , AV_RN32(src+4 ));
        dst+=dstStride;
        src+=srcStride;
    }
}

static inline void copy_block9(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN32(dst   , AV_RN32(src   ));
        AV_WN32(dst+4 , AV_RN32(src+4 ));
        dst[8]= src[8];
        dst+=dstStride;
        src+=srcStride;
    }
}

static inline void copy_block16(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN32(dst   , AV_RN32(src   ));
        AV_WN32(dst+4 , AV_RN32(src+4 ));
        AV_WN32(dst+8 , AV_RN32(src+8 ));
        AV_WN32(dst+12, AV_RN32(src+12));
        dst+=dstStride;
        src+=srcStride;
    }
}

static inline void copy_block17(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN32(dst   , AV_RN32(src   ));
        AV_WN32(dst+4 , AV_RN32(src+4 ));
        AV_WN32(dst+8 , AV_RN32(src+8 ));
        AV_WN32(dst+12, AV_RN32(src+12));
        dst[16]= src[16];
        dst+=dstStride;
        src+=srcStride;
    }
}

#endif /* AVCODEC_DSPUTIL_H */