31 #define randomize_buffers(buf, size) \
34 uint8_t *tmp_buf = (uint8_t *)buf;\
35 for (j = 0; j < size; j++) \
36 tmp_buf[j] = rnd() & 0xFF; \
39 #define init_buffer(a0, a1, type, width)\
42 randomize_buffers(a0, width * sizeof(type));\
43 memcpy(a1, a0, width*sizeof(type));\
62 if (memcmp(dst0, dst1, width))
83 int *left,
int *left_top);
95 call_ref(dst0, src0, diff0, width, &A0, &B0);
96 call_new(dst1, src1, diff1, width, &A1, &B1);
97 if (memcmp(dst0, dst1, width) || (A0 != A1) || (B0 != B1))
99 bench_new(dst1, src1, diff1, width, &A1, &B1);
125 res0 =
call_ref(dst0, src0, width, acc);
126 res1 =
call_new(dst1, src1, width, acc);
127 if ((res0 & 0xFF) != (res1 & 0xFF)||\
128 memcmp(dst0, dst1, width))
154 res0 =
call_ref(dst0, src0, mask, width, acc);
155 res1 =
call_new(dst1, src1, mask, width, acc);
156 if ((res0 &0xFFFF) != (res1 &0xFFFF)||\
157 memcmp(dst0, dst1, width))
172 const ptrdiff_t
width);
175 src_size = (stride + 32) * 2;
182 call_ref(src0 + stride + 32, stride, w);
183 call_new(src1 + stride + 32, stride, w);
184 if (memcmp(src0, src1, stride)||
185 memcmp(src0+stride, src1 + stride, w + 32)) {
188 bench_new(src1 + stride + 32, stride, w);
198 int width = 16 * av_clip(
rnd(), 16, 128);
199 int accRnd =
rnd() & 0xFF;
207 report(
"add_median_pred");
210 report(
"add_left_pred_zero");
213 report(
"add_left_pred_rnd_acc");
216 report(
"add_left_pred_int16");
219 report(
"add_gradient_pred");
int(* add_left_pred_int16)(uint16_t *dst, const uint16_t *src, unsigned mask, ptrdiff_t w, unsigned left)
Memory handling functions.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
void(* add_bytes)(uint8_t *dst, uint8_t *src, ptrdiff_t w)
void checkasm_check_llviddsp(void)
static const uint16_t mask[17]
#define declare_func_emms(cpu_flags, ret,...)
static void check_add_bytes(LLVidDSPContext c, int width)
#define AV_CPU_FLAG_MMX
standard MMX
static void check_add_median_pred(LLVidDSPContext c, int width)
void ff_llviddsp_init(LLVidDSPContext *c)
#define check_func(func,...)
void(* add_gradient_pred)(uint8_t *src, const ptrdiff_t stride, const ptrdiff_t width)
common internal and external API header
static void check_add_left_pred_16(LLVidDSPContext c, unsigned mask, int width, unsigned acc, const char *report)
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int(* add_left_pred)(uint8_t *dst, const uint8_t *src, ptrdiff_t w, int left)
#define init_buffer(a0, a1, type, width)
static void check_add_left_pred(LLVidDSPContext c, int width, int acc, const char *report)
void(* add_median_pred)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, ptrdiff_t w, int *left, int *left_top)
static void check_add_gradient_pred(LLVidDSPContext c, int w)
void * av_mallocz_array(size_t nmemb, size_t size)