FFmpeg
snowenc.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/emms.h"
22 #include "libavutil/intmath.h"
23 #include "libavutil/libm.h"
24 #include "libavutil/log.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/pixdesc.h"
28 #include "avcodec.h"
29 #include "codec_internal.h"
30 #include "encode.h"
31 #include "internal.h" //For AVCodecInternal.recon_frame
32 #include "me_cmp.h"
33 #include "qpeldsp.h"
34 #include "snow_dwt.h"
35 #include "snow.h"
36 
37 #include "rangecoder.h"
38 #include "mathops.h"
39 
40 #include "mpegvideo.h"
41 #include "h263enc.h"
42 
43 #define FF_ME_ITER 3
44 
45 typedef struct SnowEncContext {
49 
50  int lambda;
51  int lambda2;
52  int pass1_rc;
53 
54  int pred;
55  int memc_only;
61 
63  MPVMainEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MPVEncContext, so this will be removed then (FIXME/XXX)
65 #define ME_CACHE_SIZE 1024
68 
70 
71  uint8_t *emu_edge_buffer;
72 
75 
76 #define PTR_ADD(ptr, off) ((ptr) ? (ptr) + (off) : NULL)
77 
78 static void init_ref(MotionEstContext *c, const uint8_t *const src[3],
79  uint8_t *const ref[3], uint8_t *const ref2[3],
80  int x, int y, int ref_index)
81 {
82  SnowContext *s = c->avctx->priv_data;
83  const int offset[3] = {
84  y*c-> stride + x,
85  ((y*c->uvstride + x) >> s->chroma_h_shift),
86  ((y*c->uvstride + x) >> s->chroma_h_shift),
87  };
88  for (int i = 0; i < 3; i++) {
89  c->src[0][i] = src [i];
90  c->ref[0][i] = PTR_ADD(ref[i], offset[i]);
91  }
92  av_assert2(!ref_index);
93 }
94 
/**
 * Write an integer with an adaptive exponential-Golomb-like code.
 *
 * State layout (offsets into @p state, fixed by the Snow bitstream and
 * mirrored by the decoder's get_symbol):
 *   state[0]      : zero flag (1 = value is zero)
 *   state[1..10]  : unary-coded exponent; exponents above 10 reuse state[10]
 *   state[11..21] : sign bit, indexed by the clamped exponent
 *   state[22..31] : mantissa bits; bit positions >= the clamp share state[31]
 *
 * @param c         range coder to write into
 * @param state     base of the adaptive probability states described above
 * @param v         value to encode
 * @param is_signed if nonzero, a sign bit is coded after the mantissa
 */
static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed)
{
    if (v) {
        const int a = FFABS(v);
        const int e = av_log2(a);          // exponent: position of the top set bit
        const int el = FFMIN(e, 10);       // clamp: only 10 distinct exponent states
        int i;

        put_rac(c, state + 0, 0);          // "not zero"

        // Unary exponent: el adaptive 1-bits, then shared state for the rest.
        for (i = 0; i < el; i++)
            put_rac(c, state + 1 + i, 1); //1..10
        for (; i < e; i++)
            put_rac(c, state + 1 + 9, 1); //1..10
        put_rac(c, state + 1 + FFMIN(i, 9), 0);  // terminating 0-bit

        // Mantissa, MSB first: high bits share state 31, low bits get their own.
        for (i = e - 1; i >= el; i--)
            put_rac(c, state + 22 + 9, (a >> i) & 1); //22..31
        for (; i >= 0; i--)
            put_rac(c, state + 22 + i, (a >> i) & 1); //22..31

        if (is_signed)
            put_rac(c, state + 11 + el, v < 0); //11..21
    } else {
        put_rac(c, state + 0, 1);          // zero flag set
    }
}
122 
/**
 * Write a non-negative integer with an adaptive escape/suffix code.
 *
 * Repeatedly emits a 1-bit and subtracts the current chunk size r while
 * v >= r; r doubles once log2 becomes positive, so the code grows roughly
 * exponentially. A terminating 0-bit is followed by log2 raw binary bits
 * of the remainder (MSB first).
 *
 * @param c     range coder to write into
 * @param state base of the adaptive states; escape bits use state[4+log2],
 *              remainder bits use state[31-i]
 * @param v     value to encode, must be >= 0
 * @param log2  initial chunk-size exponent, must be >= -4
 */
static inline void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2)
{
    int r = log2 >= 0 ? 1<<log2 : 1;   // current chunk size

    av_assert2(v >= 0);
    av_assert2(log2 >= -4);

    while (v >= r) {
        put_rac(c, state + 4 + log2, 1);  // escape: value exceeds this chunk
        v -= r;
        log2++;
        if (log2 > 0) r += r;             // chunk size doubles once log2 > 0
    }
    put_rac(c, state + 4 + log2, 0);      // terminator

    // Remainder as log2 adaptive binary digits, MSB first.
    for (int i = log2 - 1; i >= 0; i--)
        put_rac(c, state + 31 - i, (v >> i) & 1);
}
141 
/* NOTE(review): the signature line of this function was lost in the source
 * dump; it is reconstructed here from the call site
 * get_encode_buffer(s, s->input_picture) — confirm against upstream. */
/**
 * Allocate an input/reconstruction frame with an EDGE_WIDTH border on every
 * side, then shift the plane data pointers inward so frame->data[i] points
 * at the visible top-left pixel. The chroma border is scaled by the chroma
 * shifts. The reported width/height are restored to the codec dimensions
 * before returning.
 *
 * @return 0 on success, a negative AVERROR code on allocation failure.
 */
static int get_encode_buffer(SnowContext *s, AVFrame *frame)
{
    int ret;

    // Over-allocate by a full border on each side.
    frame->width  = s->avctx->width  + 2 * EDGE_WIDTH;
    frame->height = s->avctx->height + 2 * EDGE_WIDTH;

    ret = ff_encode_alloc_frame(s->avctx, frame);
    if (ret < 0)
        return ret;
    // Advance each plane pointer past the border (chroma planes use the
    // subsampled border size).
    for (int i = 0; frame->data[i]; i++) {
        int offset = (EDGE_WIDTH >> (i ? s->chroma_v_shift : 0)) *
                        frame->linesize[i] +
                        (EDGE_WIDTH >> (i ? s->chroma_h_shift : 0));
        frame->data[i] += offset;
    }
    frame->width  = s->avctx->width;
    frame->height = s->avctx->height;

    return 0;
}
163 
165 {
166  SnowEncContext *const enc = avctx->priv_data;
167  SnowContext *const s = &enc->com;
168  MPVEncContext *const mpv = &enc->m.s;
169  int plane_index, ret;
170  int i;
171 
172  if (enc->pred == DWT_97
173  && (avctx->flags & AV_CODEC_FLAG_QSCALE)
174  && avctx->global_quality == 0){
175  av_log(avctx, AV_LOG_ERROR, "The 9/7 wavelet is incompatible with lossless mode.\n");
176  return AVERROR(EINVAL);
177  }
178 
179  s->spatial_decomposition_type = enc->pred; //FIXME add decorrelator type r transform_type
180 
181  s->mv_scale = (avctx->flags & AV_CODEC_FLAG_QPEL) ? 2 : 4;
182  s->block_max_depth= (avctx->flags & AV_CODEC_FLAG_4MV ) ? 1 : 0;
183 
184  for(plane_index=0; plane_index<3; plane_index++){
185  s->plane[plane_index].diag_mc= 1;
186  s->plane[plane_index].htaps= 6;
187  s->plane[plane_index].hcoeff[0]= 40;
188  s->plane[plane_index].hcoeff[1]= -10;
189  s->plane[plane_index].hcoeff[2]= 2;
190  s->plane[plane_index].fast_mc= 1;
191  }
192 
193  // Must be before ff_snow_common_init()
194  ff_hpeldsp_init(&s->hdsp, avctx->flags);
195  if ((ret = ff_snow_common_init(avctx)) < 0) {
196  return ret;
197  }
198 
199 #define mcf(dx,dy)\
200  enc->qdsp.put_qpel_pixels_tab [0][dy+dx/4]=\
201  enc->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\
202  s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\
203  enc->qdsp.put_qpel_pixels_tab [1][dy+dx/4]=\
204  enc->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\
205  s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4];
206 
207  mcf( 0, 0)
208  mcf( 4, 0)
209  mcf( 8, 0)
210  mcf(12, 0)
211  mcf( 0, 4)
212  mcf( 4, 4)
213  mcf( 8, 4)
214  mcf(12, 4)
215  mcf( 0, 8)
216  mcf( 4, 8)
217  mcf( 8, 8)
218  mcf(12, 8)
219  mcf( 0,12)
220  mcf( 4,12)
221  mcf( 8,12)
222  mcf(12,12)
223 
224  ff_me_cmp_init(&enc->mecc, avctx);
225  ret = ff_me_init(&mpv->me, avctx, &enc->mecc, 0);
226  if (ret < 0)
227  return ret;
228  ff_mpegvideoencdsp_init(&enc->mpvencdsp, avctx);
229 
231 
232  s->version=0;
233 
234  mpv->c.avctx = avctx;
235  enc->m.bit_rate = avctx->bit_rate;
236  enc->m.lmin = avctx->mb_lmin;
237  enc->m.lmax = avctx->mb_lmax;
238  mpv->c.mb_num = (avctx->width * avctx->height + 255) / 256; // For ratecontrol
239 
240  mpv->me.temp =
241  mpv->me.scratchpad = av_calloc(avctx->width + 64, 2*16*2*sizeof(uint8_t));
242  if (!mpv->me.scratchpad)
243  return AVERROR(ENOMEM);
244 
246 
247  s->max_ref_frames = av_clip(avctx->refs, 1, MAX_REF_FRAMES);
248 
249  if(avctx->flags&AV_CODEC_FLAG_PASS1){
250  if(!avctx->stats_out)
251  avctx->stats_out = av_mallocz(256);
252 
253  if (!avctx->stats_out)
254  return AVERROR(ENOMEM);
255  }
256  if((avctx->flags&AV_CODEC_FLAG_PASS2) || !(avctx->flags&AV_CODEC_FLAG_QSCALE)){
257  ret = ff_rate_control_init(&enc->m);
258  if(ret < 0)
259  return ret;
260  }
262 
263  switch(avctx->pix_fmt){
264  case AV_PIX_FMT_YUV444P:
265 // case AV_PIX_FMT_YUV422P:
266  case AV_PIX_FMT_YUV420P:
267 // case AV_PIX_FMT_YUV411P:
268  case AV_PIX_FMT_YUV410P:
269  s->nb_planes = 3;
270  s->colorspace_type= 0;
271  break;
272  case AV_PIX_FMT_GRAY8:
273  s->nb_planes = 1;
274  s->colorspace_type = 1;
275  break;
276 /* case AV_PIX_FMT_RGB32:
277  s->colorspace= 1;
278  break;*/
279  }
280 
281  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift,
282  &s->chroma_v_shift);
283  if (ret)
284  return ret;
285 
286  s->input_picture = av_frame_alloc();
287  if (!s->input_picture)
288  return AVERROR(ENOMEM);
289 
290  if ((ret = get_encode_buffer(s, s->input_picture)) < 0)
291  return ret;
292 
293  enc->emu_edge_buffer = av_calloc(avctx->width + 128, 2 * (2 * MB_SIZE + HTAPS_MAX - 1));
294  if (!enc->emu_edge_buffer)
295  return AVERROR(ENOMEM);
296 
297  if (enc->motion_est == FF_ME_ITER) {
298  int size= s->b_width * s->b_height << 2*s->block_max_depth;
299  for(i=0; i<s->max_ref_frames; i++){
300  s->ref_mvs[i] = av_calloc(size, sizeof(*s->ref_mvs[i]));
301  s->ref_scores[i] = av_calloc(size, sizeof(*s->ref_scores[i]));
302  if (!s->ref_mvs[i] || !s->ref_scores[i])
303  return AVERROR(ENOMEM);
304  }
305  }
306 
307  return 0;
308 }
309 
310 //near copy & paste from dsputil, FIXME
/**
 * Sum the pixel values of a w x h block.
 *
 * @param pix       top-left pixel of the block
 * @param line_size stride between successive rows, in bytes
 * @param w         block width in pixels
 * @param h         block height in pixels
 * @return the sum of all w*h pixel values
 */
//near copy & paste from dsputil, FIXME
static int pix_sum(const uint8_t *pix, int line_size, int w, int h)
{
    int sum = 0;

    for (int y = 0; y < h; y++) {
        const uint8_t *row = pix + y * line_size;
        for (int x = 0; x < w; x++)
            sum += row[x];
    }
    return sum;
}
325 
326 //near copy & paste from dsputil, FIXME
327 static int pix_norm1(const uint8_t * pix, int line_size, int w)
328 {
329  int s, i, j;
330  const uint32_t *sq = ff_square_tab + 256;
331 
332  s = 0;
333  for (i = 0; i < w; i++) {
334  for (j = 0; j < w; j ++) {
335  s += sq[pix[0]];
336  pix ++;
337  }
338  pix += line_size - w;
339  }
340  return s;
341 }
342 
343 static inline int get_penalty_factor(int lambda, int lambda2, int type){
344  switch(type&0xFF){
345  default:
346  case FF_CMP_SAD:
347  return lambda>>FF_LAMBDA_SHIFT;
348  case FF_CMP_DCT:
349  return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
350  case FF_CMP_W53:
351  return (4*lambda)>>(FF_LAMBDA_SHIFT);
352  case FF_CMP_W97:
353  return (2*lambda)>>(FF_LAMBDA_SHIFT);
354  case FF_CMP_SATD:
355  case FF_CMP_DCT264:
356  return (2*lambda)>>FF_LAMBDA_SHIFT;
357  case FF_CMP_RD:
358  case FF_CMP_PSNR:
359  case FF_CMP_SSE:
360  case FF_CMP_NSSE:
361  return lambda2>>FF_LAMBDA_SHIFT;
362  case FF_CMP_BIT:
363  return 1;
364  }
365 }
366 
367 //FIXME copy&paste
368 #define P_LEFT P[1]
369 #define P_TOP P[2]
370 #define P_TOPRIGHT P[3]
371 #define P_MEDIAN P[4]
372 #define P_MV1 P[9]
373 #define FLAG_QPEL 1 //must be 1
374 
/**
 * Recursively encode one node of the block quadtree, choosing between an
 * inter block (motion search over all reference frames), an intra block
 * (flat color), or splitting into four children — whichever has the lowest
 * rate-distortion score.
 *
 * Candidate codings are written into scratch RangeCoders (pc/ic) backed by
 * local buffers and scratch state arrays; only the winner is copied back
 * into the real bitstream (s->c) and block state.
 *
 * @param enc   encoder context
 * @param level current quadtree depth (0 = whole macroblock)
 * @param x,y   node coordinates at this depth
 * @return the RD score of the chosen coding
 */
static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
{
    SnowContext      *const s = &enc->com;
    MotionEstContext *const c = &enc->m.s.me;
    uint8_t p_buffer[1024];                    // scratch bitstream for the inter candidate
    uint8_t i_buffer[1024];                    // scratch bitstream for the intra candidate
    uint8_t p_state[sizeof(s->block_state)];   // scratch coder state for inter
    uint8_t i_state[sizeof(s->block_state)];   // scratch coder state for intra
    RangeCoder pc, ic;
    uint8_t *pbbak       = s->c.bytestream;       // real bitstream position, to restore later
    uint8_t *pbbak_start = s->c.bytestream_start;
    int score, score2, iscore, i_len, p_len, block_s, sum, base_bits;
    const int w = s->b_width  << s->block_max_depth;
    const int h = s->b_height << s->block_max_depth;
    const int rem_depth = s->block_max_depth - level;
    const int index     = (x + y*w) << rem_depth;  // index of top-left leaf covered by this node
    const int block_w   = 1<<(LOG2_MB_SIZE - level);
    int trx = (x+1)<<rem_depth;
    int try = (y+1)<<rem_depth;
    // Neighbors for context/prediction; null_block outside the frame.
    const BlockNode *left  = x     ? &s->block[index-1] : &null_block;
    const BlockNode *top   = y     ? &s->block[index-w] : &null_block;
    const BlockNode *right = trx<w ? &s->block[index+1] : &null_block;
    const BlockNode *bottom= try<h ? &s->block[index+w] : &null_block;
    const BlockNode *tl    = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr    = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int pl = left->color[0];   // color predictors come from the left neighbor
    int pcb= left->color[1];
    int pcr= left->color[2];
    int pmx, pmy;
    int mx=0, my=0;
    int l,cr,cb;
    const int stride  = s->current_picture->linesize[0];
    const int uvstride= s->current_picture->linesize[1];
    const uint8_t *const current_data[3] = { s->input_picture->data[0] + (x + y*stride)*block_w,
                                             PTR_ADD(s->input_picture->data[1], ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)),
                                             PTR_ADD(s->input_picture->data[2], ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift))};
    int P[10][2];             // MV predictor candidates for EPZS
    int16_t last_mv[3][2];
    int qpel= !!(s->avctx->flags & AV_CODEC_FLAG_QPEL); //unused
    const int shift= 1+qpel;
    // Adaptive-coder context indices derived from neighbor differences.
    int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
    int mx_context = av_log2(2*FFABS(left->mx - top->mx));
    int my_context = av_log2(2*FFABS(left->my - top->my));
    int s_context  = 2*left->level + 2*top->level + tl->level + tr->level;
    int ref, best_ref, ref_score, ref_mx, ref_my;
    int range = MAX_MV >> (1 + qpel);

    av_assert0(sizeof(s->block_state) >= 256);
    if(s->keyframe){
        // Keyframes carry no motion: force intra with predicted colors.
        set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
        return 0;
    }

// clip predictors / edge ?

    P_LEFT[0]= left->mx;
    P_LEFT[1]= left->my;
    P_TOP [0]= top->mx;
    P_TOP [1]= top->my;
    P_TOPRIGHT[0]= tr->mx;
    P_TOPRIGHT[1]= tr->my;

    last_mv[0][0]= s->block[index].mx;
    last_mv[0][1]= s->block[index].my;
    last_mv[1][0]= right->mx;
    last_mv[1][1]= right->my;
    last_mv[2][0]= bottom->mx;
    last_mv[2][1]= bottom->my;

    // Minimal MPV state needed by the shared motion-estimation code.
    enc->m.s.c.mb_stride = 2;
    enc->m.s.c.mb_x =
    enc->m.s.c.mb_y = 0;
    c->skip= 0;

    av_assert1(c->stride   == stride);
    av_assert1(c->uvstride == uvstride);

    c->penalty_factor     = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_cmp);
    c->sub_penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_sub_cmp);
    c->mb_penalty_factor  = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->mb_cmp);
    c->current_mv_penalty = c->mv_penalty[enc->m.s.f_code=1] + MAX_DMV;

    // Search window: keep the motion-compensated block inside the padded frame.
    c->xmin = - x*block_w - 16+3;
    c->ymin = - y*block_w - 16+3;
    c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
    c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;

    c->xmin = FFMAX(c->xmin,-range);
    c->xmax = FFMIN(c->xmax, range);
    c->ymin = FFMAX(c->ymin,-range);
    c->ymax = FFMIN(c->ymax, range);

    // Clamp predictors into the (sub-pel scaled) search window.
    if(P_LEFT[0]     > (c->xmax<<shift)) P_LEFT[0]    = (c->xmax<<shift);
    if(P_LEFT[1]     > (c->ymax<<shift)) P_LEFT[1]    = (c->ymax<<shift);
    if(P_TOP[0]      > (c->xmax<<shift)) P_TOP[0]     = (c->xmax<<shift);
    if(P_TOP[1]      > (c->ymax<<shift)) P_TOP[1]     = (c->ymax<<shift);
    if(P_TOPRIGHT[0] < (c->xmin * (1<<shift))) P_TOPRIGHT[0]= (c->xmin * (1<<shift));
    if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift); //due to pmx no clip
    if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);

    P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
    P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);

    if (!y) {
        c->pred_x= P_LEFT[0];
        c->pred_y= P_LEFT[1];
    } else {
        c->pred_x = P_MEDIAN[0];
        c->pred_y = P_MEDIAN[1];
    }

    // Inter candidate: full search over every available reference frame.
    score= INT_MAX;
    best_ref= 0;
    for(ref=0; ref<s->ref_frames; ref++){
        init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);

        ref_score = ff_epzs_motion_search(&enc->m.s, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
                                          (1<<16)>>shift, level-LOG2_MB_SIZE+4, block_w);

        av_assert2(ref_mx >= c->xmin);
        av_assert2(ref_mx <= c->xmax);
        av_assert2(ref_my >= c->ymin);
        av_assert2(ref_my <= c->ymax);

        ref_score = c->sub_motion_search(&enc->m.s, &ref_mx, &ref_my, ref_score,
                                         0, 0, level-LOG2_MB_SIZE+4, block_w);
        ref_score = ff_get_mb_score(&enc->m.s, ref_mx, ref_my, 0, 0,
                                    level-LOG2_MB_SIZE+4, block_w, 0);
        // Penalize distant reference frames (their index costs more bits).
        ref_score+= 2*av_log2(2*ref)*c->penalty_factor;
        if(s->ref_mvs[ref]){
            // Cache per-reference results for the iterative ME pass.
            s->ref_mvs[ref][index][0]= ref_mx;
            s->ref_mvs[ref][index][1]= ref_my;
            s->ref_scores[ref][index]= ref_score;
        }
        if(score > ref_score){
            score= ref_score;
            best_ref= ref;
            mx= ref_mx;
            my= ref_my;
        }
    }
    //FIXME if mb_cmp != SSE then intra cannot be compared currently and mb_penalty vs. lambda2

  //  subpel search
    // Tentatively code the inter block into a scratch coder to measure its rate.
    base_bits= get_rac_count(&s->c) - 8*(s->c.bytestream - s->c.bytestream_start);
    pc= s->c;
    pc.bytestream_start=
    pc.bytestream= p_buffer; //FIXME end/start? and at the other stoo
    memcpy(p_state, s->block_state, sizeof(s->block_state));

    if(level!=s->block_max_depth)
        put_rac(&pc, &p_state[4 + s_context], 1);
    put_rac(&pc, &p_state[1 + left->type + top->type], 0);
    if(s->ref_frames > 1)
        put_symbol(&pc, &p_state[128 + 1024 + 32*ref_context], best_ref, 0);
    pred_mv(s, &pmx, &pmy, best_ref, left, top, tr);
    put_symbol(&pc, &p_state[128 + 32*(mx_context + 16*!!best_ref)], mx - pmx, 1);
    put_symbol(&pc, &p_state[128 + 32*(my_context + 16*!!best_ref)], my - pmy, 1);
    p_len= pc.bytestream - pc.bytestream_start;
    score += (enc->lambda2*(get_rac_count(&pc)-base_bits))>>FF_LAMBDA_SHIFT;

    // Intra candidate: flat DC color per plane; distortion = SSE around the mean.
    block_s= block_w*block_w;
    sum = pix_sum(current_data[0], stride, block_w, block_w);
    l= (sum + block_s/2)/block_s;
    iscore = pix_norm1(current_data[0], stride, block_w) - 2*l*sum + l*l*block_s;

    if (s->nb_planes > 2) {
        block_s= block_w*block_w>>(s->chroma_h_shift + s->chroma_v_shift);
        sum = pix_sum(current_data[1], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
        cb= (sum + block_s/2)/block_s;
//        iscore += pix_norm1(&current_mb[1][0], uvstride, block_w>>1) - 2*cb*sum + cb*cb*block_s;
        sum = pix_sum(current_data[2], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
        cr= (sum + block_s/2)/block_s;
//        iscore += pix_norm1(&current_mb[2][0], uvstride, block_w>>1) - 2*cr*sum + cr*cr*block_s;
    }else
        cb = cr = 0;

    // Tentatively code the intra block to measure its rate.
    ic= s->c;
    ic.bytestream_start=
    ic.bytestream= i_buffer; //FIXME end/start? and at the other stoo
    memcpy(i_state, s->block_state, sizeof(s->block_state));
    if(level!=s->block_max_depth)
        put_rac(&ic, &i_state[4 + s_context], 1);
    put_rac(&ic, &i_state[1 + left->type + top->type], 1);
    put_symbol(&ic, &i_state[32],  l-pl , 1);
    if (s->nb_planes > 2) {
        put_symbol(&ic, &i_state[64], cb-pcb, 1);
        put_symbol(&ic, &i_state[96], cr-pcr, 1);
    }
    i_len= ic.bytestream - ic.bytestream_start;
    iscore += (enc->lambda2*(get_rac_count(&ic)-base_bits))>>FF_LAMBDA_SHIFT;

    av_assert1(iscore < 255*255*256 + enc->lambda2*10);
    av_assert1(iscore >= 0);
    av_assert1(l>=0 && l<=255);
    av_assert1(pl>=0 && pl<=255);

    if(level==0){
        // Feed scene-change detection from the top-level intra/inter balance.
        int varc= iscore >> 8;
        int vard= score  >> 8;
        if (vard <= 64 || vard < varc)
            c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
        else
            c->scene_change_score+= enc->m.s.c.qscale;
    }

    if(level!=s->block_max_depth){
        // Split candidate: code the split flag for real, recurse into children.
        put_rac(&s->c, &s->block_state[4 + s_context], 0);
        score2 = encode_q_branch(enc, level+1, 2*x+0, 2*y+0);
        score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+0);
        score2+= encode_q_branch(enc, level+1, 2*x+0, 2*y+1);
        score2+= encode_q_branch(enc, level+1, 2*x+1, 2*y+1);
        score2+= enc->lambda2>>FF_LAMBDA_SHIFT; //FIXME exact split overhead

        if(score2 < score && score2 < iscore)
            return score2;   // children already wrote themselves to s->c
    }

    if(iscore < score){
        // Intra wins: splice the scratch intra bitstream/state into the real coder.
        pred_mv(s, &pmx, &pmy, 0, left, top, tr);
        memcpy(pbbak, i_buffer, i_len);
        s->c= ic;
        s->c.bytestream_start= pbbak_start;
        s->c.bytestream= pbbak + i_len;
        set_blocks(s, level, x, y, l, cb, cr, pmx, pmy, 0, BLOCK_INTRA);
        memcpy(s->block_state, i_state, sizeof(s->block_state));
        return iscore;
    }else{
        // Inter wins: splice the scratch inter bitstream/state into the real coder.
        memcpy(pbbak, p_buffer, p_len);
        s->c= pc;
        s->c.bytestream_start= pbbak_start;
        s->c.bytestream= pbbak + p_len;
        set_blocks(s, level, x, y, pl, pcb, pcr, mx, my, best_ref, 0);
        memcpy(s->block_state, p_state, sizeof(s->block_state));
        return score;
    }
}
612 
/**
 * Re-encode the already-decided block quadtree into the bitstream.
 *
 * Unlike encode_q_branch() this performs no search: it reads the final
 * decisions from s->block[] (filled by the RD pass) and writes the split
 * flags, intra colors or motion vectors using the same adaptive contexts
 * the decoder will use. A node is coded as unsplit when all four of its
 * leaf blocks are identical.
 */
static void encode_q_branch2(SnowContext *s, int level, int x, int y){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    int trx= (x+1)<<rem_depth;
    BlockNode *b= &s->block[index];
    // Neighbors for prediction contexts; null_block outside the frame.
    const BlockNode *left = x ? &s->block[index-1] : &null_block;
    const BlockNode *top  = y ? &s->block[index-w] : &null_block;
    const BlockNode *tl   = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr   = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int pl = left->color[0];
    int pcb= left->color[1];
    int pcr= left->color[2];
    int pmx, pmy;
    int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
    int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 16*!!b->ref;
    int my_context= av_log2(2*FFABS(left->my - top->my)) + 16*!!b->ref;
    int s_context= 2*left->level + 2*top->level + tl->level + tr->level;

    if(s->keyframe){
        set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
        return;
    }

    if(level!=s->block_max_depth){
        if(same_block(b,b+1) && same_block(b,b+w) && same_block(b,b+w+1)){
            // All four children identical: signal "no split", code as one block.
            put_rac(&s->c, &s->block_state[4 + s_context], 1);
        }else{
            put_rac(&s->c, &s->block_state[4 + s_context], 0);
            encode_q_branch2(s, level+1, 2*x+0, 2*y+0);
            encode_q_branch2(s, level+1, 2*x+1, 2*y+0);
            encode_q_branch2(s, level+1, 2*x+0, 2*y+1);
            encode_q_branch2(s, level+1, 2*x+1, 2*y+1);
            return;
        }
    }
    if(b->type & BLOCK_INTRA){
        pred_mv(s, &pmx, &pmy, 0, left, top, tr);
        put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 1);
        // Colors are coded as differences against the left neighbor.
        put_symbol(&s->c, &s->block_state[32], b->color[0]-pl , 1);
        if (s->nb_planes > 2) {
            put_symbol(&s->c, &s->block_state[64], b->color[1]-pcb, 1);
            put_symbol(&s->c, &s->block_state[96], b->color[2]-pcr, 1);
        }
        set_blocks(s, level, x, y, b->color[0], b->color[1], b->color[2], pmx, pmy, 0, BLOCK_INTRA);
    }else{
        pred_mv(s, &pmx, &pmy, b->ref, left, top, tr);
        put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 0);
        if(s->ref_frames > 1)
            put_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], b->ref, 0);
        // Motion vectors are coded as residuals against the prediction.
        put_symbol(&s->c, &s->block_state[128 + 32*mx_context], b->mx - pmx, 1);
        put_symbol(&s->c, &s->block_state[128 + 32*my_context], b->my - pmy, 1);
        set_blocks(s, level, x, y, pl, pcb, pcr, b->mx, b->my, b->ref, 0);
    }
}
668 
/**
 * Compute the optimal intra DC value for one plane of a block under OBMC.
 *
 * Temporarily marks the block intra with color 0, accumulates the OBMC
 * contribution of the four overlapping neighbor quadrants into a scratch
 * buffer, and solves the weighted least-squares fit
 * dc = sum(obmc_v * residual) / sum(obmc_v^2), scaled by LOG2_OBMC_MAX.
 * The block node is restored before returning.
 *
 * @return the DC value, clipped to [0,255]
 */
static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
{
    SnowContext *const s = &enc->com;
    int i, x2, y2;
    Plane *p= &s->plane[plane_index];
    const int block_size = MB_SIZE >> s->block_max_depth;
    const int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    const int ref_stride= s->current_picture->linesize[plane_index];
    const uint8_t *src= s->input_picture->data[plane_index];
    IDWTELEM *dst= enc->obmc_scratchpad + plane_index * block_size * block_size * 4; //FIXME change to unsigned
    const int b_stride = s->b_width << s->block_max_depth;
    const int w= p->width;
    const int h= p->height;
    int index= mb_x + mb_y*b_stride;
    BlockNode *b= &s->block[index];
    BlockNode backup= *b;      // restored before returning
    int ab=0;                  // accumulator: sum(obmc_v * residual)
    int aa=0;                  // accumulator: sum(obmc_v^2)

    av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc stuff above

    // Zero this block's contribution so only the neighbors' overlap remains.
    b->type|= BLOCK_INTRA;
    b->color[plane_index]= 0;
    memset(dst, 0, obmc_stride*obmc_stride*sizeof(IDWTELEM));

    for(i=0; i<4; i++){
        int mb_x2= mb_x + (i &1) - 1;
        int mb_y2= mb_y + (i>>1) - 1;
        int x= block_w*mb_x2 + block_w/2;
        int y= block_h*mb_y2 + block_h/2;

        add_yblock(s, 0, NULL, dst + (i&1)*block_w + (i>>1)*obmc_stride*block_h, NULL, obmc,
                    x, y, block_w, block_h, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);

        for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_h); y2++){
            for(x2= FFMAX(x, 0); x2<FFMIN(w, x+block_w); x2++){
                int index= x2-(block_w*mb_x - block_w/2) + (y2-(block_h*mb_y - block_h/2))*obmc_stride;
                int obmc_v= obmc[index];
                int d;
                // Fold the mirrored window weight back in at frame borders.
                if(y<0)         obmc_v += obmc[index + block_h*obmc_stride];
                if(x<0)         obmc_v += obmc[index + block_w];
                if(y+block_h>h) obmc_v += obmc[index - block_h*obmc_stride];
                if(x+block_w>w) obmc_v += obmc[index - block_w];
                //FIXME precalculate this or simplify it somehow else

                d = -dst[index] + (1<<(FRAC_BITS-1));  // negated neighbor prediction, rounded
                dst[index] = d;
                ab += (src[x2 + y2*ref_stride] - (d>>FRAC_BITS)) * obmc_v;
                aa += obmc_v * obmc_v; //FIXME precalculate this
            }
        }
    }
    *b= backup;

    return av_clip_uint8( ROUNDED_DIV((int64_t)ab<<LOG2_OBMC_MAX, aa) ); //FIXME we should not need clipping
}
728 
729 static inline int get_block_bits(SnowContext *s, int x, int y, int w){
730  const int b_stride = s->b_width << s->block_max_depth;
731  const int b_height = s->b_height<< s->block_max_depth;
732  int index= x + y*b_stride;
733  const BlockNode *b = &s->block[index];
734  const BlockNode *left = x ? &s->block[index-1] : &null_block;
735  const BlockNode *top = y ? &s->block[index-b_stride] : &null_block;
736  const BlockNode *tl = y && x ? &s->block[index-b_stride-1] : left;
737  const BlockNode *tr = y && x+w<b_stride ? &s->block[index-b_stride+w] : tl;
738  int dmx, dmy;
739 // int mx_context= av_log2(2*FFABS(left->mx - top->mx));
740 // int my_context= av_log2(2*FFABS(left->my - top->my));
741 
742  if(x<0 || x>=b_stride || y>=b_height)
743  return 0;
744 /*
745 1 0 0
746 01X 1-2 1
747 001XX 3-6 2-3
748 0001XXX 7-14 4-7
749 00001XXXX 15-30 8-15
750 */
751 //FIXME try accurate rate
752 //FIXME intra and inter predictors if surrounding blocks are not the same type
753  if(b->type & BLOCK_INTRA){
754  return 3+2*( av_log2(2*FFABS(left->color[0] - b->color[0]))
755  + av_log2(2*FFABS(left->color[1] - b->color[1]))
756  + av_log2(2*FFABS(left->color[2] - b->color[2])));
757  }else{
758  pred_mv(s, &dmx, &dmy, b->ref, left, top, tr);
759  dmx-= b->mx;
760  dmy-= b->my;
761  return 2*(1 + av_log2(2*FFABS(dmx)) //FIXME kill the 2* can be merged in lambda
762  + av_log2(2*FFABS(dmy))
763  + av_log2(2*b->ref));
764  }
765 }
766 
/**
 * Compute the rate-distortion cost of the current motion data for one block.
 *
 * Motion-compensates the block (with edge emulation), blends it into the
 * reconstruction via the pre-clipped OBMC window in @p obmc_edged, measures
 * distortion against the input picture with the configured me_cmp metric,
 * and adds the estimated bit cost of this block and the neighbors whose
 * coding context it influences.
 *
 * @param obmc_edged OBMC weights already folded at frame edges
 * @return distortion + rate * penalty_factor
 */
static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y,
                        int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2])
{
    SnowContext *const s = &enc->com;
    Plane *p= &s->plane[plane_index];
    const int block_size = MB_SIZE >> s->block_max_depth;
    const int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    const int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst= s->current_picture->data[plane_index];
    const uint8_t *src = s->input_picture->data[plane_index];
    IDWTELEM *pred = enc->obmc_scratchpad + plane_index * block_size * block_size * 4;
    uint8_t *cur = s->scratchbuf;
    uint8_t *tmp = enc->emu_edge_buffer;
    const int b_stride = s->b_width << s->block_max_depth;
    const int b_height = s->b_height<< s->block_max_depth;
    const int w= p->width;
    const int h= p->height;
    int distortion;
    int rate= 0;
    const int penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);
    // OBMC window covers 2x the block, centered: top-left at (sx, sy).
    int sx= block_w*mb_x - block_w/2;
    int sy= block_h*mb_y - block_h/2;
    int x0= FFMAX(0,-sx);            // clip the window to the frame
    int y0= FFMAX(0,-sy);
    int x1= FFMIN(block_w*2, w-sx);
    int y1= FFMIN(block_h*2, h-sy);
    int i,x,y;

    av_assert2(s->chroma_h_shift == s->chroma_v_shift); // obmc and square assumptions below, checking only block_w

    ff_snow_pred_block(s, cur, tmp, ref_stride, sx, sy, block_w*2, block_h*2, &s->block[mb_x + mb_y*b_stride], plane_index, w, h);

    // Blend prediction buffer with the OBMC-weighted MC result, clip to 8 bit.
    for(y=y0; y<y1; y++){
        const uint8_t *obmc1= obmc_edged[y];
        const IDWTELEM *pred1 = pred + y*obmc_stride;
        uint8_t *cur1 = cur + y*ref_stride;
        uint8_t *dst1 = dst + sx + (sy+y)*ref_stride;
        for(x=x0; x<x1; x++){
#if FRAC_BITS >= LOG2_OBMC_MAX
            int v = (cur1[x] * obmc1[x]) << (FRAC_BITS - LOG2_OBMC_MAX);
#else
            int v = (cur1[x] * obmc1[x] + (1<<(LOG2_OBMC_MAX - FRAC_BITS-1))) >> (LOG2_OBMC_MAX - FRAC_BITS);
#endif
            v = (v + pred1[x]) >> FRAC_BITS;
            if(v&(~255)) v= ~(v>>31);   // branchless clip to [0,255]
            dst1[x] = v;
        }
    }

    /* copy the regions where obmc[] = (uint8_t)(1<<LOG2_OBMC_MAX) */
    if ((mb_x == 0 || mb_x == b_stride-1) &&
        (mb_y == 0 || mb_y == b_height-1)){
        if(mb_x == 0)
            x1 = block_w;
        else
            x0 = block_w;
        if(mb_y == 0)
            y1 = block_h;
        else
            y0 = block_h;
        for(y=y0; y<y1; y++)
            memcpy(dst + sx+x0 + (sy+y)*ref_stride, cur + x0 + y*ref_stride, x1-x0);
    }

    if(block_w==16){
        /* FIXME rearrange dsputil to fit 32x32 cmp functions */
        /* FIXME check alignment of the cmp wavelet vs the encoding wavelet */
        /* FIXME cmps overlap but do not cover the wavelet's whole support.
         * So improving the score of one block is not strictly guaranteed
         * to improve the score of the whole frame, thus iterative motion
         * estimation does not always converge. */
        if(s->avctx->me_cmp == FF_CMP_W97)
            distortion = ff_w97_32_c(&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
        else if(s->avctx->me_cmp == FF_CMP_W53)
            distortion = ff_w53_32_c(&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
        else{
            // Generic metric: 4 x 16x16 covering the 32x32 window.
            distortion = 0;
            for(i=0; i<4; i++){
                int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
                distortion += enc->m.s.me.me_cmp[0](&enc->m.s, src + off, dst + off, ref_stride, 16);
            }
        }
    }else{
        av_assert2(block_w==8);
        distortion = enc->m.s.me.me_cmp[0](&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
    }

    if(plane_index==0){
        // Count bits of this block plus the neighbors whose contexts depend on it.
        for(i=0; i<4; i++){
/* ..RRr
 * .RXx.
 * rxx..
 */
            rate += get_block_bits(s, mb_x + (i&1) - (i>>1), mb_y + (i>>1), 1);
        }
        if(mb_x == b_stride-2)
            rate += get_block_bits(s, mb_x + 1, mb_y + 1, 1);
    }
    return distortion + rate*penalty_factor;
}
869 
/**
 * Compute the rate-distortion cost of a 3x3 neighborhood of blocks.
 *
 * Used when a change affects several blocks at once (e.g. 4MV decisions):
 * reconstructs each of the 9 blocks around (mb_x, mb_y) via add_yblock,
 * measures distortion against the input, and sums the estimated bit cost
 * of the affected blocks (one merged cost if all four sub-blocks of the
 * center are identical).
 *
 * @return distortion + rate * penalty_factor
 */
static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
{
    SnowContext *const s = &enc->com;
    int i, y2;
    Plane *p= &s->plane[plane_index];
    const int block_size = MB_SIZE >> s->block_max_depth;
    const int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    const int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst= s->current_picture->data[plane_index];
    const uint8_t *src = s->input_picture->data[plane_index];
    //FIXME zero_dst is const but add_yblock changes dst if add is 0 (this is never the case for dst=zero_dst
    // const has only been removed from zero_dst to suppress a warning
    static IDWTELEM zero_dst[4096]; //FIXME
    const int b_stride = s->b_width << s->block_max_depth;
    const int w= p->width;
    const int h= p->height;
    int distortion= 0;
    int rate= 0;
    const int penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, s->avctx->me_cmp);

    av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below

    for(i=0; i<9; i++){
        int mb_x2= mb_x + (i%3) - 1;
        int mb_y2= mb_y + (i/3) - 1;
        int x= block_w*mb_x2 + block_w/2;
        int y= block_h*mb_y2 + block_h/2;

        // Reconstruct directly into the current picture (add=1).
        add_yblock(s, 0, NULL, zero_dst, dst, obmc,
                   x, y, block_w, block_h, w, h, /*dst_stride*/0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);

        //FIXME find a cleaner/simpler way to skip the outside stuff
        // Fill the outside-the-frame parts of the window from the source so the
        // cmp below only measures real picture area.
        for(y2= y; y2<0; y2++)
            memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
        for(y2= h; y2<y+block_h; y2++)
            memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
        if(x<0){
            for(y2= y; y2<y+block_h; y2++)
                memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, -x);
        }
        if(x+block_w > w){
            for(y2= y; y2<y+block_h; y2++)
                memcpy(dst + w + y2*ref_stride, src + w + y2*ref_stride, x+block_w - w);
        }

        av_assert1(block_w== 8 || block_w==16);
        distortion += enc->m.s.me.me_cmp[block_w==8](&enc->m.s, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
    }

    if(plane_index==0){
        BlockNode *b= &s->block[mb_x+mb_y*b_stride];
        int merged= same_block(b,b+1) && same_block(b,b+b_stride) && same_block(b,b+b_stride+1);

/* ..RRRr
 * .RXXx.
 * .RXXx.
 * rxxx.
 */
        if(merged)
            rate = get_block_bits(s, mb_x, mb_y, 2);
        for(i=merged?4:0; i<9; i++){
            static const int dxy[9][2] = {{0,0},{1,0},{0,1},{1,1},{2,0},{2,1},{-1,2},{0,2},{1,2}};
            rate += get_block_bits(s, mb_x + dxy[i][0], mb_y + dxy[i][1], 1);
        }
    }
    return distortion + rate*penalty_factor;
}
940 
/**
 * Encode one wavelet subband using context modelling plus zero-run coding.
 *
 * Pass 1 scans the band and records in s->run_buffer the lengths of runs of
 * zero coefficients whose causal template (left, top-left, top, top-right
 * and the co-located parent-band sample) is entirely zero.  Pass 2 re-scans
 * and emits the bitstream: samples with a non-zero template get a
 * context-modelled significance bit; samples with an all-zero template are
 * covered by the pre-computed run lengths; each non-zero value is followed
 * by its magnitude and a context-modelled sign bit.
 *
 * @return 0 on success, AVERROR(ENOMEM) if the output buffer may overflow.
 */
 941 static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
 942  const int w= b->width;
 943  const int h= b->height;
 944  int x, y;
 945 
 946  if(1){
 947  int run=0;
 948  int *runs = s->run_buffer;
 949  int run_index=0;
 950  int max_index;
 951 
     // pass 1: measure the zero runs (no output is produced here)
 952  for(y=0; y<h; y++){
 953  for(x=0; x<w; x++){
 954  int v, p=0;
 955  int /*ll=0, */l=0, lt=0, t=0, rt=0;
 956  v= src[x + y*stride];
 957 
 958  if(y){
 959  t= src[x + (y-1)*stride];
 960  if(x){
 961  lt= src[x - 1 + (y-1)*stride];
 962  }
 963  if(x + 1 < w){
 964  rt= src[x + 1 + (y-1)*stride];
 965  }
 966  }
 967  if(x){
 968  l= src[x - 1 + y*stride];
 969  /*if(x > 1){
 970  if(orientation==1) ll= src[y + (x-2)*stride];
 971  else ll= src[x - 2 + y*stride];
 972  }*/
 973  }
 974  if(parent){
 975  int px= x>>1;
 976  int py= y>>1;
 977  if(px<b->parent->width && py<b->parent->height)
 978  p= parent[px + py*2*stride];
 979  }
     // a sample belongs to a run only when its whole template is zero
 980  if(!(/*ll|*/l|lt|t|rt|p)){
 981  if(v){
 982  runs[run_index++]= run;
 983  run=0;
 984  }else{
 985  run++;
 986  }
 987  }
 988  }
 989  }
 990  max_index= run_index;
 991  runs[run_index++]= run;
 992  run_index=0;
 993  run= runs[run_index++];
 994 
 995  put_symbol2(&s->c, b->state[30], max_index, 0);
 996  if(run_index <= max_index)
 997  put_symbol2(&s->c, b->state[1], run, 3);
 998 
     // pass 2: same scan again, this time emitting the bitstream
 999  for(y=0; y<h; y++){
     // guard against overflowing the output buffer
1000  if(s->c.bytestream_end - s->c.bytestream < w*40){
1001  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
1002  return AVERROR(ENOMEM);
1003  }
1004  for(x=0; x<w; x++){
1005  int v, p=0;
1006  int /*ll=0, */l=0, lt=0, t=0, rt=0;
1007  v= src[x + y*stride];
1008 
1009  if(y){
1010  t= src[x + (y-1)*stride];
1011  if(x){
1012  lt= src[x - 1 + (y-1)*stride];
1013  }
1014  if(x + 1 < w){
1015  rt= src[x + 1 + (y-1)*stride];
1016  }
1017  }
1018  if(x){
1019  l= src[x - 1 + y*stride];
1020  /*if(x > 1){
1021  if(orientation==1) ll= src[y + (x-2)*stride];
1022  else ll= src[x - 2 + y*stride];
1023  }*/
1024  }
1025  if(parent){
1026  int px= x>>1;
1027  int py= y>>1;
1028  if(px<b->parent->width && py<b->parent->height)
1029  p= parent[px + py*2*stride];
1030  }
1031  if(/*ll|*/l|lt|t|rt|p){
     // non-zero template: code a significance bit in a context derived
     // from the template magnitudes
1032  int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
1033 
1034  put_rac(&s->c, &b->state[0][context], !!v);
1035  }else{
     // all-zero template: significance is implied by the run lengths
1036  if(!run){
1037  run= runs[run_index++];
1038 
1039  if(run_index <= max_index)
1040  put_symbol2(&s->c, b->state[1], run, 3);
1041  av_assert2(v);
1042  }else{
1043  run--;
1044  av_assert2(!v);
1045  }
1046  }
1047  if(v){
     // magnitude (context-dependent) then sign, conditioned on the
     // quantized left/top neighbours
1048  int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
1049  int l2= 2*FFABS(l) + (l<0);
1050  int t2= 2*FFABS(t) + (t<0);
1051 
1052  put_symbol2(&s->c, b->state[context + 2], FFABS(v)-1, context-4);
1053  put_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l2&0xFF] + 3*ff_quant3bA[t2&0xFF]], v<0);
1054  }
1055  }
1056  }
1057  }
1058  return 0;
1059 }
1060 
/**
 * Encode one subband; currently always dispatches to the context-model +
 * zero-run coder.  The commented-out calls are alternative experimental
 * subband coders kept for reference.
 */
1061 static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
1062 // encode_subband_qtree(s, b, src, parent, stride, orientation);
1063 // encode_subband_z0run(s, b, src, parent, stride, orientation);
1064  return encode_subband_c0run(s, b, src, parent, stride, orientation);
1065 // encode_subband_dzr(s, b, src, parent, stride, orientation);
1066 }
1067 
1068 static av_always_inline int check_block_intra(SnowEncContext *enc, int mb_x, int mb_y, int p[3],
1069  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1070 {
1071  SnowContext *const s = &enc->com;
1072  const int b_stride= s->b_width << s->block_max_depth;
1073  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1074  BlockNode backup= *block;
1075  int rd;
1076 
1077  av_assert2(mb_x>=0 && mb_y>=0);
1078  av_assert2(mb_x<b_stride);
1079 
1080  block->color[0] = p[0];
1081  block->color[1] = p[1];
1082  block->color[2] = p[2];
1083  block->type |= BLOCK_INTRA;
1084 
1085  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged) + enc->intra_penalty;
1086 
1087 //FIXME chroma
1088  if(rd < *best_rd){
1089  *best_rd= rd;
1090  return 1;
1091  }else{
1092  *block= backup;
1093  return 0;
1094  }
1095 }
1096 
1097 /* special case for int[2] args we discard afterwards,
1098  * fixes compilation problem with gcc 2.95 */
/**
 * Try motion vector (p0, p1) for the block at (mb_x, mb_y) and keep it iff
 * it lowers *best_rd; otherwise the block is restored from a backup.
 *
 * The me_cache hash, keyed on the candidate vector, skips candidates that
 * were already scored in the current generation; a hash collision merely
 * causes a candidate to be (wrongly) skipped or re-checked, never an
 * incorrect result.
 *
 * @return 1 if the candidate was kept, 0 otherwise
 */
1100  int mb_x, int mb_y, int p0, int p1,
1101  uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1102 {
1103  SnowContext *const s = &enc->com;
1104  const int b_stride = s->b_width << s->block_max_depth;
1105  BlockNode *block = &s->block[mb_x + mb_y * b_stride];
1106  BlockNode backup = *block;
1107  unsigned value;
1108  int rd, index;
1109 
1110  av_assert2(mb_x >= 0 && mb_y >= 0);
1111  av_assert2(mb_x < b_stride);
1112 
     // cache lookup: skip this (mx, my, ref) if already scored this generation
1113  index = (p0 + 31 * p1) & (ME_CACHE_SIZE-1);
1114  value = enc->me_cache_generation + (p0 >> 10) + p1 * (1 << 6) + (block->ref << 12);
1115  if (enc->me_cache[index] == value)
1116  return 0;
1117  enc->me_cache[index] = value;
1118 
1119  block->mx = p0;
1120  block->my = p1;
1121  block->type &= ~BLOCK_INTRA;
1122 
1123  rd = get_block_rd(enc, mb_x, mb_y, 0, obmc_edged);
1124 
1125 //FIXME chroma
1126  if (rd < *best_rd) {
1127  *best_rd = rd;
1128  return 1;
1129  } else {
1130  *block = backup;
1131  return 0;
1132  }
1133 }
1134 
/**
 * Try giving all four blocks of the 2x2 group at (mb_x, mb_y) the same
 * motion vector (p0, p1) and reference frame 'ref'; keep the change iff it
 * lowers *best_rd, otherwise restore the four blocks from backups.
 *
 * NOTE(review): the me_cache key mixes in block->ref *before* it is
 * overwritten with 'ref' below, unlike the mx/my part which uses the
 * candidate values — presumably harmless since a stale key only causes a
 * redundant or skipped check, but verify against check_block_inter.
 *
 * @return 1 if the candidate was kept, 0 otherwise
 */
1135 static av_always_inline int check_4block_inter(SnowEncContext *enc, int mb_x, int mb_y,
1136  int p0, int p1, int ref, int *best_rd)
1137 {
1138  SnowContext *const s = &enc->com;
1139  const int b_stride= s->b_width << s->block_max_depth;
1140  BlockNode *block= &s->block[mb_x + mb_y * b_stride];
1141  BlockNode backup[4];
1142  unsigned value;
1143  int rd, index;
1144 
1145  /* We don't initialize backup[] during variable declaration, because
1146  * that fails to compile on MSVC: "cannot convert from 'BlockNode' to
1147  * 'int16_t'". */
1148  backup[0] = block[0];
1149  backup[1] = block[1];
1150  backup[2] = block[b_stride];
1151  backup[3] = block[b_stride + 1];
1152 
1153  av_assert2(mb_x>=0 && mb_y>=0);
1154  av_assert2(mb_x<b_stride);
1155  av_assert2(((mb_x|mb_y)&1) == 0);
1156 
     // cache lookup: skip candidates already scored this generation
1157  index= (p0 + 31*p1) & (ME_CACHE_SIZE-1);
1158  value = enc->me_cache_generation + (p0>>10) + (p1<<6) + (block->ref<<12);
1159  if (enc->me_cache[index] == value)
1160  return 0;
1161  enc->me_cache[index] = value;
1162 
     // apply the candidate to the top-left block, then clone it to the
     // other three blocks of the group
1163  block->mx= p0;
1164  block->my= p1;
1165  block->ref= ref;
1166  block->type &= ~BLOCK_INTRA;
1167  block[1]= block[b_stride]= block[b_stride+1]= *block;
1168 
1169  rd = get_4block_rd(enc, mb_x, mb_y, 0);
1170 
1171 //FIXME chroma
1172  if(rd < *best_rd){
1173  *best_rd= rd;
1174  return 1;
1175  }else{
1176  block[0]= backup[0];
1177  block[1]= backup[1];
1178  block[b_stride]= backup[2];
1179  block[b_stride+1]= backup[3];
1180  return 0;
1181  }
1182 }
1183 
/**
 * Iterative rate-distortion motion search over the whole block tree.
 *
 * First the q-branch encoder is run once into throwaway copies of the range
 * coder and block state, to prime the per-block statistics.  Then up to 25
 * passes greedily re-optimize every block: inter candidates from the spatial
 * neighbours and each reference frame, a diamond full-pel search, a square
 * sub-pel refinement, and finally an intra/colour candidate.  Whenever a
 * block changes, its 8 neighbours are re-opened (BLOCK_OPT cleared).
 * As a last step, when block_max_depth == 1, each 2x2 group is tested for
 * merging into a single motion vector via check_4block_inter().
 */
1184 static void iterative_me(SnowEncContext *enc)
1185 {
1186  SnowContext *const s = &enc->com;
1187  int pass, mb_x, mb_y;
1188  const int b_width = s->b_width << s->block_max_depth;
1189  const int b_height= s->b_height << s->block_max_depth;
1190  const int b_stride= b_width;
1191  int color[3];
1192 
     // prime block statistics: encode once, then restore coder and state
1193  {
1194  RangeCoder r = s->c;
1195  uint8_t state[sizeof(s->block_state)];
1196  memcpy(state, s->block_state, sizeof(s->block_state));
1197  for(mb_y= 0; mb_y<s->b_height; mb_y++)
1198  for(mb_x= 0; mb_x<s->b_width; mb_x++)
1199  encode_q_branch(enc, 0, mb_x, mb_y);
1200  s->c = r;
1201  memcpy(s->block_state, state, sizeof(s->block_state));
1202  }
1203 
1204  for(pass=0; pass<25; pass++){
1205  int change= 0;
1206 
1207  for(mb_y= 0; mb_y<b_height; mb_y++){
1208  for(mb_x= 0; mb_x<b_width; mb_x++){
1209  int dia_change, i, j, ref;
1210  int best_rd= INT_MAX, ref_rd;
1211  BlockNode backup, ref_b;
1212  const int index= mb_x + mb_y * b_stride;
1213  BlockNode *block= &s->block[index];
1214  BlockNode *tb = mb_y ? &s->block[index-b_stride ] : NULL;
1215  BlockNode *lb = mb_x ? &s->block[index -1] : NULL;
1216  BlockNode *rb = mb_x+1<b_width ? &s->block[index +1] : NULL;
1217  BlockNode *bb = mb_y+1<b_height ? &s->block[index+b_stride ] : NULL;
1218  BlockNode *tlb= mb_x && mb_y ? &s->block[index-b_stride-1] : NULL;
1219  BlockNode *trb= mb_x+1<b_width && mb_y ? &s->block[index-b_stride+1] : NULL;
1220  BlockNode *blb= mb_x && mb_y+1<b_height ? &s->block[index+b_stride-1] : NULL;
1221  BlockNode *brb= mb_x+1<b_width && mb_y+1<b_height ? &s->block[index+b_stride+1] : NULL;
1222  const int b_w= (MB_SIZE >> s->block_max_depth);
1223  uint8_t obmc_edged[MB_SIZE * 2][MB_SIZE * 2];
1224 
     // a block already optimized this pass is skipped until a neighbour
     // change re-opens it
1225  if(pass && (block->type & BLOCK_OPT))
1226  continue;
1227  block->type |= BLOCK_OPT;
1228 
1229  backup= *block;
1230 
     // bump the cache generation so stale entries cannot match
1231  if (!enc->me_cache_generation)
1232  memset(enc->me_cache, 0, sizeof(enc->me_cache));
1233  enc->me_cache_generation += 1<<22;
1234 
1235  //FIXME precalculate
     // build the OBMC weight window, folding the weight of off-picture
     // neighbours into the border rows/columns
1236  {
1237  int x, y;
1238  for (y = 0; y < b_w * 2; y++)
1239  memcpy(obmc_edged[y], ff_obmc_tab[s->block_max_depth] + y * b_w * 2, b_w * 2);
1240  if(mb_x==0)
1241  for(y=0; y<b_w*2; y++)
1242  memset(obmc_edged[y], obmc_edged[y][0] + obmc_edged[y][b_w-1], b_w);
1243  if(mb_x==b_stride-1)
1244  for(y=0; y<b_w*2; y++)
1245  memset(obmc_edged[y]+b_w, obmc_edged[y][b_w] + obmc_edged[y][b_w*2-1], b_w);
1246  if(mb_y==0){
1247  for(x=0; x<b_w*2; x++)
1248  obmc_edged[0][x] += obmc_edged[b_w-1][x];
1249  for(y=1; y<b_w; y++)
1250  memcpy(obmc_edged[y], obmc_edged[0], b_w*2);
1251  }
1252  if(mb_y==b_height-1){
1253  for(x=0; x<b_w*2; x++)
1254  obmc_edged[b_w*2-1][x] += obmc_edged[b_w][x];
1255  for(y=b_w; y<b_w*2-1; y++)
1256  memcpy(obmc_edged[y], obmc_edged[b_w*2-1], b_w*2);
1257  }
1258  }
1259 
1260  //skip stuff outside the picture
1261  if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
1262  const uint8_t *src = s->input_picture->data[0];
1263  uint8_t *dst= s->current_picture->data[0];
1264  const int stride= s->current_picture->linesize[0];
1265  const int block_w= MB_SIZE >> s->block_max_depth;
1266  const int block_h= MB_SIZE >> s->block_max_depth;
1267  const int sx= block_w*mb_x - block_w/2;
1268  const int sy= block_h*mb_y - block_h/2;
1269  const int w= s->plane[0].width;
1270  const int h= s->plane[0].height;
1271  int y;
1272 
1273  for(y=sy; y<0; y++)
1274  memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
1275  for(y=h; y<sy+block_h*2; y++)
1276  memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
1277  if(sx<0){
1278  for(y=sy; y<sy+block_h*2; y++)
1279  memcpy(dst + sx + y*stride, src + sx + y*stride, -sx);
1280  }
1281  if(sx+block_w*2 > w){
1282  for(y=sy; y<sy+block_h*2; y++)
1283  memcpy(dst + w + y*stride, src + w + y*stride, sx+block_w*2 - w);
1284  }
1285  }
1286 
1287  // intra(black) = neighbors' contribution to the current block
1288  for(i=0; i < s->nb_planes; i++)
1289  color[i]= get_dc(enc, mb_x, mb_y, i);
1290 
1291  // get previous score (cannot be cached due to OBMC)
1292  if(pass > 0 && (block->type&BLOCK_INTRA)){
1293  int color0[3]= {block->color[0], block->color[1], block->color[2]};
1294  check_block_intra(enc, mb_x, mb_y, color0, obmc_edged, &best_rd);
1295  }else
1296  check_block_inter(enc, mb_x, mb_y, block->mx, block->my, obmc_edged, &best_rd);
1297 
1298  ref_b= *block;
1299  ref_rd= best_rd;
1300  for(ref=0; ref < s->ref_frames; ref++){
1301  int16_t (*mvr)[2]= &s->ref_mvs[ref][index];
1302  if(s->ref_scores[ref][index] > s->ref_scores[ref_b.ref][index]*3/2) //FIXME tune threshold
1303  continue;
1304  block->ref= ref;
1305  best_rd= INT_MAX;
1306 
     // candidate vectors: the ME result, zero, and the 4 neighbours
1307  check_block_inter(enc, mb_x, mb_y, mvr[0][0], mvr[0][1], obmc_edged, &best_rd);
1308  check_block_inter(enc, mb_x, mb_y, 0, 0, obmc_edged, &best_rd);
1309  if(tb)
1310  check_block_inter(enc, mb_x, mb_y, mvr[-b_stride][0], mvr[-b_stride][1], obmc_edged, &best_rd);
1311  if(lb)
1312  check_block_inter(enc, mb_x, mb_y, mvr[-1][0], mvr[-1][1], obmc_edged, &best_rd);
1313  if(rb)
1314  check_block_inter(enc, mb_x, mb_y, mvr[1][0], mvr[1][1], obmc_edged, &best_rd);
1315  if(bb)
1316  check_block_inter(enc, mb_x, mb_y, mvr[b_stride][0], mvr[b_stride][1], obmc_edged, &best_rd);
1317 
1318  /* fullpel ME */
1319  //FIXME avoid subpel interpolation / round to nearest integer
1320  do{
1321  int newx = block->mx;
1322  int newy = block->my;
1323  int dia_size = enc->iterative_dia_size ? enc->iterative_dia_size : FFMAX(s->avctx->dia_size, 1);
1324  dia_change=0;
1325  for(i=0; i < dia_size; i++){
1326  for(j=0; j<i; j++){
1327  dia_change |= check_block_inter(enc, mb_x, mb_y, newx+4*(i-j), newy+(4*j), obmc_edged, &best_rd);
1328  dia_change |= check_block_inter(enc, mb_x, mb_y, newx-4*(i-j), newy-(4*j), obmc_edged, &best_rd);
1329  dia_change |= check_block_inter(enc, mb_x, mb_y, newx-(4*j), newy+4*(i-j), obmc_edged, &best_rd);
1330  dia_change |= check_block_inter(enc, mb_x, mb_y, newx+(4*j), newy-4*(i-j), obmc_edged, &best_rd);
1331  }
1332  }
1333  }while(dia_change);
1334  /* subpel ME */
1335  do{
1336  static const int square[8][2]= {{+1, 0},{-1, 0},{ 0,+1},{ 0,-1},{+1,+1},{-1,-1},{+1,-1},{-1,+1},};
1337  dia_change=0;
1338  for(i=0; i<8; i++)
1339  dia_change |= check_block_inter(enc, mb_x, mb_y, block->mx+square[i][0], block->my+square[i][1], obmc_edged, &best_rd);
1340  }while(dia_change);
1341  //FIXME or try the standard 2 pass qpel or similar
1342 
1343  mvr[0][0]= block->mx;
1344  mvr[0][1]= block->my;
1345  if(ref_rd > best_rd){
1346  ref_rd= best_rd;
1347  ref_b= *block;
1348  }
1349  }
1350  best_rd= ref_rd;
1351  *block= ref_b;
1352  check_block_intra(enc, mb_x, mb_y, color, obmc_edged, &best_rd);
1353  //FIXME RD style color selection
     // the block changed: re-open all 8 neighbours for re-optimization
1354  if(!same_block(block, &backup)){
1355  if(tb ) tb ->type &= ~BLOCK_OPT;
1356  if(lb ) lb ->type &= ~BLOCK_OPT;
1357  if(rb ) rb ->type &= ~BLOCK_OPT;
1358  if(bb ) bb ->type &= ~BLOCK_OPT;
1359  if(tlb) tlb->type &= ~BLOCK_OPT;
1360  if(trb) trb->type &= ~BLOCK_OPT;
1361  if(blb) blb->type &= ~BLOCK_OPT;
1362  if(brb) brb->type &= ~BLOCK_OPT;
1363  change ++;
1364  }
1365  }
1366  }
1367  av_log(s->avctx, AV_LOG_DEBUG, "pass:%d changed:%d\n", pass, change);
1368  if(!change)
1369  break;
1370  }
1371 
     // 4MV merge pass: try collapsing each 2x2 group onto a single vector
1372  if(s->block_max_depth == 1){
1373  int change= 0;
1374  for(mb_y= 0; mb_y<b_height; mb_y+=2){
1375  for(mb_x= 0; mb_x<b_width; mb_x+=2){
1376  int i;
1377  int best_rd, init_rd;
1378  const int index= mb_x + mb_y * b_stride;
1379  BlockNode *b[4];
1380 
1381  b[0]= &s->block[index];
1382  b[1]= b[0]+1;
1383  b[2]= b[0]+b_stride;
1384  b[3]= b[2]+1;
1385  if(same_block(b[0], b[1]) &&
1386  same_block(b[0], b[2]) &&
1387  same_block(b[0], b[3]))
1388  continue;
1389 
1390  if (!enc->me_cache_generation)
1391  memset(enc->me_cache, 0, sizeof(enc->me_cache));
1392  enc->me_cache_generation += 1<<22;
1393 
1394  init_rd = best_rd = get_4block_rd(enc, mb_x, mb_y, 0);
1395 
1396  //FIXME more multiref search?
1397  check_4block_inter(enc, mb_x, mb_y,
1398  (b[0]->mx + b[1]->mx + b[2]->mx + b[3]->mx + 2) >> 2,
1399  (b[0]->my + b[1]->my + b[2]->my + b[3]->my + 2) >> 2, 0, &best_rd);
1400 
1401  for(i=0; i<4; i++)
1402  if(!(b[i]->type&BLOCK_INTRA))
1403  check_4block_inter(enc, mb_x, mb_y, b[i]->mx, b[i]->my, b[i]->ref, &best_rd);
1404 
1405  if(init_rd != best_rd)
1406  change++;
1407  }
1408  }
     // NOTE(review): progress statistics logged at AV_LOG_ERROR; AV_LOG_DEBUG
     // would match the per-pass message above — confirm before changing.
1409  av_log(s->avctx, AV_LOG_ERROR, "pass:4mv changed:%d\n", change*4);
1410  }
1411 }
1412 
1413 static void encode_blocks(SnowEncContext *enc, int search)
1414 {
1415  SnowContext *const s = &enc->com;
1416  int x, y;
1417  int w= s->b_width;
1418  int h= s->b_height;
1419 
1420  if (enc->motion_est == FF_ME_ITER && !s->keyframe && search)
1421  iterative_me(enc);
1422 
1423  for(y=0; y<h; y++){
1424  if(s->c.bytestream_end - s->c.bytestream < w*MB_SIZE*MB_SIZE*3){ //FIXME nicer limit
1425  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
1426  return;
1427  }
1428  for(x=0; x<w; x++){
1429  if (enc->motion_est == FF_ME_ITER || !search)
1430  encode_q_branch2(s, 0, x, y);
1431  else
1432  encode_q_branch (enc, 0, x, y);
1433  }
1434  }
1435 }
1436 
/**
 * Quantize a subband of DWT coefficients from src into dst.
 *
 * The quantizer step qmul is derived from the combined frame + band qlog;
 * at LOSSLESS_QLOG the coefficients are copied through unchanged.  The
 * 'bias' argument selects the rounding mode: non-zero selects plain
 * truncating division toward zero, zero selects division with a (3*qmul)>>3
 * rounding offset.  thres1/thres2 implement a deadzone fast path:
 * coefficients with |i| <= thres1 map to 0 without any division.
 */
1437 static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias){
1438  const int w= b->width;
1439  const int h= b->height;
1440  const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
1441  const int qmul= ff_qexp[qlog&(QROOT-1)]<<((qlog>>QSHIFT) + ENCODER_EXTRA_BITS);
1442  int x,y, thres1, thres2;
1443 
1444  if(s->qlog == LOSSLESS_QLOG){
1445  for(y=0; y<h; y++)
1446  for(x=0; x<w; x++)
1447  dst[x + y*stride]= src[x + y*stride];
1448  return;
1449  }
1450 
     // non-zero bias argument -> no rounding offset (bias becomes 0)
1451  bias= bias ? 0 : (3*qmul)>>3;
1452  thres1= ((qmul - bias)>>QEXPSHIFT) - 1;
1453  thres2= 2*thres1;
1454 
1455  if(!bias){
1456  for(y=0; y<h; y++){
1457  for(x=0; x<w; x++){
1458  int i= src[x + y*stride];
1459 
     // deadzone check: |i| <= thres1 quantizes to zero, skip the division
1460  if((unsigned)(i+thres1) > thres2){
1461  if(i>=0){
1462  i<<= QEXPSHIFT;
1463  i/= qmul; //FIXME optimize
1464  dst[x + y*stride]= i;
1465  }else{
     // negate first so the shift and division operate on a positive value
1466  i= -i;
1467  i<<= QEXPSHIFT;
1468  i/= qmul; //FIXME optimize
1469  dst[x + y*stride]= -i;
1470  }
1471  }else
1472  dst[x + y*stride]= 0;
1473  }
1474  }
1475  }else{
1476  for(y=0; y<h; y++){
1477  for(x=0; x<w; x++){
1478  int i= src[x + y*stride];
1479 
1480  if((unsigned)(i+thres1) > thres2){
1481  if(i>=0){
1482  i<<= QEXPSHIFT;
1483  i= (i + bias) / qmul; //FIXME optimize
1484  dst[x + y*stride]= i;
1485  }else{
1486  i= -i;
1487  i<<= QEXPSHIFT;
1488  i= (i + bias) / qmul; //FIXME optimize
1489  dst[x + y*stride]= -i;
1490  }
1491  }else
1492  dst[x + y*stride]= 0;
1493  }
1494  }
1495  }
1496 }
1497 
     // Encoder-side reconstruction of a quantized subband, in place:
     // multiply each value back by the quantizer step with a qbias-derived
     // rounding offset.  A no-op at LOSSLESS_QLOG, where nothing was scaled.
1499  const int w= b->width;
1500  const int h= b->height;
1501  const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
1502  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
1503  const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
1504  int x,y;
1505 
1506  if(s->qlog == LOSSLESS_QLOG) return;
1507 
1508  for(y=0; y<h; y++){
1509  for(x=0; x<w; x++){
1510  int i= src[x + y*stride];
1511  if(i<0){
1512  src[x + y*stride]= -((-i*qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
1513  }else if(i>0){
1514  src[x + y*stride]= (( i*qmul + qadd)>>(QEXPSHIFT));
1515  }
1516  }
1517  }
1518 }
1519 
1520 static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1521  const int w= b->width;
1522  const int h= b->height;
1523  int x,y;
1524 
1525  for(y=h-1; y>=0; y--){
1526  for(x=w-1; x>=0; x--){
1527  int i= x + y*stride;
1528 
1529  if(x){
1530  if(use_median){
1531  if(y && x+1<w) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1532  else src[i] -= src[i - 1];
1533  }else{
1534  if(y) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1535  else src[i] -= src[i - 1];
1536  }
1537  }else{
1538  if(y) src[i] -= src[i - stride];
1539  }
1540  }
1541  }
1542 }
1543 
1544 static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
1545  const int w= b->width;
1546  const int h= b->height;
1547  int x,y;
1548 
1549  for(y=0; y<h; y++){
1550  for(x=0; x<w; x++){
1551  int i= x + y*stride;
1552 
1553  if(x){
1554  if(use_median){
1555  if(y && x+1<w) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
1556  else src[i] += src[i - 1];
1557  }else{
1558  if(y) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
1559  else src[i] += src[i - 1];
1560  }
1561  }else{
1562  if(y) src[i] += src[i - stride];
1563  }
1564  }
1565  }
1566 }
1567 
     // Write the per-band quantizer logs (delta-coded) for up to two planes.
     // Orientation 2 is skipped: bands 1 and 2 share the same qlog (see
     // calculate_visual_weight, which copies band[level][2].qlog to band 1).
1569  int plane_index, level, orientation;
1570 
1571  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1572  for(level=0; level<s->spatial_decomposition_count; level++){
1573  for(orientation=level ? 1:0; orientation<4; orientation++){
1574  if(orientation==2) continue;
1575  put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1);
1576  }
1577  }
1578  }
1579 }
1580 
     // Write the frame header: a keyframe flag, then (keyframes only) the
     // global stream parameters, then (inter frames only) optional motion
     // compensation filter updates, and finally the delta-coded per-frame
     // fields (decomposition type, qlog, mv_scale, qbias, block_max_depth).
1582  int plane_index, i;
1583  uint8_t kstate[32];
1584 
1585  memset(kstate, MID_STATE, sizeof(kstate));
1586 
1587  put_rac(&s->c, kstate, s->keyframe);
     // reset the "last" values so the deltas below are coded from zero
1588  if(s->keyframe || s->always_reset){
1590  s->last_spatial_decomposition_type=
1591  s->last_qlog=
1592  s->last_qbias=
1593  s->last_mv_scale=
1594  s->last_block_max_depth= 0;
1595  for(plane_index=0; plane_index<2; plane_index++){
1596  Plane *p= &s->plane[plane_index];
1597  p->last_htaps=0;
1598  p->last_diag_mc=0;
1599  memset(p->last_hcoeff, 0, sizeof(p->last_hcoeff));
1600  }
1601  }
     // keyframes carry the full stream description
1602  if(s->keyframe){
1603  put_symbol(&s->c, s->header_state, s->version, 0);
1604  put_rac(&s->c, s->header_state, s->always_reset);
1605  put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0);
1606  put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0);
1607  put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1608  put_symbol(&s->c, s->header_state, s->colorspace_type, 0);
1609  if (s->nb_planes > 2) {
1610  put_symbol(&s->c, s->header_state, s->chroma_h_shift, 0);
1611  put_symbol(&s->c, s->header_state, s->chroma_v_shift, 0);
1612  }
1613  put_rac(&s->c, s->header_state, s->spatial_scalability);
1614 // put_rac(&s->c, s->header_state, s->rate_scalability);
1615  put_symbol(&s->c, s->header_state, s->max_ref_frames-1, 0);
1616 
1617  encode_qlogs(s);
1618  }
1619 
     // inter frames: signal halfpel filter changes and decomposition updates
1620  if(!s->keyframe){
1621  int update_mc=0;
1622  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1623  Plane *p= &s->plane[plane_index];
1624  update_mc |= p->last_htaps != p->htaps;
1625  update_mc |= p->last_diag_mc != p->diag_mc;
1626  update_mc |= !!memcmp(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
1627  }
1628  put_rac(&s->c, s->header_state, update_mc);
1629  if(update_mc){
1630  for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1631  Plane *p= &s->plane[plane_index];
1632  put_rac(&s->c, s->header_state, p->diag_mc);
1633  put_symbol(&s->c, s->header_state, p->htaps/2-1, 0);
1634  for(i= p->htaps/2; i; i--)
1635  put_symbol(&s->c, s->header_state, FFABS(p->hcoeff[i]), 0);
1636  }
1637  }
1638  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1639  put_rac(&s->c, s->header_state, 1);
1640  put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1641  encode_qlogs(s);
1642  }else
1643  put_rac(&s->c, s->header_state, 0);
1644  }
1645 
     // per-frame fields, delta-coded against the previous frame's values
1646  put_symbol(&s->c, s->header_state, s->spatial_decomposition_type - s->last_spatial_decomposition_type, 1);
1647  put_symbol(&s->c, s->header_state, s->qlog - s->last_qlog , 1);
1648  put_symbol(&s->c, s->header_state, s->mv_scale - s->last_mv_scale, 1);
1649  put_symbol(&s->c, s->header_state, s->qbias - s->last_qbias , 1);
1650  put_symbol(&s->c, s->header_state, s->block_max_depth - s->last_block_max_depth, 1);
1651 
1652 }
1653 
     // Snapshot the current header fields into their last_* counterparts so
     // the next frame's header can be delta-coded against them.  The filter
     // parameters are only latched on inter frames, matching encode_header(),
     // which only signals filter updates when !keyframe.
1655  int plane_index;
1656 
1657  if(!s->keyframe){
1658  for(plane_index=0; plane_index<2; plane_index++){
1659  Plane *p= &s->plane[plane_index];
1660  p->last_diag_mc= p->diag_mc;
1661  p->last_htaps = p->htaps;
1662  memcpy(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
1663  }
1664  }
1665 
1666  s->last_spatial_decomposition_type = s->spatial_decomposition_type;
1667  s->last_qlog = s->qlog;
1668  s->last_qbias = s->qbias;
1669  s->last_mv_scale = s->mv_scale;
1670  s->last_block_max_depth = s->block_max_depth;
1671  s->last_spatial_decomposition_count = s->spatial_decomposition_count;
1672 }
1673 
1674 static int qscale2qlog(int qscale){
1675  return lrint(QROOT*log2(qscale / (float)FF_QP2LAMBDA))
1676  + 61*QROOT/8; ///< 64 > 60
1677 }
1678 
/*
 * First-pass rate control: estimate the frame's complexity as a quantizer-
 * weighted sum of the luma DWT coefficients, feed it to the MPV rate
 * controller, and adjust s->qlog accordingly.
 * Returns the applied qlog delta, or INT_MIN on rate-control failure.
 */
1680 {
1681  SnowContext *const s = &enc->com;
1682  /* Estimate the frame's complexity as a sum of weighted dwt coefficients.
1683  * FIXME we know exact mv bits at this point,
1684  * but ratecontrol isn't set up to include them. */
1685  uint32_t coef_sum= 0;
1686  int level, orientation, delta_qlog;
1687 
1688  for(level=0; level<s->spatial_decomposition_count; level++){
1689  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1690  SubBand *b= &s->plane[0].band[level][orientation];
1691  IDWTELEM *buf= b->ibuf;
1692  const int w= b->width;
1693  const int h= b->height;
1694  const int stride= b->stride;
1695  const int qlog= av_clip(2*QROOT + b->qlog, 0, QROOT*16);
1696  const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
1697  const int qdiv= (1<<16)/qmul;
1698  int x, y;
1699  //FIXME this is ugly
     // copy into the IDWT buffer so the DC band can be decorrelated without
     // touching the original coefficients
1700  for(y=0; y<h; y++)
1701  for(x=0; x<w; x++)
1702  buf[x+y*stride]= b->buf[x+y*stride];
1703  if(orientation==0)
1704  decorrelate(s, b, buf, stride, 1, 0);
     // accumulate |coef| / qmul in 16.16 fixed point
1705  for(y=0; y<h; y++)
1706  for(x=0; x<w; x++)
1707  coef_sum+= abs(buf[x+y*stride]) * qdiv >> 16;
1708  }
1709  }
1710  emms_c();
1711 
1712  /* ugly, ratecontrol just takes a sqrt again */
1713  av_assert0(coef_sum < INT_MAX);
1714  coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
1715 
     // feed the complexity into the MPV rate controller's per-type counters
1716  if(pict->pict_type == AV_PICTURE_TYPE_I){
1717  enc->m.mb_var_sum = coef_sum;
1718  enc->m.mc_mb_var_sum = 0;
1719  }else{
1720  enc->m.mc_mb_var_sum = coef_sum;
1721  enc->m.mb_var_sum = 0;
1722  }
1723 
1724  pict->quality= ff_rate_estimate_qscale(&enc->m, 1);
1725  if (pict->quality < 0)
1726  return INT_MIN;
1727  enc->lambda= pict->quality * 3/2;
1728  delta_qlog= qscale2qlog(pict->quality) - s->qlog;
1729  s->qlog+= delta_qlog;
1730  return delta_qlog;
1731 }
1732 
     // Derive a visual weight (qlog) per subband: place a single impulse in
     // the band, inverse-transform it, and measure the energy of the
     // resulting basis function.  Bands whose basis spreads more energy get
     // a finer quantizer.  Orientation 2's energy is halved and bands 1/2
     // (HL/LH) are forced to share the same qlog.
1734  int width = p->width;
1735  int height= p->height;
1736  int level, orientation, x, y;
1737 
1738  for(level=0; level<s->spatial_decomposition_count; level++){
1739  int64_t error=0;
1740  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1741  SubBand *b= &p->band[level][orientation];
1742  IDWTELEM *ibuf= b->ibuf;
1743 
     // impulse response: clear the spatial buffer, set one sample in the
     // middle of this band, then inverse-transform
1744  memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
1745  ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
1746  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
1747  for(y=0; y<height; y++){
1748  for(x=0; x<width; x++){
1749  int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
1750  error += d*d;
1751  }
1752  }
1753  if (orientation == 2)
1754  error /= 2;
1755  b->qlog= (int)(QROOT * log2(352256.0/sqrt(error)) + 0.5);
     // orientations 1 and 2 accumulate their error together (reset after 2)
1756  if (orientation != 1)
1757  error = 0;
1758  }
1759  p->band[level][1].qlog = p->band[level][2].qlog;
1760  }
1761 }
1762 
1764  const AVFrame *pict, int *got_packet)
1765 {
1766  SnowEncContext *const enc = avctx->priv_data;
1767  SnowContext *const s = &enc->com;
1768  MPVEncContext *const mpv = &enc->m.s;
1769  RangeCoder * const c= &s->c;
1770  AVCodecInternal *avci = avctx->internal;
1771  AVFrame *pic;
1772  const int width= s->avctx->width;
1773  const int height= s->avctx->height;
1774  int level, orientation, plane_index, i, y, ret;
1775  uint8_t rc_header_bak[sizeof(s->header_state)];
1776  uint8_t rc_block_bak[sizeof(s->block_state)];
1777 
1778  if ((ret = ff_alloc_packet(avctx, pkt, s->b_width*s->b_height*MB_SIZE*MB_SIZE*3 + FF_INPUT_BUFFER_MIN_SIZE)) < 0)
1779  return ret;
1780 
1782  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1783 
1784  for(i=0; i < s->nb_planes; i++){
1785  int hshift= i ? s->chroma_h_shift : 0;
1786  int vshift= i ? s->chroma_v_shift : 0;
1787  for(y=0; y<AV_CEIL_RSHIFT(height, vshift); y++)
1788  memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
1789  &pict->data[i][y * pict->linesize[i]],
1790  AV_CEIL_RSHIFT(width, hshift));
1791  enc->mpvencdsp.draw_edges(s->input_picture->data[i], s->input_picture->linesize[i],
1792  AV_CEIL_RSHIFT(width, hshift), AV_CEIL_RSHIFT(height, vshift),
1793  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1794  EDGE_TOP | EDGE_BOTTOM);
1795 
1796  }
1797  pic = s->input_picture;
1798  pic->pict_type = pict->pict_type;
1799  pic->quality = pict->quality;
1800 
1801  mpv->picture_number = avctx->frame_num;
1802  if(avctx->flags&AV_CODEC_FLAG_PASS2){
1803  mpv->c.pict_type = pic->pict_type = enc->m.rc_context.entry[avctx->frame_num].new_pict_type;
1804  s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
1805  if(!(avctx->flags&AV_CODEC_FLAG_QSCALE)) {
1806  pic->quality = ff_rate_estimate_qscale(&enc->m, 0);
1807  if (pic->quality < 0)
1808  return -1;
1809  }
1810  }else{
1811  s->keyframe= avctx->gop_size==0 || avctx->frame_num % avctx->gop_size == 0;
1812  mpv->c.pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1813  }
1814 
1815  if (enc->pass1_rc && avctx->frame_num == 0)
1816  pic->quality = 2*FF_QP2LAMBDA;
1817  if (pic->quality) {
1818  s->qlog = qscale2qlog(pic->quality);
1819  enc->lambda = pic->quality * 3/2;
1820  }
1821  if (s->qlog < 0 || (!pic->quality && (avctx->flags & AV_CODEC_FLAG_QSCALE))) {
1822  s->qlog= LOSSLESS_QLOG;
1823  enc->lambda = 0;
1824  }//else keep previous frame's qlog until after motion estimation
1825 
1826  if (s->current_picture->data[0]) {
1827  int w = s->avctx->width;
1828  int h = s->avctx->height;
1829 
1830  enc->mpvencdsp.draw_edges(s->current_picture->data[0],
1831  s->current_picture->linesize[0], w , h ,
1833  if (s->current_picture->data[2]) {
1834  enc->mpvencdsp.draw_edges(s->current_picture->data[1],
1835  s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1836  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1837  enc->mpvencdsp.draw_edges(s->current_picture->data[2],
1838  s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1839  EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
1840  }
1841  }
1842 
1844  ret = get_encode_buffer(s, s->current_picture);
1845  if (ret < 0)
1846  return ret;
1847 
1848  mpv->c.cur_pic.ptr = &enc->cur_pic;
1849  mpv->c.cur_pic.ptr->f = s->current_picture;
1850  mpv->c.cur_pic.ptr->f->pts = pict->pts;
1851  if(pic->pict_type == AV_PICTURE_TYPE_P){
1852  int block_width = (width +15)>>4;
1853  int block_height= (height+15)>>4;
1854  int stride= s->current_picture->linesize[0];
1855 
1856  av_assert0(s->current_picture->data[0]);
1857  av_assert0(s->last_picture[0]->data[0]);
1858 
1859  mpv->c.avctx = s->avctx;
1860  mpv->c.last_pic.ptr = &enc->last_pic;
1861  mpv->c.last_pic.ptr->f = s->last_picture[0];
1862  mpv-> new_pic = s->input_picture;
1863  mpv->c.linesize = stride;
1864  mpv->c.uvlinesize = s->current_picture->linesize[1];
1865  mpv->c.width = width;
1866  mpv->c.height = height;
1867  mpv->c.mb_width = block_width;
1868  mpv->c.mb_height = block_height;
1869  mpv->c.mb_stride = mpv->c.mb_width + 1;
1870  mpv->c.b8_stride = 2 * mpv->c.mb_width + 1;
1871  mpv->f_code = 1;
1872  mpv->c.pict_type = pic->pict_type;
1873  mpv->me.motion_est = enc->motion_est;
1874  mpv->me.dia_size = avctx->dia_size;
1875  mpv->c.quarter_sample = (s->avctx->flags & AV_CODEC_FLAG_QPEL)!=0;
1876  mpv->c.out_format = FMT_H263;
1877  mpv->me.unrestricted_mv = 1;
1878 
1879  mpv->lambda = enc->lambda;
1880  mpv->c.qscale = (mpv->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
1881  enc->lambda2 = mpv->lambda2 = (mpv->lambda*mpv->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
1882 
1883  mpv->c.qdsp = enc->qdsp; //move
1884  mpv->c.hdsp = s->hdsp;
1885  ff_me_init_pic(mpv);
1886  s->hdsp = mpv->c.hdsp;
1887  }
1888 
1889  if (enc->pass1_rc) {
1890  memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
1891  memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
1892  }
1893 
1894 redo_frame:
1895 
1896  s->spatial_decomposition_count= 5;
1897 
1898  while( !(width >>(s->chroma_h_shift + s->spatial_decomposition_count))
1899  || !(height>>(s->chroma_v_shift + s->spatial_decomposition_count)))
1900  s->spatial_decomposition_count--;
1901 
1902  if (s->spatial_decomposition_count <= 0) {
1903  av_log(avctx, AV_LOG_ERROR, "Resolution too low\n");
1904  return AVERROR(EINVAL);
1905  }
1906 
1907  mpv->c.pict_type = pic->pict_type;
1908  s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
1909 
1911 
1912  if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1913  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1914  calculate_visual_weight(s, &s->plane[plane_index]);
1915  }
1916  }
1917 
1918  encode_header(s);
1919  mpv->misc_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
1920  encode_blocks(enc, 1);
1921  mpv->mv_bits = 8 * (s->c.bytestream - s->c.bytestream_start) - mpv->misc_bits;
1922 
1923  for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1924  Plane *p= &s->plane[plane_index];
1925  int w= p->width;
1926  int h= p->height;
1927  int x, y;
1928 // int bits= put_bits_count(&s->c.pb);
1929 
1930  if (!enc->memc_only) {
1931  //FIXME optimize
1932  if(pict->data[plane_index]) //FIXME gray hack
1933  for(y=0; y<h; y++){
1934  for(x=0; x<w; x++){
1935  s->spatial_idwt_buffer[y*w + x]= pict->data[plane_index][y*pict->linesize[plane_index] + x]<<FRAC_BITS;
1936  }
1937  }
1938  predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);
1939 
1940  if( plane_index==0
1941  && pic->pict_type == AV_PICTURE_TYPE_P
1942  && !(avctx->flags&AV_CODEC_FLAG_PASS2)
1943  && mpv->me.scene_change_score > enc->scenechange_threshold) {
1945  ff_build_rac_states(c, (1LL<<32)/20, 256-8);
1947  s->keyframe=1;
1948  s->current_picture->flags |= AV_FRAME_FLAG_KEY;
1949  emms_c();
1950  goto redo_frame;
1951  }
1952 
1953  if(s->qlog == LOSSLESS_QLOG){
1954  for(y=0; y<h; y++){
1955  for(x=0; x<w; x++){
1956  s->spatial_dwt_buffer[y*w + x]= (s->spatial_idwt_buffer[y*w + x] + (1<<(FRAC_BITS-1))-1)>>FRAC_BITS;
1957  }
1958  }
1959  }else{
1960  for(y=0; y<h; y++){
1961  for(x=0; x<w; x++){
1962  s->spatial_dwt_buffer[y*w + x]= s->spatial_idwt_buffer[y*w + x] * (1 << ENCODER_EXTRA_BITS);
1963  }
1964  }
1965  }
1966 
1967  ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
1968 
1969  if (enc->pass1_rc && plane_index==0) {
1970  int delta_qlog = ratecontrol_1pass(enc, pic);
1971  if (delta_qlog <= INT_MIN)
1972  return -1;
1973  if(delta_qlog){
1974  //reordering qlog in the bitstream would eliminate this reset
1976  memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
1977  memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
1978  encode_header(s);
1979  encode_blocks(enc, 0);
1980  }
1981  }
1982 
1983  for(level=0; level<s->spatial_decomposition_count; level++){
1984  for(orientation=level ? 1 : 0; orientation<4; orientation++){
1985  SubBand *b= &p->band[level][orientation];
1986 
1987  quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
1988  if(orientation==0)
1989  decorrelate(s, b, b->ibuf, b->stride, pic->pict_type == AV_PICTURE_TYPE_P, 0);
1990  if (!enc->no_bitstream)
1991  encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
1992  av_assert0(b->parent==NULL || b->parent->stride == b->stride*2);
1993  if(orientation==0)
1994  correlate(s, b, b->ibuf, b->stride, 1, 0);
1995  }
1996  }
1997 
1998  for(level=0; level<s->spatial_decomposition_count; level++){
1999  for(orientation=level ? 1 : 0; orientation<4; orientation++){
2000  SubBand *b= &p->band[level][orientation];
2001 
2002  dequantize(s, b, b->ibuf, b->stride);
2003  }
2004  }
2005 
2006  ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
2007  if(s->qlog == LOSSLESS_QLOG){
2008  for(y=0; y<h; y++){
2009  for(x=0; x<w; x++){
2010  s->spatial_idwt_buffer[y*w + x] *= 1 << FRAC_BITS;
2011  }
2012  }
2013  }
2014  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2015  }else{
2016  //ME/MC only
2017  if(pic->pict_type == AV_PICTURE_TYPE_I){
2018  for(y=0; y<h; y++){
2019  for(x=0; x<w; x++){
2020  s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
2021  pict->data[plane_index][y*pict->linesize[plane_index] + x];
2022  }
2023  }
2024  }else{
2025  memset(s->spatial_idwt_buffer, 0, sizeof(IDWTELEM)*w*h);
2026  predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
2027  }
2028  }
2029  if(s->avctx->flags&AV_CODEC_FLAG_PSNR){
2030  int64_t error= 0;
2031 
2032  if(pict->data[plane_index]) //FIXME gray hack
2033  for(y=0; y<h; y++){
2034  for(x=0; x<w; x++){
2035  int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
2036  error += d*d;
2037  }
2038  }
2039  s->avctx->error[plane_index] += error;
2040  enc->encoding_error[plane_index] = error;
2041  }
2042 
2043  }
2044  emms_c();
2045 
2047 
2048  av_frame_unref(s->last_picture[s->max_ref_frames - 1]);
2049 
2050  s->current_picture->pict_type = pic->pict_type;
2051  s->current_picture->quality = pic->quality;
2052  enc->m.frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
2053  mpv->p_tex_bits = enc->m.frame_bits - mpv->misc_bits - mpv->mv_bits;
2054  enc->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
2056  enc->cur_pic.coded_picture_number = avctx->frame_num;
2057  enc->cur_pic.f->quality = pic->quality;
2058  if (enc->pass1_rc) {
2059  ret = ff_rate_estimate_qscale(&enc->m, 0);
2060  if (ret < 0)
2061  return ret;
2062  }
2063  if(avctx->flags&AV_CODEC_FLAG_PASS1)
2064  ff_write_pass1_stats(&enc->m);
2065  enc->m.last_pict_type = mpv->c.pict_type;
2066 
2067  ff_encode_add_stats_side_data(pkt, s->current_picture->quality,
2068  enc->encoding_error,
2069  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? SNOW_MAX_PLANES : 0,
2070  s->current_picture->pict_type);
2071  if (s->avctx->flags & AV_CODEC_FLAG_RECON_FRAME) {
2072  av_frame_replace(avci->recon_frame, s->current_picture);
2073  }
2074 
2075  pkt->size = ff_rac_terminate(c, 0);
2076  if (s->current_picture->flags & AV_FRAME_FLAG_KEY)
2078  *got_packet = 1;
2079 
2080  return 0;
2081 }
2082 
2084 {
2085  SnowEncContext *const enc = avctx->priv_data;
2086  SnowContext *const s = &enc->com;
2087 
2090  av_frame_free(&s->input_picture);
2091 
2092  for (int i = 0; i < MAX_REF_FRAMES; i++) {
2093  av_freep(&s->ref_mvs[i]);
2094  av_freep(&s->ref_scores[i]);
2095  }
2096 
2097  enc->m.s.me.temp = NULL;
2098  av_freep(&enc->m.s.me.scratchpad);
2099  av_freep(&enc->emu_edge_buffer);
2100 
2101  av_freep(&avctx->stats_out);
2102 
2103  return 0;
2104 }
2105 
2106 #define OFFSET(x) offsetof(SnowEncContext, x)
2107 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2108 static const AVOption options[] = {
2109  {"motion_est", "motion estimation algorithm", OFFSET(motion_est), AV_OPT_TYPE_INT, {.i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_ITER, VE, .unit = "motion_est" },
2110  { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, VE, .unit = "motion_est" },
2111  { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, VE, .unit = "motion_est" },
2112  { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, VE, .unit = "motion_est" },
2113  { "iter", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ITER }, 0, 0, VE, .unit = "motion_est" },
2114  { "memc_only", "Only do ME/MC (I frames -> ref, P frame -> ME+MC).", OFFSET(memc_only), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2115  { "no_bitstream", "Skip final bitstream writeout.", OFFSET(no_bitstream), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2116  { "intra_penalty", "Penalty for intra blocks in block decision", OFFSET(intra_penalty), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2117  { "iterative_dia_size", "Dia size for the iterative ME", OFFSET(iterative_dia_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2118  { "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, VE },
2119  { "pred", "Spatial decomposition type", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 0 }, DWT_97, DWT_53, VE, .unit = "pred" },
2120  { "dwt97", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
2121  { "dwt53", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, .unit = "pred" },
2122  { "rc_eq", "Set rate control equation. When computing the expression, besides the standard functions "
2123  "defined in the section 'Expression Evaluation', the following functions are available: "
2124  "bits2qp(bits), qp2bits(qp). Also the following constants are available: iTex pTex tex mv "
2125  "fCode iCount mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex avgTex.",
2126  OFFSET(m.rc_context.rc_eq), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VE },
2127  { NULL },
2128 };
2129 
2130 static const AVClass snowenc_class = {
2131  .class_name = "snow encoder",
2132  .item_name = av_default_item_name,
2133  .option = options,
2134  .version = LIBAVUTIL_VERSION_INT,
2135 };
2136 
2138  .p.name = "snow",
2139  CODEC_LONG_NAME("Snow"),
2140  .p.type = AVMEDIA_TYPE_VIDEO,
2141  .p.id = AV_CODEC_ID_SNOW,
2142  .p.capabilities = AV_CODEC_CAP_DR1 |
2145  .priv_data_size = sizeof(SnowEncContext),
2146  .init = encode_init,
2148  .close = encode_end,
2151  .color_ranges = AVCOL_RANGE_MPEG,
2152  .p.priv_class = &snowenc_class,
2153  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2154 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
CODEC_PIXFMTS
#define CODEC_PIXFMTS(...)
Definition: codec_internal.h:392
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:254
encode_subband
static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation)
Definition: snowenc.c:1061
MPVEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideoenc.h:137
decorrelate
static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1520
MpegEncContext::hdsp
HpelDSPContext hdsp
Definition: mpegvideo.h:159
set_blocks
static void set_blocks(SnowContext *s, int level, int x, int y, int l, int cb, int cr, int mx, int my, int ref, int type)
Definition: snow.h:405
P_LEFT
#define P_LEFT
Definition: snowenc.c:368
level
uint8_t level
Definition: svq3.c:208
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:191
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:46
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
QEXPSHIFT
#define QEXPSHIFT
Definition: snow.h:432
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
SnowEncContext::lambda
int lambda
Definition: snowenc.c:50
libm.h
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MID_STATE
#define MID_STATE
Definition: snow.h:39
color
Definition: vf_paletteuse.c:513
ratecontrol_1pass
static int ratecontrol_1pass(SnowEncContext *enc, AVFrame *pict)
Definition: snowenc.c:1679
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:247
FF_ME_EPZS
#define FF_ME_EPZS
Definition: motion_est.h:43
inverse
inverse
Definition: af_crystalizer.c:122
encode_end
static av_cold int encode_end(AVCodecContext *avctx)
Definition: snowenc.c:2083
SnowEncContext::scenechange_threshold
int scenechange_threshold
Definition: snowenc.c:60
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:47
LOG2_MB_SIZE
#define LOG2_MB_SIZE
Definition: snow.h:72
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MotionEstContext
Motion estimation context.
Definition: motion_est.h:49
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:255
int64_t
long long int64_t
Definition: coverity.c:34
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
AV_CODEC_CAP_ENCODER_RECON_FRAME
#define AV_CODEC_CAP_ENCODER_RECON_FRAME
The encoder is able to output reconstructed frame data, i.e.
Definition: codec.h:159
QBIAS_SHIFT
#define QBIAS_SHIFT
Definition: snow.h:160
h263enc.h
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
MPVEncContext::mv_bits
int mv_bits
Definition: mpegvideoenc.h:133
DWT_97
#define DWT_97
Definition: snow_dwt.h:68
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:434
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:536
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:39
update_last_header_values
static void update_last_header_values(SnowContext *s)
Definition: snowenc.c:1654
MpegEncContext::pict_type
enum AVPictureType pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:154
internal.h
iterative_me
static void iterative_me(SnowEncContext *enc)
Definition: snowenc.c:1184
AVPacket::data
uint8_t * data
Definition: packet.h:595
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
SnowEncContext::qdsp
QpelDSPContext qdsp
Definition: snowenc.c:47
DWT_53
#define DWT_53
Definition: snow_dwt.h:69
get_penalty_factor
static int get_penalty_factor(int lambda, int lambda2, int type)
Definition: snowenc.c:343
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:52
encode_subband_c0run
static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation)
Definition: snowenc.c:941
rangecoder.h
FFCodec
Definition: codec_internal.h:127
MpegEncContext::b8_stride
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:98
mpegvideo.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
SnowContext
Definition: snow.h:113
encode_frame
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
Definition: snowenc.c:1763
QSHIFT
#define QSHIFT
Definition: snow.h:42
MAX_REF_FRAMES
#define MAX_REF_FRAMES
Definition: snow.h:46
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:269
pix
enum AVPixelFormat pix
Definition: ohcodec.c:55
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:650
FF_INPUT_BUFFER_MIN_SIZE
#define FF_INPUT_BUFFER_MIN_SIZE
Used by some encoders as upper bound for the length of headers.
Definition: encode.h:33
ff_snow_common_end
av_cold void ff_snow_common_end(SnowContext *s)
Definition: snow.c:637
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:455
ff_spatial_dwt
void ff_spatial_dwt(DWTELEM *buffer, DWTELEM *temp, int width, int height, int stride, int type, int decomposition_count)
Definition: snow_dwt.c:320
BlockNode::type
uint8_t type
Bitfield of BLOCK_*.
Definition: snow.h:55
px
#define px
Definition: ops_tmpl_float.c:35
check_4block_inter
static av_always_inline int check_4block_inter(SnowEncContext *enc, int mb_x, int mb_y, int p0, int p1, int ref, int *best_rd)
Definition: snowenc.c:1135
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
ff_spatial_idwt
void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height, int stride, int type, int decomposition_count)
Definition: snow_dwt.c:732
SnowEncContext::me_cache_generation
unsigned me_cache_generation
Definition: snowenc.c:67
encode_blocks
static void encode_blocks(SnowEncContext *enc, int search)
Definition: snowenc.c:1413
ff_init_range_encoder
av_cold void ff_init_range_encoder(RangeCoder *c, uint8_t *buf, int buf_size)
Definition: rangecoder.c:42
LOG2_OBMC_MAX
#define LOG2_OBMC_MAX
Definition: snow.h:48
BlockNode
Definition: snow.h:50
AVCodecContext::refs
int refs
number of reference frames
Definition: avcodec.h:697
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:102
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:961
check_block_intra
static av_always_inline int check_block_intra(SnowEncContext *enc, int mb_x, int mb_y, int p[3], uint8_t(*obmc_edged)[MB_SIZE *2], int *best_rd)
Definition: snowenc.c:1068
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
OFFSET
#define OFFSET(x)
Definition: snowenc.c:2106
ff_snow_pred_block
void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, ptrdiff_t stride, int sx, int sy, int b_w, int b_h, const BlockNode *block, int plane_index, int w, int h)
Definition: snow.c:379
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
get_4block_rd
static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
Definition: snowenc.c:870
ff_encode_add_stats_side_data
int ff_encode_add_stats_side_data(AVPacket *pkt, int quality, const int64_t error[], int error_count, enum AVPictureType pict_type)
Definition: encode.c:919
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:359
FF_CMP_SSE
#define FF_CMP_SSE
Definition: avcodec.h:878
ff_sqrt
#define ff_sqrt
Definition: mathops.h:220
SnowEncContext
Definition: snowenc.c:45
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
ff_snow_common_init_after_header
int ff_snow_common_init_after_header(AVCodecContext *avctx)
Definition: snow.c:545
lrint
#define lrint
Definition: tablegen.h:53
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:111
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MpegEncContext::qdsp
QpelDSPContext qdsp
Definition: mpegvideo.h:161
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:132
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:649
encode_q_branch
static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
Definition: snowenc.c:375
FF_CMP_BIT
#define FF_CMP_BIT
Definition: avcodec.h:882
emms_c
#define emms_c()
Definition: emms.h:89
SnowEncContext::mecc
MECmpContext mecc
Definition: snowenc.c:62
s
#define s(width, name)
Definition: cbs_vp9.c:198
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1229
MPVWorkPicture::ptr
MPVPicture * ptr
RefStruct reference.
Definition: mpegpicture.h:99
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:270
BLOCK_OPT
#define BLOCK_OPT
Block needs no checks in this round of iterative motion estiation.
Definition: snow.h:58
LOSSLESS_QLOG
#define LOSSLESS_QLOG
Definition: snow.h:44
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:264
calculate_visual_weight
static void calculate_visual_weight(SnowContext *s, Plane *p)
Definition: snowenc.c:1733
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:144
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
MpegEncContext::mb_num
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:100
P
#define P
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
add_yblock
static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer *sb, IDWTELEM *dst, uint8_t *dst8, const uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int offset_dst, int plane_index)
Definition: snow.h:222
pix_norm1
static int pix_norm1(const uint8_t *pix, int line_size, int w)
Definition: snowenc.c:327
ff_snow_common_init
av_cold int ff_snow_common_init(AVCodecContext *avctx)
Definition: snow.c:489
PTR_ADD
#define PTR_ADD(ptr, off)
Definition: snowenc.c:76
get_encode_buffer
static int get_encode_buffer(SnowContext *s, AVFrame *frame)
Definition: snowenc.c:142
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
SnowEncContext::encoding_error
uint64_t encoding_error[SNOW_MAX_PLANES]
Definition: snowenc.c:69
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:57
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
MotionEstContext::dia_size
int dia_size
Definition: motion_est.h:71
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
MECmpContext
Definition: me_cmp.h:50
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:152
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
NULL
#define NULL
Definition: coverity.c:32
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:236
run
uint8_t run
Definition: svq3.c:207
SnowEncContext::me_cache
unsigned me_cache[ME_CACHE_SIZE]
Definition: snowenc.c:66
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
MpegvideoEncDSPContext::draw_edges
void(* draw_edges)(uint8_t *buf, ptrdiff_t wrap, int width, int height, int w, int h, int sides)
Definition: mpegvideoencdsp.h:47
snow.h
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:908
BlockNode::my
int16_t my
Motion vector component Y, see mv_scale.
Definition: snow.h:52
get_block_rd
static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_index, uint8_t(*obmc_edged)[MB_SIZE *2])
Definition: snowenc.c:767
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
VE
#define VE
Definition: snowenc.c:2107
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:489
ff_rac_terminate
int ff_rac_terminate(RangeCoder *c, int version)
Terminates the range coder.
Definition: rangecoder.c:109
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:53
mathops.h
options
Definition: swscale.c:45
SnowEncContext::obmc_scratchpad
IDWTELEM obmc_scratchpad[MB_SIZE *MB_SIZE *12 *2]
Definition: snowenc.c:73
qpeldsp.h
abs
#define abs(x)
Definition: cuda_runtime.h:35
correlate
static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1544
QROOT
#define QROOT
Definition: snow.h:43
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
MpegEncContext::mb_width
int mb_width
Definition: mpegvideo.h:96
MPVMainEncContext
Definition: mpegvideoenc.h:202
ff_h263_get_mv_penalty
const uint8_t(* ff_h263_get_mv_penalty(void))[MAX_DMV *2+1]
Definition: ituh263enc.c:148
FF_ME_XONE
#define FF_ME_XONE
Definition: motion_est.h:44
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
init_ref
static void init_ref(MotionEstContext *c, const uint8_t *const src[3], uint8_t *const ref[3], uint8_t *const ref2[3], int x, int y, int ref_index)
Definition: snowenc.c:78
MB_SIZE
#define MB_SIZE
Definition: cinepakenc.c:54
put_symbol
static void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed)
Definition: snowenc.c:95
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:838
MpegEncContext::quarter_sample
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:230
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1324
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:262
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:120
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:526
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
AVPacket::size
int size
Definition: packet.h:596
SNOW_MAX_PLANES
#define SNOW_MAX_PLANES
Definition: snow.h:37
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1017
height
#define height
Definition: dsp.h:89
encode_header
static void encode_header(SnowContext *s)
Definition: snowenc.c:1581
codec_internal.h
FF_CMP_PSNR
#define FF_CMP_PSNR
Definition: avcodec.h:881
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:556
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
SnowEncContext::pass1_rc
int pass1_rc
Definition: snowenc.c:52
MpegEncContext::mb_stride
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
Definition: mpegvideo.h:97
FF_CMP_W53
#define FF_CMP_W53
Definition: avcodec.h:888
size
int size
Definition: twinvq_data.h:10344
ff_build_rac_states
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
Definition: rangecoder.c:68
MotionEstContext::mv_penalty
const uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
Definition: motion_est.h:100
pix_sum
static int pix_sum(const uint8_t *pix, int line_size, int w, int h)
Definition: snowenc.c:311
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:256
SnowEncContext::motion_est
int motion_est
Definition: snowenc.c:58
ff_snow_encoder
const FFCodec ff_snow_encoder
Definition: snowenc.c:2137
SubBand
Definition: cfhd.h:116
range
enum AVColorRange range
Definition: mediacodec_wrapper.c:2594
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:82
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:78
FF_CMP_SATD
#define FF_CMP_SATD
Definition: avcodec.h:879
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
SnowEncContext::intra_penalty
int intra_penalty
Definition: snowenc.c:57
state
static struct @583 state
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
snow_dwt.h
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:601
AVCodecInternal
Definition: internal.h:49
FF_CMP_SAD
#define FF_CMP_SAD
Definition: avcodec.h:877
encode_q_branch2
static void encode_q_branch2(SnowContext *s, int level, int x, int y)
Definition: snowenc.c:613
SnowEncContext::iterative_dia_size
int iterative_dia_size
Definition: snowenc.c:59
ff_quant3bA
const int8_t ff_quant3bA[256]
Definition: snowdata.h:105
DWTELEM
int DWTELEM
Definition: dirac_dwt.h:26
emms.h
ff_obmc_tab
const uint8_t *const ff_obmc_tab[4]
Definition: snowdata.h:124
MpegvideoEncDSPContext
Definition: mpegvideoencdsp.h:32
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
ENCODER_EXTRA_BITS
#define ENCODER_EXTRA_BITS
Definition: snow.h:74
AV_CODEC_FLAG_RECON_FRAME
#define AV_CODEC_FLAG_RECON_FRAME
Request the encoder to output reconstructed frames, i.e. frames that would be produced by decoding th...
Definition: avcodec.h:244
log.h
pred_mv
static void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
Definition: diracdec.c:1392
MPVEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideoenc.h:80
FF_CMP_RD
#define FF_CMP_RD
Definition: avcodec.h:883
get_block_bits
static int get_block_bits(SnowContext *s, int x, int y, int w)
Definition: snowenc.c:729
ff_get_mb_score
int ff_get_mb_score(MPVEncContext *s, int mx, int my, int src_index, int ref_index, int size, int h, int add_rate)
Definition: motion_est_template.c:192
ff_w53_32_c
int ff_w53_32_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
Definition: snow_dwt.c:833
BLOCK_INTRA
#define BLOCK_INTRA
Intra block, inter otherwise.
Definition: snow.h:57
MotionEstContext::motion_est
int motion_est
ME algorithm.
Definition: motion_est.h:51
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
qscale2qlog
static int qscale2qlog(int qscale)
Definition: snowenc.c:1674
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:496
av_always_inline
#define av_always_inline
Definition: attributes.h:68
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
AVCodecContext::dia_size
int dia_size
ME diamond size & shape.
Definition: avcodec.h:900
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:887
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodecContext::mb_lmin
int mb_lmin
minimum MB Lagrange multiplier
Definition: avcodec.h:990
ff_qexp
const uint8_t ff_qexp[QROOT]
Definition: snowdata.h:129
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:37
predict_plane
static av_always_inline void predict_plane(SnowContext *s, IDWTELEM *buf, int plane_index, int add)
Definition: snow.h:398
SnowEncContext::no_bitstream
int no_bitstream
Definition: snowenc.c:56
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
ME_CACHE_SIZE
#define ME_CACHE_SIZE
Definition: snowenc.c:65
SnowEncContext::com
SnowContext com
Definition: snowenc.c:46
FF_ME_ITER
#define FF_ME_ITER
Definition: snowenc.c:43
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
get_dc
static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
Definition: snowenc.c:669
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
SnowEncContext::m
MPVMainEncContext m
Definition: snowenc.c:63
log2
#define log2(x)
Definition: libm.h:406
avcodec.h
ff_w97_32_c
int ff_w97_32_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
Definition: snow_dwt.c:838
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1890
mid_pred
#define mid_pred
Definition: mathops.h:115
ret
ret
Definition: filter_design.txt:187
SnowEncContext::mpvencdsp
MpegvideoEncDSPContext mpvencdsp
Definition: snowenc.c:48
pred
static const float pred[4]
Definition: siprdata.h:259
search
static float search(FOCContext *foc, int pass, int maxpass, int xmin, int xmax, int ymin, int ymax, int *best_x, int *best_y, float best_score)
Definition: vf_find_rect.c:152
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: snowenc.c:164
options
static const AVOption options[]
Definition: snowenc.c:2108
AVCodecInternal::recon_frame
AVFrame * recon_frame
When the AV_CODEC_FLAG_RECON_FRAME flag is used.
Definition: internal.h:114
square
static int square(int x)
Definition: roqvideoenc.c:196
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
put_rac
#define put_rac(C, S, B)
ff_snow_reset_contexts
void ff_snow_reset_contexts(SnowContext *s)
Definition: snow.c:157
me_cmp.h
encode_qlogs
static void encode_qlogs(SnowContext *s)
Definition: snowenc.c:1568
av_frame_replace
int av_frame_replace(AVFrame *dst, const AVFrame *src)
Ensure the destination frame refers to the same data described by the source frame,...
Definition: frame.c:376
QpelDSPContext
quarterpel DSP context
Definition: qpeldsp.h:72
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AV_CODEC_ID_SNOW
@ AV_CODEC_ID_SNOW
Definition: codec_id.h:267
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
SnowEncContext::cur_pic
MPVPicture cur_pic
Definition: snowenc.c:64
SnowEncContext::last_pic
MPVPicture last_pic
Definition: snowenc.c:64
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:236
FRAC_BITS
#define FRAC_BITS
Definition: g729postfilter.c:36
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
FF_CMP_DCT
#define FF_CMP_DCT
Definition: avcodec.h:880
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:84
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
get_rac_count
static int get_rac_count(RangeCoder *c)
Definition: rangecoder.h:79
AVCodecContext::mb_lmax
int mb_lmax
maximum MB Lagrange multiplier
Definition: avcodec.h:997
put_symbol2
static void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2)
Definition: snowenc.c:123
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
Plane
Definition: cfhd.h:125
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
BlockNode::level
uint8_t level
Definition: snow.h:60
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
same_block
static av_always_inline int same_block(BlockNode *a, BlockNode *b)
Definition: snow.h:212
mem.h
BlockNode::mx
int16_t mx
Motion vector component X, see mv_scale.
Definition: snow.h:51
w
uint8_t w
Definition: llvidencdsp.c:39
ff_epzs_motion_search
int ff_epzs_motion_search(MPVEncContext *s, int *mx_ptr, int *my_ptr, int P[10][2], int src_index, int ref_index, const int16_t(*last_mv)[2], int ref_mv_scale, int size, int h)
Definition: motion_est_template.c:977
mcf
#define mcf(dx, dy)
AVPacket
This structure stores compressed data.
Definition: packet.h:572
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:248
ff_snow_frames_prepare
int ff_snow_frames_prepare(SnowContext *s)
Definition: snow.c:606
FF_CMP_DCT264
#define FF_CMP_DCT264
Definition: avcodec.h:891
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:191
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
SnowEncContext::emu_edge_buffer
uint8_t * emu_edge_buffer
Definition: snowenc.c:71
quantize
static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias)
Definition: snowenc.c:1437
SnowEncContext::memc_only
int memc_only
Definition: snowenc.c:55
dequantize
static void dequantize(SnowContext *s, SubBand *b, IDWTELEM *src, int stride)
Definition: snowenc.c:1498
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:276
HTAPS_MAX
#define HTAPS_MAX
Definition: snow.h:75
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
null_block
static const BlockNode null_block
Definition: snow.h:63
MotionEstContext::scene_change_score
int scene_change_score
Definition: motion_est.h:86
MPVEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideoenc.h:135
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:479
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
IDWTELEM
short IDWTELEM
Definition: dirac_dwt.h:27
h
h
Definition: vp9dsp_template.c:2070
RangeCoder
Definition: mss3.c:63
MpegEncContext::out_format
enum OutputFormat out_format
output format
Definition: mpegvideo.h:85
stride
#define stride
Definition: h264pred_template.c:536
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
snowenc_class
static const AVClass snowenc_class
Definition: snowenc.c:2130
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_mal...
Definition: opt.h:276
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
SnowEncContext::pred
int pred
Definition: snowenc.c:54
P_TOP
#define P_TOP
Definition: snowenc.c:369
check_block_inter
static av_always_inline int check_block_inter(SnowEncContext *enc, int mb_x, int mb_y, int p0, int p1, uint8_t(*obmc_edged)[MB_SIZE *2], int *best_rd)
Definition: snowenc.c:1099
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
ff_snow_alloc_blocks
int ff_snow_alloc_blocks(SnowContext *s)
Definition: snow.c:171
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
BlockNode::ref
uint8_t ref
Reference frame index.
Definition: snow.h:53
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:203
P_TOPRIGHT
#define P_TOPRIGHT
Definition: snowenc.c:370
MpegEncContext::width
int width
Definition: mpegvideo.h:84
src
#define src
Definition: vp8dsp.c:248
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:101
MPVEncContext::picture_number
int picture_number
Definition: mpegvideoenc.h:130
MotionEstContext::me_cmp
me_cmp_func me_cmp[6]
Definition: motion_est.h:89
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:337
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
MpegEncContext::mb_height
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:96
P_MEDIAN
#define P_MEDIAN
Definition: snowenc.c:371
FF_ME_ZERO
#define FF_ME_ZERO
Definition: motion_est.h:42
SnowEncContext::lambda2
int lambda2
Definition: snowenc.c:51
FF_CMP_W97
#define FF_CMP_W97
Definition: avcodec.h:889
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:709
MotionEstContext::unrestricted_mv
int unrestricted_mv
mv can point outside of the coded picture
Definition: motion_est.h:72
intmath.h