FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/emms.h"
40 #include "libavutil/internal.h"
41 #include "libavutil/intmath.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/mem_internal.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/thread.h"
47 #include "avcodec.h"
48 #include "encode.h"
49 #include "idctdsp.h"
50 #include "mpeg12codecs.h"
51 #include "mpeg12data.h"
52 #include "mpeg12enc.h"
53 #include "mpegvideo.h"
54 #include "mpegvideodata.h"
55 #include "mpegvideoenc.h"
56 #include "h261enc.h"
57 #include "h263.h"
58 #include "h263data.h"
59 #include "h263enc.h"
60 #include "mjpegenc_common.h"
61 #include "mathops.h"
62 #include "mpegutils.h"
63 #include "mpegvideo_unquantize.h"
64 #include "mjpegenc.h"
65 #include "speedhqenc.h"
66 #include "msmpeg4enc.h"
67 #include "pixblockdsp.h"
68 #include "qpeldsp.h"
69 #include "faandct.h"
70 #include "aandcttab.h"
71 #include "mpeg4video.h"
72 #include "mpeg4videodata.h"
73 #include "mpeg4videoenc.h"
74 #include "internal.h"
75 #include "bytestream.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include "libavutil/refstruct.h"
79 #include <limits.h>
80 #include "sp5x.h"
81 
82 #define QUANT_BIAS_SHIFT 8
83 
84 #define QMAT_SHIFT_MMX 16
85 #define QMAT_SHIFT 21
86 
87 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
88 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
89 static int sse_mb(MPVEncContext *const s);
90 static void denoise_dct_c(MPVEncContext *const s, int16_t *block);
91 static int dct_quantize_c(MPVEncContext *const s,
92  int16_t *block, int n,
93  int qscale, int *overflow);
94 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
95 
96 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
97 
98 static const AVOption mpv_generic_options[] = {
101  { NULL },
102 };
103 
105  .class_name = "generic mpegvideo encoder",
106  .item_name = av_default_item_name,
107  .option = mpv_generic_options,
108  .version = LIBAVUTIL_VERSION_INT,
109 };
110 
/**
 * Precompute per-qscale quantizer multiplier tables from a quantization
 * matrix, so quantization can be performed with a multiply + shift instead
 * of a division.
 *
 * @param s            encoder context; provides the FDCT implementation
 *                     (which determines the table scaling) and the IDCT
 *                     coefficient permutation
 * @param qmat         output: 32-bit multipliers, indexed [qscale][coef]
 * @param qmat16       output: 16-bit multiplier/bias pairs, indexed
 *                     [qscale][0 = multiplier, 1 = bias][coef]; only filled
 *                     in the generic (non islow/faan/ifast) FDCT branch
 * @param quant_matrix input 64-entry quantization matrix
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point; folded
 *                     into qmat16[..][1]
 * @param qmin         first qscale index to fill
 * @param qmax         last qscale index to fill (inclusive)
 * @param intra        nonzero for an intra matrix; the DC coefficient (index
 *                     0) is then excluded from the final overflow check
 */
void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        int qscale2;

        /* Map the qscale index to the effective quantizer step: either the
         * MPEG-2 non-linear scale table or the plain linear 2*qscale. */
        if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else                   qscale2 = qscale << 1;

        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            /* Unscaled FDCT outputs: plain reciprocal table. */
            for (i = 0; i < 64; i++) {
                /* Tables are stored in IDCT permutation order. */
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 *             1 <= x  <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 *           4194304 >= (1 << 22) / (x) >= 146 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            /* ifast FDCT outputs are pre-scaled by ff_aanscales; fold that
             * scaling (and 14 extra shift bits) into the table. */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
                 * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
                 *             1247 <= x <= 900239760
                 * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
                 *           55107840 >= (1 << 36) / (x) >= 76 */

                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            /* Generic case: also build the 16-bit multiplier/bias tables. */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 *             1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 *           4194304 >= (1 << 22) / (x) >= 146
                 *
                 *             1 <= x <= 28560
                 * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
                 *           131072 >= (1 << 17) / (x) >= 4 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                /* Keep the 16-bit multiplier nonzero and below 128*256 so it
                 * fits the fixed-point quantizer's value range. */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
            }
        }

        /* Determine how far QMAT_SHIFT would have to be reduced to keep
         * max_coeff * qmat within INT_MAX (DC skipped for intra matrices). */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(s->c.avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
197 
198 static inline void update_qscale(MPVMainEncContext *const m)
199 {
200  MPVEncContext *const s = &m->s;
201 
202  if (s->c.q_scale_type == 1 && 0) {
203  int i;
204  int bestdiff=INT_MAX;
205  int best = 1;
206 
207  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
208  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
209  if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
210  (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
211  continue;
212  if (diff < bestdiff) {
213  bestdiff = diff;
214  best = i;
215  }
216  }
217  s->c.qscale = best;
218  } else {
219  s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
220  (FF_LAMBDA_SHIFT + 7);
221  s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
222  }
223 
224  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
226 }
227 
229 {
230  int i;
231 
232  if (matrix) {
233  put_bits(pb, 1, 1);
234  for (i = 0; i < 64; i++) {
235  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
236  }
237  } else
238  put_bits(pb, 1, 0);
239 }
240 
241 /**
242  * init s->c.cur_pic.qscale_table from s->lambda_table
243  */
244 static void init_qscale_tab(MPVEncContext *const s)
245 {
246  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
247 
248  for (int i = 0; i < s->c.mb_num; i++) {
249  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
250  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
251  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
252  s->c.avctx->qmax);
253  }
254 }
255 
257  const MPVEncContext *const src)
258 {
259 #define COPY(a) dst->a = src->a
260  COPY(c.pict_type);
261  COPY(f_code);
262  COPY(b_code);
263  COPY(c.qscale);
264  COPY(lambda);
265  COPY(lambda2);
266  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
267  COPY(c.progressive_frame); // FIXME don't set in encode_header
268  COPY(c.partitioned_frame); // FIXME don't set in encode_header
269 #undef COPY
270 }
271 
273 {
274  for (int i = -16; i < 16; i++)
275  default_fcode_tab[i + MAX_MV] = 1;
276 }
277 
278 /**
279  * Set the given MPVEncContext to defaults for encoding.
280  */
282 {
283  MPVEncContext *const s = &m->s;
284  static AVOnce init_static_once = AV_ONCE_INIT;
285 
287 
288  s->f_code = 1;
289  s->b_code = 1;
290 
291  if (!m->fcode_tab) {
293  ff_thread_once(&init_static_once, mpv_encode_init_static);
294  }
295  if (!s->c.y_dc_scale_table) {
296  s->c.y_dc_scale_table =
297  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
298  }
299 }
300 
302 {
303  s->dct_quantize = dct_quantize_c;
304  s->denoise_dct = denoise_dct_c;
305 
306 #if ARCH_MIPS
308 #elif ARCH_X86
310 #endif
311 
312  if (s->c.avctx->trellis)
313  s->dct_quantize = dct_quantize_trellis_c;
314 }
315 
317 {
318  MPVUnquantDSPContext unquant_dsp_ctx;
319 
320  ff_mpv_unquantize_init(&unquant_dsp_ctx,
321  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
322 
323  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
324  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
325  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
326  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
327  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
328  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
329  } else {
330  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
331  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
332  }
333 }
334 
336 {
337  MPVEncContext *const s = &m->s;
338  MECmpContext mecc;
339  me_cmp_func me_cmp[6];
340  int ret;
341 
342  ff_me_cmp_init(&mecc, avctx);
343  ret = ff_me_init(&s->me, avctx, &mecc, 1);
344  if (ret < 0)
345  return ret;
346  ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
347  if (ret < 0)
348  return ret;
349  m->frame_skip_cmp_fn = me_cmp[1];
351  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
352  if (ret < 0)
353  return ret;
354  if (!me_cmp[0] || !me_cmp[4])
355  return AVERROR(EINVAL);
356  s->ildct_cmp[0] = me_cmp[0];
357  s->ildct_cmp[1] = me_cmp[4];
358  }
359 
360  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
361 
362  s->sse_cmp[0] = mecc.sse[0];
363  s->sse_cmp[1] = mecc.sse[1];
364  s->sad_cmp[0] = mecc.sad[0];
365  s->sad_cmp[1] = mecc.sad[1];
366  if (avctx->mb_cmp == FF_CMP_NSSE) {
367  s->n_sse_cmp[0] = mecc.nsse[0];
368  s->n_sse_cmp[1] = mecc.nsse[1];
369  } else {
370  s->n_sse_cmp[0] = mecc.sse[0];
371  s->n_sse_cmp[1] = mecc.sse[1];
372  }
373 
374  return 0;
375 }
376 
377 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
379 {
380  MPVEncContext *const s = &m->s;
381  const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
382  const uint16_t *intra_matrix, *inter_matrix;
383  int ret;
384 
385  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
386  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
387  return AVERROR(ENOMEM);
388 
389  if (s->c.out_format == FMT_MJPEG) {
390  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
391  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
392  // No need to set q_inter_matrix
394  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
395  return 0;
396  } else {
397  s->q_chroma_intra_matrix = s->q_intra_matrix;
398  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
399  }
400  if (!m->intra_only) {
401  s->q_inter_matrix = s->q_intra_matrix + 32;
402  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
403  }
404 
405  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
406  s->c.mpeg_quant) {
409  } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
410  intra_matrix =
412  } else {
413  /* MPEG-1/2, SpeedHQ */
416  }
417  if (avctx->intra_matrix)
419  if (avctx->inter_matrix)
421 
422  /* init q matrix */
423  for (int i = 0; i < 64; i++) {
424  int j = s->c.idsp.idct_permutation[i];
425 
426  s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
427  s->c.inter_matrix[j] = inter_matrix[i];
428  }
429 
430  /* precompute matrix */
432  if (ret < 0)
433  return ret;
434 
435  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
436  s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
437  31, 1);
438  if (s->q_inter_matrix)
439  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
440  s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
441  31, 0);
442 
443  return 0;
444 }
445 
447 {
448  MPVEncContext *const s = &m->s;
449  // Align the following per-thread buffers to avoid false sharing.
450  enum {
451 #ifndef _MSC_VER
452  /// The number is supposed to match/exceed the cache-line size.
453  ALIGN = FFMAX(128, _Alignof(max_align_t)),
454 #else
455  ALIGN = 128,
456 #endif
457  DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
458  };
459  static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
460  "Need checks for potential overflow.");
461  unsigned nb_slices = s->c.slice_context_count, mv_table_size, mb_array_size;
462  char *dct_error = NULL;
463  int has_b_frames = !!m->max_b_frames, nb_mv_tables = 1 + 5 * has_b_frames;
464  int16_t (*mv_table)[2];
465 
466  if (m->noise_reduction) {
467  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
468  return AVERROR(ENOMEM);
469  dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
470  if (!dct_error)
471  return AVERROR(ENOMEM);
473  dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
474  }
475 
476  /* Allocate MB type table */
477  mb_array_size = s->c.mb_stride * s->c.mb_height;
478  s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
479  if (!s->mb_type)
480  return AVERROR(ENOMEM);
481  if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
482  return AVERROR(ENOMEM);
483 
484  mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
485  if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
486  (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
487  nb_mv_tables += 8 * has_b_frames;
488  if (!ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * (2 + 4 * has_b_frames), mv_table_size))
489  return AVERROR(ENOMEM);
490  }
491 
492  mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
493  if (!mv_table)
494  return AVERROR(ENOMEM);
495  m->mv_table_base = mv_table;
496  mv_table += s->c.mb_stride + 1;
497 
498  for (unsigned i = 0; i < nb_slices; ++i) {
499  MPVEncContext *const s2 = s->c.enc_contexts[i];
500  int16_t (*tmp_mv_table)[2] = mv_table;
501 
502  if (dct_error) {
503  s2->dct_offset = s->dct_offset;
504  s2->dct_error_sum = (void*)dct_error;
505  dct_error += DCT_ERROR_SIZE;
506  }
507 
508  s2->mb_type = s->mb_type;
509  s2->mc_mb_var = s2->mb_type + mb_array_size;
510  s2->mb_var = s2->mc_mb_var + mb_array_size;
511  s2->mb_mean = (uint8_t*)(s2->mb_var + mb_array_size);
512  s2->lambda_table = s->lambda_table;
513 
514  s2->p_mv_table = tmp_mv_table;
515  if (has_b_frames) {
516  s2->b_forw_mv_table = tmp_mv_table += mv_table_size;
517  s2->b_back_mv_table = tmp_mv_table += mv_table_size;
518  s2->b_bidir_forw_mv_table = tmp_mv_table += mv_table_size;
519  s2->b_bidir_back_mv_table = tmp_mv_table += mv_table_size;
520  s2->b_direct_mv_table = tmp_mv_table += mv_table_size;
521  }
522 
523  if (s->p_field_select_table[0]) { // MPEG-4 or INTERLACED_ME above
524  uint8_t *field_select = s->p_field_select_table[0];
526  s2->p_field_select_table[1] = field_select += 2 * mv_table_size;
527 
528  if (has_b_frames) {
529  for (int j = 0; j < 2; j++) {
530  for (int k = 0; k < 2; k++) {
531  for (int l = 0; l < 2; l++)
532  s2->b_field_mv_table[j][k][l] = tmp_mv_table += mv_table_size;
533  s2->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
534  }
535  }
536  }
537  }
538  }
539 
540  return 0;
541 }
542 
543 /* init video encoder */
545 {
546  MPVMainEncContext *const m = avctx->priv_data;
547  MPVEncContext *const s = &m->s;
548  AVCPBProperties *cpb_props;
549  int gcd, ret;
550 
552 
553  switch (avctx->pix_fmt) {
554  case AV_PIX_FMT_YUVJ444P:
555  case AV_PIX_FMT_YUV444P:
556  s->c.chroma_format = CHROMA_444;
557  break;
558  case AV_PIX_FMT_YUVJ422P:
559  case AV_PIX_FMT_YUV422P:
560  s->c.chroma_format = CHROMA_422;
561  break;
562  case AV_PIX_FMT_YUVJ420P:
563  case AV_PIX_FMT_YUV420P:
564  default:
565  s->c.chroma_format = CHROMA_420;
566  break;
567  }
568 
570 
571  m->bit_rate = avctx->bit_rate;
572  s->c.width = avctx->width;
573  s->c.height = avctx->height;
574  if (avctx->gop_size > 600 &&
577  "keyframe interval too large!, reducing it from %d to %d\n",
578  avctx->gop_size, 600);
579  avctx->gop_size = 600;
580  }
581  m->gop_size = avctx->gop_size;
582  s->c.avctx = avctx;
584  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
585  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
587  } else if (avctx->max_b_frames < 0) {
589  "max b frames must be 0 or positive for mpegvideo based encoders\n");
590  return AVERROR(EINVAL);
591  }
593  s->c.codec_id = avctx->codec->id;
595  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
596  return AVERROR(EINVAL);
597  }
598 
599  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
600  s->rtp_mode = !!s->rtp_payload_size;
601  s->c.intra_dc_precision = avctx->intra_dc_precision;
602 
603  // workaround some differences between how applications specify dc precision
604  if (s->c.intra_dc_precision < 0) {
605  s->c.intra_dc_precision += 8;
606  } else if (s->c.intra_dc_precision >= 8)
607  s->c.intra_dc_precision -= 8;
608 
609  if (s->c.intra_dc_precision < 0) {
611  "intra dc precision must be positive, note some applications use"
612  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
613  return AVERROR(EINVAL);
614  }
615 
616  if (s->c.intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
617  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
618  return AVERROR(EINVAL);
619  }
621 
622  if (m->gop_size <= 1) {
623  m->intra_only = 1;
624  m->gop_size = 12;
625  } else {
626  m->intra_only = 0;
627  }
628 
629  /* Fixed QSCALE */
631 
632  s->adaptive_quant = (avctx->lumi_masking ||
633  avctx->dark_masking ||
636  avctx->p_masking ||
637  m->border_masking ||
638  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
639  !m->fixed_qscale;
640 
641  s->c.loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
642 
644  switch(avctx->codec_id) {
647  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
648  break;
649  case AV_CODEC_ID_MPEG4:
653  if (avctx->rc_max_rate >= 15000000) {
654  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
655  } else if(avctx->rc_max_rate >= 2000000) {
656  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
657  } else if(avctx->rc_max_rate >= 384000) {
658  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
659  } else
660  avctx->rc_buffer_size = 40;
661  avctx->rc_buffer_size *= 16384;
662  break;
663  }
664  if (avctx->rc_buffer_size) {
665  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
666  }
667  }
668 
669  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
670  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
671  return AVERROR(EINVAL);
672  }
673 
676  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
677  }
678 
680  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
681  return AVERROR(EINVAL);
682  }
683 
685  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
686  return AVERROR(EINVAL);
687  }
688 
689  if (avctx->rc_max_rate &&
693  "impossible bitrate constraints, this will fail\n");
694  }
695 
696  if (avctx->rc_buffer_size &&
699  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
700  return AVERROR(EINVAL);
701  }
702 
703  if (!m->fixed_qscale &&
706  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
708  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
709  if (nbt <= INT_MAX) {
710  avctx->bit_rate_tolerance = nbt;
711  } else
712  avctx->bit_rate_tolerance = INT_MAX;
713  }
714 
715  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
716  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
717  s->c.codec_id != AV_CODEC_ID_FLV1) {
718  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
719  return AVERROR(EINVAL);
720  }
721 
722  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
724  "OBMC is only supported with simple mb decision\n");
725  return AVERROR(EINVAL);
726  }
727 
728  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
729  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
730  return AVERROR(EINVAL);
731  }
732 
733  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
734  s->c.codec_id == AV_CODEC_ID_H263 ||
735  s->c.codec_id == AV_CODEC_ID_H263P) &&
736  (avctx->sample_aspect_ratio.num > 255 ||
737  avctx->sample_aspect_ratio.den > 255)) {
739  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
743  }
744 
745  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
746  s->c.codec_id == AV_CODEC_ID_H263P) &&
747  (avctx->width > 2048 ||
748  avctx->height > 1152 )) {
749  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
750  return AVERROR(EINVAL);
751  }
752  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
753  (avctx->width > 65535 ||
754  avctx->height > 65535 )) {
755  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
756  return AVERROR(EINVAL);
757  }
758  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
759  s->c.codec_id == AV_CODEC_ID_H263P ||
760  s->c.codec_id == AV_CODEC_ID_RV20) &&
761  ((avctx->width &3) ||
762  (avctx->height&3) )) {
763  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
764  return AVERROR(EINVAL);
765  }
766 
767  if (s->c.codec_id == AV_CODEC_ID_RV10 &&
768  (avctx->width &15 ||
769  avctx->height&15 )) {
770  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
771  return AVERROR(EINVAL);
772  }
773 
774  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
775  s->c.codec_id == AV_CODEC_ID_WMV2) &&
776  avctx->width & 1) {
777  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
778  return AVERROR(EINVAL);
779  }
780 
782  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
783  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
784  return AVERROR(EINVAL);
785  }
786 
787  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
788  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
789  return AVERROR(EINVAL);
790  }
791 
792  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
794  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
795  return AVERROR(EINVAL);
796  }
797 
798  if (m->scenechange_threshold < 1000000000 &&
801  "closed gop with scene change detection are not supported yet, "
802  "set threshold to 1000000000\n");
803  return AVERROR_PATCHWELCOME;
804  }
805 
807  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
810  "low delay forcing is only available for mpeg2, "
811  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
812  return AVERROR(EINVAL);
813  }
814  if (m->max_b_frames != 0) {
816  "B-frames cannot be used with low delay\n");
817  return AVERROR(EINVAL);
818  }
819  }
820 
821  if (avctx->slices > 1 &&
823  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
824  return AVERROR(EINVAL);
825  }
826 
829  "notice: b_frame_strategy only affects the first pass\n");
830  m->b_frame_strategy = 0;
831  }
832 
834  if (gcd > 1) {
835  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
836  avctx->time_base.den /= gcd;
837  avctx->time_base.num /= gcd;
838  //return -1;
839  }
840 
841  if (s->c.mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
842  // (a + x * 3 / 8) / x
843  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
844  s->inter_quant_bias = 0;
845  } else {
846  s->intra_quant_bias = 0;
847  // (a - x / 4) / x
848  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
849  }
850 
851  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
852  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
853  return AVERROR(EINVAL);
854  }
855 
856  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
857 
858  switch (avctx->codec->id) {
859 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
861  s->rtp_mode = 1;
862  /* fallthrough */
864  s->c.out_format = FMT_MPEG1;
865  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
866  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
868  break;
869 #endif
870 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
871  case AV_CODEC_ID_MJPEG:
872  case AV_CODEC_ID_AMV:
873  s->c.out_format = FMT_MJPEG;
874  m->intra_only = 1; /* force intra only for jpeg */
875  avctx->delay = 0;
876  s->c.low_delay = 1;
877  break;
878 #endif
879  case AV_CODEC_ID_SPEEDHQ:
880  s->c.out_format = FMT_SPEEDHQ;
881  m->intra_only = 1; /* force intra only for SHQ */
882  avctx->delay = 0;
883  s->c.low_delay = 1;
884  break;
885  case AV_CODEC_ID_H261:
886  s->c.out_format = FMT_H261;
887  avctx->delay = 0;
888  s->c.low_delay = 1;
889  s->rtp_mode = 0; /* Sliced encoding not supported */
890  break;
891  case AV_CODEC_ID_H263:
892  if (!CONFIG_H263_ENCODER)
895  s->c.width, s->c.height) == 8) {
897  "The specified picture size of %dx%d is not valid for "
898  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
899  "352x288, 704x576, and 1408x1152. "
900  "Try H.263+.\n", s->c.width, s->c.height);
901  return AVERROR(EINVAL);
902  }
903  s->c.out_format = FMT_H263;
904  avctx->delay = 0;
905  s->c.low_delay = 1;
906  break;
907  case AV_CODEC_ID_H263P:
908  s->c.out_format = FMT_H263;
909  s->c.h263_plus = 1;
910  /* Fx */
911  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
912  s->c.modified_quant = s->c.h263_aic;
913  s->c.loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
914  s->c.unrestricted_mv = s->c.obmc || s->c.loop_filter || s->c.umvplus;
915  s->c.flipflop_rounding = 1;
916 
917  /* /Fx */
918  /* These are just to be sure */
919  avctx->delay = 0;
920  s->c.low_delay = 1;
921  break;
922  case AV_CODEC_ID_FLV1:
923  s->c.out_format = FMT_H263;
924  s->c.h263_flv = 2; /* format = 1; 11-bit codes */
925  s->c.unrestricted_mv = 1;
926  s->rtp_mode = 0; /* don't allow GOB */
927  avctx->delay = 0;
928  s->c.low_delay = 1;
929  break;
930 #if CONFIG_RV10_ENCODER
931  case AV_CODEC_ID_RV10:
933  s->c.out_format = FMT_H263;
934  avctx->delay = 0;
935  s->c.low_delay = 1;
936  break;
937 #endif
938 #if CONFIG_RV20_ENCODER
939  case AV_CODEC_ID_RV20:
941  s->c.out_format = FMT_H263;
942  avctx->delay = 0;
943  s->c.low_delay = 1;
944  s->c.modified_quant = 1;
945  s->c.h263_aic = 1;
946  s->c.h263_plus = 1;
947  s->c.loop_filter = 1;
948  s->c.unrestricted_mv = 0;
949  break;
950 #endif
951  case AV_CODEC_ID_MPEG4:
952  s->c.out_format = FMT_H263;
953  s->c.h263_pred = 1;
954  s->c.unrestricted_mv = 1;
955  s->c.flipflop_rounding = 1;
956  s->c.low_delay = m->max_b_frames ? 0 : 1;
957  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
958  break;
960  s->c.out_format = FMT_H263;
961  s->c.h263_pred = 1;
962  s->c.unrestricted_mv = 1;
963  s->c.msmpeg4_version = MSMP4_V2;
964  avctx->delay = 0;
965  s->c.low_delay = 1;
966  break;
968  s->c.out_format = FMT_H263;
969  s->c.h263_pred = 1;
970  s->c.unrestricted_mv = 1;
971  s->c.msmpeg4_version = MSMP4_V3;
972  s->c.flipflop_rounding = 1;
973  avctx->delay = 0;
974  s->c.low_delay = 1;
975  break;
976  case AV_CODEC_ID_WMV1:
977  s->c.out_format = FMT_H263;
978  s->c.h263_pred = 1;
979  s->c.unrestricted_mv = 1;
980  s->c.msmpeg4_version = MSMP4_WMV1;
981  s->c.flipflop_rounding = 1;
982  avctx->delay = 0;
983  s->c.low_delay = 1;
984  break;
985  case AV_CODEC_ID_WMV2:
986  s->c.out_format = FMT_H263;
987  s->c.h263_pred = 1;
988  s->c.unrestricted_mv = 1;
989  s->c.msmpeg4_version = MSMP4_WMV2;
990  s->c.flipflop_rounding = 1;
991  avctx->delay = 0;
992  s->c.low_delay = 1;
993  break;
994  default:
995  return AVERROR(EINVAL);
996  }
997 
998  avctx->has_b_frames = !s->c.low_delay;
999 
1000  s->c.encoding = 1;
1001 
1002  s->c.progressive_frame =
1003  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
1005  s->c.alternate_scan);
1006 
1009  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1010  (1 << AV_PICTURE_TYPE_P) |
1011  (1 << AV_PICTURE_TYPE_B);
1012  } else if (!m->intra_only) {
1013  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1014  (1 << AV_PICTURE_TYPE_P);
1015  } else {
1016  s->frame_reconstruction_bitfield = 0;
1017  }
1018 
1019  if (m->lmin > m->lmax) {
1020  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1021  m->lmin = m->lmax;
1022  }
1023 
1024  /* ff_mpv_common_init() will copy (memdup) the contents of the main slice
1025  * to the slice contexts, so we initialize various fields of it
1026  * before calling ff_mpv_common_init(). */
1027  s->parent = m;
1028  ff_mpv_idct_init(&s->c);
1029  init_unquantize(&s->c, avctx);
1030  ff_fdctdsp_init(&s->fdsp, avctx);
1031  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1032  ff_pixblockdsp_init(&s->pdsp, avctx);
1033  ret = me_cmp_init(m, avctx);
1034  if (ret < 0)
1035  return ret;
1036 
1037  if (!(avctx->stats_out = av_mallocz(256)) ||
1038  !(s->new_pic = av_frame_alloc()) ||
1039  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1040  return AVERROR(ENOMEM);
1041 
1042  ret = init_matrices(m, avctx);
1043  if (ret < 0)
1044  return ret;
1045 
1047 
1048  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1050 #if CONFIG_MSMPEG4ENC
1051  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1053 #endif
1054  }
1055 
1056  s->c.slice_ctx_size = sizeof(*s);
1057  ret = ff_mpv_common_init(&s->c);
1058  if (ret < 0)
1059  return ret;
1060 
1061  if (s->c.slice_context_count > 1) {
1062  for (int i = 0; i < s->c.slice_context_count; ++i) {
1063  s->c.enc_contexts[i]->rtp_mode = 1;
1064 
1066  s->c.enc_contexts[i]->c.h263_slice_structured = 1;
1067  }
1068  }
1069 
1070  ret = init_buffers(m, avctx);
1071  if (ret < 0)
1072  return ret;
1073 
1075  if (ret < 0)
1076  return ret;
1077 
1078  if (m->b_frame_strategy == 2) {
1079  for (int i = 0; i < m->max_b_frames + 2; i++) {
1080  m->tmp_frames[i] = av_frame_alloc();
1081  if (!m->tmp_frames[i])
1082  return AVERROR(ENOMEM);
1083 
1085  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1086  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1087 
1088  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1089  if (ret < 0)
1090  return ret;
1091  }
1092  }
1093 
1094  cpb_props = ff_encode_add_cpb_side_data(avctx);
1095  if (!cpb_props)
1096  return AVERROR(ENOMEM);
1097  cpb_props->max_bitrate = avctx->rc_max_rate;
1098  cpb_props->min_bitrate = avctx->rc_min_rate;
1099  cpb_props->avg_bitrate = avctx->bit_rate;
1100  cpb_props->buffer_size = avctx->rc_buffer_size;
1101 
1102  return 0;
1103 }
1104 
1106 {
1107  MPVMainEncContext *const m = avctx->priv_data;
1108  MPVEncContext *const s = &m->s;
1109 
1111 
1112  ff_mpv_common_end(&s->c);
1113  av_refstruct_pool_uninit(&s->c.picture_pool);
1114 
1115  for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
1118  }
1119  for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
1120  av_frame_free(&m->tmp_frames[i]);
1121 
1122  av_frame_free(&s->new_pic);
1123 
1125 
1126  av_freep(&m->mv_table_base);
1127  av_freep(&s->p_field_select_table[0]);
1129 
1130  av_freep(&s->mb_type);
1131  av_freep(&s->lambda_table);
1132 
1133  av_freep(&s->q_intra_matrix);
1134  av_freep(&s->q_intra_matrix16);
1135  av_freep(&s->dct_offset);
1136 
1137  return 0;
1138 }
1139 
1140 /* put block[] to dest[] */
1141 static inline void put_dct(MPVEncContext *const s,
1142  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1143 {
1144  s->c.dct_unquantize_intra(&s->c, block, i, qscale);
1145  s->c.idsp.idct_put(dest, line_size, block);
1146 }
1147 
1148 static inline void add_dequant_dct(MPVEncContext *const s,
1149  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1150 {
1151  if (s->c.block_last_index[i] >= 0) {
1152  s->c.dct_unquantize_inter(&s->c, block, i, qscale);
1153 
1154  s->c.idsp.idct_add(dest, line_size, block);
1155  }
1156 }
1157 
1158 /**
1159  * Performs dequantization and IDCT (if necessary)
1160  */
/**
 * Performs dequantization and IDCT (if necessary)
 *
 * Reconstructs the current macroblock into s->c.dest[] from the quantized
 * coefficients in block[]. Intra MBs overwrite the destination (idct_put);
 * inter MBs add the residual on top of the already motion-compensated
 * destination (idct_add). Only runs for picture types enabled in
 * s->frame_reconstruction_bitfield.
 */
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
{
    if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                /* print in raster order, undoing the IDCT permutation */
                av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->c.idsp.idct_permutation[j]]);
            }
            av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* Only reconstruct frames whose picture type is enabled in the bitfield. */
    if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
        uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
        int dct_linesize, dct_offset;
        const int linesize = s->c.cur_pic.linesize[0];
        const int uvlinesize = s->c.cur_pic.linesize[1];
        const int block_size = 8;

        /* With interlaced DCT the rows of one 8x8 block come from every
         * other picture line, so the effective stride doubles and the
         * "lower" blocks start one line down instead of 8 lines down. */
        dct_linesize = linesize << s->c.interlaced_dct;
        dct_offset = s->c.interlaced_dct ? linesize : linesize * block_size;

        if (!s->c.mb_intra) {
            /* No MC, as that was already done otherwise */
            add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);

            /* skip chroma in grayscale mode */
            if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->c.chroma_y_shift) {
                    /* 4:2:0 — one 8x8 block per chroma plane */
                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
                } else {
                    /* 4:2:2 — two vertically stacked blocks per chroma plane */
                    dct_linesize >>= 1;
                    dct_offset >>= 1;
                    add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
                }
            }
        } else {
            /* dct only in intra block */
            put_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
            put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
            put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
            put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);

            if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->c.chroma_y_shift) {
                    put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
                } else {
                    dct_offset >>= 1;
                    dct_linesize >>= 1;
                    put_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
                }
            }
        }
    }
}
1228 
/**
 * Sum of absolute errors of a 16x16 pixel block measured against a
 * single constant reference value.
 *
 * @param src    top-left pixel of the 16x16 block
 * @param ref    reference value each pixel is compared against
 * @param stride distance in bytes between successive rows of src
 * @return sum over all 256 pixels of |src[pixel] - ref|
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int y = 0; y < 16; y++) {
        const uint8_t *row = src + y * stride;
        for (int x = 0; x < 16; x++) {
            int diff = row[x] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1242 
1243 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1244  const uint8_t *ref, int stride)
1245 {
1246  int x, y, w, h;
1247  int acc = 0;
1248 
1249  w = s->c.width & ~15;
1250  h = s->c.height & ~15;
1251 
1252  for (y = 0; y < h; y += 16) {
1253  for (x = 0; x < w; x += 16) {
1254  int offset = x + y * stride;
1255  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1256  stride, 16);
1257  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1258  int sae = get_sae(src + offset, mean, stride);
1259 
1260  acc += sae + 500 < sad;
1261  }
1262  }
1263  return acc;
1264 }
1265 
1266 /**
1267  * Allocates new buffers for an AVFrame and copies the properties
1268  * from another AVFrame.
1269  */
/**
 * Allocates new buffers for an AVFrame and copies the properties
 * from another AVFrame.
 *
 * The frame is allocated oversized by EDGE_WIDTH on each side; data
 * pointers are then advanced past the border so f->data[] points at the
 * visible picture area.
 */
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
{
    AVCodecContext *avctx = s->c.avctx;
    int ret;

    /* request an edge border around the picture */
    f->width  = avctx->width  + 2 * EDGE_WIDTH;
    f->height = avctx->height + 2 * EDGE_WIDTH;

    /* NOTE(review): `ret` is tested here but no call assigning it is
     * visible above — a buffer-allocation call appears to have been
     * dropped from this listing; confirm against the full source. */
    if (ret < 0)
        return ret;

    ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
    if (ret < 0)
        return ret;

    /* advance each plane pointer past the top/left border
     * (chroma planes use shifted border sizes) */
    for (int i = 0; f->data[i]; i++) {
        int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
                     f->linesize[i] +
                     (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
        f->data[i] += offset;
    }
    /* restore the nominal (visible) dimensions */
    f->width  = avctx->width;
    f->height = avctx->height;

    ret = av_frame_copy_props(f, props_frame);
    if (ret < 0)
        return ret;

    return 0;
}
1301 
/**
 * Load one user frame into the encoder's input_picture FIFO, or advance
 * the FIFO when flushing (pic_arg == NULL).
 *
 * Validates/derives the frame pts, decides whether the user buffer can be
 * referenced directly ("direct") or must be copied into an edge-padded
 * pool picture, and places the result at input_picture[encoding_delay].
 *
 * @param m       main encoder context
 * @param pic_arg frame to queue, or NULL at end of stream (flush)
 * @return 0 on success, a negative AVERROR code on failure
 */
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
{
    MPVEncContext *const s = &m->s;
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    /* how many frames the output lags behind the input */
    int encoding_delay = m->max_b_frames ? m->max_b_frames
                                         : (s->c.low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!m->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = m->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = m->user_specified_pts;

                /* pts must be strictly increasing */
                if (pts <= last) {
                    av_log(s->c.avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->c.low_delay && display_picture_number == 1)
                    m->dts_delta = pts - last;
            }
            m->user_specified_pts = pts;
        } else {
            /* no pts given: extrapolate from the previous one, or fall
             * back to the display order index */
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                m->user_specified_pts =
                pts = m->user_specified_pts + 1;
                av_log(s->c.avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* "direct" mode references the user buffer instead of copying;
         * it requires matching linesizes, mod-16 dimensions and aligned
         * data pointers/strides */
        if (pic_arg->linesize[0] != s->c.linesize ||
            pic_arg->linesize[1] != s->c.uvlinesize ||
            pic_arg->linesize[2] != s->c.uvlinesize)
            direct = 0;
        if ((s->c.width & 15) || (s->c.height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->c.linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->c.avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);

        pic = av_refstruct_pool_get(s->c.picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* copy the user frame into an edge-padded pool picture */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
                int h_shift = i ? s->c.chroma_x_shift : 0;
                int v_shift = i ? s->c.chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
                int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* interlaced MPEG-2 may need 32-line alignment padding */
                if (   s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->c.progressive_sequence
                    && FFALIGN(s->c.height, 32) - s->c.height > 16)
                    vpad = 32;

                if (!s->c.avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    /* single memcpy covering all rows minus the last stride tail */
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    /* row-by-row copy when strides differ */
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* pad the bottom/right up to the coded size if the
                 * picture dimensions are not aligned */
                if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
            emms_c();
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!m->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure m->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (m->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i - flush_offset] = m->input_picture[i];
    for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i] = NULL;

    m->input_picture[encoding_delay] = pic;

    return 0;
fail:
    av_refstruct_unref(&pic);
    return ret;
}
1442 
1443 static int skip_check(MPVMainEncContext *const m,
1444  const MPVPicture *p, const MPVPicture *ref)
1445 {
1446  MPVEncContext *const s = &m->s;
1447  int score = 0;
1448  int64_t score64 = 0;
1449 
1450  for (int plane = 0; plane < 3; plane++) {
1451  const int stride = p->f->linesize[plane];
1452  const int bw = plane ? 1 : 2;
1453  for (int y = 0; y < s->c.mb_height * bw; y++) {
1454  for (int x = 0; x < s->c.mb_width * bw; x++) {
1455  int off = p->shared ? 0 : 16;
1456  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1457  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1458  int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1459 
1460  switch (FFABS(m->frame_skip_exp)) {
1461  case 0: score = FFMAX(score, v); break;
1462  case 1: score += FFABS(v); break;
1463  case 2: score64 += v * (int64_t)v; break;
1464  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1465  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1466  }
1467  }
1468  }
1469  }
1470  emms_c();
1471 
1472  if (score)
1473  score64 = score;
1474  if (m->frame_skip_exp < 0)
1475  score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
1476  -1.0/m->frame_skip_exp);
1477 
1478  if (score64 < m->frame_skip_threshold)
1479  return 1;
1480  if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
1481  return 1;
1482  return 0;
1483 }
1484 
1486 {
1487  int ret;
1488  int size = 0;
1489 
1491  if (ret < 0)
1492  return ret;
1493 
1494  do {
1496  if (ret >= 0) {
1497  size += pkt->size;
1499  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1500  return ret;
1501  } while (ret >= 0);
1502 
1503  return size;
1504 }
1505 
1507 {
1508  MPVEncContext *const s = &m->s;
1509  AVPacket *pkt;
1510  const int scale = m->brd_scale;
1511  int width = s->c.width >> scale;
1512  int height = s->c.height >> scale;
1513  int out_size, p_lambda, b_lambda, lambda2;
1514  int64_t best_rd = INT64_MAX;
1515  int best_b_count = -1;
1516  int ret = 0;
1517 
1518  av_assert0(scale >= 0 && scale <= 3);
1519 
1520  pkt = av_packet_alloc();
1521  if (!pkt)
1522  return AVERROR(ENOMEM);
1523 
1524  //emms_c();
1525  p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
1526  //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
1527  b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
1528  if (!b_lambda) // FIXME we should do this somewhere else
1529  b_lambda = p_lambda;
1530  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1532 
1533  for (int i = 0; i < m->max_b_frames + 2; i++) {
1534  const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
1535  s->c.next_pic.ptr;
1536 
1537  if (pre_input_ptr) {
1538  const uint8_t *data[4];
1539  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1540 
1541  if (!pre_input_ptr->shared && i) {
1542  data[0] += INPLACE_OFFSET;
1543  data[1] += INPLACE_OFFSET;
1544  data[2] += INPLACE_OFFSET;
1545  }
1546 
1547  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
1548  m->tmp_frames[i]->linesize[0],
1549  data[0],
1550  pre_input_ptr->f->linesize[0],
1551  width, height);
1552  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
1553  m->tmp_frames[i]->linesize[1],
1554  data[1],
1555  pre_input_ptr->f->linesize[1],
1556  width >> 1, height >> 1);
1557  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
1558  m->tmp_frames[i]->linesize[2],
1559  data[2],
1560  pre_input_ptr->f->linesize[2],
1561  width >> 1, height >> 1);
1562  }
1563  }
1564 
1565  for (int j = 0; j < m->max_b_frames + 1; j++) {
1566  AVCodecContext *c;
1567  int64_t rd = 0;
1568 
1569  if (!m->input_picture[j])
1570  break;
1571 
1573  if (!c) {
1574  ret = AVERROR(ENOMEM);
1575  goto fail;
1576  }
1577 
1578  c->width = width;
1579  c->height = height;
1581  c->flags |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
1582  c->mb_decision = s->c.avctx->mb_decision;
1583  c->me_cmp = s->c.avctx->me_cmp;
1584  c->mb_cmp = s->c.avctx->mb_cmp;
1585  c->me_sub_cmp = s->c.avctx->me_sub_cmp;
1586  c->pix_fmt = AV_PIX_FMT_YUV420P;
1587  c->time_base = s->c.avctx->time_base;
1588  c->max_b_frames = m->max_b_frames;
1589 
1590  ret = avcodec_open2(c, s->c.avctx->codec, NULL);
1591  if (ret < 0)
1592  goto fail;
1593 
1594 
1596  m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1597 
1598  out_size = encode_frame(c, m->tmp_frames[0], pkt);
1599  if (out_size < 0) {
1600  ret = out_size;
1601  goto fail;
1602  }
1603 
1604  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1605 
1606  for (int i = 0; i < m->max_b_frames + 1; i++) {
1607  int is_p = i % (j + 1) == j || i == m->max_b_frames;
1608 
1609  m->tmp_frames[i + 1]->pict_type = is_p ?
1611  m->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1612 
1613  out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
1614  if (out_size < 0) {
1615  ret = out_size;
1616  goto fail;
1617  }
1618 
1619  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1620  }
1621 
1622  /* get the delayed frames */
1624  if (out_size < 0) {
1625  ret = out_size;
1626  goto fail;
1627  }
1628  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1629 
1630  rd += c->error[0] + c->error[1] + c->error[2];
1631 
1632  if (rd < best_rd) {
1633  best_rd = rd;
1634  best_b_count = j;
1635  }
1636 
1637 fail:
1640  if (ret < 0) {
1641  best_b_count = ret;
1642  break;
1643  }
1644  }
1645 
1646  av_packet_free(&pkt);
1647 
1648  return best_b_count;
1649 }
1650 
1651 /**
1652  * Determines whether an input picture is discarded or not
1653  * and if not determines the length of the next chain of B frames
1654  * and moves these pictures (including the P frame) into
1655  * reordered_input_picture.
1656  * input_picture[0] is always NULL when exiting this function, even on error;
1657  * reordered_input_picture[0] is always NULL when exiting this function on error.
1658  */
1660 {
1661  MPVEncContext *const s = &m->s;
1662 
1663  /* Either nothing to do or can't do anything */
1664  if (m->reordered_input_picture[0] || !m->input_picture[0])
1665  return 0;
1666 
1667  /* set next picture type & ordering */
1668  if (m->frame_skip_threshold || m->frame_skip_factor) {
1669  if (m->picture_in_gop_number < m->gop_size &&
1670  s->c.next_pic.ptr &&
1671  skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
1672  // FIXME check that the gop check above is +-1 correct
1674 
1675  ff_vbv_update(m, 0);
1676 
1677  return 0;
1678  }
1679  }
1680 
1681  if (/* m->picture_in_gop_number >= m->gop_size || */
1682  !s->c.next_pic.ptr || m->intra_only) {
1683  m->reordered_input_picture[0] = m->input_picture[0];
1684  m->input_picture[0] = NULL;
1687  m->coded_picture_number++;
1688  } else {
1689  int b_frames = 0;
1690 
1691  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
1692  for (int i = 0; i < m->max_b_frames + 1; i++) {
1693  int pict_num = m->input_picture[0]->display_picture_number + i;
1694 
1695  if (pict_num >= m->rc_context.num_entries)
1696  break;
1697  if (!m->input_picture[i]) {
1698  m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1699  break;
1700  }
1701 
1702  m->input_picture[i]->f->pict_type =
1703  m->rc_context.entry[pict_num].new_pict_type;
1704  }
1705  }
1706 
1707  if (m->b_frame_strategy == 0) {
1708  b_frames = m->max_b_frames;
1709  while (b_frames && !m->input_picture[b_frames])
1710  b_frames--;
1711  } else if (m->b_frame_strategy == 1) {
1712  for (int i = 1; i < m->max_b_frames + 1; i++) {
1713  if (m->input_picture[i] &&
1714  m->input_picture[i]->b_frame_score == 0) {
1717  m->input_picture[i ]->f->data[0],
1718  m->input_picture[i - 1]->f->data[0],
1719  s->c.linesize) + 1;
1720  }
1721  }
1722  for (int i = 0;; i++) {
1723  if (i >= m->max_b_frames + 1 ||
1724  !m->input_picture[i] ||
1725  m->input_picture[i]->b_frame_score - 1 >
1726  s->c.mb_num / m->b_sensitivity) {
1727  b_frames = FFMAX(0, i - 1);
1728  break;
1729  }
1730  }
1731 
1732  /* reset scores */
1733  for (int i = 0; i < b_frames + 1; i++)
1734  m->input_picture[i]->b_frame_score = 0;
1735  } else if (m->b_frame_strategy == 2) {
1736  b_frames = estimate_best_b_count(m);
1737  if (b_frames < 0) {
1739  return b_frames;
1740  }
1741  }
1742 
1743  emms_c();
1744 
1745  for (int i = b_frames - 1; i >= 0; i--) {
1746  int type = m->input_picture[i]->f->pict_type;
1747  if (type && type != AV_PICTURE_TYPE_B)
1748  b_frames = i;
1749  }
1750  if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1751  b_frames == m->max_b_frames) {
1752  av_log(s->c.avctx, AV_LOG_ERROR,
1753  "warning, too many B-frames in a row\n");
1754  }
1755 
1756  if (m->picture_in_gop_number + b_frames >= m->gop_size) {
1757  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1758  m->gop_size > m->picture_in_gop_number) {
1759  b_frames = m->gop_size - m->picture_in_gop_number - 1;
1760  } else {
1761  if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1762  b_frames = 0;
1763  m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1764  }
1765  }
1766 
1767  if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1768  m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1769  b_frames--;
1770 
1771  m->reordered_input_picture[0] = m->input_picture[b_frames];
1772  m->input_picture[b_frames] = NULL;
1776  m->coded_picture_number++;
1777  for (int i = 0; i < b_frames; i++) {
1778  m->reordered_input_picture[i + 1] = m->input_picture[i];
1779  m->input_picture[i] = NULL;
1780  m->reordered_input_picture[i + 1]->f->pict_type =
1783  m->coded_picture_number++;
1784  }
1785  }
1786 
1787  return 0;
1788 }
1789 
1791 {
1792  MPVEncContext *const s = &m->s;
1793  int ret;
1794 
1796 
1797  for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)
1800 
1802  av_assert1(!m->input_picture[0]);
1803  if (ret < 0)
1804  return ret;
1805 
1806  av_frame_unref(s->new_pic);
1807 
1808  if (m->reordered_input_picture[0]) {
1811 
1812  if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
1813  // input is a shared pix, so we can't modify it -> allocate a new
1814  // one & ensure that the shared one is reuseable
1815  av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);
1816 
1817  ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
1818  if (ret < 0)
1819  goto fail;
1820  } else {
1821  // input is not a shared pix -> reuse buffer for current_pix
1822  ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
1823  if (ret < 0)
1824  goto fail;
1825  for (int i = 0; i < MPV_MAX_PLANES; i++) {
1826  if (s->new_pic->data[i])
1827  s->new_pic->data[i] += INPLACE_OFFSET;
1828  }
1829  }
1830  s->c.cur_pic.ptr = m->reordered_input_picture[0];
1831  m->reordered_input_picture[0] = NULL;
1832  av_assert1(s->c.mb_width == s->c.buffer_pools.alloc_mb_width);
1833  av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
1834  av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
1835  ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
1836  &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
1837  if (ret < 0) {
1838  ff_mpv_unref_picture(&s->c.cur_pic);
1839  return ret;
1840  }
1841  s->c.picture_number = s->c.cur_pic.ptr->display_picture_number;
1842 
1843  }
1844  return 0;
1845 fail:
1847  return ret;
1848 }
1849 
1850 static void frame_end(MPVMainEncContext *const m)
1851 {
1852  MPVEncContext *const s = &m->s;
1853 
1854  if (s->c.unrestricted_mv &&
1855  s->c.cur_pic.reference &&
1856  !m->intra_only) {
1857  int hshift = s->c.chroma_x_shift;
1858  int vshift = s->c.chroma_y_shift;
1859  s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
1860  s->c.cur_pic.linesize[0],
1861  s->c.h_edge_pos, s->c.v_edge_pos,
1863  EDGE_TOP | EDGE_BOTTOM);
1864  s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
1865  s->c.cur_pic.linesize[1],
1866  s->c.h_edge_pos >> hshift,
1867  s->c.v_edge_pos >> vshift,
1868  EDGE_WIDTH >> hshift,
1869  EDGE_WIDTH >> vshift,
1870  EDGE_TOP | EDGE_BOTTOM);
1871  s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
1872  s->c.cur_pic.linesize[2],
1873  s->c.h_edge_pos >> hshift,
1874  s->c.v_edge_pos >> vshift,
1875  EDGE_WIDTH >> hshift,
1876  EDGE_WIDTH >> vshift,
1877  EDGE_TOP | EDGE_BOTTOM);
1878  }
1879 
1880  emms_c();
1881 
1882  m->last_pict_type = s->c.pict_type;
1883  m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
1884  if (s->c.pict_type != AV_PICTURE_TYPE_B)
1885  m->last_non_b_pict_type = s->c.pict_type;
1886 }
1887 
1889 {
1890  MPVEncContext *const s = &m->s;
1891  int intra, i;
1892 
1893  for (intra = 0; intra < 2; intra++) {
1894  if (s->dct_count[intra] > (1 << 16)) {
1895  for (i = 0; i < 64; i++) {
1896  s->dct_error_sum[intra][i] >>= 1;
1897  }
1898  s->dct_count[intra] >>= 1;
1899  }
1900 
1901  for (i = 0; i < 64; i++) {
1902  s->dct_offset[intra][i] = (m->noise_reduction *
1903  s->dct_count[intra] +
1904  s->dct_error_sum[intra][i] / 2) /
1905  (s->dct_error_sum[intra][i] + 1);
1906  }
1907  }
1908 }
1909 
1910 static void frame_start(MPVMainEncContext *const m)
1911 {
1912  MPVEncContext *const s = &m->s;
1913 
1914  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
1915 
1916  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
1917  ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
1918  ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
1919  }
1920 
1921  av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
1922  if (s->dct_error_sum) {
1924  }
1925 }
1926 
1928  const AVFrame *pic_arg, int *got_packet)
1929 {
1930  MPVMainEncContext *const m = avctx->priv_data;
1931  MPVEncContext *const s = &m->s;
1932  int stuffing_count, ret;
1933  int context_count = s->c.slice_context_count;
1934 
1935  ff_mpv_unref_picture(&s->c.cur_pic);
1936 
1937  m->vbv_ignore_qmax = 0;
1938 
1939  m->picture_in_gop_number++;
1940 
1941  ret = load_input_picture(m, pic_arg);
1942  if (ret < 0)
1943  return ret;
1944 
1946  if (ret < 0)
1947  return ret;
1948 
1949  /* output? */
1950  if (s->new_pic->data[0]) {
1951  int growing_buffer = context_count == 1 && !s->c.data_partitioning;
1952  size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
1953  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1954  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1955  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1956  if (ret < 0)
1957  return ret;
1958  }
1959  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1960  return ret;
1962  if (s->mb_info) {
1963  s->mb_info_ptr = av_packet_new_side_data(pkt,
1965  s->c.mb_width*s->c.mb_height*12);
1966  if (!s->mb_info_ptr)
1967  return AVERROR(ENOMEM);
1968  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1969  }
1970 
1971  s->c.pict_type = s->new_pic->pict_type;
1972  //emms_c();
1973  frame_start(m);
1974 vbv_retry:
1975  ret = encode_picture(m, pkt);
1976  if (growing_buffer) {
1977  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1978  pkt->data = s->pb.buf;
1980  }
1981  if (ret < 0)
1982  return -1;
1983 
1984  frame_end(m);
1985 
1986  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)
1988 
1989  if (avctx->rc_buffer_size) {
1990  RateControlContext *rcc = &m->rc_context;
1991  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1992  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1993  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1994 
1995  if (put_bits_count(&s->pb) > max_size &&
1996  s->lambda < m->lmax) {
1997  m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1998  (s->c.qscale + 1) / s->c.qscale);
1999  if (s->adaptive_quant) {
2000  for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
2001  s->lambda_table[i] =
2002  FFMAX(s->lambda_table[i] + min_step,
2003  s->lambda_table[i] * (s->c.qscale + 1) /
2004  s->c.qscale);
2005  }
2006  s->c.mb_skipped = 0; // done in frame_start()
2007  // done in encode_picture() so we must undo it
2008  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
2009  s->c.no_rounding ^= s->c.flipflop_rounding;
2010  }
2011  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
2012  s->c.time_base = s->c.last_time_base;
2013  s->c.last_non_b_time = s->c.time - s->c.pp_time;
2014  }
2015  m->vbv_ignore_qmax = 1;
2016  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2017  goto vbv_retry;
2018  }
2019 
2021  }
2022 
2025 
2026  for (int i = 0; i < MPV_MAX_PLANES; i++)
2027  avctx->error[i] += s->encoding_error[i];
2028  ff_side_data_set_encoder_stats(pkt, s->c.cur_pic.ptr->f->quality,
2029  s->encoding_error,
2031  s->c.pict_type);
2032 
2034  assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
2035  s->misc_bits + s->i_tex_bits +
2036  s->p_tex_bits);
2037  flush_put_bits(&s->pb);
2038  m->frame_bits = put_bits_count(&s->pb);
2039 
2040  stuffing_count = ff_vbv_update(m, m->frame_bits);
2041  m->stuffing_bits = 8*stuffing_count;
2042  if (stuffing_count) {
2043  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
2044  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2045  return -1;
2046  }
2047 
2048  switch (s->c.codec_id) {
2051  while (stuffing_count--) {
2052  put_bits(&s->pb, 8, 0);
2053  }
2054  break;
2055  case AV_CODEC_ID_MPEG4:
2056  put_bits(&s->pb, 16, 0);
2057  put_bits(&s->pb, 16, 0x1C3);
2058  stuffing_count -= 4;
2059  while (stuffing_count--) {
2060  put_bits(&s->pb, 8, 0xFF);
2061  }
2062  break;
2063  default:
2064  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2065  m->stuffing_bits = 0;
2066  }
2067  flush_put_bits(&s->pb);
2068  m->frame_bits = put_bits_count(&s->pb);
2069  }
2070 
2071  /* update MPEG-1/2 vbv_delay for CBR */
2072  if (avctx->rc_max_rate &&
2074  s->c.out_format == FMT_MPEG1 &&
2075  90000LL * (avctx->rc_buffer_size - 1) <=
2076  avctx->rc_max_rate * 0xFFFFLL) {
2077  AVCPBProperties *props;
2078  size_t props_size;
2079 
2080  int vbv_delay, min_delay;
2081  double inbits = avctx->rc_max_rate *
2083  int minbits = m->frame_bits - 8 *
2084  (m->vbv_delay_pos - 1);
2085  double bits = m->rc_context.buffer_index + minbits - inbits;
2086  uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;
2087 
2088  if (bits < 0)
2090  "Internal error, negative bits\n");
2091 
2092  av_assert1(s->c.repeat_first_field == 0);
2093 
2094  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2095  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2096  avctx->rc_max_rate;
2097 
2098  vbv_delay = FFMAX(vbv_delay, min_delay);
2099 
2100  av_assert0(vbv_delay < 0xFFFF);
2101 
2102  vbv_delay_ptr[0] &= 0xF8;
2103  vbv_delay_ptr[0] |= vbv_delay >> 13;
2104  vbv_delay_ptr[1] = vbv_delay >> 5;
2105  vbv_delay_ptr[2] &= 0x07;
2106  vbv_delay_ptr[2] |= vbv_delay << 3;
2107 
2108  props = av_cpb_properties_alloc(&props_size);
2109  if (!props)
2110  return AVERROR(ENOMEM);
2111  props->vbv_delay = vbv_delay * 300;
2112 
2114  (uint8_t*)props, props_size);
2115  if (ret < 0) {
2116  av_freep(&props);
2117  return ret;
2118  }
2119  }
2120  m->total_bits += m->frame_bits;
2121 
2122  pkt->pts = s->c.cur_pic.ptr->f->pts;
2123  pkt->duration = s->c.cur_pic.ptr->f->duration;
2124  if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
2125  if (!s->c.cur_pic.ptr->coded_picture_number)
2126  pkt->dts = pkt->pts - m->dts_delta;
2127  else
2128  pkt->dts = m->reordered_pts;
2129  m->reordered_pts = pkt->pts;
2130  } else
2131  pkt->dts = pkt->pts;
2132 
2133  // the no-delay case is handled in generic code
2135  ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
2136  if (ret < 0)
2137  return ret;
2138  }
2139 
2140  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
2142  if (s->mb_info)
2144  } else {
2145  m->frame_bits = 0;
2146  }
2147 
2148  ff_mpv_unref_picture(&s->c.cur_pic);
2149 
2150  av_assert1((m->frame_bits & 7) == 0);
2151 
2152  pkt->size = m->frame_bits / 8;
2153  *got_packet = !!pkt->size;
2154  return 0;
2155 }
2156 
2158  int n, int threshold)
2159 {
2160  static const char tab[64] = {
2161  3, 2, 2, 1, 1, 1, 1, 1,
2162  1, 1, 1, 1, 1, 1, 1, 1,
2163  1, 1, 1, 1, 1, 1, 1, 1,
2164  0, 0, 0, 0, 0, 0, 0, 0,
2165  0, 0, 0, 0, 0, 0, 0, 0,
2166  0, 0, 0, 0, 0, 0, 0, 0,
2167  0, 0, 0, 0, 0, 0, 0, 0,
2168  0, 0, 0, 0, 0, 0, 0, 0
2169  };
2170  int score = 0;
2171  int run = 0;
2172  int i;
2173  int16_t *block = s->c.block[n];
2174  const int last_index = s->c.block_last_index[n];
2175  int skip_dc;
2176 
2177  if (threshold < 0) {
2178  skip_dc = 0;
2179  threshold = -threshold;
2180  } else
2181  skip_dc = 1;
2182 
2183  /* Are all we could set to zero already zero? */
2184  if (last_index <= skip_dc - 1)
2185  return;
2186 
2187  for (i = 0; i <= last_index; i++) {
2188  const int j = s->c.intra_scantable.permutated[i];
2189  const int level = FFABS(block[j]);
2190  if (level == 1) {
2191  if (skip_dc && i == 0)
2192  continue;
2193  score += tab[run];
2194  run = 0;
2195  } else if (level > 1) {
2196  return;
2197  } else {
2198  run++;
2199  }
2200  }
2201  if (score >= threshold)
2202  return;
2203  for (i = skip_dc; i <= last_index; i++) {
2204  const int j = s->c.intra_scantable.permutated[i];
2205  block[j] = 0;
2206  }
2207  if (block[0])
2208  s->c.block_last_index[n] = 0;
2209  else
2210  s->c.block_last_index[n] = -1;
2211 }
2212 
2213 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2214  int last_index)
2215 {
2216  int i;
2217  const int maxlevel = s->max_qcoeff;
2218  const int minlevel = s->min_qcoeff;
2219  int overflow = 0;
2220 
2221  if (s->c.mb_intra) {
2222  i = 1; // skip clipping of intra dc
2223  } else
2224  i = 0;
2225 
2226  for (; i <= last_index; i++) {
2227  const int j = s->c.intra_scantable.permutated[i];
2228  int level = block[j];
2229 
2230  if (level > maxlevel) {
2231  level = maxlevel;
2232  overflow++;
2233  } else if (level < minlevel) {
2234  level = minlevel;
2235  overflow++;
2236  }
2237 
2238  block[j] = level;
2239  }
2240 
2241  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2242  av_log(s->c.avctx, AV_LOG_INFO,
2243  "warning, clipping %d dct coefficients to %d..%d\n",
2244  overflow, minlevel, maxlevel);
2245 }
2246 
2247 static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
2248 {
2249  int x, y;
2250  // FIXME optimize
2251  for (y = 0; y < 8; y++) {
2252  for (x = 0; x < 8; x++) {
2253  int x2, y2;
2254  int sum = 0;
2255  int sqr = 0;
2256  int count = 0;
2257 
2258  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2259  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2260  int v = ptr[x2 + y2 * stride];
2261  sum += v;
2262  sqr += v * v;
2263  count++;
2264  }
2265  }
2266  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2267  }
2268  }
2269 }
2270 
2272  int motion_x, int motion_y,
2273  int mb_block_height,
2274  int mb_block_width,
2275  int mb_block_count,
2276  int chroma_x_shift,
2277  int chroma_y_shift,
2278  int chroma_format)
2279 {
2280 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2281  * and neither of these encoders currently supports 444. */
2282 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2283  (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2284  int16_t weight[12][64];
2285  int16_t orig[12][64];
2286  const int mb_x = s->c.mb_x;
2287  const int mb_y = s->c.mb_y;
2288  int i;
2289  int skip_dct[12];
2290  int dct_offset = s->c.linesize * 8; // default for progressive frames
2291  int uv_dct_offset = s->c.uvlinesize * 8;
2292  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2293  ptrdiff_t wrap_y, wrap_c;
2294 
2295  for (i = 0; i < mb_block_count; i++)
2296  skip_dct[i] = s->skipdct;
2297 
2298  if (s->adaptive_quant) {
2299  const int last_qp = s->c.qscale;
2300  const int mb_xy = mb_x + mb_y * s->c.mb_stride;
2301 
2302  s->lambda = s->lambda_table[mb_xy];
2303  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2305 
2306  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2307  s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;
2308 
2309  if (s->c.out_format == FMT_H263) {
2310  s->dquant = av_clip(s->dquant, -2, 2);
2311 
2312  if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
2313  if (!s->c.mb_intra) {
2314  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
2315  if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
2316  s->dquant = 0;
2317  }
2318  if (s->c.mv_type == MV_TYPE_8X8)
2319  s->dquant = 0;
2320  }
2321  }
2322  }
2323  }
2324  ff_set_qscale(&s->c, last_qp + s->dquant);
2325  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2326  ff_set_qscale(&s->c, s->c.qscale + s->dquant);
2327 
2328  wrap_y = s->c.linesize;
2329  wrap_c = s->c.uvlinesize;
2330  ptr_y = s->new_pic->data[0] +
2331  (mb_y * 16 * wrap_y) + mb_x * 16;
2332  ptr_cb = s->new_pic->data[1] +
2333  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2334  ptr_cr = s->new_pic->data[2] +
2335  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2336 
2337  if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
2338  s->c.codec_id != AV_CODEC_ID_AMV) {
2339  uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
2340  int cw = (s->c.width + chroma_x_shift) >> chroma_x_shift;
2341  int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
2342  s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
2343  wrap_y, wrap_y,
2344  16, 16, mb_x * 16, mb_y * 16,
2345  s->c.width, s->c.height);
2346  ptr_y = ebuf;
2347  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2348  wrap_c, wrap_c,
2349  mb_block_width, mb_block_height,
2350  mb_x * mb_block_width, mb_y * mb_block_height,
2351  cw, ch);
2352  ptr_cb = ebuf + 16 * wrap_y;
2353  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2354  wrap_c, wrap_c,
2355  mb_block_width, mb_block_height,
2356  mb_x * mb_block_width, mb_y * mb_block_height,
2357  cw, ch);
2358  ptr_cr = ebuf + 16 * wrap_y + 16;
2359  }
2360 
2361  if (s->c.mb_intra) {
2362  if (INTERLACED_DCT(s)) {
2363  int progressive_score, interlaced_score;
2364 
2365  s->c.interlaced_dct = 0;
2366  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2367  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2368  NULL, wrap_y, 8) - 400;
2369 
2370  if (progressive_score > 0) {
2371  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2372  NULL, wrap_y * 2, 8) +
2373  s->ildct_cmp[1](s, ptr_y + wrap_y,
2374  NULL, wrap_y * 2, 8);
2375  if (progressive_score > interlaced_score) {
2376  s->c.interlaced_dct = 1;
2377 
2378  dct_offset = wrap_y;
2379  uv_dct_offset = wrap_c;
2380  wrap_y <<= 1;
2381  if (chroma_format == CHROMA_422 ||
2383  wrap_c <<= 1;
2384  }
2385  }
2386  }
2387 
2388  s->pdsp.get_pixels(s->c.block[0], ptr_y, wrap_y);
2389  s->pdsp.get_pixels(s->c.block[1], ptr_y + 8, wrap_y);
2390  s->pdsp.get_pixels(s->c.block[2], ptr_y + dct_offset, wrap_y);
2391  s->pdsp.get_pixels(s->c.block[3], ptr_y + dct_offset + 8, wrap_y);
2392 
2393  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2394  skip_dct[4] = 1;
2395  skip_dct[5] = 1;
2396  } else {
2397  s->pdsp.get_pixels(s->c.block[4], ptr_cb, wrap_c);
2398  s->pdsp.get_pixels(s->c.block[5], ptr_cr, wrap_c);
2399  if (chroma_format == CHROMA_422) {
2400  s->pdsp.get_pixels(s->c.block[6], ptr_cb + uv_dct_offset, wrap_c);
2401  s->pdsp.get_pixels(s->c.block[7], ptr_cr + uv_dct_offset, wrap_c);
2402  } else if (chroma_format == CHROMA_444) {
2403  s->pdsp.get_pixels(s->c.block[ 6], ptr_cb + 8, wrap_c);
2404  s->pdsp.get_pixels(s->c.block[ 7], ptr_cr + 8, wrap_c);
2405  s->pdsp.get_pixels(s->c.block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2406  s->pdsp.get_pixels(s->c.block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2407  s->pdsp.get_pixels(s->c.block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2408  s->pdsp.get_pixels(s->c.block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2409  }
2410  }
2411  } else {
2412  op_pixels_func (*op_pix)[4];
2413  qpel_mc_func (*op_qpix)[16];
2414  uint8_t *dest_y, *dest_cb, *dest_cr;
2415 
2416  dest_y = s->c.dest[0];
2417  dest_cb = s->c.dest[1];
2418  dest_cr = s->c.dest[2];
2419 
2420  if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
2421  op_pix = s->c.hdsp.put_pixels_tab;
2422  op_qpix = s->c.qdsp.put_qpel_pixels_tab;
2423  } else {
2424  op_pix = s->c.hdsp.put_no_rnd_pixels_tab;
2425  op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
2426  }
2427 
2428  if (s->c.mv_dir & MV_DIR_FORWARD) {
2429  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
2430  s->c.last_pic.data,
2431  op_pix, op_qpix);
2432  op_pix = s->c.hdsp.avg_pixels_tab;
2433  op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
2434  }
2435  if (s->c.mv_dir & MV_DIR_BACKWARD) {
2436  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
2437  s->c.next_pic.data,
2438  op_pix, op_qpix);
2439  }
2440 
2441  if (INTERLACED_DCT(s)) {
2442  int progressive_score, interlaced_score;
2443 
2444  s->c.interlaced_dct = 0;
2445  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2446  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2447  ptr_y + wrap_y * 8,
2448  wrap_y, 8) - 400;
2449 
2450  if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
2451  progressive_score -= 400;
2452 
2453  if (progressive_score > 0) {
2454  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2455  wrap_y * 2, 8) +
2456  s->ildct_cmp[0](s, dest_y + wrap_y,
2457  ptr_y + wrap_y,
2458  wrap_y * 2, 8);
2459 
2460  if (progressive_score > interlaced_score) {
2461  s->c.interlaced_dct = 1;
2462 
2463  dct_offset = wrap_y;
2464  uv_dct_offset = wrap_c;
2465  wrap_y <<= 1;
2466  if (chroma_format == CHROMA_422)
2467  wrap_c <<= 1;
2468  }
2469  }
2470  }
2471 
2472  s->pdsp.diff_pixels(s->c.block[0], ptr_y, dest_y, wrap_y);
2473  s->pdsp.diff_pixels(s->c.block[1], ptr_y + 8, dest_y + 8, wrap_y);
2474  s->pdsp.diff_pixels(s->c.block[2], ptr_y + dct_offset,
2475  dest_y + dct_offset, wrap_y);
2476  s->pdsp.diff_pixels(s->c.block[3], ptr_y + dct_offset + 8,
2477  dest_y + dct_offset + 8, wrap_y);
2478 
2479  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2480  skip_dct[4] = 1;
2481  skip_dct[5] = 1;
2482  } else {
2483  s->pdsp.diff_pixels(s->c.block[4], ptr_cb, dest_cb, wrap_c);
2484  s->pdsp.diff_pixels(s->c.block[5], ptr_cr, dest_cr, wrap_c);
2485  if (!chroma_y_shift) { /* 422 */
2486  s->pdsp.diff_pixels(s->c.block[6], ptr_cb + uv_dct_offset,
2487  dest_cb + uv_dct_offset, wrap_c);
2488  s->pdsp.diff_pixels(s->c.block[7], ptr_cr + uv_dct_offset,
2489  dest_cr + uv_dct_offset, wrap_c);
2490  }
2491  }
2492  /* pre quantization */
2493  if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
2494  // FIXME optimize
2495  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
2496  skip_dct[0] = 1;
2497  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
2498  skip_dct[1] = 1;
2499  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2500  wrap_y, 8) < 20 * s->c.qscale)
2501  skip_dct[2] = 1;
2502  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2503  wrap_y, 8) < 20 * s->c.qscale)
2504  skip_dct[3] = 1;
2505  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
2506  skip_dct[4] = 1;
2507  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
2508  skip_dct[5] = 1;
2509  if (!chroma_y_shift) { /* 422 */
2510  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2511  dest_cb + uv_dct_offset,
2512  wrap_c, 8) < 20 * s->c.qscale)
2513  skip_dct[6] = 1;
2514  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2515  dest_cr + uv_dct_offset,
2516  wrap_c, 8) < 20 * s->c.qscale)
2517  skip_dct[7] = 1;
2518  }
2519  }
2520  }
2521 
2522  if (s->quantizer_noise_shaping) {
2523  if (!skip_dct[0])
2524  get_visual_weight(weight[0], ptr_y , wrap_y);
2525  if (!skip_dct[1])
2526  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2527  if (!skip_dct[2])
2528  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2529  if (!skip_dct[3])
2530  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2531  if (!skip_dct[4])
2532  get_visual_weight(weight[4], ptr_cb , wrap_c);
2533  if (!skip_dct[5])
2534  get_visual_weight(weight[5], ptr_cr , wrap_c);
2535  if (!chroma_y_shift) { /* 422 */
2536  if (!skip_dct[6])
2537  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2538  wrap_c);
2539  if (!skip_dct[7])
2540  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2541  wrap_c);
2542  }
2543  memcpy(orig[0], s->c.block[0], sizeof(int16_t) * 64 * mb_block_count);
2544  }
2545 
2546  /* DCT & quantize */
2547  av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
2548  {
2549  for (i = 0; i < mb_block_count; i++) {
2550  if (!skip_dct[i]) {
2551  int overflow;
2552  s->c.block_last_index[i] = s->dct_quantize(s, s->c.block[i], i, s->c.qscale, &overflow);
2553  // FIXME we could decide to change to quantizer instead of
2554  // clipping
2555  // JS: I don't think that would be a good idea it could lower
2556  // quality instead of improve it. Just INTRADC clipping
2557  // deserves changes in quantizer
2558  if (overflow)
2559  clip_coeffs(s, s->c.block[i], s->c.block_last_index[i]);
2560  } else
2561  s->c.block_last_index[i] = -1;
2562  }
2563  if (s->quantizer_noise_shaping) {
2564  for (i = 0; i < mb_block_count; i++) {
2565  if (!skip_dct[i]) {
2566  s->c.block_last_index[i] =
2567  dct_quantize_refine(s, s->c.block[i], weight[i],
2568  orig[i], i, s->c.qscale);
2569  }
2570  }
2571  }
2572 
2573  if (s->luma_elim_threshold && !s->c.mb_intra)
2574  for (i = 0; i < 4; i++)
2575  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2576  if (s->chroma_elim_threshold && !s->c.mb_intra)
2577  for (i = 4; i < mb_block_count; i++)
2578  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2579 
2580  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2581  for (i = 0; i < mb_block_count; i++) {
2582  if (s->c.block_last_index[i] == -1)
2583  s->coded_score[i] = INT_MAX / 256;
2584  }
2585  }
2586  }
2587 
2588  if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
2589  s->c.block_last_index[4] =
2590  s->c.block_last_index[5] = 0;
2591  s->c.block[4][0] =
2592  s->c.block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
2593  if (!chroma_y_shift) { /* 422 / 444 */
2594  for (i=6; i<12; i++) {
2595  s->c.block_last_index[i] = 0;
2596  s->c.block[i][0] = s->c.block[4][0];
2597  }
2598  }
2599  }
2600 
2601  // non c quantize code returns incorrect block_last_index FIXME
2602  if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
2603  for (i = 0; i < mb_block_count; i++) {
2604  int j;
2605  if (s->c.block_last_index[i] > 0) {
2606  for (j = 63; j > 0; j--) {
2607  if (s->c.block[i][s->c.intra_scantable.permutated[j]])
2608  break;
2609  }
2610  s->c.block_last_index[i] = j;
2611  }
2612  }
2613  }
2614 
2615  s->encode_mb(s, s->c.block, motion_x, motion_y);
2616 }
2617 
2618 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2619 {
2620  if (s->c.chroma_format == CHROMA_420)
2621  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2622  else if (s->c.chroma_format == CHROMA_422)
2623  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2624  else
2625  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2626 }
2627 
2628 typedef struct MBBackup {
2629  struct {
2630  int mv[2][4][2];
2631  int last_mv[2][2][2];
2633  int last_dc[3];
2635  int qscale;
2638  int16_t (*block)[64];
2639  } c;
2641  int dquant;
2644 } MBBackup;
2645 
2646 #define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
2647 static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
2648  const SRC_TYPE *const s) \
2649 { \
2650  /* FIXME is memcpy faster than a loop? */ \
2651  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2652  \
2653  /* MPEG-1 */ \
2654  d->c.mb_skip_run = s->c.mb_skip_run; \
2655  for (int i = 0; i < 3; i++) \
2656  d->c.last_dc[i] = s->c.last_dc[i]; \
2657  \
2658  /* statistics */ \
2659  d->mv_bits = s->mv_bits; \
2660  d->i_tex_bits = s->i_tex_bits; \
2661  d->p_tex_bits = s->p_tex_bits; \
2662  d->i_count = s->i_count; \
2663  d->misc_bits = s->misc_bits; \
2664  d->last_bits = 0; \
2665  \
2666  d->c.mb_skipped = 0; \
2667  d->c.qscale = s->c.qscale; \
2668  d->dquant = s->dquant; \
2669  \
2670  d->esc3_level_length = s->esc3_level_length; \
2671 } \
2672  \
2673 static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
2674  const SRC_TYPE *const s, \
2675  int data_partitioning) \
2676 { \
2677  /* FIXME is memcpy faster than a loop? */ \
2678  memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
2679  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2680  \
2681  /* MPEG-1 */ \
2682  d->c.mb_skip_run = s->c.mb_skip_run; \
2683  for (int i = 0; i < 3; i++) \
2684  d->c.last_dc[i] = s->c.last_dc[i]; \
2685  \
2686  /* statistics */ \
2687  d->mv_bits = s->mv_bits; \
2688  d->i_tex_bits = s->i_tex_bits; \
2689  d->p_tex_bits = s->p_tex_bits; \
2690  d->i_count = s->i_count; \
2691  d->misc_bits = s->misc_bits; \
2692  \
2693  d->c.mb_intra = s->c.mb_intra; \
2694  d->c.mb_skipped = s->c.mb_skipped; \
2695  d->c.mv_type = s->c.mv_type; \
2696  d->c.mv_dir = s->c.mv_dir; \
2697  d->pb = s->pb; \
2698  if (data_partitioning) { \
2699  d->pb2 = s->pb2; \
2700  d->tex_pb = s->tex_pb; \
2701  } \
2702  d->c.block = s->c.block; \
2703  for (int i = 0; i < 8; i++) \
2704  d->c.block_last_index[i] = s->c.block_last_index[i]; \
2705  d->c.interlaced_dct = s->c.interlaced_dct; \
2706  d->c.qscale = s->c.qscale; \
2707  \
2708  d->esc3_level_length = s->esc3_level_length; \
2709 }
2710 
2711 COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
2712 COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2713 
2714 static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
2715  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2716  int *dmin, int *next_block, int motion_x, int motion_y)
2717 {
2718  int score;
2719  uint8_t *dest_backup[3];
2720 
2721  reset_context_before_encode(s, backup);
2722 
2723  s->c.block = s->c.blocks[*next_block];
2724  s->pb = pb[*next_block];
2725  if (s->c.data_partitioning) {
2726  s->pb2 = pb2 [*next_block];
2727  s->tex_pb= tex_pb[*next_block];
2728  }
2729 
2730  if(*next_block){
2731  memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
2732  s->c.dest[0] = s->c.sc.rd_scratchpad;
2733  s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
2734  s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
2735  av_assert0(s->c.linesize >= 32); //FIXME
2736  }
2737 
2738  encode_mb(s, motion_x, motion_y);
2739 
2740  score= put_bits_count(&s->pb);
2741  if (s->c.data_partitioning) {
2742  score+= put_bits_count(&s->pb2);
2743  score+= put_bits_count(&s->tex_pb);
2744  }
2745 
2746  if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
2747  mpv_reconstruct_mb(s, s->c.block);
2748 
2749  score *= s->lambda2;
2750  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2751  }
2752 
2753  if(*next_block){
2754  memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
2755  }
2756 
2757  if(score<*dmin){
2758  *dmin= score;
2759  *next_block^=1;
2760 
2761  save_context_after_encode(best, s, s->c.data_partitioning);
2762  }
2763 }
2764 
2765 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2766 {
2767  const uint32_t *sq = ff_square_tab + 256;
2768  int acc=0;
2769  int x,y;
2770 
2771  if(w==16 && h==16)
2772  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2773  else if(w==8 && h==8)
2774  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2775 
2776  for(y=0; y<h; y++){
2777  for(x=0; x<w; x++){
2778  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2779  }
2780  }
2781 
2782  av_assert2(acc>=0);
2783 
2784  return acc;
2785 }
2786 
2787 static int sse_mb(MPVEncContext *const s)
2788 {
2789  int w= 16;
2790  int h= 16;
2791  int chroma_mb_w = w >> s->c.chroma_x_shift;
2792  int chroma_mb_h = h >> s->c.chroma_y_shift;
2793 
2794  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2795  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2796 
2797  if(w==16 && h==16)
2798  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2799  s->c.dest[0], s->c.linesize, 16) +
2800  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2801  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2802  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2803  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2804  else
2805  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2806  s->c.dest[0], w, h, s->c.linesize) +
2807  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2808  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2809  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2810  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2811 }
2812 
2814  MPVEncContext *const s = *(void**)arg;
2815 
2816 
2817  s->me.pre_pass = 1;
2818  s->me.dia_size = s->c.avctx->pre_dia_size;
2819  s->c.first_slice_line = 1;
2820  for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
2821  for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
2822  ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2823  s->c.first_slice_line = 0;
2824  }
2825 
2826  s->me.pre_pass = 0;
2827 
2828  return 0;
2829 }
2830 
2832  MPVEncContext *const s = *(void**)arg;
2833 
2834  s->me.dia_size = s->c.avctx->dia_size;
2835  s->c.first_slice_line = 1;
2836  for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
2837  s->c.mb_x = 0; //for block init below
2838  ff_init_block_index(&s->c);
2839  for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
2840  s->c.block_index[0] += 2;
2841  s->c.block_index[1] += 2;
2842  s->c.block_index[2] += 2;
2843  s->c.block_index[3] += 2;
2844 
2845  /* compute motion vector & mb_type and store in context */
2846  if (s->c.pict_type == AV_PICTURE_TYPE_B)
2847  ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
2848  else
2849  ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2850  }
2851  s->c.first_slice_line = 0;
2852  }
2853  return 0;
2854 }
2855 
2856 static int mb_var_thread(AVCodecContext *c, void *arg){
2857  MPVEncContext *const s = *(void**)arg;
2858 
2859  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2860  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2861  int xx = mb_x * 16;
2862  int yy = mb_y * 16;
2863  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2864  int varc;
2865  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2866 
2867  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2868  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2869 
2870  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2871  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2872  s->me.mb_var_sum_temp += varc;
2873  }
2874  }
2875  return 0;
2876 }
2877 
2878 static void write_slice_end(MPVEncContext *const s)
2879 {
2880  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
2881  if (s->c.partitioned_frame)
2883 
2884  ff_mpeg4_stuffing(&s->pb);
2885  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2886  s->c.out_format == FMT_MJPEG) {
2888  } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
2890  }
2891 
2892  flush_put_bits(&s->pb);
2893 
2894  if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->c.partitioned_frame)
2895  s->misc_bits+= get_bits_diff(s);
2896 }
2897 
2898 static void write_mb_info(MPVEncContext *const s)
2899 {
2900  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2901  int offset = put_bits_count(&s->pb);
2902  int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->c.gob_index);
2903  int gobn = s->c.mb_y / s->c.gob_index;
2904  int pred_x, pred_y;
2905  if (CONFIG_H263_ENCODER)
2906  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
2907  bytestream_put_le32(&ptr, offset);
2908  bytestream_put_byte(&ptr, s->c.qscale);
2909  bytestream_put_byte(&ptr, gobn);
2910  bytestream_put_le16(&ptr, mba);
2911  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2912  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2913  /* 4MV not implemented */
2914  bytestream_put_byte(&ptr, 0); /* hmv2 */
2915  bytestream_put_byte(&ptr, 0); /* vmv2 */
2916 }
2917 
2918 static void update_mb_info(MPVEncContext *const s, int startcode)
2919 {
2920  if (!s->mb_info)
2921  return;
2922  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2923  s->mb_info_size += 12;
2924  s->prev_mb_info = s->last_mb_info;
2925  }
2926  if (startcode) {
2927  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2928  /* This might have incremented mb_info_size above, and we return without
2929  * actually writing any info into that slot yet. But in that case,
2930  * this will be called again at the start of the after writing the
2931  * start code, actually writing the mb info. */
2932  return;
2933  }
2934 
2935  s->last_mb_info = put_bytes_count(&s->pb, 0);
2936  if (!s->mb_info_size)
2937  s->mb_info_size += 12;
2938  write_mb_info(s);
2939 }
2940 
2941 int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
2942 {
2943  if (put_bytes_left(&s->pb, 0) < threshold
2944  && s->c.slice_context_count == 1
2945  && s->pb.buf == s->c.avctx->internal->byte_buffer) {
2946  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2947 
2948  uint8_t *new_buffer = NULL;
2949  int new_buffer_size = 0;
2950 
2951  if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2952  av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2953  return AVERROR(ENOMEM);
2954  }
2955 
2956  emms_c();
2957 
2958  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2959  s->c.avctx->internal->byte_buffer_size + size_increase);
2960  if (!new_buffer)
2961  return AVERROR(ENOMEM);
2962 
2963  memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
2964  av_free(s->c.avctx->internal->byte_buffer);
2965  s->c.avctx->internal->byte_buffer = new_buffer;
2966  s->c.avctx->internal->byte_buffer_size = new_buffer_size;
2967  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2968  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2969  }
2970  if (put_bytes_left(&s->pb, 0) < threshold)
2971  return AVERROR(EINVAL);
2972  return 0;
2973 }
2974 
2975 static int encode_thread(AVCodecContext *c, void *arg){
2976  MPVEncContext *const s = *(void**)arg;
2977  int chr_h = 16 >> s->c.chroma_y_shift;
2978  int i;
2979  MBBackup best_s = { 0 }, backup_s;
2980  uint8_t bit_buf[2][MAX_MB_BYTES];
2981  uint8_t bit_buf2[2][MAX_MB_BYTES];
2982  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2983  PutBitContext pb[2], pb2[2], tex_pb[2];
2984 
2985  for(i=0; i<2; i++){
2986  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2987  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2988  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2989  }
2990 
2991  s->last_bits= put_bits_count(&s->pb);
2992  s->mv_bits=0;
2993  s->misc_bits=0;
2994  s->i_tex_bits=0;
2995  s->p_tex_bits=0;
2996  s->i_count=0;
2997 
2998  for(i=0; i<3; i++){
2999  /* init last dc values */
3000  /* note: quant matrix value (8) is implied here */
3001  s->c.last_dc[i] = 128 << s->c.intra_dc_precision;
3002 
3003  s->encoding_error[i] = 0;
3004  }
3005  if (s->c.codec_id == AV_CODEC_ID_AMV) {
3006  s->c.last_dc[0] = 128 * 8 / 13;
3007  s->c.last_dc[1] = 128 * 8 / 14;
3008  s->c.last_dc[2] = 128 * 8 / 14;
3009  }
3010  s->c.mb_skip_run = 0;
3011  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
3012 
3013  s->last_mv_dir = 0;
3014 
3015  switch (s->c.codec_id) {
3016  case AV_CODEC_ID_H263:
3017  case AV_CODEC_ID_H263P:
3018  case AV_CODEC_ID_FLV1:
3019  if (CONFIG_H263_ENCODER)
3020  s->c.gob_index = H263_GOB_HEIGHT(s->c.height);
3021  break;
3022  case AV_CODEC_ID_MPEG4:
3023  if (CONFIG_MPEG4_ENCODER && s->c.partitioned_frame)
3025  break;
3026  }
3027 
3028  s->c.resync_mb_x = 0;
3029  s->c.resync_mb_y = 0;
3030  s->c.first_slice_line = 1;
3031  s->ptr_lastgob = s->pb.buf;
3032  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
3033  int mb_y;
3034  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
3035  int first_in_slice;
3036  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
3037  if (first_in_slice && mb_y_order != s->c.start_mb_y)
3039  s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 1024 << s->c.intra_dc_precision;
3040  } else {
3041  mb_y = mb_y_order;
3042  }
3043  s->c.mb_x = 0;
3044  s->c.mb_y = mb_y;
3045 
3046  ff_set_qscale(&s->c, s->c.qscale);
3047  ff_init_block_index(&s->c);
3048 
3049  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3050  int mb_type, xy;
3051 // int d;
3052  int dmin= INT_MAX;
3053  int dir;
3054  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3055  + s->c.mb_width*MAX_MB_BYTES;
3056 
3058  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3059  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3060  return -1;
3061  }
3062  if (s->c.data_partitioning) {
3063  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3064  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3065  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3066  return -1;
3067  }
3068  }
3069 
3070  s->c.mb_x = mb_x;
3071  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3072  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3073 
3074  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3076  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3077  mb_type = s->mb_type[xy];
3078 
3079  /* write gob / video packet header */
3080  if(s->rtp_mode){
3081  int current_packet_size, is_gob_start;
3082 
3083  current_packet_size = put_bytes_count(&s->pb, 1)
3084  - (s->ptr_lastgob - s->pb.buf);
3085 
3086  is_gob_start = s->rtp_payload_size &&
3087  current_packet_size >= s->rtp_payload_size &&
3088  mb_y + mb_x > 0;
3089 
3090  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3091 
3092  switch (s->c.codec_id) {
3093  case AV_CODEC_ID_H263:
3094  case AV_CODEC_ID_H263P:
3095  if (!s->c.h263_slice_structured)
3096  if (s->c.mb_x || s->c.mb_y % s->c.gob_index) is_gob_start = 0;
3097  break;
3099  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3101  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3102  s->c.mb_skip_run)
3103  is_gob_start=0;
3104  break;
3105  case AV_CODEC_ID_MJPEG:
3106  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3107  break;
3108  }
3109 
3110  if(is_gob_start){
3111  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3112  write_slice_end(s);
3113 
3114  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->c.partitioned_frame)
3116  }
3117 
3118  av_assert2((put_bits_count(&s->pb)&7) == 0);
3119  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3120 
3121  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3122  int r = put_bytes_count(&s->pb, 0) + s->c.picture_number + 16 + s->c.mb_x + s->c.mb_y;
3123  int d = 100 / s->error_rate;
3124  if(r % d == 0){
3125  current_packet_size=0;
3126  s->pb.buf_ptr= s->ptr_lastgob;
3127  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3128  }
3129  }
3130 
3131  switch (s->c.codec_id) {
3132  case AV_CODEC_ID_MPEG4:
3133  if (CONFIG_MPEG4_ENCODER) {
3136  }
3137  break;
3140  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3143  }
3144  break;
3145  case AV_CODEC_ID_H263:
3146  case AV_CODEC_ID_H263P:
3147  if (CONFIG_H263_ENCODER) {
3148  update_mb_info(s, 1);
3150  }
3151  break;
3152  }
3153 
3154  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3155  int bits= put_bits_count(&s->pb);
3156  s->misc_bits+= bits - s->last_bits;
3157  s->last_bits= bits;
3158  }
3159 
3160  s->ptr_lastgob += current_packet_size;
3161  s->c.first_slice_line = 1;
3162  s->c.resync_mb_x = mb_x;
3163  s->c.resync_mb_y = mb_y;
3164  }
3165  }
3166 
3167  if (s->c.resync_mb_x == s->c.mb_x &&
3168  s->c.resync_mb_y+1 == s->c.mb_y)
3169  s->c.first_slice_line = 0;
3170 
3171  s->c.mb_skipped = 0;
3172  s->dquant=0; //only for QP_RD
3173 
3174  update_mb_info(s, 0);
3175 
3176  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3177  int next_block=0;
3178  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3179 
3180  backup_context_before_encode(&backup_s, s);
3181  backup_s.pb= s->pb;
3182  if (s->c.data_partitioning) {
3183  backup_s.pb2= s->pb2;
3184  backup_s.tex_pb= s->tex_pb;
3185  }
3186 
3187  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3188  s->c.mv_dir = MV_DIR_FORWARD;
3189  s->c.mv_type = MV_TYPE_16X16;
3190  s->c.mb_intra = 0;
3191  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3192  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3193  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3194  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3195  }
3196  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3197  s->c.mv_dir = MV_DIR_FORWARD;
3198  s->c.mv_type = MV_TYPE_FIELD;
3199  s->c.mb_intra = 0;
3200  for(i=0; i<2; i++){
3201  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3202  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3203  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3204  }
3205  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3206  &dmin, &next_block, 0, 0);
3207  }
3208  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3209  s->c.mv_dir = MV_DIR_FORWARD;
3210  s->c.mv_type = MV_TYPE_16X16;
3211  s->c.mb_intra = 0;
3212  s->c.mv[0][0][0] = 0;
3213  s->c.mv[0][0][1] = 0;
3214  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3215  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3216  }
3217  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3218  s->c.mv_dir = MV_DIR_FORWARD;
3219  s->c.mv_type = MV_TYPE_8X8;
3220  s->c.mb_intra = 0;
3221  for(i=0; i<4; i++){
3222  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3223  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3224  }
3225  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3226  &dmin, &next_block, 0, 0);
3227  }
3228  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3229  s->c.mv_dir = MV_DIR_FORWARD;
3230  s->c.mv_type = MV_TYPE_16X16;
3231  s->c.mb_intra = 0;
3232  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3233  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3234  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3235  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3236  }
3237  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3238  s->c.mv_dir = MV_DIR_BACKWARD;
3239  s->c.mv_type = MV_TYPE_16X16;
3240  s->c.mb_intra = 0;
3241  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3242  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3243  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3244  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3245  }
3246  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3247  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3248  s->c.mv_type = MV_TYPE_16X16;
3249  s->c.mb_intra = 0;
3250  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3251  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3252  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3253  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3254  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3255  &dmin, &next_block, 0, 0);
3256  }
3257  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3258  s->c.mv_dir = MV_DIR_FORWARD;
3259  s->c.mv_type = MV_TYPE_FIELD;
3260  s->c.mb_intra = 0;
3261  for(i=0; i<2; i++){
3262  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3263  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3264  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3265  }
3266  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3267  &dmin, &next_block, 0, 0);
3268  }
3269  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3270  s->c.mv_dir = MV_DIR_BACKWARD;
3271  s->c.mv_type = MV_TYPE_FIELD;
3272  s->c.mb_intra = 0;
3273  for(i=0; i<2; i++){
3274  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3275  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3276  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3277  }
3278  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3279  &dmin, &next_block, 0, 0);
3280  }
3281  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3282  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3283  s->c.mv_type = MV_TYPE_FIELD;
3284  s->c.mb_intra = 0;
3285  for(dir=0; dir<2; dir++){
3286  for(i=0; i<2; i++){
3287  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3288  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3289  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3290  }
3291  }
3292  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3293  &dmin, &next_block, 0, 0);
3294  }
3295  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3296  s->c.mv_dir = 0;
3297  s->c.mv_type = MV_TYPE_16X16;
3298  s->c.mb_intra = 1;
3299  s->c.mv[0][0][0] = 0;
3300  s->c.mv[0][0][1] = 0;
3301  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3302  &dmin, &next_block, 0, 0);
3303  s->c.mbintra_table[xy] = 1;
3304  }
3305 
3306  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3307  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3308  const int last_qp = backup_s.c.qscale;
3309  int qpi, qp, dc[6];
3310  int16_t ac[6][16];
3311  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3312  static const int dquant_tab[4]={-1,1,-2,2};
3313  int storecoefs = s->c.mb_intra && s->c.dc_val[0];
3314 
3315  av_assert2(backup_s.dquant == 0);
3316 
3317  //FIXME intra
3318  s->c.mv_dir = best_s.c.mv_dir;
3319  s->c.mv_type = MV_TYPE_16X16;
3320  s->c.mb_intra = best_s.c.mb_intra;
3321  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3322  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3323  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3324  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3325 
3326  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3327  for(; qpi<4; qpi++){
3328  int dquant= dquant_tab[qpi];
3329  qp= last_qp + dquant;
3330  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3331  continue;
3332  backup_s.dquant= dquant;
3333  if(storecoefs){
3334  for(i=0; i<6; i++){
3335  dc[i] = s->c.dc_val[0][s->c.block_index[i]];
3336  memcpy(ac[i], s->c.ac_val[0][s->c.block_index[i]], sizeof(int16_t)*16);
3337  }
3338  }
3339 
3340  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3341  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3342  if (best_s.c.qscale != qp) {
3343  if(storecoefs){
3344  for(i=0; i<6; i++){
3345  s->c.dc_val[0][s->c.block_index[i]] = dc[i];
3346  memcpy(s->c.ac_val[0][s->c.block_index[i]], ac[i], sizeof(int16_t)*16);
3347  }
3348  }
3349  }
3350  }
3351  }
3352  }
3353  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3354  int mx= s->b_direct_mv_table[xy][0];
3355  int my= s->b_direct_mv_table[xy][1];
3356 
3357  backup_s.dquant = 0;
3358  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3359  s->c.mb_intra = 0;
3360  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3361  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3362  &dmin, &next_block, mx, my);
3363  }
3364  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3365  backup_s.dquant = 0;
3366  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3367  s->c.mb_intra = 0;
3368  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3369  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3370  &dmin, &next_block, 0, 0);
3371  }
3372  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3373  int coded=0;
3374  for(i=0; i<6; i++)
3375  coded |= s->c.block_last_index[i];
3376  if(coded){
3377  int mx,my;
3378  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3379  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3380  mx=my=0; //FIXME find the one we actually used
3381  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3382  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3383  mx = s->c.mv[1][0][0];
3384  my = s->c.mv[1][0][1];
3385  }else{
3386  mx = s->c.mv[0][0][0];
3387  my = s->c.mv[0][0][1];
3388  }
3389 
3390  s->c.mv_dir = best_s.c.mv_dir;
3391  s->c.mv_type = best_s.c.mv_type;
3392  s->c.mb_intra = 0;
3393 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3394  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3395  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3396  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3397  backup_s.dquant= 0;
3398  s->skipdct=1;
3399  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3400  &dmin, &next_block, mx, my);
3401  s->skipdct=0;
3402  }
3403  }
3404 
3405  store_context_after_encode(s, &best_s, s->c.data_partitioning);
3406 
3407  pb_bits_count= put_bits_count(&s->pb);
3408  flush_put_bits(&s->pb);
3409  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3410  s->pb= backup_s.pb;
3411 
3412  if (s->c.data_partitioning) {
3413  pb2_bits_count= put_bits_count(&s->pb2);
3414  flush_put_bits(&s->pb2);
3415  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3416  s->pb2= backup_s.pb2;
3417 
3418  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3419  flush_put_bits(&s->tex_pb);
3420  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3421  s->tex_pb= backup_s.tex_pb;
3422  }
3423  s->last_bits= put_bits_count(&s->pb);
3424 
3425  if (CONFIG_H263_ENCODER &&
3426  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3428 
3429  if(next_block==0){ //FIXME 16 vs linesize16
3430  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3431  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3432  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3433  }
3434 
3435  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3436  mpv_reconstruct_mb(s, s->c.block);
3437  } else {
3438  int motion_x = 0, motion_y = 0;
3439  s->c.mv_type = MV_TYPE_16X16;
3440  // only one MB-Type possible
3441 
3442  switch(mb_type){
3444  s->c.mv_dir = 0;
3445  s->c.mb_intra = 1;
3446  motion_x= s->c.mv[0][0][0] = 0;
3447  motion_y= s->c.mv[0][0][1] = 0;
3448  s->c.mbintra_table[xy] = 1;
3449  break;
3451  s->c.mv_dir = MV_DIR_FORWARD;
3452  s->c.mb_intra = 0;
3453  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3454  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3455  break;
3457  s->c.mv_dir = MV_DIR_FORWARD;
3458  s->c.mv_type = MV_TYPE_FIELD;
3459  s->c.mb_intra = 0;
3460  for(i=0; i<2; i++){
3461  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3462  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3463  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3464  }
3465  break;
3467  s->c.mv_dir = MV_DIR_FORWARD;
3468  s->c.mv_type = MV_TYPE_8X8;
3469  s->c.mb_intra = 0;
3470  for(i=0; i<4; i++){
3471  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3472  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3473  }
3474  break;
3476  if (CONFIG_MPEG4_ENCODER) {
3478  s->c.mb_intra = 0;
3479  motion_x=s->b_direct_mv_table[xy][0];
3480  motion_y=s->b_direct_mv_table[xy][1];
3481  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3482  }
3483  break;
3485  if (CONFIG_MPEG4_ENCODER) {
3487  s->c.mb_intra = 0;
3488  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3489  }
3490  break;
3492  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3493  s->c.mb_intra = 0;
3494  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3495  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3496  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3497  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3498  break;
3500  s->c.mv_dir = MV_DIR_BACKWARD;
3501  s->c.mb_intra = 0;
3502  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3503  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3504  break;
3506  s->c.mv_dir = MV_DIR_FORWARD;
3507  s->c.mb_intra = 0;
3508  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3509  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3510  break;
3512  s->c.mv_dir = MV_DIR_FORWARD;
3513  s->c.mv_type = MV_TYPE_FIELD;
3514  s->c.mb_intra = 0;
3515  for(i=0; i<2; i++){
3516  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3517  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3518  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3519  }
3520  break;
3522  s->c.mv_dir = MV_DIR_BACKWARD;
3523  s->c.mv_type = MV_TYPE_FIELD;
3524  s->c.mb_intra = 0;
3525  for(i=0; i<2; i++){
3526  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3527  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3528  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3529  }
3530  break;
3532  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3533  s->c.mv_type = MV_TYPE_FIELD;
3534  s->c.mb_intra = 0;
3535  for(dir=0; dir<2; dir++){
3536  for(i=0; i<2; i++){
3537  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3538  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3539  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3540  }
3541  }
3542  break;
3543  default:
3544  av_log(s->c.avctx, AV_LOG_ERROR, "illegal MB type\n");
3545  }
3546 
3547  encode_mb(s, motion_x, motion_y);
3548 
3549  // RAL: Update last macroblock type
3550  s->last_mv_dir = s->c.mv_dir;
3551 
3552  if (CONFIG_H263_ENCODER &&
3553  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3555 
3556  mpv_reconstruct_mb(s, s->c.block);
3557  }
3558 
3559  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3560 
3561  /* clean the MV table in IPS frames for direct mode in B-frames */
3562  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3563  s->p_mv_table[xy][0]=0;
3564  s->p_mv_table[xy][1]=0;
3565  } else if ((s->c.h263_pred || s->c.h263_aic) && s->c.mbintra_table[xy])
3567 
3568  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3569  int w= 16;
3570  int h= 16;
3571 
3572  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3573  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3574 
3575  s->encoding_error[0] += sse(
3576  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3577  s->c.dest[0], w, h, s->c.linesize);
3578  s->encoding_error[1] += sse(
3579  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3580  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3581  s->encoding_error[2] += sse(
3582  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3583  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3584  }
3585  if (s->c.loop_filter) {
3586  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3587  ff_h263_loop_filter(&s->c);
3588  }
3589  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3590  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3591  }
3592  }
3593 
3594 #if CONFIG_MSMPEG4ENC
3595  //not beautiful here but we must write it before flushing so it has to be here
3596  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3597  s->c.pict_type == AV_PICTURE_TYPE_I)
3599 #endif
3600 
3601  write_slice_end(s);
3602 
3603  return 0;
3604 }
3605 
/* Helpers for folding per-slice-thread statistics into the main context.
 * Both expect `dst` and `src` pointers in scope.
 * ADD(field):   accumulate src's field into dst.
 * MERGE(field): accumulate src's field into dst, then zero it in src.
 * Wrapped in do { } while (0) so they behave as single statements even in
 * unbraced if/else bodies. */
#define ADD(field) do { dst->field += src->field; } while (0)
#define MERGE(field) do { dst->field += src->field; src->field = 0; } while (0)
3609 {
3610  ADD(me.scene_change_score);
3611  ADD(me.mc_mb_var_sum_temp);
3612  ADD(me.mb_var_sum_temp);
3613 }
3614 
3616 {
3617  int i;
3618 
3619  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3620  MERGE(dct_count[1]);
3621  ADD(mv_bits);
3622  ADD(i_tex_bits);
3623  ADD(p_tex_bits);
3624  ADD(i_count);
3625  ADD(misc_bits);
3626  ADD(encoding_error[0]);
3627  ADD(encoding_error[1]);
3628  ADD(encoding_error[2]);
3629 
3630  if (dst->dct_error_sum) {
3631  for(i=0; i<64; i++){
3632  MERGE(dct_error_sum[0][i]);
3633  MERGE(dct_error_sum[1][i]);
3634  }
3635  }
3636 
3637  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3638  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3639  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3640  flush_put_bits(&dst->pb);
3641 }
3642 
3643 static int estimate_qp(MPVMainEncContext *const m, int dry_run)
3644 {
3645  MPVEncContext *const s = &m->s;
3646 
3647  if (m->next_lambda){
3648  s->c.cur_pic.ptr->f->quality = m->next_lambda;
3649  if(!dry_run) m->next_lambda= 0;
3650  } else if (!m->fixed_qscale) {
3651  int quality = ff_rate_estimate_qscale(m, dry_run);
3652  s->c.cur_pic.ptr->f->quality = quality;
3653  if (s->c.cur_pic.ptr->f->quality < 0)
3654  return -1;
3655  }
3656 
3657  if(s->adaptive_quant){
3658  init_qscale_tab(s);
3659 
3660  switch (s->c.codec_id) {
3661  case AV_CODEC_ID_MPEG4:
3662  if (CONFIG_MPEG4_ENCODER)
3664  break;
3665  case AV_CODEC_ID_H263:
3666  case AV_CODEC_ID_H263P:
3667  case AV_CODEC_ID_FLV1:
3668  if (CONFIG_H263_ENCODER)
3670  break;
3671  }
3672 
3673  s->lambda = s->lambda_table[0];
3674  //FIXME broken
3675  }else
3676  s->lambda = s->c.cur_pic.ptr->f->quality;
3677  update_qscale(m);
3678  return 0;
3679 }
3680 
3681 /* must be called before writing the header */
3683 {
3684  av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3685  s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;
3686 
3687  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3688  s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
3689  av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
3690  }else{
3691  s->c.pp_time = s->c.time - s->c.last_non_b_time;
3692  s->c.last_non_b_time = s->c.time;
3693  av_assert1(s->c.picture_number == 0 || s->c.pp_time > 0);
3694  }
3695 }
3696 
3697 static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
3698 {
3699  MPVEncContext *const s = &m->s;
3700  int i, ret;
3701  int bits;
3702  int context_count = s->c.slice_context_count;
3703 
3704  /* we need to initialize some time vars before we can encode B-frames */
3705  // RAL: Condition added for MPEG1VIDEO
3706  if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
3708  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)
3710 
3711 // s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3712 
3713  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3714  s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
3715  } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3716  s->c.no_rounding ^= s->c.flipflop_rounding;
3717  }
3718 
3719  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
3720  ret = estimate_qp(m, 1);
3721  if (ret < 0)
3722  return ret;
3723  ff_get_2pass_fcode(m);
3724  } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3725  if (s->c.pict_type == AV_PICTURE_TYPE_B)
3726  s->lambda = m->last_lambda_for[s->c.pict_type];
3727  else
3728  s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
3729  update_qscale(m);
3730  }
3731 
3732  s->c.mb_intra = 0; //for the rate distortion & bit compare functions
3733  for (int i = 0; i < context_count; i++) {
3734  MPVEncContext *const slice = s->c.enc_contexts[i];
3735  int h = s->c.mb_height;
3736  uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
3737  uint8_t *end = pkt->data + (int64_t)pkt->size * slice->c. end_mb_y / h;
3738 
3739  init_put_bits(&slice->pb, start, end - start);
3740 
3741  if (i) {
3742  ret = ff_update_duplicate_context(&slice->c, &s->c);
3743  if (ret < 0)
3744  return ret;
3745  slice->lambda = s->lambda;
3746  slice->lambda2 = s->lambda2;
3747  }
3748  slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
3749  ff_me_init_pic(slice);
3750  }
3751 
3752  /* Estimate motion for every MB */
3753  if (s->c.pict_type != AV_PICTURE_TYPE_I) {
3754  s->lambda = (s->lambda * m->me_penalty_compensation + 128) >> 8;
3755  s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
3756  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3757  if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3758  m->me_pre == 2) {
3759  s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
3760  &s->c.enc_contexts[0], NULL,
3761  context_count, sizeof(void*));
3762  }
3763  }
3764 
3765  s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
3766  NULL, context_count, sizeof(void*));
3767  }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
3768  /* I-Frame */
3769  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3770  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3771 
3772  if (!m->fixed_qscale) {
3773  /* finding spatial complexity for I-frame rate control */
3774  s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
3775  NULL, context_count, sizeof(void*));
3776  }
3777  }
3778  for(i=1; i<context_count; i++){
3779  merge_context_after_me(s, s->c.enc_contexts[i]);
3780  }
3781  m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3782  m->mb_var_sum = s->me. mb_var_sum_temp;
3783  emms_c();
3784 
3785  if (s->me.scene_change_score > m->scenechange_threshold &&
3786  s->c.pict_type == AV_PICTURE_TYPE_P) {
3787  s->c.pict_type = AV_PICTURE_TYPE_I;
3788  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3789  s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
3790  if (s->c.msmpeg4_version >= MSMP4_V3)
3791  s->c.no_rounding = 1;
3792  ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3793  m->mb_var_sum, m->mc_mb_var_sum);
3794  }
3795 
3796  if (!s->c.umvplus) {
3797  if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
3798  s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3799 
3800  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3801  int a,b;
3802  a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3803  b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3804  s->f_code = FFMAX3(s->f_code, a, b);
3805  }
3806 
3808  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3809  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3810  int j;
3811  for(i=0; i<2; i++){
3812  for(j=0; j<2; j++)
3813  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3814  s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3815  }
3816  }
3817  } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3818  int a, b;
3819 
3820  a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3821  b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3822  s->f_code = FFMAX(a, b);
3823 
3824  a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3825  b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3826  s->b_code = FFMAX(a, b);
3827 
3828  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3829  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3830  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3831  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3832  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3833  int dir, j;
3834  for(dir=0; dir<2; dir++){
3835  for(i=0; i<2; i++){
3836  for(j=0; j<2; j++){
3839  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3840  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3841  }
3842  }
3843  }
3844  }
3845  }
3846  }
3847 
3848  ret = estimate_qp(m, 0);
3849  if (ret < 0)
3850  return ret;
3851 
3852  if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
3853  s->c.pict_type == AV_PICTURE_TYPE_I &&
3854  !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
3855  s->c.qscale = 3; //reduce clipping problems
3856 
3857  if (s->c.out_format == FMT_MJPEG) {
3859  (7 + s->c.qscale) / s->c.qscale, 65535);
3860  if (ret < 0)
3861  return ret;
3862 
3863  if (s->c.codec_id != AV_CODEC_ID_AMV) {
3864  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3865  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3866 
3867  if (s->c.avctx->intra_matrix) {
3868  chroma_matrix =
3869  luma_matrix = s->c.avctx->intra_matrix;
3870  }
3871  if (s->c.avctx->chroma_intra_matrix)
3872  chroma_matrix = s->c.avctx->chroma_intra_matrix;
3873 
3874  /* for mjpeg, we do include qscale in the matrix */
3875  for (int i = 1; i < 64; i++) {
3876  int j = s->c.idsp.idct_permutation[i];
3877 
3878  s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
3879  s->c. intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->c.qscale) >> 3);
3880  }
3881  s->c.y_dc_scale_table =
3882  s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision];
3883  s->c.chroma_intra_matrix[0] =
3884  s->c.intra_matrix[0] = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision][8];
3885  } else {
3886  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3887  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3888  for (int i = 1; i < 64; i++) {
3889  int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];
3890 
3891  s->c.intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3892  s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3893  }
3894  s->c.y_dc_scale_table = y;
3895  s->c.c_dc_scale_table = c;
3896  s->c.intra_matrix[0] = 13;
3897  s->c.chroma_intra_matrix[0] = 14;
3898  }
3899  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3900  s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
3901  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3902  s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3903  s->c.qscale = 8;
3904  }
3905 
3906  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3907  s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3908  } else {
3909  s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3910  }
3911  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
3912 
3913  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3914  m->picture_in_gop_number = 0;
3915 
3916  s->c.mb_x = s->c.mb_y = 0;
3917  s->last_bits= put_bits_count(&s->pb);
3918  ret = m->encode_picture_header(m);
3919  if (ret < 0)
3920  return ret;
3921  bits= put_bits_count(&s->pb);
3922  m->header_bits = bits - s->last_bits;
3923 
3924  for(i=1; i<context_count; i++){
3925  update_duplicate_context_after_me(s->c.enc_contexts[i], s);
3926  }
3927  s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
3928  NULL, context_count, sizeof(void*));
3929  for(i=1; i<context_count; i++){
3930  if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
3931  set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3932  merge_context_after_encode(s, s->c.enc_contexts[i]);
3933  }
3934  emms_c();
3935  return 0;
3936 }
3937 
3938 static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
3939 {
3940  const int intra = s->c.mb_intra;
3941  int i;
3942 
3943  s->dct_count[intra]++;
3944 
3945  for(i=0; i<64; i++){
3946  int level= block[i];
3947 
3948  if(level){
3949  if(level>0){
3950  s->dct_error_sum[intra][i] += level;
3951  level -= s->dct_offset[intra][i];
3952  if(level<0) level=0;
3953  }else{
3954  s->dct_error_sum[intra][i] -= level;
3955  level += s->dct_offset[intra][i];
3956  if(level>0) level=0;
3957  }
3958  block[i]= level;
3959  }
3960  }
3961 }
3962 
3964  int16_t *block, int n,
3965  int qscale, int *overflow){
3966  const int *qmat;
3967  const uint16_t *matrix;
3968  const uint8_t *scantable;
3969  const uint8_t *perm_scantable;
3970  int max=0;
3971  unsigned int threshold1, threshold2;
3972  int bias=0;
3973  int run_tab[65];
3974  int level_tab[65];
3975  int score_tab[65];
3976  int survivor[65];
3977  int survivor_count;
3978  int last_run=0;
3979  int last_level=0;
3980  int last_score= 0;
3981  int last_i;
3982  int coeff[2][64];
3983  int coeff_count[64];
3984  int qmul, qadd, start_i, last_non_zero, i, dc;
3985  const int esc_length= s->ac_esc_length;
3986  const uint8_t *length, *last_length;
3987  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3988  int mpeg2_qscale;
3989 
3990  s->fdsp.fdct(block);
3991 
3992  if(s->dct_error_sum)
3993  s->denoise_dct(s, block);
3994  qmul= qscale*16;
3995  qadd= ((qscale-1)|1)*8;
3996 
3997  if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3998  else mpeg2_qscale = qscale << 1;
3999 
4000  if (s->c.mb_intra) {
4001  int q;
4002  scantable = s->c.intra_scantable.scantable;
4003  perm_scantable = s->c.intra_scantable.permutated;
4004  if (!s->c.h263_aic) {
4005  if (n < 4)
4006  q = s->c.y_dc_scale;
4007  else
4008  q = s->c.c_dc_scale;
4009  q = q << 3;
4010  } else{
4011  /* For AIC we skip quant/dequant of INTRADC */
4012  q = 1 << 3;
4013  qadd=0;
4014  }
4015 
4016  /* note: block[0] is assumed to be positive */
4017  block[0] = (block[0] + (q >> 1)) / q;
4018  start_i = 1;
4019  last_non_zero = 0;
4020  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4021  matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
4022  if (s->c.mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
4023  bias= 1<<(QMAT_SHIFT-1);
4024 
4025  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4026  length = s->intra_chroma_ac_vlc_length;
4027  last_length= s->intra_chroma_ac_vlc_last_length;
4028  } else {
4029  length = s->intra_ac_vlc_length;
4030  last_length= s->intra_ac_vlc_last_length;
4031  }
4032  } else {
4033  scantable = s->c.inter_scantable.scantable;
4034  perm_scantable = s->c.inter_scantable.permutated;
4035  start_i = 0;
4036  last_non_zero = -1;
4037  qmat = s->q_inter_matrix[qscale];
4038  matrix = s->c.inter_matrix;
4039  length = s->inter_ac_vlc_length;
4040  last_length= s->inter_ac_vlc_last_length;
4041  }
4042  last_i= start_i;
4043 
4044  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4045  threshold2= (threshold1<<1);
4046 
4047  for(i=63; i>=start_i; i--) {
4048  const int j = scantable[i];
4049  int64_t level = (int64_t)block[j] * qmat[j];
4050 
4051  if(((uint64_t)(level+threshold1))>threshold2){
4052  last_non_zero = i;
4053  break;
4054  }
4055  }
4056 
4057  for(i=start_i; i<=last_non_zero; i++) {
4058  const int j = scantable[i];
4059  int64_t level = (int64_t)block[j] * qmat[j];
4060 
4061 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4062 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4063  if(((uint64_t)(level+threshold1))>threshold2){
4064  if(level>0){
4065  level= (bias + level)>>QMAT_SHIFT;
4066  coeff[0][i]= level;
4067  coeff[1][i]= level-1;
4068 // coeff[2][k]= level-2;
4069  }else{
4070  level= (bias - level)>>QMAT_SHIFT;
4071  coeff[0][i]= -level;
4072  coeff[1][i]= -level+1;
4073 // coeff[2][k]= -level+2;
4074  }
4075  coeff_count[i]= FFMIN(level, 2);
4076  av_assert2(coeff_count[i]);
4077  max |=level;
4078  }else{
4079  coeff[0][i]= (level>>31)|1;
4080  coeff_count[i]= 1;
4081  }
4082  }
4083 
4084  *overflow= s->max_qcoeff < max; //overflow might have happened
4085 
4086  if(last_non_zero < start_i){
4087  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4088  return last_non_zero;
4089  }
4090 
4091  score_tab[start_i]= 0;
4092  survivor[0]= start_i;
4093  survivor_count= 1;
4094 
4095  for(i=start_i; i<=last_non_zero; i++){
4096  int level_index, j, zero_distortion;
4097  int dct_coeff= FFABS(block[ scantable[i] ]);
4098  int best_score=256*256*256*120;
4099 
4100  if (s->fdsp.fdct == ff_fdct_ifast)
4101  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4102  zero_distortion= dct_coeff*dct_coeff;
4103 
4104  for(level_index=0; level_index < coeff_count[i]; level_index++){
4105  int distortion;
4106  int level= coeff[level_index][i];
4107  const int alevel= FFABS(level);
4108  int unquant_coeff;
4109 
4110  av_assert2(level);
4111 
4112  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4113  unquant_coeff= alevel*qmul + qadd;
4114  } else if (s->c.out_format == FMT_MJPEG) {
4115  j = s->c.idsp.idct_permutation[scantable[i]];
4116  unquant_coeff = alevel * matrix[j] * 8;
4117  }else{ // MPEG-1
4118  j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4119  if (s->c.mb_intra) {
4120  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4121  unquant_coeff = (unquant_coeff - 1) | 1;
4122  }else{
4123  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4124  unquant_coeff = (unquant_coeff - 1) | 1;
4125  }
4126  unquant_coeff<<= 3;
4127  }
4128 
4129  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4130  level+=64;
4131  if((level&(~127)) == 0){
4132  for(j=survivor_count-1; j>=0; j--){
4133  int run= i - survivor[j];
4134  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4135  score += score_tab[i-run];
4136 
4137  if(score < best_score){
4138  best_score= score;
4139  run_tab[i+1]= run;
4140  level_tab[i+1]= level-64;
4141  }
4142  }
4143 
4144  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4145  for(j=survivor_count-1; j>=0; j--){
4146  int run= i - survivor[j];
4147  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4148  score += score_tab[i-run];
4149  if(score < last_score){
4150  last_score= score;
4151  last_run= run;
4152  last_level= level-64;
4153  last_i= i+1;
4154  }
4155  }
4156  }
4157  }else{
4158  distortion += esc_length*lambda;
4159  for(j=survivor_count-1; j>=0; j--){
4160  int run= i - survivor[j];
4161  int score= distortion + score_tab[i-run];
4162 
4163  if(score < best_score){
4164  best_score= score;
4165  run_tab[i+1]= run;
4166  level_tab[i+1]= level-64;
4167  }
4168  }
4169 
4170  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4171  for(j=survivor_count-1; j>=0; j--){
4172  int run= i - survivor[j];
4173  int score= distortion + score_tab[i-run];
4174  if(score < last_score){
4175  last_score= score;
4176  last_run= run;
4177  last_level= level-64;
4178  last_i= i+1;
4179  }
4180  }
4181  }
4182  }
4183  }
4184 
4185  score_tab[i+1]= best_score;
4186 
4187  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4188  if(last_non_zero <= 27){
4189  for(; survivor_count; survivor_count--){
4190  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4191  break;
4192  }
4193  }else{
4194  for(; survivor_count; survivor_count--){
4195  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4196  break;
4197  }
4198  }
4199 
4200  survivor[ survivor_count++ ]= i+1;
4201  }
4202 
4203  if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
4204  last_score= 256*256*256*120;
4205  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4206  int score= score_tab[i];
4207  if (i)
4208  score += lambda * 2; // FIXME more exact?
4209 
4210  if(score < last_score){
4211  last_score= score;
4212  last_i= i;
4213  last_level= level_tab[i];
4214  last_run= run_tab[i];
4215  }
4216  }
4217  }
4218 
4219  s->coded_score[n] = last_score;
4220 
4221  dc= FFABS(block[0]);
4222  last_non_zero= last_i - 1;
4223  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4224 
4225  if(last_non_zero < start_i)
4226  return last_non_zero;
4227 
4228  if(last_non_zero == 0 && start_i == 0){
4229  int best_level= 0;
4230  int best_score= dc * dc;
4231 
4232  for(i=0; i<coeff_count[0]; i++){
4233  int level= coeff[i][0];
4234  int alevel= FFABS(level);
4235  int unquant_coeff, score, distortion;
4236 
4237  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4238  unquant_coeff= (alevel*qmul + qadd)>>3;
4239  } else{ // MPEG-1
4240  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4241  unquant_coeff = (unquant_coeff - 1) | 1;
4242  }
4243  unquant_coeff = (unquant_coeff + 4) >> 3;
4244  unquant_coeff<<= 3 + 3;
4245 
4246  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4247  level+=64;
4248  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4249  else score= distortion + esc_length*lambda;
4250 
4251  if(score < best_score){
4252  best_score= score;
4253  best_level= level - 64;
4254  }
4255  }
4256  block[0]= best_level;
4257  s->coded_score[n] = best_score - dc*dc;
4258  if(best_level == 0) return -1;
4259  else return last_non_zero;
4260  }
4261 
4262  i= last_i;
4263  av_assert2(last_level);
4264 
4265  block[ perm_scantable[last_non_zero] ]= last_level;
4266  i -= last_run + 1;
4267 
4268  for(; i>start_i; i -= run_tab[i] + 1){
4269  block[ perm_scantable[i-1] ]= level_tab[i];
4270  }
4271 
4272  return last_non_zero;
4273 }
4274 
/* 64 DCT basis functions (one per coefficient, in IDCT-permuted order), each
 * stored as an 8x8 spatial pattern scaled by 1 << BASIS_SHIFT.
 * Zero until lazily filled by build_basis(). */
static int16_t basis[64][64];
4276 
4277 static void build_basis(uint8_t *perm){
4278  int i, j, x, y;
4279  emms_c();
4280  for(i=0; i<8; i++){
4281  for(j=0; j<8; j++){
4282  for(y=0; y<8; y++){
4283  for(x=0; x<8; x++){
4284  double s= 0.25*(1<<BASIS_SHIFT);
4285  int index= 8*i + j;
4286  int perm_index= perm[index];
4287  if(i==0) s*= sqrt(0.5);
4288  if(j==0) s*= sqrt(0.5);
4289  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4290  }
4291  }
4292  }
4293  }
4294 }
4295 
/**
 * Rate-distortion refinement of an already quantized block ("quantizer noise
 * shaping"): repeatedly try changing single coefficients by +-1 and keep the
 * change that most reduces weighted_distortion + lambda * rate, until no
 * candidate change improves the score.
 *
 * @param block  quantized coefficients, in IDCT-permuted order; updated in place
 * @param weight per-coefficient perceptual weights; overwritten here with a
 *               remapped weight in the range 16..63
 * @param orig   reference block in the spatial domain (pre-DCT values)
 * @param n      block index; n < 4 selects the luma DC scale, n > 3 chroma
 * @param qscale quantizer scale
 * @return index of the (possibly new) last nonzero coefficient
 */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                               int16_t *block, int16_t *weight, int16_t *orig,
                               int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    /* Lazily build the DCT basis table on first use. */
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        /* Bring the DC scale into RECON_SHIFT fixed point. */
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;     // intra: DC is never refined in the AC loop below
//        if (s->c.mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        /* Separate chroma AC tables exist only for some codecs. */
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    /* rem[] = (current reconstruction - original), in RECON_SHIFT fixed
     * point, including rounding; the quantized AC contributions are added
     * below via add_8x8basis(). */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* Remap the perceptual weights into 16..63 and accumulate sum of squares
     * for the lambda scaling below. */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* Initial RLE pass: record the run before each nonzero coefficient and
     * add each dequantized coefficient's basis pattern into rem[]. */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* Greedy refinement: per iteration evaluate every +-1 single-coefficient
     * change, apply the best one, and stop once no change beats doing
     * nothing (best_change == 0). */
    for(;;){
        /* Score of leaving the block unchanged (zero delta on any basis). */
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* d1 = DCT of the weighted residual; its sign per frequency is
             * used below to reject changes that would increase the error. */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        /* Intra DC (start_i != 0): try +-1 on block[0] separately since it
         * uses the DC quantizer q instead of qmul/qadd. */
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            /* Below noise-shaping level 3, only coefficients up to one past
             * the current last nonzero are candidates. */
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;   // remaining zero-run until the next nonzero coeff
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                /* Below level 2 only allow changes toward zero. */
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* Nonzero -> nonzero: rate delta is just the VLC
                         * length difference for the same run. */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* Zero -> +-1: inserting a coefficient splits the
                         * run around it. */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            /* Skip if the change has the same sign as the
                             * residual gradient (would add to the error). */
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            /* Levels outside -64..63 are escaped; treat as 0. */
                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            /* Appending past the end: the previous last
                             * coefficient switches from "last" to regular. */
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* +-1 -> zero: removing a coefficient merges the runs
                     * on either side of it. */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        /* Dropping the last coefficient: the previous one
                         * becomes the new "last". */
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                /* Add the weighted distortion delta for this change. */
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;   // escaped level; see above
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* Apply the winning change and update last_non_zero, the RLE
             * table and the reconstruction residual. */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;   // no candidate change improves the RD score
        }
    }

    return last_non_zero;
}
4611 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permutated
 *                  to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int i;

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    //     return;

    /* Stash every coefficient that may move and clear its old slot, so the
     * in-place scatter below cannot clobber a value before it is read. */
    for (i = 0; i <= last; i++) {
        const int pos = scantable[i];
        saved[pos]    = block[pos];
        block[pos]    = 0;
    }

    /* Scatter each saved coefficient to its permuted position. */
    for (i = 0; i <= last; i++) {
        const int pos = scantable[i];
        block[permutation[pos]] = saved[pos];
    }
}
4647 
/**
 * Forward-DCT and quantize one 8x8 block (generic C path).
 *
 * Runs the forward DCT, optionally denoises, quantizes the DC coefficient
 * with simple rounding and the AC coefficients with a codec-dependent bias,
 * and finally permutes the nonzero coefficients to IDCT order.
 *
 * @param block    spatial-domain input; replaced by quantized coefficients
 * @param n        block index; n < 4 selects the luma DC scale / intra matrix
 * @param qscale   quantizer scale (index into the precomputed qmat tables)
 * @param overflow set to 1 if any quantized level exceeds s->max_qcoeff
 * @return index of the last nonzero coefficient in scantable order
 *         (-1 / 0 for an all-zero inter / intra block respectively)
 */
static int dct_quantize_c(MPVEncContext *const s,
                          int16_t *block, int n,
                          int qscale, int *overflow)
{
    int i, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;           // DC handled above; AC loop starts at 1
        last_non_zero = 0;     // intra always codes at least the DC
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable = s->c.inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    /* The single unsigned comparison below is equivalent to
     * (level > threshold1 || level < -threshold1), i.e. it tests whether
     * the coefficient quantizes to a nonzero level. */
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* Scan backwards for the last coefficient that survives quantization,
     * zeroing everything after it. */
    for(i=63;i>=start_i;i--) {
        const int j = scantable[i];
        int64_t level = (int64_t)block[j] * qmat[j];

        if(((uint64_t)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    /* Quantize the remaining coefficients in scan order. */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int64_t level = (int64_t)block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((uint64_t)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;   // track the largest magnitude via OR (upper bound)
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->c.idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1485
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3963
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1141
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1661
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:384
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:103
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:84
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:220
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:242
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1710
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:430
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:217
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:235
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:276
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2714
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:224
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:229
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2157
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:186
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
h263data.h
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:104
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2618
level
uint8_t level
Definition: svq3.c:205
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1493
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:45
init_unquantize
static av_cold void init_unquantize(MpegEncContext *const s, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:316
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:526
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:28
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:820
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:244
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1888
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2640
mem_internal.h
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:277
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:248
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2640
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:205
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1277
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3682
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1910
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:113
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2636
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3643
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:672
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:46
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2631
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:230
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:301
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:208
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4275
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:948
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1506
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:236
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:164
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2831
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:808
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:184
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2765
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:269
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:204
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:220
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:512
AVFrame::width
int width
Definition: frame.h:482
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
w
uint8_t w
Definition: llviddspenc.c:38
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:272
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2640
AVPacket::data
uint8_t * data
Definition: packet.h:535
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:377
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:100
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:51
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:194
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2878
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac for the current non-intra MB.
Definition: mpegvideo.c:488
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
MPVEncContext::b_field_select_table
uint8_t *[2][2] b_field_select_table
allocated jointly with p_field_select_table
Definition: mpegvideoenc.h:90
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:88
MPVEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types.
Definition: mpegvideoenc.h:92
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:211
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:512
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:553
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:286
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:82
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2813
MPVEncContext::lambda_table
int * lambda_table
Definition: mpegvideoenc.h:53
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2247
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2646
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:936
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:55
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:181
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1241
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:244
mpegutils.h
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:575
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:590
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:862
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:75
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:226
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:280
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:431
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:183
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:56
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
update_mb_info
static void update_mb_info(MPVEncContext *const s, int startcode)
Definition: mpegvideo_enc.c:2918
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2640
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:185
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1659
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:355
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1161
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2643
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:878
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1927
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:314
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2634
ALIGN
#define ALIGN(a)
aligns the bitstream to the given power of two
Definition: rtjpeg.c:30
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:57
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:203
faandct.h
Floating point AAN DCT.
MPVEncContext::mb_mean
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegvideoenc.h:95
MPVEncContext::b_bidir_forw_mv_table
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame.
Definition: mpegvideoenc.h:85
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:198
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:54
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:829
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:179
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2637
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:722
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:307
fail
#define fail()
Definition: checkasm.h:196
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:58
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:110
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:996
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:271
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:187
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1209
MPVEncContext::mb_var
uint16_t * mb_var
Table for MB variances.
Definition: mpegvideoenc.h:93
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:275
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:289
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1243
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1353
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2787
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:216
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_mpeg1_encode_init
static void ff_mpeg1_encode_init(MPVEncContext *s)
Definition: mpeg12enc.h:33
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:378
AVRational::num
int num
Numerator.
Definition: rational.h:59
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:274
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:273
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:278
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:221
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1443
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:233
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:183
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:50
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:96
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:130
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4277
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:697
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:201
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1229
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MPVMainEncContext *const m)
Definition: rv10enc.c:34
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:112
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:304
ff_mpvenc_dct_init_mips
av_cold void ff_mpvenc_dct_init_mips(MPVEncContext *s)
Definition: mpegvideoenc_init_mips.c:26
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:204
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2642
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:188
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:180
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:245
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3608
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:884
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:239
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:49
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:232
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:228
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:182
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1067
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:238
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1270
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4623
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1505
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:279
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:274
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2632
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:829
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2856
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:56
arg
const char * arg
Definition: jacosubdec.c:67
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:272
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:469
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1255
MECmpContext
Definition: me_cmp.h:55
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:197
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:279
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:124
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:211
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:601
run
uint8_t run
Definition: svq3.c:204
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:205
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:95
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:228
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:912
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:275
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:58
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:218
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:240
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:52
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:335
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1790
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:446
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:545
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:183
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3607
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:868
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1105
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2635
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:937
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2941
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:119
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:143
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1148
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1305
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1392
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2638
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:256
MPVMainEncContext
Definition: mpegvideoenc.h:177
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:187
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:815
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1302
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:426
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:892
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:818
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1380
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1312
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:237
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:502
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:82
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2213
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:536
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1005
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:162
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:58
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:215
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:59
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2632
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:216
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:532
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:384
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:125
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:283
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:98
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:231
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:288
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3697
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:289
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:67
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:478
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:77
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:534
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:377
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:184
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:189
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:541
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:64
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:294
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:241
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:264
denoise_dct_c
static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
Definition: mpegvideo_enc.c:3938
MPVEncContext::p_field_select_table
uint8_t *[2] p_field_select_table
Only the first element is allocated.
Definition: mpegvideoenc.h:89
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:281
MBBackup::c
struct MBBackup::@202 c
MBBackup
Definition: mpegvideo_enc.c:2628
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:270
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:305
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:309
MPVEncContext::b_field_mv_table
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame.
Definition: mpegvideoenc.h:88
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:387
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2633
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:111
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:303
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2630
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:528
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *m)
Definition: rv20enc.c:37
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2975
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:249
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2643
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:37
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:105
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:971
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:126
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:284
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:205
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:497
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:43
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:281
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:525
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:234
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:160
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:879
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:498
MPVEncContext::b_bidir_back_mv_table
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame.
Definition: mpegvideoenc.h:86
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:210
MPVEncContext::mc_mb_var
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegvideoenc.h:94
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:38
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:284
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:238
AVCodecContext::height
int height
Definition: avcodec.h:592
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:493
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1270
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:900
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1850
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2271
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
vshift
static int vshift(enum AVPixelFormat fmt, int plane)
Definition: graph.c:99
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:212
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:300
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1357
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:94
MPVEncContext::b_direct_mv_table
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame.
Definition: mpegvideoenc.h:87
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:111
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:276
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:198
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:306
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:836
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:223
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2641
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:482
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2634
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:392
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:211
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3606
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:232
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1234
AVRational::den
int den
Denominator.
Definition: rational.h:60
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2640
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:822
MPVEncContext::p_mv_table
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame.
Definition: mpegvideoenc.h:82
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:263
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:222
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:85
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:939
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:892
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:265
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:769
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4296
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:544
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1284
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1366
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3615
MPVEncContext::b_forw_mv_table
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame.
Definition: mpegvideoenc.h:83
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:202
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:182
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1021
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:938
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:512
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:957
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:253
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:206
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:122
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:455
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:460
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: packet.c:610
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2634
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:190
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:880
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4648
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2643
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:287
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:116
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:281
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:198
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:62
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:178
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2898
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2640
pixblockdsp.h
MPVEncContext::b_back_mv_table
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame.
Definition: mpegvideoenc.h:84
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:954
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:911
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:302
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:104
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:711
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1605
intmath.h