FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/emms.h"
40 #include "libavutil/internal.h"
41 #include "libavutil/intmath.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/mem_internal.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/thread.h"
47 #include "avcodec.h"
48 #include "encode.h"
49 #include "idctdsp.h"
50 #include "mpeg12codecs.h"
51 #include "mpeg12data.h"
52 #include "mpeg12enc.h"
53 #include "mpegvideo.h"
54 #include "mpegvideodata.h"
55 #include "mpegvideoenc.h"
56 #include "h261enc.h"
57 #include "h263.h"
58 #include "h263data.h"
59 #include "h263enc.h"
60 #include "mjpegenc_common.h"
61 #include "mathops.h"
62 #include "mpegutils.h"
63 #include "mpegvideo_unquantize.h"
64 #include "mjpegenc.h"
65 #include "speedhqenc.h"
66 #include "msmpeg4enc.h"
67 #include "pixblockdsp.h"
68 #include "qpeldsp.h"
69 #include "faandct.h"
70 #include "aandcttab.h"
71 #include "mpeg4video.h"
72 #include "mpeg4videodata.h"
73 #include "mpeg4videoenc.h"
74 #include "internal.h"
75 #include "bytestream.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include "libavutil/refstruct.h"
79 #include <limits.h>
80 #include "sp5x.h"
81 
82 #define QUANT_BIAS_SHIFT 8
83 
84 #define QMAT_SHIFT_MMX 16
85 #define QMAT_SHIFT 21
86 
87 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
88 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
89 static int sse_mb(MPVEncContext *const s);
90 static void denoise_dct_c(MPVEncContext *const s, int16_t *block);
91 static int dct_quantize_c(MPVEncContext *const s,
92  int16_t *block, int n,
93  int qscale, int *overflow);
94 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
95 
96 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
97 
98 static const AVOption mpv_generic_options[] = {
101  { NULL },
102 };
103 
105  .class_name = "generic mpegvideo encoder",
106  .item_name = av_default_item_name,
107  .option = mpv_generic_options,
108  .version = LIBAVUTIL_VERSION_INT,
109 };
110 
/**
 * Precompute per-qscale quantization multiplier tables from a quant matrix.
 *
 * For every qscale in [qmin, qmax] this fills qmat[qscale][0..63] with a
 * fixed-point reciprocal of qscale2 * quant_matrix[], so the encoder can
 * quantize coefficients with a multiply instead of a divide.  The scaling
 * differs per FDCT implementation because ff_fdct_ifast leaves the AAN
 * post-scale factors (ff_aanscales) in its output, which must be divided
 * out here.  On the default path, 16-bit multiplier/bias pairs (qmat16)
 * for the SIMD quantizer are filled in as well.
 *
 * @param s            encoder context; provides the selected fdct and the
 *                     IDCT coefficient permutation
 * @param qmat         output: 32-bit multipliers, indexed [qscale][coeff]
 * @param qmat16       output: 16-bit multiplier ([0]) and rounding bias ([1])
 *                     per coefficient, for the default-DCT path only
 * @param quant_matrix input quantization matrix (natural order)
 * @param bias         quantizer rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale to fill (inclusive)
 * @param qmax         last qscale to fill (inclusive)
 * @param intra        nonzero to skip coefficient 0 (DC) in the overflow check
 */
void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        int qscale2;

        /* MPEG-2 style non-linear qscale, or plain linear (qscale * 2). */
        if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else                   qscale2 = qscale << 1;

        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            /* Accurate DCTs: output is unscaled, use the plain reciprocal. */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 *             1 <= x  <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 *           4194304 >= (1 << 22) / (x) >= 146 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            /* AAN fast DCT: fold the ff_aanscales post-scale factors into
             * the multiplier (hence the extra +14 bits of headroom). */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
                 * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
                 *             1247 <= x <= 900239760
                 * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
                 *           55107840 >= (1 << 36) / (x) >= 76 */

                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            /* Default path: also build the 16-bit tables for SIMD quantizers. */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 *             1 <= x  <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 *           4194304 >= (1 << 22) / (x) >= 146
                 *
                 *             1 <= x  <= 28560
                 * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
                 *           131072 >= (1 << 17) / (x) >= 4 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                /* Clamp the 16-bit multiplier away from 0 and from 0x8000
                 * so it stays a usable positive 16-bit value. */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                /* Rounding bias expressed relative to this multiplier. */
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
            }
        }

        /* Check how far the worst-case product (max coeff * multiplier)
         * exceeds INT_MAX; 'shift' is only used for the warning below. */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(s->c.avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
197 
/**
 * Derive the current qscale (and lambda2) from s->lambda.
 *
 * lambda is converted to a qscale via the fixed-point relation
 * qscale ~ lambda * 139 / 2^(FF_LAMBDA_SHIFT + 7), then clipped to the
 * configured [qmin, qmax] range.
 */
static inline void update_qscale(MPVMainEncContext *const m)
{
    MPVEncContext *const s = &m->s;

    if (s->c.q_scale_type == 1 && 0) {  /* "&& 0": branch deliberately disabled */
        int i;
        int bestdiff=INT_MAX;
        int best = 1;

        /* Pick the non-linear qscale whose effective scale is closest to
         * lambda, honoring qmin/qmax (qmax may be ignored for VBV). */
        for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
            int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
            if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
                (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
                continue;
            if (diff < bestdiff) {
                bestdiff = diff;
                best = i;
            }
        }
        s->c.qscale = best;
    } else {
        s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                      (FF_LAMBDA_SHIFT + 7);
        /* When the rate controller must ignore qmax to satisfy the VBV
         * constraints, allow the full 5-bit qscale range (up to 31). */
        s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
    }

    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
    /* NOTE(review): the right-hand shift amount of the statement above is
     * missing from this extraction (source line truncated) — TODO restore
     * it from the upstream file before building. */
}
227 
229 {
230  int i;
231 
232  if (matrix) {
233  put_bits(pb, 1, 1);
234  for (i = 0; i < 64; i++) {
235  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
236  }
237  } else
238  put_bits(pb, 1, 0);
239 }
240 
241 /**
242  * init s->c.cur_pic.qscale_table from s->lambda_table
243  */
244 static void init_qscale_tab(MPVEncContext *const s)
245 {
246  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
247 
248  for (int i = 0; i < s->c.mb_num; i++) {
249  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
250  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
251  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
252  s->c.avctx->qmax);
253  }
254 }
255 
257  const MPVEncContext *const src)
258 {
259 #define COPY(a) dst->a = src->a
260  COPY(c.pict_type);
261  COPY(f_code);
262  COPY(b_code);
263  COPY(c.qscale);
264  COPY(lambda);
265  COPY(lambda2);
266  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
267  COPY(c.progressive_frame); // FIXME don't set in encode_header
268  COPY(partitioned_frame); // FIXME don't set in encode_header
269 #undef COPY
270 }
271 
273 {
274  for (int i = -16; i < 16; i++)
275  default_fcode_tab[i + MAX_MV] = 1;
276 }
277 
278 /**
279  * Set the given MPVEncContext to defaults for encoding.
280  */
282 {
283  MPVEncContext *const s = &m->s;
284  static AVOnce init_static_once = AV_ONCE_INIT;
285 
287 
288  s->f_code = 1;
289  s->b_code = 1;
290 
291  if (!m->fcode_tab) {
293  ff_thread_once(&init_static_once, mpv_encode_init_static);
294  }
295  if (!s->c.y_dc_scale_table) {
296  s->c.y_dc_scale_table =
297  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
298  }
299 }
300 
302 {
303  s->dct_quantize = dct_quantize_c;
304  s->denoise_dct = denoise_dct_c;
305 
306 #if ARCH_MIPS
308 #elif ARCH_X86
310 #endif
311 
312  if (s->c.avctx->trellis)
313  s->dct_quantize = dct_quantize_trellis_c;
314 }
315 
317 {
318  MpegEncContext *const s = &s2->c;
319  MPVUnquantDSPContext unquant_dsp_ctx;
320 
321  ff_mpv_unquantize_init(&unquant_dsp_ctx,
322  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
323 
324  if (s2->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
325  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
326  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
327  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
328  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
329  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
330  } else {
331  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
332  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
333  }
334 }
335 
337 {
338  MPVEncContext *const s = &m->s;
339  MECmpContext mecc;
340  me_cmp_func me_cmp[6];
341  int ret;
342 
343  ff_me_cmp_init(&mecc, avctx);
344  ret = ff_me_init(&s->me, avctx, &mecc, 1);
345  if (ret < 0)
346  return ret;
347  ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
348  if (ret < 0)
349  return ret;
350  m->frame_skip_cmp_fn = me_cmp[1];
352  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
353  if (ret < 0)
354  return ret;
355  if (!me_cmp[0] || !me_cmp[4])
356  return AVERROR(EINVAL);
357  s->ildct_cmp[0] = me_cmp[0];
358  s->ildct_cmp[1] = me_cmp[4];
359  }
360 
361  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
362 
363  s->sse_cmp[0] = mecc.sse[0];
364  s->sse_cmp[1] = mecc.sse[1];
365  s->sad_cmp[0] = mecc.sad[0];
366  s->sad_cmp[1] = mecc.sad[1];
367  if (avctx->mb_cmp == FF_CMP_NSSE) {
368  s->n_sse_cmp[0] = mecc.nsse[0];
369  s->n_sse_cmp[1] = mecc.nsse[1];
370  } else {
371  s->n_sse_cmp[0] = mecc.sse[0];
372  s->n_sse_cmp[1] = mecc.sse[1];
373  }
374 
375  return 0;
376 }
377 
378 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
380 {
381  MPVEncContext *const s = &m->s;
382  const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
383  const uint16_t *intra_matrix, *inter_matrix;
384  int ret;
385 
386  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
387  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
388  return AVERROR(ENOMEM);
389 
390  if (s->c.out_format == FMT_MJPEG) {
391  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
392  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
393  // No need to set q_inter_matrix
395  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
396  return 0;
397  } else {
398  s->q_chroma_intra_matrix = s->q_intra_matrix;
399  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
400  }
401  if (!m->intra_only) {
402  s->q_inter_matrix = s->q_intra_matrix + 32;
403  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
404  }
405 
406  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
407  s->mpeg_quant) {
410  } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
411  intra_matrix =
413  } else {
414  /* MPEG-1/2, SpeedHQ */
417  }
418  if (avctx->intra_matrix)
420  if (avctx->inter_matrix)
422 
423  /* init q matrix */
424  for (int i = 0; i < 64; i++) {
425  int j = s->c.idsp.idct_permutation[i];
426 
427  s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
428  s->c.inter_matrix[j] = inter_matrix[i];
429  }
430 
431  /* precompute matrix */
433  if (ret < 0)
434  return ret;
435 
436  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
437  s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
438  31, 1);
439  if (s->q_inter_matrix)
440  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
441  s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
442  31, 0);
443 
444  return 0;
445 }
446 
448 {
449  MPVEncContext *const s = &m->s;
450  int has_b_frames = !!m->max_b_frames;
451  int16_t (*mv_table)[2];
452 
453  /* Allocate MB type table */
454  unsigned mb_array_size = s->c.mb_stride * s->c.mb_height;
455  s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
456  if (!s->mb_type)
457  return AVERROR(ENOMEM);
458  s->mc_mb_var = s->mb_type + mb_array_size;
459  s->mb_var = s->mc_mb_var + mb_array_size;
460  s->mb_mean = (uint8_t*)(s->mb_var + mb_array_size);
461 
462  if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
463  return AVERROR(ENOMEM);
464 
465  unsigned mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
466  unsigned nb_mv_tables = 1 + 5 * has_b_frames;
467  if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
468  (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
469  nb_mv_tables += 8 * has_b_frames;
470  s->p_field_select_table[0] = av_calloc(mv_table_size, 2 * (2 + 4 * has_b_frames));
471  if (!s->p_field_select_table[0])
472  return AVERROR(ENOMEM);
473  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
474  }
475 
476  mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
477  if (!mv_table)
478  return AVERROR(ENOMEM);
479  m->mv_table_base = mv_table;
480  mv_table += s->c.mb_stride + 1;
481 
482  s->p_mv_table = mv_table;
483  if (has_b_frames) {
484  s->b_forw_mv_table = mv_table += mv_table_size;
485  s->b_back_mv_table = mv_table += mv_table_size;
486  s->b_bidir_forw_mv_table = mv_table += mv_table_size;
487  s->b_bidir_back_mv_table = mv_table += mv_table_size;
488  s->b_direct_mv_table = mv_table += mv_table_size;
489 
490  if (s->p_field_select_table[1]) { // MPEG-4 or INTERLACED_ME above
491  uint8_t *field_select = s->p_field_select_table[1];
492  for (int j = 0; j < 2; j++) {
493  for (int k = 0; k < 2; k++) {
494  for (int l = 0; l < 2; l++)
495  s->b_field_mv_table[j][k][l] = mv_table += mv_table_size;
496  s->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
497  }
498  }
499  }
500  }
501 
502  return 0;
503 }
504 
506 {
507  MPVEncContext *const s = &m->s;
508  // Align the following per-thread buffers to avoid false sharing.
509  enum {
510 #ifndef _MSC_VER
511  /// The number is supposed to match/exceed the cache-line size.
512  ALIGN = FFMAX(128, _Alignof(max_align_t)),
513 #else
514  ALIGN = 128,
515 #endif
516  DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
517  };
518  static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
519  "Need checks for potential overflow.");
520  unsigned nb_slices = s->c.slice_context_count;
521  char *dct_error = NULL;
522 
523  if (m->noise_reduction) {
524  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
525  return AVERROR(ENOMEM);
526  dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
527  if (!dct_error)
528  return AVERROR(ENOMEM);
530  dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
531  }
532 
533  const int y_size = s->c.b8_stride * (2 * s->c.mb_height + 1);
534  const int c_size = s->c.mb_stride * (s->c.mb_height + 1);
535  const int yc_size = y_size + 2 * c_size;
536  ptrdiff_t offset = 0;
537 
538  for (unsigned i = 0; i < nb_slices; ++i) {
539  MPVEncContext *const s2 = s->c.enc_contexts[i];
540 
541  s2->block = s2->blocks[0];
542 
543  if (dct_error) {
544  s2->dct_offset = s->dct_offset;
545  s2->dct_error_sum = (void*)dct_error;
546  dct_error += DCT_ERROR_SIZE;
547  }
548 
549  if (s2->c.ac_val) {
550  s2->c.dc_val += offset + i;
551  s2->c.ac_val += offset;
552  offset += yc_size;
553  }
554  }
555  return 0;
556 }
557 
558 /* init video encoder */
560 {
561  MPVMainEncContext *const m = avctx->priv_data;
562  MPVEncContext *const s = &m->s;
563  AVCPBProperties *cpb_props;
564  int gcd, ret;
565 
567 
568  switch (avctx->pix_fmt) {
569  case AV_PIX_FMT_YUVJ444P:
570  case AV_PIX_FMT_YUV444P:
571  s->c.chroma_format = CHROMA_444;
572  break;
573  case AV_PIX_FMT_YUVJ422P:
574  case AV_PIX_FMT_YUV422P:
575  s->c.chroma_format = CHROMA_422;
576  break;
577  default:
578  av_unreachable("Already checked via CODEC_PIXFMTS");
579  case AV_PIX_FMT_YUVJ420P:
580  case AV_PIX_FMT_YUV420P:
581  s->c.chroma_format = CHROMA_420;
582  break;
583  }
584 
586 
587  m->bit_rate = avctx->bit_rate;
588  s->c.width = avctx->width;
589  s->c.height = avctx->height;
590  if (avctx->gop_size > 600 &&
593  "keyframe interval too large!, reducing it from %d to %d\n",
594  avctx->gop_size, 600);
595  avctx->gop_size = 600;
596  }
597  m->gop_size = avctx->gop_size;
598  s->c.avctx = avctx;
600  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
601  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
603  } else if (avctx->max_b_frames < 0) {
605  "max b frames must be 0 or positive for mpegvideo based encoders\n");
606  return AVERROR(EINVAL);
607  }
609  s->c.codec_id = avctx->codec->id;
611  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
612  return AVERROR(EINVAL);
613  }
614 
615  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
616  s->rtp_mode = !!s->rtp_payload_size;
617  s->c.intra_dc_precision = avctx->intra_dc_precision;
618 
619  // workaround some differences between how applications specify dc precision
620  if (s->c.intra_dc_precision < 0) {
621  s->c.intra_dc_precision += 8;
622  } else if (s->c.intra_dc_precision >= 8)
623  s->c.intra_dc_precision -= 8;
624 
625  if (s->c.intra_dc_precision < 0) {
627  "intra dc precision must be positive, note some applications use"
628  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
629  return AVERROR(EINVAL);
630  }
631 
632  if (s->c.intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
633  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
634  return AVERROR(EINVAL);
635  }
637 
638  if (m->gop_size <= 1) {
639  m->intra_only = 1;
640  m->gop_size = 12;
641  } else {
642  m->intra_only = 0;
643  }
644 
645  /* Fixed QSCALE */
647 
648  s->adaptive_quant = (avctx->lumi_masking ||
649  avctx->dark_masking ||
652  avctx->p_masking ||
653  m->border_masking ||
654  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
655  !m->fixed_qscale;
656 
657  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
658 
660  switch(avctx->codec_id) {
663  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
664  break;
665  case AV_CODEC_ID_MPEG4:
669  if (avctx->rc_max_rate >= 15000000) {
670  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
671  } else if(avctx->rc_max_rate >= 2000000) {
672  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
673  } else if(avctx->rc_max_rate >= 384000) {
674  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
675  } else
676  avctx->rc_buffer_size = 40;
677  avctx->rc_buffer_size *= 16384;
678  break;
679  }
680  if (avctx->rc_buffer_size) {
681  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
682  }
683  }
684 
685  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
686  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
687  return AVERROR(EINVAL);
688  }
689 
692  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
693  }
694 
696  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
697  return AVERROR(EINVAL);
698  }
699 
701  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
702  return AVERROR(EINVAL);
703  }
704 
705  if (avctx->rc_max_rate &&
709  "impossible bitrate constraints, this will fail\n");
710  }
711 
712  if (avctx->rc_buffer_size &&
715  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
716  return AVERROR(EINVAL);
717  }
718 
719  if (!m->fixed_qscale &&
722  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
724  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
725  if (nbt <= INT_MAX) {
726  avctx->bit_rate_tolerance = nbt;
727  } else
728  avctx->bit_rate_tolerance = INT_MAX;
729  }
730 
731  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
732  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
733  s->c.codec_id != AV_CODEC_ID_FLV1) {
734  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
735  return AVERROR(EINVAL);
736  }
737 
738  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
740  "OBMC is only supported with simple mb decision\n");
741  return AVERROR(EINVAL);
742  }
743 
744  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
745  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
746  return AVERROR(EINVAL);
747  }
748 
749  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
750  s->c.codec_id == AV_CODEC_ID_H263 ||
751  s->c.codec_id == AV_CODEC_ID_H263P) &&
752  (avctx->sample_aspect_ratio.num > 255 ||
753  avctx->sample_aspect_ratio.den > 255)) {
755  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
759  }
760 
761  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
762  s->c.codec_id == AV_CODEC_ID_H263P) &&
763  (avctx->width > 2048 ||
764  avctx->height > 1152 )) {
765  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
766  return AVERROR(EINVAL);
767  }
768  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
769  (avctx->width > 65535 ||
770  avctx->height > 65535 )) {
771  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
772  return AVERROR(EINVAL);
773  }
774  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
775  s->c.codec_id == AV_CODEC_ID_H263P ||
776  s->c.codec_id == AV_CODEC_ID_RV20) &&
777  ((avctx->width &3) ||
778  (avctx->height&3) )) {
779  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
780  return AVERROR(EINVAL);
781  }
782 
783  if (s->c.codec_id == AV_CODEC_ID_RV10 &&
784  (avctx->width &15 ||
785  avctx->height&15 )) {
786  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
787  return AVERROR(EINVAL);
788  }
789 
790  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
791  s->c.codec_id == AV_CODEC_ID_WMV2) &&
792  avctx->width & 1) {
793  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
794  return AVERROR(EINVAL);
795  }
796 
798  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
799  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
800  return AVERROR(EINVAL);
801  }
802 
803  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
804  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
805  return AVERROR(EINVAL);
806  }
807 
808  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
810  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
811  return AVERROR(EINVAL);
812  }
813 
814  if (m->scenechange_threshold < 1000000000 &&
817  "closed gop with scene change detection are not supported yet, "
818  "set threshold to 1000000000\n");
819  return AVERROR_PATCHWELCOME;
820  }
821 
823  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
826  "low delay forcing is only available for mpeg2, "
827  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
828  return AVERROR(EINVAL);
829  }
830  if (m->max_b_frames != 0) {
832  "B-frames cannot be used with low delay\n");
833  return AVERROR(EINVAL);
834  }
835  }
836 
837  if (avctx->slices > 1 &&
839  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
840  return AVERROR(EINVAL);
841  }
842 
845  "notice: b_frame_strategy only affects the first pass\n");
846  m->b_frame_strategy = 0;
847  }
848 
850  if (gcd > 1) {
851  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
852  avctx->time_base.den /= gcd;
853  avctx->time_base.num /= gcd;
854  //return -1;
855  }
856 
857  if (s->mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
858  // (a + x * 3 / 8) / x
859  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
860  s->inter_quant_bias = 0;
861  } else {
862  s->intra_quant_bias = 0;
863  // (a - x / 4) / x
864  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
865  }
866 
867  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
868  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
869  return AVERROR(EINVAL);
870  }
871 
872  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
873 
874  switch (avctx->codec->id) {
875 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
877  s->rtp_mode = 1;
878  /* fallthrough */
880  s->c.out_format = FMT_MPEG1;
881  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
882  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
884  break;
885 #endif
886 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
887  case AV_CODEC_ID_MJPEG:
888  case AV_CODEC_ID_AMV:
889  s->c.out_format = FMT_MJPEG;
890  m->intra_only = 1; /* force intra only for jpeg */
891  avctx->delay = 0;
892  s->c.low_delay = 1;
893  break;
894 #endif
895  case AV_CODEC_ID_SPEEDHQ:
896  s->c.out_format = FMT_SPEEDHQ;
897  m->intra_only = 1; /* force intra only for SHQ */
898  avctx->delay = 0;
899  s->c.low_delay = 1;
900  break;
901  case AV_CODEC_ID_H261:
902  s->c.out_format = FMT_H261;
903  avctx->delay = 0;
904  s->c.low_delay = 1;
905  s->rtp_mode = 0; /* Sliced encoding not supported */
906  break;
907  case AV_CODEC_ID_H263:
908  if (!CONFIG_H263_ENCODER)
911  s->c.width, s->c.height) == 8) {
913  "The specified picture size of %dx%d is not valid for "
914  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
915  "352x288, 704x576, and 1408x1152. "
916  "Try H.263+.\n", s->c.width, s->c.height);
917  return AVERROR(EINVAL);
918  }
919  s->c.out_format = FMT_H263;
920  avctx->delay = 0;
921  s->c.low_delay = 1;
922  break;
923  case AV_CODEC_ID_H263P:
924  s->c.out_format = FMT_H263;
925  /* Fx */
926  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
927  s->modified_quant = s->c.h263_aic;
928  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
929  s->me.unrestricted_mv = s->c.obmc || s->loop_filter || s->umvplus;
930  s->flipflop_rounding = 1;
931 
932  /* /Fx */
933  /* These are just to be sure */
934  avctx->delay = 0;
935  s->c.low_delay = 1;
936  break;
937  case AV_CODEC_ID_FLV1:
938  s->c.out_format = FMT_H263;
939  s->me.unrestricted_mv = 1;
940  s->rtp_mode = 0; /* don't allow GOB */
941  avctx->delay = 0;
942  s->c.low_delay = 1;
943  break;
944 #if CONFIG_RV10_ENCODER
945  case AV_CODEC_ID_RV10:
947  s->c.out_format = FMT_H263;
948  avctx->delay = 0;
949  s->c.low_delay = 1;
950  break;
951 #endif
952 #if CONFIG_RV20_ENCODER
953  case AV_CODEC_ID_RV20:
955  s->c.out_format = FMT_H263;
956  avctx->delay = 0;
957  s->c.low_delay = 1;
958  s->modified_quant = 1;
959  // Set here to force allocation of dc_val;
960  // will be set later on a per-frame basis.
961  s->c.h263_aic = 1;
962  s->loop_filter = 1;
963  s->me.unrestricted_mv = 0;
964  break;
965 #endif
966  case AV_CODEC_ID_MPEG4:
967  s->c.out_format = FMT_H263;
968  s->c.h263_pred = 1;
969  s->me.unrestricted_mv = 1;
970  s->flipflop_rounding = 1;
971  s->c.low_delay = m->max_b_frames ? 0 : 1;
972  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
973  break;
975  s->c.out_format = FMT_H263;
976  s->c.h263_pred = 1;
977  s->me.unrestricted_mv = 1;
978  s->c.msmpeg4_version = MSMP4_V2;
979  avctx->delay = 0;
980  s->c.low_delay = 1;
981  break;
983  s->c.out_format = FMT_H263;
984  s->c.h263_pred = 1;
985  s->me.unrestricted_mv = 1;
986  s->c.msmpeg4_version = MSMP4_V3;
987  s->flipflop_rounding = 1;
988  avctx->delay = 0;
989  s->c.low_delay = 1;
990  break;
991  case AV_CODEC_ID_WMV1:
992  s->c.out_format = FMT_H263;
993  s->c.h263_pred = 1;
994  s->me.unrestricted_mv = 1;
995  s->c.msmpeg4_version = MSMP4_WMV1;
996  s->flipflop_rounding = 1;
997  avctx->delay = 0;
998  s->c.low_delay = 1;
999  break;
1000  case AV_CODEC_ID_WMV2:
1001  s->c.out_format = FMT_H263;
1002  s->c.h263_pred = 1;
1003  s->me.unrestricted_mv = 1;
1004  s->c.msmpeg4_version = MSMP4_WMV2;
1005  s->flipflop_rounding = 1;
1006  avctx->delay = 0;
1007  s->c.low_delay = 1;
1008  break;
1009  default:
1010  av_unreachable("List contains all codecs using ff_mpv_encode_init()");
1011  }
1012 
1013  avctx->has_b_frames = !s->c.low_delay;
1014 
1015  s->c.encoding = 1;
1016 
1017  s->c.progressive_frame =
1018  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
1020  s->c.alternate_scan);
1021 
1024  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1025  (1 << AV_PICTURE_TYPE_P) |
1026  (1 << AV_PICTURE_TYPE_B);
1027  } else if (!m->intra_only) {
1028  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1029  (1 << AV_PICTURE_TYPE_P);
1030  } else {
1031  s->frame_reconstruction_bitfield = 0;
1032  }
1033 
1034  if (m->lmin > m->lmax) {
1035  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1036  m->lmin = m->lmax;
1037  }
1038 
1039  /* ff_mpv_init_duplicate_contexts() will copy (memdup) the contents of the
1040  * main slice to the slice contexts, so we initialize various fields of it
1041  * before calling ff_mpv_init_duplicate_contexts(). */
1042  s->parent = m;
1043  ff_mpv_idct_init(&s->c);
1045  ff_fdctdsp_init(&s->fdsp, avctx);
1046  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1047  ff_pixblockdsp_init(&s->pdsp, 8);
1048  ret = me_cmp_init(m, avctx);
1049  if (ret < 0)
1050  return ret;
1051 
1052  if (!(avctx->stats_out = av_mallocz(256)) ||
1053  !(s->new_pic = av_frame_alloc()) ||
1054  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1055  return AVERROR(ENOMEM);
1056 
1057  ret = init_matrices(m, avctx);
1058  if (ret < 0)
1059  return ret;
1060 
1062 
1063  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1065 #if CONFIG_MSMPEG4ENC
1066  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1068 #endif
1069  }
1070 
1071  s->c.slice_ctx_size = sizeof(*s);
1072  ret = ff_mpv_common_init(&s->c);
1073  if (ret < 0)
1074  return ret;
1075  ret = init_buffers(m);
1076  if (ret < 0)
1077  return ret;
1078  if (s->c.slice_context_count > 1) {
1079  s->rtp_mode = 1;
1081  s->h263_slice_structured = 1;
1082  }
1084  if (ret < 0)
1085  return ret;
1086 
1087  ret = init_slice_buffers(m);
1088  if (ret < 0)
1089  return ret;
1090 
1092  if (ret < 0)
1093  return ret;
1094 
1095  if (m->b_frame_strategy == 2) {
1096  for (int i = 0; i < m->max_b_frames + 2; i++) {
1097  m->tmp_frames[i] = av_frame_alloc();
1098  if (!m->tmp_frames[i])
1099  return AVERROR(ENOMEM);
1100 
1102  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1103  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1104 
1105  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1106  if (ret < 0)
1107  return ret;
1108  }
1109  }
1110 
1111  cpb_props = ff_encode_add_cpb_side_data(avctx);
1112  if (!cpb_props)
1113  return AVERROR(ENOMEM);
1114  cpb_props->max_bitrate = avctx->rc_max_rate;
1115  cpb_props->min_bitrate = avctx->rc_min_rate;
1116  cpb_props->avg_bitrate = avctx->bit_rate;
1117  cpb_props->buffer_size = avctx->rc_buffer_size;
1118 
1119  return 0;
1120 }
1121 
1123 {
1124  MPVMainEncContext *const m = avctx->priv_data;
1125  MPVEncContext *const s = &m->s;
1126 
1128 
1129  ff_mpv_common_end(&s->c);
1130  av_refstruct_pool_uninit(&s->c.picture_pool);
1131 
1132  for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
1135  }
1136  for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
1137  av_frame_free(&m->tmp_frames[i]);
1138 
1139  av_frame_free(&s->new_pic);
1140 
1142 
1143  av_freep(&m->mv_table_base);
1144  av_freep(&s->p_field_select_table[0]);
1146 
1147  av_freep(&s->mb_type);
1148  av_freep(&s->lambda_table);
1149 
1150  av_freep(&s->q_intra_matrix);
1151  av_freep(&s->q_intra_matrix16);
1152  av_freep(&s->dct_offset);
1153 
1154  return 0;
1155 }
1156 
/**
 * Dequantize an intra block and write (overwrite) its IDCT result to dest[].
 *
 * @param block     quantized coefficients of block index i
 * @param i         block index (selects the quantizer state for this block)
 * @param dest      destination pixel pointer (written, not added to)
 * @param line_size destination stride in bytes
 * @param qscale    quantizer scale to use for dequantization
 */
static inline void put_dct(MPVEncContext *const s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->c.dct_unquantize_intra(&s->c, block, i, qscale);
    s->c.idsp.idct_put(dest, line_size, block);
}
1164 
1165 static inline void add_dequant_dct(MPVEncContext *const s,
1166  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1167 {
1168  if (s->c.block_last_index[i] >= 0) {
1169  s->c.dct_unquantize_inter(&s->c, block, i, qscale);
1170 
1171  s->c.idsp.idct_add(dest, line_size, block);
1172  }
1173 }
1174 
/**
 * Performs dequantization and IDCT (if necessary)
 *
 * Inter blocks are added on top of dest[] (which already contains the
 * motion-compensated prediction); intra blocks overwrite dest[].
 * Reconstruction is skipped entirely for picture types not selected in
 * s->frame_reconstruction_bitfield.
 */
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
{
    if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                /* coefficients are printed in coding order via the IDCT permutation */
                av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->c.idsp.idct_permutation[j]]);
            }
            av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
        }
    }

    if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
        uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
        int dct_linesize, dct_offset;
        const int linesize   = s->c.cur_pic.linesize[0];
        const int uvlinesize = s->c.cur_pic.linesize[1];
        const int block_size = 8;

        /* For an interlaced (field) DCT the two fields are interleaved:
         * double the stride and start the second field one line down
         * instead of block_size lines down. */
        dct_linesize = linesize << s->c.interlaced_dct;
        dct_offset   = s->c.interlaced_dct ? linesize : linesize * block_size;

        if (!s->c.mb_intra) {
            /* No MC, as that was already done otherwise */
            add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->c.qscale);
            add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);

            if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->c.chroma_y_shift) {
                    /* chroma subsampled vertically (e.g. 4:2:0): one 8x8 block per plane */
                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
                } else {
                    /* no vertical chroma subsampling: two stacked 8x8 blocks per plane */
                    dct_linesize >>= 1;
                    dct_offset   >>= 1;
                    add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
                    add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
                }
            }
        } else {
            /* dct only in intra block */
            put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->c.qscale);
            put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->c.qscale);
            put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->c.qscale);
            put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);

            if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->c.chroma_y_shift) {
                    put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
                } else {
                    dct_offset   >>= 1;
                    dct_linesize >>= 1;
                    put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
                    put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
                }
            }
        }
    }
}
1245 
/**
 * Sum of absolute differences between a 16x16 block of pixels and the
 * constant value ref (the caller passes the block mean, making this an
 * activity/flatness measure).
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int y = 0; y < 16; y++) {
        for (int x = 0; x < 16; x++) {
            const int diff = src[x + y * stride] - ref;
            sum += diff >= 0 ? diff : -diff;
        }
    }

    return sum;
}
1259 
1260 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1261  const uint8_t *ref, int stride)
1262 {
1263  int x, y, w, h;
1264  int acc = 0;
1265 
1266  w = s->c.width & ~15;
1267  h = s->c.height & ~15;
1268 
1269  for (y = 0; y < h; y += 16) {
1270  for (x = 0; x < w; x += 16) {
1271  int offset = x + y * stride;
1272  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1273  stride, 16);
1274  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1275  int sae = get_sae(src + offset, mean, stride);
1276 
1277  acc += sae + 500 < sad;
1278  }
1279  }
1280  return acc;
1281 }
1282 
1283 /**
1284  * Allocates new buffers for an AVFrame and copies the properties
1285  * from another AVFrame.
1286  */
1287 static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
1288 {
1289  AVCodecContext *avctx = s->c.avctx;
1290  int ret;
1291 
1292  f->width = avctx->width + 2 * EDGE_WIDTH;
1293  f->height = avctx->height + 2 * EDGE_WIDTH;
1294 
1296  if (ret < 0)
1297  return ret;
1298 
1299  ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
1300  if (ret < 0)
1301  return ret;
1302 
1303  for (int i = 0; f->data[i]; i++) {
1304  int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
1305  f->linesize[i] +
1306  (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
1307  f->data[i] += offset;
1308  }
1309  f->width = avctx->width;
1310  f->height = avctx->height;
1311 
1312  ret = av_frame_copy_props(f, props_frame);
1313  if (ret < 0)
1314  return ret;
1315 
1316  return 0;
1317 }
1318 
/**
 * Take one input frame (or NULL to signal flushing), wrap or copy it into
 * an MPVPicture and append it to the m->input_picture[] FIFO that feeds
 * frame-type selection and reordering.
 *
 * If the input frame's layout matches the encoder's (same strides,
 * mb-aligned dimensions, sufficient alignment) it is referenced directly
 * ("shared"); otherwise its planes are copied into newly allocated,
 * edge-padded storage.
 *
 * @param pic_arg the next input frame, or NULL when flushing
 * @return 0 on success, a negative AVERROR code on failure
 */
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
{
    MPVEncContext *const s = &m->s;
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    int encoding_delay = m->max_b_frames ? m->max_b_frames
                                         : (s->c.low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!m->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = m->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = m->user_specified_pts;

                /* input timestamps must be strictly monotonic */
                if (pts <= last) {
                    av_log(s->c.avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->c.low_delay && display_picture_number == 1)
                    m->dts_delta = pts - last;
            }
            m->user_specified_pts = pts;
        } else {
            /* no pts on the input frame: guess last+1, or fall back to
             * the display picture number */
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                m->user_specified_pts =
                pts = m->user_specified_pts + 1;
                av_log(s->c.avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* direct rendering (referencing the caller's buffer) is only
         * possible when strides, dimensions and alignment all match */
        if (pic_arg->linesize[0] != s->c.linesize ||
            pic_arg->linesize[1] != s->c.uvlinesize ||
            pic_arg->linesize[2] != s->c.uvlinesize)
            direct = 0;
        if ((s->c.width & 15) || (s->c.height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->c.linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->c.avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);

        pic = av_refstruct_pool_get(s->c.picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* allocate padded storage and copy the planes */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
                int h_shift = i ? s->c.chroma_x_shift : 0;
                int v_shift = i ? s->c.chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
                int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* interlaced MPEG-2 with height far from a multiple of 32
                 * needs more vertical padding */
                if (   s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->c.progressive_sequence
                    && FFALIGN(s->c.height, 32) - s->c.height > 16)
                    vpad = 32;

                if (!s->c.avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    /* one memcpy for all lines; the last line only needs w bytes */
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* pad out to macroblock-aligned dimensions if needed */
                if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
            emms_c();
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!m->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure m->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (m->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i - flush_offset] = m->input_picture[i];
    for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i] = NULL;

    m->input_picture[encoding_delay] = pic;

    return 0;
fail:
    av_refstruct_unref(&pic);
    return ret;
}
1459 
/**
 * Decide whether picture p is similar enough to the reference picture ref
 * that it can be skipped instead of being coded.
 *
 * Per-8x8-block differences from m->frame_skip_cmp_fn are aggregated
 * according to |m->frame_skip_exp| (0: maximum, 1: sum of absolute values,
 * 2-4: sums of higher powers); a negative exponent additionally maps the
 * aggregate through pow() to a per-macroblock average.
 *
 * @return 1 if the frame should be skipped, 0 otherwise
 */
static int skip_check(MPVMainEncContext *const m,
                      const MPVPicture *p, const MPVPicture *ref)
{
    MPVEncContext *const s = &m->s;
    int score = 0;
    int64_t score64 = 0;

    for (int plane = 0; plane < 3; plane++) {
        const int stride = p->f->linesize[plane];
        /* 2x2 8x8 blocks per macroblock for luma, 1x1 for chroma */
        const int bw = plane ? 1 : 2;
        for (int y = 0; y < s->c.mb_height * bw; y++) {
            for (int x = 0; x < s->c.mb_width * bw; x++) {
                /* non-shared input was stored with an extra offset —
                 * see load_input_picture() */
                int off = p->shared ? 0 : 16;
                const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
                const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
                int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);

                switch (FFABS(m->frame_skip_exp)) {
                case 0: score = FFMAX(score, v); break;
                case 1: score += FFABS(v); break;
                case 2: score64 += v * (int64_t)v; break;
                case 3: score64 += FFABS(v * (int64_t)v * v); break;
                case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
                }
            }
        }
    }
    emms_c();

    /* exponents 0 and 1 accumulate into the 32-bit score */
    if (score)
        score64 = score;
    if (m->frame_skip_exp < 0)
        score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
                      -1.0/m->frame_skip_exp);

    if (score64 < m->frame_skip_threshold)
        return 1;
    /* second, lambda-scaled threshold */
    if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
        return 1;
    return 0;
}
1501 
1503 {
1504  int ret;
1505  int size = 0;
1506 
1508  if (ret < 0)
1509  return ret;
1510 
1511  do {
1513  if (ret >= 0) {
1514  size += pkt->size;
1516  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1517  return ret;
1518  } while (ret >= 0);
1519 
1520  return size;
1521 }
1522 
1524 {
1525  MPVEncContext *const s = &m->s;
1526  AVPacket *pkt;
1527  const int scale = m->brd_scale;
1528  int width = s->c.width >> scale;
1529  int height = s->c.height >> scale;
1530  int out_size, p_lambda, b_lambda, lambda2;
1531  int64_t best_rd = INT64_MAX;
1532  int best_b_count = -1;
1533  int ret = 0;
1534 
1535  av_assert0(scale >= 0 && scale <= 3);
1536 
1537  pkt = av_packet_alloc();
1538  if (!pkt)
1539  return AVERROR(ENOMEM);
1540 
1541  //emms_c();
1542  p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
1543  //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
1544  b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
1545  if (!b_lambda) // FIXME we should do this somewhere else
1546  b_lambda = p_lambda;
1547  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1549 
1550  for (int i = 0; i < m->max_b_frames + 2; i++) {
1551  const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
1552  s->c.next_pic.ptr;
1553 
1554  if (pre_input_ptr) {
1555  const uint8_t *data[4];
1556  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1557 
1558  if (!pre_input_ptr->shared && i) {
1559  data[0] += INPLACE_OFFSET;
1560  data[1] += INPLACE_OFFSET;
1561  data[2] += INPLACE_OFFSET;
1562  }
1563 
1564  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
1565  m->tmp_frames[i]->linesize[0],
1566  data[0],
1567  pre_input_ptr->f->linesize[0],
1568  width, height);
1569  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
1570  m->tmp_frames[i]->linesize[1],
1571  data[1],
1572  pre_input_ptr->f->linesize[1],
1573  width >> 1, height >> 1);
1574  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
1575  m->tmp_frames[i]->linesize[2],
1576  data[2],
1577  pre_input_ptr->f->linesize[2],
1578  width >> 1, height >> 1);
1579  }
1580  }
1581 
1582  for (int j = 0; j < m->max_b_frames + 1; j++) {
1583  AVCodecContext *c;
1584  int64_t rd = 0;
1585 
1586  if (!m->input_picture[j])
1587  break;
1588 
1590  if (!c) {
1591  ret = AVERROR(ENOMEM);
1592  goto fail;
1593  }
1594 
1595  c->width = width;
1596  c->height = height;
1598  c->flags |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
1599  c->mb_decision = s->c.avctx->mb_decision;
1600  c->me_cmp = s->c.avctx->me_cmp;
1601  c->mb_cmp = s->c.avctx->mb_cmp;
1602  c->me_sub_cmp = s->c.avctx->me_sub_cmp;
1603  c->pix_fmt = AV_PIX_FMT_YUV420P;
1604  c->time_base = s->c.avctx->time_base;
1605  c->max_b_frames = m->max_b_frames;
1606 
1607  ret = avcodec_open2(c, s->c.avctx->codec, NULL);
1608  if (ret < 0)
1609  goto fail;
1610 
1611 
1613  m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1614 
1615  out_size = encode_frame(c, m->tmp_frames[0], pkt);
1616  if (out_size < 0) {
1617  ret = out_size;
1618  goto fail;
1619  }
1620 
1621  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1622 
1623  for (int i = 0; i < m->max_b_frames + 1; i++) {
1624  int is_p = i % (j + 1) == j || i == m->max_b_frames;
1625 
1626  m->tmp_frames[i + 1]->pict_type = is_p ?
1628  m->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1629 
1630  out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
1631  if (out_size < 0) {
1632  ret = out_size;
1633  goto fail;
1634  }
1635 
1636  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1637  }
1638 
1639  /* get the delayed frames */
1641  if (out_size < 0) {
1642  ret = out_size;
1643  goto fail;
1644  }
1645  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1646 
1647  rd += c->error[0] + c->error[1] + c->error[2];
1648 
1649  if (rd < best_rd) {
1650  best_rd = rd;
1651  best_b_count = j;
1652  }
1653 
1654 fail:
1657  if (ret < 0) {
1658  best_b_count = ret;
1659  break;
1660  }
1661  }
1662 
1663  av_packet_free(&pkt);
1664 
1665  return best_b_count;
1666 }
1667 
1668 /**
1669  * Determines whether an input picture is discarded or not
1670  * and if not determines the length of the next chain of B frames
1671  * and moves these pictures (including the P frame) into
1672  * reordered_input_picture.
1673  * input_picture[0] is always NULL when exiting this function, even on error;
1674  * reordered_input_picture[0] is always NULL when exiting this function on error.
1675  */
1677 {
1678  MPVEncContext *const s = &m->s;
1679 
1680  /* Either nothing to do or can't do anything */
1681  if (m->reordered_input_picture[0] || !m->input_picture[0])
1682  return 0;
1683 
1684  /* set next picture type & ordering */
1685  if (m->frame_skip_threshold || m->frame_skip_factor) {
1686  if (m->picture_in_gop_number < m->gop_size &&
1687  s->c.next_pic.ptr &&
1688  skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
1689  // FIXME check that the gop check above is +-1 correct
1691 
1692  ff_vbv_update(m, 0);
1693 
1694  return 0;
1695  }
1696  }
1697 
1698  if (/* m->picture_in_gop_number >= m->gop_size || */
1699  !s->c.next_pic.ptr || m->intra_only) {
1700  m->reordered_input_picture[0] = m->input_picture[0];
1701  m->input_picture[0] = NULL;
1704  m->coded_picture_number++;
1705  } else {
1706  int b_frames = 0;
1707 
1708  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
1709  for (int i = 0; i < m->max_b_frames + 1; i++) {
1710  int pict_num = m->input_picture[0]->display_picture_number + i;
1711 
1712  if (pict_num >= m->rc_context.num_entries)
1713  break;
1714  if (!m->input_picture[i]) {
1715  m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1716  break;
1717  }
1718 
1719  m->input_picture[i]->f->pict_type =
1720  m->rc_context.entry[pict_num].new_pict_type;
1721  }
1722  }
1723 
1724  if (m->b_frame_strategy == 0) {
1725  b_frames = m->max_b_frames;
1726  while (b_frames && !m->input_picture[b_frames])
1727  b_frames--;
1728  } else if (m->b_frame_strategy == 1) {
1729  for (int i = 1; i < m->max_b_frames + 1; i++) {
1730  if (m->input_picture[i] &&
1731  m->input_picture[i]->b_frame_score == 0) {
1734  m->input_picture[i ]->f->data[0],
1735  m->input_picture[i - 1]->f->data[0],
1736  s->c.linesize) + 1;
1737  }
1738  }
1739  for (int i = 0;; i++) {
1740  if (i >= m->max_b_frames + 1 ||
1741  !m->input_picture[i] ||
1742  m->input_picture[i]->b_frame_score - 1 >
1743  s->c.mb_num / m->b_sensitivity) {
1744  b_frames = FFMAX(0, i - 1);
1745  break;
1746  }
1747  }
1748 
1749  /* reset scores */
1750  for (int i = 0; i < b_frames + 1; i++)
1751  m->input_picture[i]->b_frame_score = 0;
1752  } else if (m->b_frame_strategy == 2) {
1753  b_frames = estimate_best_b_count(m);
1754  if (b_frames < 0) {
1756  return b_frames;
1757  }
1758  }
1759 
1760  emms_c();
1761 
1762  for (int i = b_frames - 1; i >= 0; i--) {
1763  int type = m->input_picture[i]->f->pict_type;
1764  if (type && type != AV_PICTURE_TYPE_B)
1765  b_frames = i;
1766  }
1767  if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1768  b_frames == m->max_b_frames) {
1769  av_log(s->c.avctx, AV_LOG_ERROR,
1770  "warning, too many B-frames in a row\n");
1771  }
1772 
1773  if (m->picture_in_gop_number + b_frames >= m->gop_size) {
1774  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1775  m->gop_size > m->picture_in_gop_number) {
1776  b_frames = m->gop_size - m->picture_in_gop_number - 1;
1777  } else {
1778  if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1779  b_frames = 0;
1780  m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1781  }
1782  }
1783 
1784  if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1785  m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1786  b_frames--;
1787 
1788  m->reordered_input_picture[0] = m->input_picture[b_frames];
1789  m->input_picture[b_frames] = NULL;
1793  m->coded_picture_number++;
1794  for (int i = 0; i < b_frames; i++) {
1795  m->reordered_input_picture[i + 1] = m->input_picture[i];
1796  m->input_picture[i] = NULL;
1797  m->reordered_input_picture[i + 1]->f->pict_type =
1800  m->coded_picture_number++;
1801  }
1802  }
1803 
1804  return 0;
1805 }
1806 
1808 {
1809  MPVEncContext *const s = &m->s;
1810  int ret;
1811 
1813 
1814  for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)
1817 
1819  av_assert1(!m->input_picture[0]);
1820  if (ret < 0)
1821  return ret;
1822 
1823  av_frame_unref(s->new_pic);
1824 
1825  if (m->reordered_input_picture[0]) {
1828 
1829  if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
1830  // input is a shared pix, so we can't modify it -> allocate a new
1831  // one & ensure that the shared one is reuseable
1832  av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);
1833 
1834  ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
1835  if (ret < 0)
1836  goto fail;
1837  } else {
1838  // input is not a shared pix -> reuse buffer for current_pix
1839  ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
1840  if (ret < 0)
1841  goto fail;
1842  for (int i = 0; i < MPV_MAX_PLANES; i++)
1843  s->new_pic->data[i] += INPLACE_OFFSET;
1844  }
1845  s->c.cur_pic.ptr = m->reordered_input_picture[0];
1846  m->reordered_input_picture[0] = NULL;
1847  av_assert1(s->c.mb_width == s->c.buffer_pools.alloc_mb_width);
1848  av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
1849  av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
1850  ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
1851  &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
1852  if (ret < 0) {
1853  ff_mpv_unref_picture(&s->c.cur_pic);
1854  return ret;
1855  }
1856  s->picture_number = s->c.cur_pic.ptr->display_picture_number;
1857 
1858  }
1859  return 0;
1860 fail:
1862  return ret;
1863 }
1864 
1865 static void frame_end(MPVMainEncContext *const m)
1866 {
1867  MPVEncContext *const s = &m->s;
1868 
1869  if (s->me.unrestricted_mv &&
1870  s->c.cur_pic.reference &&
1871  !m->intra_only) {
1872  int hshift = s->c.chroma_x_shift;
1873  int vshift = s->c.chroma_y_shift;
1874  s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
1875  s->c.cur_pic.linesize[0],
1876  s->c.h_edge_pos, s->c.v_edge_pos,
1878  EDGE_TOP | EDGE_BOTTOM);
1879  s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
1880  s->c.cur_pic.linesize[1],
1881  s->c.h_edge_pos >> hshift,
1882  s->c.v_edge_pos >> vshift,
1883  EDGE_WIDTH >> hshift,
1884  EDGE_WIDTH >> vshift,
1885  EDGE_TOP | EDGE_BOTTOM);
1886  s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
1887  s->c.cur_pic.linesize[2],
1888  s->c.h_edge_pos >> hshift,
1889  s->c.v_edge_pos >> vshift,
1890  EDGE_WIDTH >> hshift,
1891  EDGE_WIDTH >> vshift,
1892  EDGE_TOP | EDGE_BOTTOM);
1893  }
1894 
1895  emms_c();
1896 
1897  m->last_pict_type = s->c.pict_type;
1898  m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
1899  if (s->c.pict_type != AV_PICTURE_TYPE_B)
1900  m->last_non_b_pict_type = s->c.pict_type;
1901 }
1902 
1904 {
1905  MPVEncContext *const s = &m->s;
1906  int intra, i;
1907 
1908  for (intra = 0; intra < 2; intra++) {
1909  if (s->dct_count[intra] > (1 << 16)) {
1910  for (i = 0; i < 64; i++) {
1911  s->dct_error_sum[intra][i] >>= 1;
1912  }
1913  s->dct_count[intra] >>= 1;
1914  }
1915 
1916  for (i = 0; i < 64; i++) {
1917  s->dct_offset[intra][i] = (m->noise_reduction *
1918  s->dct_count[intra] +
1919  s->dct_error_sum[intra][i] / 2) /
1920  (s->dct_error_sum[intra][i] + 1);
1921  }
1922  }
1923 }
1924 
1925 static void frame_start(MPVMainEncContext *const m)
1926 {
1927  MPVEncContext *const s = &m->s;
1928 
1929  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
1930 
1931  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
1932  ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
1933  ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
1934  }
1935 
1936  av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
1937  if (s->dct_error_sum) {
1939  }
1940 }
1941 
1943  const AVFrame *pic_arg, int *got_packet)
1944 {
1945  MPVMainEncContext *const m = avctx->priv_data;
1946  MPVEncContext *const s = &m->s;
1947  int stuffing_count, ret;
1948  int context_count = s->c.slice_context_count;
1949 
1950  ff_mpv_unref_picture(&s->c.cur_pic);
1951 
1952  m->vbv_ignore_qmax = 0;
1953 
1954  m->picture_in_gop_number++;
1955 
1956  ret = load_input_picture(m, pic_arg);
1957  if (ret < 0)
1958  return ret;
1959 
1961  if (ret < 0)
1962  return ret;
1963 
1964  /* output? */
1965  if (s->new_pic->data[0]) {
1966  int growing_buffer = context_count == 1 && !s->data_partitioning;
1967  size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
1968  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1969  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1970  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1971  if (ret < 0)
1972  return ret;
1973  }
1974  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1975  return ret;
1977  if (s->mb_info) {
1978  s->mb_info_ptr = av_packet_new_side_data(pkt,
1980  s->c.mb_width*s->c.mb_height*12);
1981  if (!s->mb_info_ptr)
1982  return AVERROR(ENOMEM);
1983  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1984  }
1985 
1986  s->c.pict_type = s->new_pic->pict_type;
1987  //emms_c();
1988  frame_start(m);
1989 vbv_retry:
1990  ret = encode_picture(m, pkt);
1991  if (growing_buffer) {
1992  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1993  pkt->data = s->pb.buf;
1995  }
1996  if (ret < 0)
1997  return -1;
1998 
1999  frame_end(m);
2000 
2001  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)
2003 
2004  if (avctx->rc_buffer_size) {
2005  RateControlContext *rcc = &m->rc_context;
2006  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
2007  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
2008  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
2009 
2010  if (put_bits_count(&s->pb) > max_size &&
2011  s->lambda < m->lmax) {
2012  m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
2013  (s->c.qscale + 1) / s->c.qscale);
2014  if (s->adaptive_quant) {
2015  for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
2016  s->lambda_table[i] =
2017  FFMAX(s->lambda_table[i] + min_step,
2018  s->lambda_table[i] * (s->c.qscale + 1) /
2019  s->c.qscale);
2020  }
2021  s->c.mb_skipped = 0; // done in frame_start()
2022  // done in encode_picture() so we must undo it
2023  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
2024  s->c.no_rounding ^= s->flipflop_rounding;
2025  }
2026  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
2027  s->c.time_base = s->c.last_time_base;
2028  s->c.last_non_b_time = s->c.time - s->c.pp_time;
2029  }
2030  m->vbv_ignore_qmax = 1;
2031  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2032  goto vbv_retry;
2033  }
2034 
2036  }
2037 
2040 
2041  for (int i = 0; i < MPV_MAX_PLANES; i++)
2042  avctx->error[i] += s->encoding_error[i];
2043  ff_side_data_set_encoder_stats(pkt, s->c.cur_pic.ptr->f->quality,
2044  s->encoding_error,
2046  s->c.pict_type);
2047 
2049  assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
2050  s->misc_bits + s->i_tex_bits +
2051  s->p_tex_bits);
2052  flush_put_bits(&s->pb);
2053  m->frame_bits = put_bits_count(&s->pb);
2054 
2055  stuffing_count = ff_vbv_update(m, m->frame_bits);
2056  m->stuffing_bits = 8*stuffing_count;
2057  if (stuffing_count) {
2058  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
2059  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2060  return -1;
2061  }
2062 
2063  switch (s->c.codec_id) {
2066  while (stuffing_count--) {
2067  put_bits(&s->pb, 8, 0);
2068  }
2069  break;
2070  case AV_CODEC_ID_MPEG4:
2071  put_bits(&s->pb, 16, 0);
2072  put_bits(&s->pb, 16, 0x1C3);
2073  stuffing_count -= 4;
2074  while (stuffing_count--) {
2075  put_bits(&s->pb, 8, 0xFF);
2076  }
2077  break;
2078  default:
2079  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2080  m->stuffing_bits = 0;
2081  }
2082  flush_put_bits(&s->pb);
2083  m->frame_bits = put_bits_count(&s->pb);
2084  }
2085 
2086  /* update MPEG-1/2 vbv_delay for CBR */
2087  if (avctx->rc_max_rate &&
2089  s->c.out_format == FMT_MPEG1 &&
2090  90000LL * (avctx->rc_buffer_size - 1) <=
2091  avctx->rc_max_rate * 0xFFFFLL) {
2092  AVCPBProperties *props;
2093  size_t props_size;
2094 
2095  int vbv_delay, min_delay;
2096  double inbits = avctx->rc_max_rate *
2098  int minbits = m->frame_bits - 8 *
2099  (m->vbv_delay_pos - 1);
2100  double bits = m->rc_context.buffer_index + minbits - inbits;
2101  uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;
2102 
2103  if (bits < 0)
2105  "Internal error, negative bits\n");
2106 
2107  av_assert1(s->c.repeat_first_field == 0);
2108 
2109  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2110  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2111  avctx->rc_max_rate;
2112 
2113  vbv_delay = FFMAX(vbv_delay, min_delay);
2114 
2115  av_assert0(vbv_delay < 0xFFFF);
2116 
2117  vbv_delay_ptr[0] &= 0xF8;
2118  vbv_delay_ptr[0] |= vbv_delay >> 13;
2119  vbv_delay_ptr[1] = vbv_delay >> 5;
2120  vbv_delay_ptr[2] &= 0x07;
2121  vbv_delay_ptr[2] |= vbv_delay << 3;
2122 
2123  props = av_cpb_properties_alloc(&props_size);
2124  if (!props)
2125  return AVERROR(ENOMEM);
2126  props->vbv_delay = vbv_delay * 300;
2127 
2129  (uint8_t*)props, props_size);
2130  if (ret < 0) {
2131  av_freep(&props);
2132  return ret;
2133  }
2134  }
2135  m->total_bits += m->frame_bits;
2136 
2137  pkt->pts = s->c.cur_pic.ptr->f->pts;
2138  pkt->duration = s->c.cur_pic.ptr->f->duration;
2139  if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
2140  if (!s->c.cur_pic.ptr->coded_picture_number)
2141  pkt->dts = pkt->pts - m->dts_delta;
2142  else
2143  pkt->dts = m->reordered_pts;
2144  m->reordered_pts = pkt->pts;
2145  } else
2146  pkt->dts = pkt->pts;
2147 
2148  // the no-delay case is handled in generic code
2150  ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
2151  if (ret < 0)
2152  return ret;
2153  }
2154 
2155  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
2157  if (s->mb_info)
2159  } else {
2160  m->frame_bits = 0;
2161  }
2162 
2163  ff_mpv_unref_picture(&s->c.cur_pic);
2164 
2165  av_assert1((m->frame_bits & 7) == 0);
2166 
2167  pkt->size = m->frame_bits / 8;
2168  *got_packet = !!pkt->size;
2169  return 0;
2170 }
2171 
                                        int n, int threshold)
{
    /* Weight of a |level|==1 coefficient, indexed by the length of the
     * zero-run preceding it: coefficients early in the scan (short runs)
     * count more towards keeping the block. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->c.block_last_index[n];
    int skip_dc;

    /* a negative threshold means the DC coefficient may be zeroed as well */
    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    /* Score the block: |level|==1 coefficients add tab[run]; any
     * coefficient with |level| > 1 means the block is kept unchanged. */
    for (i = 0; i <= last_index; i++) {
        const int j = s->c.intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* score below threshold: eliminate all (possibly excluding DC)
     * coefficients and fix up block_last_index */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->c.intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->c.block_last_index[n] = 0;
    else
        s->c.block_last_index[n] = -1;
}
2227 
2228 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2229  int last_index)
2230 {
2231  int i;
2232  const int maxlevel = s->max_qcoeff;
2233  const int minlevel = s->min_qcoeff;
2234  int overflow = 0;
2235 
2236  if (s->c.mb_intra) {
2237  i = 1; // skip clipping of intra dc
2238  } else
2239  i = 0;
2240 
2241  for (; i <= last_index; i++) {
2242  const int j = s->c.intra_scantable.permutated[i];
2243  int level = block[j];
2244 
2245  if (level > maxlevel) {
2246  level = maxlevel;
2247  overflow++;
2248  } else if (level < minlevel) {
2249  level = minlevel;
2250  overflow++;
2251  }
2252 
2253  block[j] = level;
2254  }
2255 
2256  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2257  av_log(s->c.avctx, AV_LOG_INFO,
2258  "warning, clipping %d dct coefficients to %d..%d\n",
2259  overflow, minlevel, maxlevel);
2260 }
2261 
/**
 * Compute a perceptual weight for each of the 64 pixels of an 8x8 block,
 * derived from the pixel statistics of its 3x3 neighbourhood (clipped at
 * the block borders). Used by the noise-shaping quantizer.
 */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            const int y_lo = FFMAX(y - 1, 0), y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0), x_hi = FFMIN(8, x + 2);
            int sum = 0, sqr = 0, count = 0;

            for (int y2 = y_lo; y2 < y_hi; y2++) {
                for (int x2 = x_lo; x2 < x_hi; x2++) {
                    const int v = ptr[x2 + y2 * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* scaled local deviation measure */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2285 
2287  int motion_x, int motion_y,
2288  int mb_block_height,
2289  int mb_block_width,
2290  int mb_block_count,
2291  int chroma_x_shift,
2292  int chroma_y_shift,
2293  int chroma_format)
2294 {
2295 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2296  * and neither of these encoders currently supports 444. */
2297 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2298  (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2299  int16_t weight[12][64];
2300  int16_t orig[12][64];
2301  const int mb_x = s->c.mb_x;
2302  const int mb_y = s->c.mb_y;
2303  int i;
2304  int skip_dct[12];
2305  int dct_offset = s->c.linesize * 8; // default for progressive frames
2306  int uv_dct_offset = s->c.uvlinesize * 8;
2307  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2308  ptrdiff_t wrap_y, wrap_c;
2309 
2310  for (i = 0; i < mb_block_count; i++)
2311  skip_dct[i] = s->skipdct;
2312 
2313  if (s->adaptive_quant) {
2314  const int last_qp = s->c.qscale;
2315  const int mb_xy = mb_x + mb_y * s->c.mb_stride;
2316 
2317  s->lambda = s->lambda_table[mb_xy];
2318  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2320 
2321  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2322  s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;
2323 
2324  if (s->c.out_format == FMT_H263) {
2325  s->dquant = av_clip(s->dquant, -2, 2);
2326 
2327  if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
2328  if (!s->c.mb_intra) {
2329  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
2330  if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
2331  s->dquant = 0;
2332  }
2333  if (s->c.mv_type == MV_TYPE_8X8)
2334  s->dquant = 0;
2335  }
2336  }
2337  }
2338  }
2339  ff_set_qscale(&s->c, last_qp + s->dquant);
2340  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2341  ff_set_qscale(&s->c, s->c.qscale + s->dquant);
2342 
2343  wrap_y = s->c.linesize;
2344  wrap_c = s->c.uvlinesize;
2345  ptr_y = s->new_pic->data[0] +
2346  (mb_y * 16 * wrap_y) + mb_x * 16;
2347  ptr_cb = s->new_pic->data[1] +
2348  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2349  ptr_cr = s->new_pic->data[2] +
2350  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2351 
2352  if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
2353  s->c.codec_id != AV_CODEC_ID_AMV) {
2354  uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
2355  int cw = (s->c.width + chroma_x_shift) >> chroma_x_shift;
2356  int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
2357  s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
2358  wrap_y, wrap_y,
2359  16, 16, mb_x * 16, mb_y * 16,
2360  s->c.width, s->c.height);
2361  ptr_y = ebuf;
2362  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2363  wrap_c, wrap_c,
2364  mb_block_width, mb_block_height,
2365  mb_x * mb_block_width, mb_y * mb_block_height,
2366  cw, ch);
2367  ptr_cb = ebuf + 16 * wrap_y;
2368  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2369  wrap_c, wrap_c,
2370  mb_block_width, mb_block_height,
2371  mb_x * mb_block_width, mb_y * mb_block_height,
2372  cw, ch);
2373  ptr_cr = ebuf + 16 * wrap_y + 16;
2374  }
2375 
2376  if (s->c.mb_intra) {
2377  if (INTERLACED_DCT(s)) {
2378  int progressive_score, interlaced_score;
2379 
2380  s->c.interlaced_dct = 0;
2381  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2382  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2383  NULL, wrap_y, 8) - 400;
2384 
2385  if (progressive_score > 0) {
2386  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2387  NULL, wrap_y * 2, 8) +
2388  s->ildct_cmp[1](s, ptr_y + wrap_y,
2389  NULL, wrap_y * 2, 8);
2390  if (progressive_score > interlaced_score) {
2391  s->c.interlaced_dct = 1;
2392 
2393  dct_offset = wrap_y;
2394  uv_dct_offset = wrap_c;
2395  wrap_y <<= 1;
2396  if (chroma_format == CHROMA_422 ||
2398  wrap_c <<= 1;
2399  }
2400  }
2401  }
2402 
2403  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2404  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2405  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2406  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2407 
2408  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2409  skip_dct[4] = 1;
2410  skip_dct[5] = 1;
2411  } else {
2412  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2413  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2414  if (chroma_format == CHROMA_422) {
2415  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2416  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2417  } else if (chroma_format == CHROMA_444) {
2418  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2419  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2420  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2421  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2422  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2423  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2424  }
2425  }
2426  } else {
2427  op_pixels_func (*op_pix)[4];
2428  qpel_mc_func (*op_qpix)[16];
2429  uint8_t *dest_y, *dest_cb, *dest_cr;
2430 
2431  dest_y = s->c.dest[0];
2432  dest_cb = s->c.dest[1];
2433  dest_cr = s->c.dest[2];
2434 
2435  if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
2436  op_pix = s->c.hdsp.put_pixels_tab;
2437  op_qpix = s->c.qdsp.put_qpel_pixels_tab;
2438  } else {
2439  op_pix = s->c.hdsp.put_no_rnd_pixels_tab;
2440  op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
2441  }
2442 
2443  if (s->c.mv_dir & MV_DIR_FORWARD) {
2444  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
2445  s->c.last_pic.data,
2446  op_pix, op_qpix);
2447  op_pix = s->c.hdsp.avg_pixels_tab;
2448  op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
2449  }
2450  if (s->c.mv_dir & MV_DIR_BACKWARD) {
2451  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
2452  s->c.next_pic.data,
2453  op_pix, op_qpix);
2454  }
2455 
2456  if (INTERLACED_DCT(s)) {
2457  int progressive_score, interlaced_score;
2458 
2459  s->c.interlaced_dct = 0;
2460  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2461  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2462  ptr_y + wrap_y * 8,
2463  wrap_y, 8) - 400;
2464 
2465  if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
2466  progressive_score -= 400;
2467 
2468  if (progressive_score > 0) {
2469  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2470  wrap_y * 2, 8) +
2471  s->ildct_cmp[0](s, dest_y + wrap_y,
2472  ptr_y + wrap_y,
2473  wrap_y * 2, 8);
2474 
2475  if (progressive_score > interlaced_score) {
2476  s->c.interlaced_dct = 1;
2477 
2478  dct_offset = wrap_y;
2479  uv_dct_offset = wrap_c;
2480  wrap_y <<= 1;
2481  if (chroma_format == CHROMA_422)
2482  wrap_c <<= 1;
2483  }
2484  }
2485  }
2486 
2487  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2488  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2489  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2490  dest_y + dct_offset, wrap_y);
2491  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2492  dest_y + dct_offset + 8, wrap_y);
2493 
2494  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2495  skip_dct[4] = 1;
2496  skip_dct[5] = 1;
2497  } else {
2498  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2499  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2500  if (!chroma_y_shift) { /* 422 */
2501  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2502  dest_cb + uv_dct_offset, wrap_c);
2503  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2504  dest_cr + uv_dct_offset, wrap_c);
2505  }
2506  }
2507  /* pre quantization */
2508  if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
2509  // FIXME optimize
2510  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
2511  skip_dct[0] = 1;
2512  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
2513  skip_dct[1] = 1;
2514  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2515  wrap_y, 8) < 20 * s->c.qscale)
2516  skip_dct[2] = 1;
2517  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2518  wrap_y, 8) < 20 * s->c.qscale)
2519  skip_dct[3] = 1;
2520  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
2521  skip_dct[4] = 1;
2522  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
2523  skip_dct[5] = 1;
2524  if (!chroma_y_shift) { /* 422 */
2525  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2526  dest_cb + uv_dct_offset,
2527  wrap_c, 8) < 20 * s->c.qscale)
2528  skip_dct[6] = 1;
2529  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2530  dest_cr + uv_dct_offset,
2531  wrap_c, 8) < 20 * s->c.qscale)
2532  skip_dct[7] = 1;
2533  }
2534  }
2535  }
2536 
2537  if (s->quantizer_noise_shaping) {
2538  if (!skip_dct[0])
2539  get_visual_weight(weight[0], ptr_y , wrap_y);
2540  if (!skip_dct[1])
2541  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2542  if (!skip_dct[2])
2543  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2544  if (!skip_dct[3])
2545  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2546  if (!skip_dct[4])
2547  get_visual_weight(weight[4], ptr_cb , wrap_c);
2548  if (!skip_dct[5])
2549  get_visual_weight(weight[5], ptr_cr , wrap_c);
2550  if (!chroma_y_shift) { /* 422 */
2551  if (!skip_dct[6])
2552  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2553  wrap_c);
2554  if (!skip_dct[7])
2555  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2556  wrap_c);
2557  }
2558  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2559  }
2560 
2561  /* DCT & quantize */
2562  av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
2563  {
2564  for (i = 0; i < mb_block_count; i++) {
2565  if (!skip_dct[i]) {
2566  int overflow;
2567  s->c.block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->c.qscale, &overflow);
2568  // FIXME we could decide to change to quantizer instead of
2569  // clipping
2570  // JS: I don't think that would be a good idea it could lower
2571  // quality instead of improve it. Just INTRADC clipping
2572  // deserves changes in quantizer
2573  if (overflow)
2574  clip_coeffs(s, s->block[i], s->c.block_last_index[i]);
2575  } else
2576  s->c.block_last_index[i] = -1;
2577  }
2578  if (s->quantizer_noise_shaping) {
2579  for (i = 0; i < mb_block_count; i++) {
2580  if (!skip_dct[i]) {
2581  s->c.block_last_index[i] =
2582  dct_quantize_refine(s, s->block[i], weight[i],
2583  orig[i], i, s->c.qscale);
2584  }
2585  }
2586  }
2587 
2588  if (s->luma_elim_threshold && !s->c.mb_intra)
2589  for (i = 0; i < 4; i++)
2590  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2591  if (s->chroma_elim_threshold && !s->c.mb_intra)
2592  for (i = 4; i < mb_block_count; i++)
2593  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2594 
2595  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2596  for (i = 0; i < mb_block_count; i++) {
2597  if (s->c.block_last_index[i] == -1)
2598  s->coded_score[i] = INT_MAX / 256;
2599  }
2600  }
2601  }
2602 
2603  if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
2604  s->c.block_last_index[4] =
2605  s->c.block_last_index[5] = 0;
2606  s->block[4][0] =
2607  s->block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
2608  if (!chroma_y_shift) { /* 422 / 444 */
2609  for (i=6; i<12; i++) {
2610  s->c.block_last_index[i] = 0;
2611  s->block[i][0] = s->block[4][0];
2612  }
2613  }
2614  }
2615 
2616  // non c quantize code returns incorrect block_last_index FIXME
2617  if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
2618  for (i = 0; i < mb_block_count; i++) {
2619  int j;
2620  if (s->c.block_last_index[i] > 0) {
2621  for (j = 63; j > 0; j--) {
2622  if (s->block[i][s->c.intra_scantable.permutated[j]])
2623  break;
2624  }
2625  s->c.block_last_index[i] = j;
2626  }
2627  }
2628  }
2629 
2630  s->encode_mb(s, s->block, motion_x, motion_y);
2631 }
2632 
2633 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2634 {
2635  if (s->c.chroma_format == CHROMA_420)
2636  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2637  else if (s->c.chroma_format == CHROMA_422)
2638  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2639  else
2640  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2641 }
2642 
2643 typedef struct MBBackup {
2644  struct {
2645  int mv[2][4][2];
2646  int last_mv[2][2][2];
2648  int last_dc[3];
2650  int qscale;
2653  } c;
2656  int dquant;
2658  int16_t (*block)[64];
2660 } MBBackup;
2661 
/* Generate the save/restore helpers for the RD macroblock mode search:
 * BEFORE##_context_before_encode() copies the state that must be reset
 * before re-encoding a macroblock, AFTER##_context_after_encode() copies
 * the full result state (including bit writers and block data pointers).
 * Expanded twice below: MPVEncContext -> MBBackup (backup/save) and
 * MBBackup -> MPVEncContext (reset/store). */
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
                                                   const SRC_TYPE *const s) \
{ \
    /* FIXME is memcpy faster than a loop? */ \
    memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
 \
    /* MPEG-1 */ \
    d->mb_skip_run = s->mb_skip_run; \
    for (int i = 0; i < 3; i++) \
        d->c.last_dc[i] = s->c.last_dc[i]; \
 \
    /* statistics */ \
    d->mv_bits    = s->mv_bits; \
    d->i_tex_bits = s->i_tex_bits; \
    d->p_tex_bits = s->p_tex_bits; \
    d->i_count    = s->i_count; \
    d->misc_bits  = s->misc_bits; \
    d->last_bits  = 0; \
 \
    d->c.mb_skipped = 0; \
    d->c.qscale = s->c.qscale; \
    d->dquant   = s->dquant; \
 \
    d->esc3_level_length = s->esc3_level_length; \
} \
 \
static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
                                                  const SRC_TYPE *const s, \
                                                  int data_partitioning) \
{ \
    /* FIXME is memcpy faster than a loop? */ \
    memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
    memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
 \
    /* MPEG-1 */ \
    d->mb_skip_run = s->mb_skip_run; \
    for (int i = 0; i < 3; i++) \
        d->c.last_dc[i] = s->c.last_dc[i]; \
 \
    /* statistics */ \
    d->mv_bits    = s->mv_bits; \
    d->i_tex_bits = s->i_tex_bits; \
    d->p_tex_bits = s->p_tex_bits; \
    d->i_count    = s->i_count; \
    d->misc_bits  = s->misc_bits; \
 \
    d->c.mb_intra   = s->c.mb_intra; \
    d->c.mb_skipped = s->c.mb_skipped; \
    d->c.mv_type    = s->c.mv_type; \
    d->c.mv_dir     = s->c.mv_dir; \
    d->pb           = s->pb; \
    if (data_partitioning) { \
        d->pb2    = s->pb2; \
        d->tex_pb = s->tex_pb; \
    } \
    d->block = s->block; \
    for (int i = 0; i < 8; i++) \
        d->c.block_last_index[i] = s->c.block_last_index[i]; \
    d->c.interlaced_dct = s->c.interlaced_dct; \
    d->c.qscale = s->c.qscale; \
 \
    d->esc3_level_length = s->esc3_level_length; \
}

COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2729 
/* Trial-encode the current macroblock with the coding mode already set up
 * in *s (mv_dir/mv_type/mb_intra/mv): restore the pre-macroblock state
 * from *backup, encode into one of the two double-buffered bit writers,
 * score the result by bits (or bits*lambda2 + SSE for full RD), and save
 * the encoder state into *best if the score beats *dmin. */
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
                         PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                         int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    reset_context_before_encode(s, backup);

    /* *next_block selects the scratch buffers not holding the current best */
    s->block = s->blocks[*next_block];
    s->pb = pb[*next_block];
    if (s->data_partitioning) {
        s->pb2   = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* reconstruct into the scratchpad so the best candidate's
         * reconstruction (in the real dest) is preserved */
        memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
        s->c.dest[0] = s->c.sc.rd_scratchpad;
        s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
        s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
        av_assert0(s->c.linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    /* rate in bits, summed over all partitions */
    score= put_bits_count(&s->pb);
    if (s->data_partitioning) {
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
        /* full rate-distortion: add the reconstruction error */
        mpv_reconstruct_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        save_context_after_encode(best, s, s->data_partitioning);
    }
}
2780 
2781 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2782 {
2783  const uint32_t *sq = ff_square_tab + 256;
2784  int acc=0;
2785  int x,y;
2786 
2787  if(w==16 && h==16)
2788  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2789  else if(w==8 && h==8)
2790  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2791 
2792  for(y=0; y<h; y++){
2793  for(x=0; x<w; x++){
2794  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2795  }
2796  }
2797 
2798  av_assert2(acc>=0);
2799 
2800  return acc;
2801 }
2802 
2803 static int sse_mb(MPVEncContext *const s)
2804 {
2805  int w= 16;
2806  int h= 16;
2807  int chroma_mb_w = w >> s->c.chroma_x_shift;
2808  int chroma_mb_h = h >> s->c.chroma_y_shift;
2809 
2810  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2811  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2812 
2813  if(w==16 && h==16)
2814  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2815  s->c.dest[0], s->c.linesize, 16) +
2816  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2817  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2818  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2819  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2820  else
2821  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2822  s->c.dest[0], w, h, s->c.linesize) +
2823  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2824  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2825  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2826  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2827 }
2828 
2830  MPVEncContext *const s = *(void**)arg;
2831 
2832 
2833  s->me.pre_pass = 1;
2834  s->me.dia_size = s->c.avctx->pre_dia_size;
2835  s->c.first_slice_line = 1;
2836  for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
2837  for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
2838  ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2839  s->c.first_slice_line = 0;
2840  }
2841 
2842  s->me.pre_pass = 0;
2843 
2844  return 0;
2845 }
2846 
2848  MPVEncContext *const s = *(void**)arg;
2849 
2850  s->me.dia_size = s->c.avctx->dia_size;
2851  s->c.first_slice_line = 1;
2852  for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
2853  s->c.mb_x = 0; //for block init below
2854  ff_init_block_index(&s->c);
2855  for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
2856  s->c.block_index[0] += 2;
2857  s->c.block_index[1] += 2;
2858  s->c.block_index[2] += 2;
2859  s->c.block_index[3] += 2;
2860 
2861  /* compute motion vector & mb_type and store in context */
2862  if (s->c.pict_type == AV_PICTURE_TYPE_B)
2863  ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
2864  else
2865  ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2866  }
2867  s->c.first_slice_line = 0;
2868  }
2869  return 0;
2870 }
2871 
2872 static int mb_var_thread(AVCodecContext *c, void *arg){
2873  MPVEncContext *const s = *(void**)arg;
2874 
2875  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2876  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2877  int xx = mb_x * 16;
2878  int yy = mb_y * 16;
2879  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2880  int varc;
2881  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2882 
2883  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2884  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2885 
2886  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2887  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2888  s->me.mb_var_sum_temp += varc;
2889  }
2890  }
2891  return 0;
2892 }
2893 
2894 static void write_slice_end(MPVEncContext *const s)
2895 {
2896  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
2897  if (s->partitioned_frame)
2899 
2900  ff_mpeg4_stuffing(&s->pb);
2901  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2902  s->c.out_format == FMT_MJPEG) {
2904  } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
2906  }
2907 
2908  flush_put_bits(&s->pb);
2909 
2910  if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2911  s->misc_bits+= get_bits_diff(s);
2912 }
2913 
2914 static void write_mb_info(MPVEncContext *const s)
2915 {
2916  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2917  int offset = put_bits_count(&s->pb);
2918  int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->gob_index);
2919  int gobn = s->c.mb_y / s->gob_index;
2920  int pred_x, pred_y;
2921  if (CONFIG_H263_ENCODER)
2922  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
2923  bytestream_put_le32(&ptr, offset);
2924  bytestream_put_byte(&ptr, s->c.qscale);
2925  bytestream_put_byte(&ptr, gobn);
2926  bytestream_put_le16(&ptr, mba);
2927  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2928  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2929  /* 4MV not implemented */
2930  bytestream_put_byte(&ptr, 0); /* hmv2 */
2931  bytestream_put_byte(&ptr, 0); /* vmv2 */
2932 }
2933 
/* Maintain the macroblock-info side data: open a new 12-byte record
 * whenever roughly s->mb_info bytes of bitstream have been written since
 * the previous record, and fill the current record via write_mb_info().
 * With startcode set, only the record bookkeeping is updated (called
 * right before a start code is written). */
static void update_mb_info(MPVEncContext *const s, int startcode)
{
    if (!s->mb_info)
        return;
    if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bytes_count(&s->pb, 0);
        /* This might have incremented mb_info_size above, and we return without
         * actually writing any info into that slot yet. But in that case,
         * this will be called again after the start code has been written,
         * and the mb info will actually be written then. */
        return;
    }

    s->last_mb_info = put_bytes_count(&s->pb, 0);
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
2956 
2957 int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
2958 {
2959  if (put_bytes_left(&s->pb, 0) < threshold
2960  && s->c.slice_context_count == 1
2961  && s->pb.buf == s->c.avctx->internal->byte_buffer) {
2962  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2963 
2964  uint8_t *new_buffer = NULL;
2965  int new_buffer_size = 0;
2966 
2967  if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2968  av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2969  return AVERROR(ENOMEM);
2970  }
2971 
2972  emms_c();
2973 
2974  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2975  s->c.avctx->internal->byte_buffer_size + size_increase);
2976  if (!new_buffer)
2977  return AVERROR(ENOMEM);
2978 
2979  memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
2980  av_free(s->c.avctx->internal->byte_buffer);
2981  s->c.avctx->internal->byte_buffer = new_buffer;
2982  s->c.avctx->internal->byte_buffer_size = new_buffer_size;
2983  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2984  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2985  }
2986  if (put_bytes_left(&s->pb, 0) < threshold)
2987  return AVERROR(EINVAL);
2988  return 0;
2989 }
2990 
2991 static int encode_thread(AVCodecContext *c, void *arg){
2992  MPVEncContext *const s = *(void**)arg;
2993  int chr_h = 16 >> s->c.chroma_y_shift;
2994  int i;
2995  MBBackup best_s = { 0 }, backup_s;
2996  uint8_t bit_buf[2][MAX_MB_BYTES];
2997  // + 2 because ff_copy_bits() overreads
2998  uint8_t bit_buf2[2][MAX_PB2_MB_SIZE + 2];
2999  uint8_t bit_buf_tex[2][MAX_AC_TEX_MB_SIZE + 2];
3000  PutBitContext pb[2], pb2[2], tex_pb[2];
3001 
3002  for(i=0; i<2; i++){
3003  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
3004  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_PB2_MB_SIZE);
3005  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_AC_TEX_MB_SIZE);
3006  }
3007 
3008  s->last_bits= put_bits_count(&s->pb);
3009  s->mv_bits=0;
3010  s->misc_bits=0;
3011  s->i_tex_bits=0;
3012  s->p_tex_bits=0;
3013  s->i_count=0;
3014 
3015  for(i=0; i<3; i++){
3016  /* init last dc values */
3017  /* note: quant matrix value (8) is implied here */
3018  s->c.last_dc[i] = 128 << s->c.intra_dc_precision;
3019 
3020  s->encoding_error[i] = 0;
3021  }
3022  if (s->c.codec_id == AV_CODEC_ID_AMV) {
3023  s->c.last_dc[0] = 128 * 8 / 13;
3024  s->c.last_dc[1] = 128 * 8 / 14;
3025  s->c.last_dc[2] = 128 * 8 / 14;
3026 #if CONFIG_MPEG4_ENCODER
3027  } else if (s->partitioned_frame) {
3028  av_assert1(s->c.codec_id == AV_CODEC_ID_MPEG4);
3030 #endif
3031  }
3032  s->mb_skip_run = 0;
3033  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
3034 
3035  s->last_mv_dir = 0;
3036 
3037  s->c.resync_mb_x = 0;
3038  s->c.resync_mb_y = 0;
3039  s->c.first_slice_line = 1;
3040  s->ptr_lastgob = s->pb.buf;
3041  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
3042  int mb_y;
3043  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
3044  int first_in_slice;
3045  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
3046  if (first_in_slice && mb_y_order != s->c.start_mb_y)
3048  s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 1024 << s->c.intra_dc_precision;
3049  } else {
3050  mb_y = mb_y_order;
3051  }
3052  s->c.mb_x = 0;
3053  s->c.mb_y = mb_y;
3054 
3055  ff_set_qscale(&s->c, s->c.qscale);
3056  ff_init_block_index(&s->c);
3057 
3058  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3059  int mb_type, xy;
3060 // int d;
3061  int dmin= INT_MAX;
3062  int dir;
3063  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3064  + s->c.mb_width*MAX_MB_BYTES;
3065 
3067  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3068  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3069  return -1;
3070  }
3071  if (s->data_partitioning) {
3072  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3073  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3074  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3075  return -1;
3076  }
3077  }
3078 
3079  s->c.mb_x = mb_x;
3080  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3081  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3082 
3083  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3085  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3086  mb_type = s->mb_type[xy];
3087 
3088  /* write gob / video packet header */
3089  if(s->rtp_mode){
3090  int current_packet_size, is_gob_start;
3091 
3092  current_packet_size = put_bytes_count(&s->pb, 1)
3093  - (s->ptr_lastgob - s->pb.buf);
3094 
3095  is_gob_start = s->rtp_payload_size &&
3096  current_packet_size >= s->rtp_payload_size &&
3097  mb_y + mb_x > 0;
3098 
3099  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3100 
3101  switch (s->c.codec_id) {
3102  case AV_CODEC_ID_H263:
3103  case AV_CODEC_ID_H263P:
3104  if (!s->h263_slice_structured)
3105  if (s->c.mb_x || s->c.mb_y % s->gob_index) is_gob_start = 0;
3106  break;
3108  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3110  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3111  s->mb_skip_run)
3112  is_gob_start=0;
3113  break;
3114  case AV_CODEC_ID_MJPEG:
3115  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3116  break;
3117  }
3118 
3119  if(is_gob_start){
3120  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3121  write_slice_end(s);
3122 
3123  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->partitioned_frame)
3125  }
3126 
3127  av_assert2((put_bits_count(&s->pb)&7) == 0);
3128  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3129 
3130  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3131  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->c.mb_x + s->c.mb_y;
3132  int d = 100 / s->error_rate;
3133  if(r % d == 0){
3134  current_packet_size=0;
3135  s->pb.buf_ptr= s->ptr_lastgob;
3136  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3137  }
3138  }
3139 
3140  switch (s->c.codec_id) {
3141  case AV_CODEC_ID_MPEG4:
3142  if (CONFIG_MPEG4_ENCODER) {
3146  }
3147  break;
3150  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3153  }
3154  break;
3155 #if CONFIG_H263P_ENCODER
3156  case AV_CODEC_ID_H263P:
3157  if (s->c.dc_val)
3159  // fallthrough
3160 #endif
3161  case AV_CODEC_ID_H263:
3162  if (CONFIG_H263_ENCODER) {
3163  update_mb_info(s, 1);
3165  }
3166  break;
3167  }
3168 
3169  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3170  int bits= put_bits_count(&s->pb);
3171  s->misc_bits+= bits - s->last_bits;
3172  s->last_bits= bits;
3173  }
3174 
3175  s->ptr_lastgob += current_packet_size;
3176  s->c.first_slice_line = 1;
3177  s->c.resync_mb_x = mb_x;
3178  s->c.resync_mb_y = mb_y;
3179  }
3180  }
3181 
3182  if (s->c.resync_mb_x == s->c.mb_x &&
3183  s->c.resync_mb_y+1 == s->c.mb_y)
3184  s->c.first_slice_line = 0;
3185 
3186  s->c.mb_skipped = 0;
3187  s->dquant=0; //only for QP_RD
3188 
3189  update_mb_info(s, 0);
3190 
3191  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3192  int next_block=0;
3193  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3194 
3195  backup_context_before_encode(&backup_s, s);
3196  backup_s.pb= s->pb;
3197  if (s->data_partitioning) {
3198  backup_s.pb2= s->pb2;
3199  backup_s.tex_pb= s->tex_pb;
3200  }
3201 
3202  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3203  s->c.mv_dir = MV_DIR_FORWARD;
3204  s->c.mv_type = MV_TYPE_16X16;
3205  s->c.mb_intra = 0;
3206  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3207  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3208  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3209  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3210  }
3211  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3212  s->c.mv_dir = MV_DIR_FORWARD;
3213  s->c.mv_type = MV_TYPE_FIELD;
3214  s->c.mb_intra = 0;
3215  for(i=0; i<2; i++){
3216  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3217  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3218  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3219  }
3220  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3221  &dmin, &next_block, 0, 0);
3222  }
3223  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3224  s->c.mv_dir = MV_DIR_FORWARD;
3225  s->c.mv_type = MV_TYPE_16X16;
3226  s->c.mb_intra = 0;
3227  s->c.mv[0][0][0] = 0;
3228  s->c.mv[0][0][1] = 0;
3229  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3230  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3231  }
3232  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3233  s->c.mv_dir = MV_DIR_FORWARD;
3234  s->c.mv_type = MV_TYPE_8X8;
3235  s->c.mb_intra = 0;
3236  for(i=0; i<4; i++){
3237  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3238  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3239  }
3240  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3241  &dmin, &next_block, 0, 0);
3242  }
3243  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3244  s->c.mv_dir = MV_DIR_FORWARD;
3245  s->c.mv_type = MV_TYPE_16X16;
3246  s->c.mb_intra = 0;
3247  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3248  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3249  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3250  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3251  }
3252  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3253  s->c.mv_dir = MV_DIR_BACKWARD;
3254  s->c.mv_type = MV_TYPE_16X16;
3255  s->c.mb_intra = 0;
3256  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3257  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3258  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3259  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3260  }
3261  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3262  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3263  s->c.mv_type = MV_TYPE_16X16;
3264  s->c.mb_intra = 0;
3265  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3266  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3267  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3268  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3269  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3270  &dmin, &next_block, 0, 0);
3271  }
3272  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3273  s->c.mv_dir = MV_DIR_FORWARD;
3274  s->c.mv_type = MV_TYPE_FIELD;
3275  s->c.mb_intra = 0;
3276  for(i=0; i<2; i++){
3277  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3278  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3279  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3280  }
3281  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3282  &dmin, &next_block, 0, 0);
3283  }
3284  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3285  s->c.mv_dir = MV_DIR_BACKWARD;
3286  s->c.mv_type = MV_TYPE_FIELD;
3287  s->c.mb_intra = 0;
3288  for(i=0; i<2; i++){
3289  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3290  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3291  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3292  }
3293  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3294  &dmin, &next_block, 0, 0);
3295  }
3296  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3297  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3298  s->c.mv_type = MV_TYPE_FIELD;
3299  s->c.mb_intra = 0;
3300  for(dir=0; dir<2; dir++){
3301  for(i=0; i<2; i++){
3302  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3303  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3304  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3305  }
3306  }
3307  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3308  &dmin, &next_block, 0, 0);
3309  }
3310  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3311  s->c.mv_dir = 0;
3312  s->c.mv_type = MV_TYPE_16X16;
3313  s->c.mb_intra = 1;
3314  s->c.mv[0][0][0] = 0;
3315  s->c.mv[0][0][1] = 0;
3316  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3317  &dmin, &next_block, 0, 0);
3318  s->c.mbintra_table[xy] = 1;
3319  }
3320 
3321  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3322  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3323  const int last_qp = backup_s.c.qscale;
3324  int qpi, qp, dc[6];
3325  int16_t ac[6][16];
3326  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3327  static const int dquant_tab[4]={-1,1,-2,2};
3328  int storecoefs = s->c.mb_intra && s->c.dc_val;
3329 
3330  av_assert2(backup_s.dquant == 0);
3331 
3332  //FIXME intra
3333  s->c.mv_dir = best_s.c.mv_dir;
3334  s->c.mv_type = MV_TYPE_16X16;
3335  s->c.mb_intra = best_s.c.mb_intra;
3336  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3337  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3338  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3339  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3340 
3341  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3342  for(; qpi<4; qpi++){
3343  int dquant= dquant_tab[qpi];
3344  qp= last_qp + dquant;
3345  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3346  continue;
3347  backup_s.dquant= dquant;
3348  if(storecoefs){
3349  for(i=0; i<6; i++){
3350  dc[i] = s->c.dc_val[s->c.block_index[i]];
3351  memcpy(ac[i], s->c.ac_val[s->c.block_index[i]], sizeof(*s->c.ac_val));
3352  }
3353  }
3354 
3355  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3356  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3357  if (best_s.c.qscale != qp) {
3358  if(storecoefs){
3359  for(i=0; i<6; i++){
3360  s->c.dc_val[s->c.block_index[i]] = dc[i];
3361  memcpy(s->c.ac_val[s->c.block_index[i]], ac[i], sizeof(*s->c.ac_val));
3362  }
3363  }
3364  }
3365  }
3366  }
3367  }
3368  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3369  int mx= s->b_direct_mv_table[xy][0];
3370  int my= s->b_direct_mv_table[xy][1];
3371 
3372  backup_s.dquant = 0;
3373  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3374  s->c.mb_intra = 0;
3375  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3376  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3377  &dmin, &next_block, mx, my);
3378  }
3379  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3380  backup_s.dquant = 0;
3381  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3382  s->c.mb_intra = 0;
3383  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3384  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3385  &dmin, &next_block, 0, 0);
3386  }
3387  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3388  int coded=0;
3389  for(i=0; i<6; i++)
3390  coded |= s->c.block_last_index[i];
3391  if(coded){
3392  int mx,my;
3393  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3394  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3395  mx=my=0; //FIXME find the one we actually used
3396  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3397  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3398  mx = s->c.mv[1][0][0];
3399  my = s->c.mv[1][0][1];
3400  }else{
3401  mx = s->c.mv[0][0][0];
3402  my = s->c.mv[0][0][1];
3403  }
3404 
3405  s->c.mv_dir = best_s.c.mv_dir;
3406  s->c.mv_type = best_s.c.mv_type;
3407  s->c.mb_intra = 0;
3408 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3409  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3410  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3411  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3412  backup_s.dquant= 0;
3413  s->skipdct=1;
3414  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3415  &dmin, &next_block, mx, my);
3416  s->skipdct=0;
3417  }
3418  }
3419 
3420  store_context_after_encode(s, &best_s, s->data_partitioning);
3421 
3422  pb_bits_count= put_bits_count(&s->pb);
3423  flush_put_bits(&s->pb);
3424  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3425  s->pb= backup_s.pb;
3426 
3427  if (s->data_partitioning) {
3428  pb2_bits_count= put_bits_count(&s->pb2);
3429  flush_put_bits(&s->pb2);
3430  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3431  s->pb2= backup_s.pb2;
3432 
3433  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3434  flush_put_bits(&s->tex_pb);
3435  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3436  s->tex_pb= backup_s.tex_pb;
3437  }
3438  s->last_bits= put_bits_count(&s->pb);
3439 
3440  if (CONFIG_H263_ENCODER &&
3441  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3443 
3444  if(next_block==0){ //FIXME 16 vs linesize16
3445  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3446  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3447  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3448  }
3449 
3450  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3451  mpv_reconstruct_mb(s, s->block);
3452  } else {
3453  int motion_x = 0, motion_y = 0;
3454  s->c.mv_type = MV_TYPE_16X16;
3455  // only one MB-Type possible
3456 
3457  switch(mb_type){
3459  s->c.mv_dir = 0;
3460  s->c.mb_intra = 1;
3461  motion_x= s->c.mv[0][0][0] = 0;
3462  motion_y= s->c.mv[0][0][1] = 0;
3463  s->c.mbintra_table[xy] = 1;
3464  break;
3466  s->c.mv_dir = MV_DIR_FORWARD;
3467  s->c.mb_intra = 0;
3468  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3469  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3470  break;
3472  s->c.mv_dir = MV_DIR_FORWARD;
3473  s->c.mv_type = MV_TYPE_FIELD;
3474  s->c.mb_intra = 0;
3475  for(i=0; i<2; i++){
3476  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3477  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3478  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3479  }
3480  break;
3482  s->c.mv_dir = MV_DIR_FORWARD;
3483  s->c.mv_type = MV_TYPE_8X8;
3484  s->c.mb_intra = 0;
3485  for(i=0; i<4; i++){
3486  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3487  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3488  }
3489  break;
3491  if (CONFIG_MPEG4_ENCODER) {
3493  s->c.mb_intra = 0;
3494  motion_x=s->b_direct_mv_table[xy][0];
3495  motion_y=s->b_direct_mv_table[xy][1];
3496  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3497  }
3498  break;
3500  if (CONFIG_MPEG4_ENCODER) {
3502  s->c.mb_intra = 0;
3503  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3504  }
3505  break;
3507  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3508  s->c.mb_intra = 0;
3509  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3510  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3511  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3512  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3513  break;
3515  s->c.mv_dir = MV_DIR_BACKWARD;
3516  s->c.mb_intra = 0;
3517  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3518  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3519  break;
3521  s->c.mv_dir = MV_DIR_FORWARD;
3522  s->c.mb_intra = 0;
3523  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3524  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3525  break;
3527  s->c.mv_dir = MV_DIR_FORWARD;
3528  s->c.mv_type = MV_TYPE_FIELD;
3529  s->c.mb_intra = 0;
3530  for(i=0; i<2; i++){
3531  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3532  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3533  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3534  }
3535  break;
3537  s->c.mv_dir = MV_DIR_BACKWARD;
3538  s->c.mv_type = MV_TYPE_FIELD;
3539  s->c.mb_intra = 0;
3540  for(i=0; i<2; i++){
3541  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3542  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3543  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3544  }
3545  break;
3547  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3548  s->c.mv_type = MV_TYPE_FIELD;
3549  s->c.mb_intra = 0;
3550  for(dir=0; dir<2; dir++){
3551  for(i=0; i<2; i++){
3552  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3553  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3554  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3555  }
3556  }
3557  break;
3558  default:
3559  av_unreachable("There is a case for every CANDIDATE_MB_TYPE_* "
3560  "except CANDIDATE_MB_TYPE_SKIPPED which is never "
3561  "the only candidate (always coupled with INTER) "
3562  "so that it never reaches this switch");
3563  }
3564 
3565  encode_mb(s, motion_x, motion_y);
3566 
3567  // RAL: Update last macroblock type
3568  s->last_mv_dir = s->c.mv_dir;
3569 
3570  if (CONFIG_H263_ENCODER &&
3571  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3573 
3574  mpv_reconstruct_mb(s, s->block);
3575  }
3576 
3577  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3578 
3579  /* clean the MV table in IPS frames for direct mode in B-frames */
3580  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3581  s->p_mv_table[xy][0]=0;
3582  s->p_mv_table[xy][1]=0;
3583 #if CONFIG_H263_ENCODER
3584  } else if (s->c.h263_pred || s->c.h263_aic) {
3586 #endif
3587  }
3588 
3589  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3590  int w= 16;
3591  int h= 16;
3592 
3593  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3594  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3595 
3596  s->encoding_error[0] += sse(
3597  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3598  s->c.dest[0], w, h, s->c.linesize);
3599  s->encoding_error[1] += sse(
3600  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3601  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3602  s->encoding_error[2] += sse(
3603  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3604  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3605  }
3606  if (s->loop_filter) {
3607  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3608  ff_h263_loop_filter(&s->c);
3609  }
3610  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3611  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3612  }
3613  }
3614 
3615 #if CONFIG_MSMPEG4ENC
3616  //not beautiful here but we must write it before flushing so it has to be here
3617  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3618  s->c.pict_type == AV_PICTURE_TYPE_I)
3620 #endif
3621 
3622  write_slice_end(s);
3623 
3624  return 0;
3625 }
3626 
/* Helpers for merging per-slice encoder statistics into the main context.
 * ADD  accumulates src's field into dst, leaving src untouched.
 * MERGE accumulates and then zeroes src's field so the value cannot be
 * counted twice if the slice context is merged or reused again. */
3627 #define ADD(field) dst->field += src->field;
3628 #define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the signature line (3629) is missing from this listing
 * (stripped hyperlink target). Judging by the call site in encode_picture
 * ("merge_context_after_me(s, s->c.enc_contexts[i])") this is
 * merge_context_after_me(dst, src): it folds the motion-estimation
 * statistics gathered by a slice context into the main context using the
 * ADD macro (src is left untouched) — confirm against the full source. */
3630 {
3631  ADD(me.scene_change_score);
3632  ADD(me.mc_mb_var_sum_temp);
3633  ADD(me.mb_var_sum_temp);
3634 }
3635 
/* NOTE(review): the signature line (3636) is missing from this listing.
 * Judging by the call site in encode_picture
 * ("merge_context_after_encode(s, s->c.enc_contexts[i])") this is
 * merge_context_after_encode(dst, src): it merges a slice context's
 * bit-accounting statistics into the main context and appends the slice's
 * bitstream onto the main bit writer — confirm against the full source. */
3637 {
3638  int i;
3639 
     /* MERGE zeroes the counters in src; ADD only accumulates. */
3640  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3641  MERGE(dct_count[1]);
3642  ADD(mv_bits);
3643  ADD(i_tex_bits);
3644  ADD(p_tex_bits);
3645  ADD(i_count);
3646  ADD(misc_bits);
3647  ADD(encoding_error[0]);
3648  ADD(encoding_error[1]);
3649  ADD(encoding_error[2]);
3650 
     /* Only merge the denoise error accumulators when the feature is active
      * (dst->dct_error_sum is non-NULL). */
3651  if (dst->dct_error_sum) {
3652  for(i=0; i<64; i++){
3653  MERGE(dct_error_sum[0][i]);
3654  MERGE(dct_error_sum[1][i]);
3655  }
3656  }
3657 
     /* Both writers must be byte-aligned before the raw bit copy below;
      * slices are flushed to byte boundaries before this is called. */
3658  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3659  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3660  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3661  flush_put_bits(&dst->pb);
3662 }
3663 
/**
 * Pick the quantizer / lambda for the current picture.
 *
 * Priority: an explicitly scheduled m->next_lambda wins; otherwise, unless
 * fixed qscale is in use, the rate controller (ff_rate_estimate_qscale) is
 * consulted. With adaptive quantization enabled the per-MB qscale table is
 * initialized and s->lambda is taken from lambda_table[0]; otherwise lambda
 * is the frame quality chosen above.
 *
 * @param m       main encoder context
 * @param dry_run if non-zero, do not consume state (next_lambda is kept)
 * @return 0 on success, -1 if the rate controller returned a negative quality
 */
3664 static int estimate_qp(MPVMainEncContext *const m, int dry_run)
3665 {
3666  MPVEncContext *const s = &m->s;
3667 
3668  if (m->next_lambda){
3669  s->c.cur_pic.ptr->f->quality = m->next_lambda;
      /* Only consume the scheduled lambda on a real run. */
3670  if(!dry_run) m->next_lambda= 0;
3671  } else if (!m->fixed_qscale) {
3672  int quality = ff_rate_estimate_qscale(m, dry_run);
3673  s->c.cur_pic.ptr->f->quality = quality;
3674  if (s->c.cur_pic.ptr->f->quality < 0)
3675  return -1;
3676  }
3677 
3678  if(s->adaptive_quant){
3679  init_qscale_tab(s);
3680 
      /* NOTE(review): lines 3684 and 3690 (the codec-specific qscale
       * clean-up calls inside these if()s) are missing from this listing —
       * consult the full source. */
3681  switch (s->c.codec_id) {
3682  case AV_CODEC_ID_MPEG4:
3683  if (CONFIG_MPEG4_ENCODER)
3685  break;
3686  case AV_CODEC_ID_H263:
3687  case AV_CODEC_ID_H263P:
3688  case AV_CODEC_ID_FLV1:
3689  if (CONFIG_H263_ENCODER)
3691  break;
3692  }
3693 
3694  s->lambda = s->lambda_table[0];
3695  //FIXME broken
3696  }else
3697  s->lambda = s->c.cur_pic.ptr->f->quality;
3698  update_qscale(m);
3699  return 0;
3700 }
3701 
3702 /* must be called before writing the header */
/* NOTE(review): the signature line (3703) is missing from this listing;
 * from the body this updates the temporal distances used by B-frame
 * prediction: s->c.time for the current picture, pb_time (distance from the
 * previous non-B picture to this B picture) and pp_time (distance between
 * the two surrounding non-B pictures) — confirm against the full source. */
3704 {
3705  av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3706  s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;
3707 
3708  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
      /* B-frame must lie strictly between the two reference pictures. */
3709  s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
3710  av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
3711  }else{
3712  s->c.pp_time = s->c.time - s->c.last_non_b_time;
3713  s->c.last_non_b_time = s->c.time;
      /* pp_time may only be 0 for the very first picture. */
3714  av_assert1(s->picture_number == 0 || s->c.pp_time > 0);
3715  }
3716 }
3717 
/**
 * Encode one picture: set up per-slice bit writers, run motion estimation,
 * choose f_code/b_code and the quantizer, write the picture header and run
 * the per-slice encode threads, then merge all slice contexts back into the
 * main context.
 *
 * NOTE(review): this listing has several stripped hyperlink lines (3728,
 * 3730, 3828, 3858-3859, 3879 …); each hole is flagged inline below —
 * consult the full source before modifying this function.
 *
 * @param m   main encoder context
 * @param pkt output packet whose buffer is partitioned among the slices
 * @return 0 on success, a negative error code on failure
 */
3718 static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
3719 {
3720  MPVEncContext *const s = &m->s;
3721  int i, ret;
3722  int bits;
3723  int context_count = s->c.slice_context_count;
3724 
3725  /* we need to initialize some time vars before we can encode B-frames */
3726  // RAL: Condition added for MPEG1VIDEO
     /* NOTE(review): the call lines guarded by these two if()s (3728, 3730)
      * are missing from this listing. */
3727  if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
3729  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)
3731 
3732 // s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3733 
3734  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3735  s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
3736  } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
      /* Alternate rounding on non-B pictures to avoid drift accumulation. */
3737  s->c.no_rounding ^= s->flipflop_rounding;
3738  }
3739 
3740  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
3741  ret = estimate_qp(m, 1);
3742  if (ret < 0)
3743  return ret;
3744  ff_get_2pass_fcode(m);
3745  } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3746  if (s->c.pict_type == AV_PICTURE_TYPE_B)
3747  s->lambda = m->last_lambda_for[s->c.pict_type];
3748  else
3749  s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
3750  update_qscale(m);
3751  }
3752 
3753  s->c.mb_intra = 0; //for the rate distortion & bit compare functions
     /* Partition the output packet proportionally to each slice's share of
      * macroblock rows and give every slice context its own bit writer. */
3754  for (int i = 0; i < context_count; i++) {
3755  MPVEncContext *const slice = s->c.enc_contexts[i];
3756  int h = s->c.mb_height;
3757  uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
3758  uint8_t *end = pkt->data + (int64_t)pkt->size * slice->c. end_mb_y / h;
3759 
3760  init_put_bits(&slice->pb, start, end - start);
3761 
3762  if (i) {
3763  ret = ff_update_duplicate_context(&slice->c, &s->c);
3764  if (ret < 0)
3765  return ret;
3766  slice->lambda = s->lambda;
3767  slice->lambda2 = s->lambda2;
3768  }
3769  slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
3770  ff_me_init_pic(slice);
3771  }
3772 
3773  /* Estimate motion for every MB */
3774  if (s->c.pict_type != AV_PICTURE_TYPE_I) {
3775  s->lambda = (s->lambda * m->me_penalty_compensation + 128) >> 8;
3776  s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
3777  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3778  if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3779  m->me_pre == 2) {
3780  s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
3781  &s->c.enc_contexts[0], NULL,
3782  context_count, sizeof(void*));
3783  }
3784  }
3785 
3786  s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
3787  NULL, context_count, sizeof(void*));
3788  }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
3789  /* I-Frame */
3790  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3791  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3792 
3793  if (!m->fixed_qscale) {
3794  /* finding spatial complexity for I-frame rate control */
3795  s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
3796  NULL, context_count, sizeof(void*));
3797  }
3798  }
3799  for(i=1; i<context_count; i++){
3800  merge_context_after_me(s, s->c.enc_contexts[i]);
3801  }
3802  m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3803  m->mb_var_sum = s->me. mb_var_sum_temp;
3804  emms_c();
3805 
     /* Scene-change detection: promote a P picture to I when the ME score
      * exceeds the configured threshold. */
3806  if (s->me.scene_change_score > m->scenechange_threshold &&
3807  s->c.pict_type == AV_PICTURE_TYPE_P) {
3808  s->c.pict_type = AV_PICTURE_TYPE_I;
3809  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3810  s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
3811  if (s->c.msmpeg4_version >= MSMP4_V3)
3812  s->c.no_rounding = 1;
3813  ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3814  m->mb_var_sum, m->mc_mb_var_sum);
3815  }
3816 
     /* Choose motion-vector range codes (f_code/b_code) and clamp any MVs
      * that fall outside the representable range. */
3817  if (!s->umvplus) {
3818  if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
3819  s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3820 
3821  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3822  int a,b;
3823  a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3824  b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3825  s->f_code = FFMAX3(s->f_code, a, b);
3826  }
3827 
      /* NOTE(review): line 3828 is missing from this listing. */
3829  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3830  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3831  int j;
3832  for(i=0; i<2; i++){
3833  for(j=0; j<2; j++)
3834  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3835  s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3836  }
3837  }
3838  } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3839  int a, b;
3840 
3841  a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3842  b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3843  s->f_code = FFMAX(a, b);
3844 
3845  a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3846  b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3847  s->b_code = FFMAX(a, b);
3848 
3849  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3850  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3851  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3852  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3853  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3854  int dir, j;
3855  for(dir=0; dir<2; dir++){
3856  for(i=0; i<2; i++){
3857  for(j=0; j<2; j++){
      /* NOTE(review): lines 3858-3859 (declaring the 'type' value used
       * below) are missing from this listing. */
3860  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3861  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3862  }
3863  }
3864  }
3865  }
3866  }
3867  }
3868 
3869  ret = estimate_qp(m, 0);
3870  if (ret < 0)
3871  return ret;
3872 
3873  if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
3874  s->c.pict_type == AV_PICTURE_TYPE_I &&
3875  !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
3876  s->c.qscale = 3; //reduce clipping problems
3877 
     /* MJPEG/AMV: bake qscale into the quantization matrices instead of
      * signalling it per macroblock. */
3878  if (s->c.out_format == FMT_MJPEG) {
      /* NOTE(review): line 3879 (the call whose result is checked below) is
       * missing from this listing. */
3880  (7 + s->c.qscale) / s->c.qscale, 65535);
3881  if (ret < 0)
3882  return ret;
3883 
3884  if (s->c.codec_id != AV_CODEC_ID_AMV) {
3885  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3886  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3887 
3888  if (s->c.avctx->intra_matrix) {
3889  chroma_matrix =
3890  luma_matrix = s->c.avctx->intra_matrix;
3891  }
3892  if (s->c.avctx->chroma_intra_matrix)
3893  chroma_matrix = s->c.avctx->chroma_intra_matrix;
3894 
3895  /* for mjpeg, we do include qscale in the matrix */
3896  for (int i = 1; i < 64; i++) {
3897  int j = s->c.idsp.idct_permutation[i];
3898 
3899  s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
3900  s->c. intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->c.qscale) >> 3);
3901  }
3902  s->c.y_dc_scale_table =
3903  s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision];
3904  s->c.chroma_intra_matrix[0] =
3905  s->c.intra_matrix[0] = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision][8];
3906  } else {
      /* AMV uses fixed DC scale tables and the SP5X quant tables. */
3907  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3908  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3909  for (int i = 1; i < 64; i++) {
3910  int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];
3911 
3912  s->c.intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3913  s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3914  }
3915  s->c.y_dc_scale_table = y;
3916  s->c.c_dc_scale_table = c;
3917  s->c.intra_matrix[0] = 13;
3918  s->c.chroma_intra_matrix[0] = 14;
3919  }
3920  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3921  s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
3922  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3923  s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3924  s->c.qscale = 8;
3925  }
3926 
3927  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3928  s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3929  } else {
3930  s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3931  }
3932  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
3933 
3934  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3935  m->picture_in_gop_number = 0;
3936 
     /* Write the picture header and account its bits separately. */
3937  s->c.mb_x = s->c.mb_y = 0;
3938  s->last_bits= put_bits_count(&s->pb);
3939  ret = m->encode_picture_header(m);
3940  if (ret < 0)
3941  return ret;
3942  bits= put_bits_count(&s->pb);
3943  m->header_bits = bits - s->last_bits;
3944 
3945  for(i=1; i<context_count; i++){
3946  update_duplicate_context_after_me(s->c.enc_contexts[i], s);
3947  }
3948  s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
3949  NULL, context_count, sizeof(void*));
     /* Merge the slice bitstreams back into the main writer, growing its
      * buffer view when a slice's buffer directly follows it. */
3950  for(i=1; i<context_count; i++){
3951  if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
3952  set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3953  merge_context_after_encode(s, s->c.enc_contexts[i]);
3954  }
3955  emms_c();
3956  return 0;
3957 }
3958 
3959 static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
3960 {
3961  const int intra = s->c.mb_intra;
3962  int i;
3963 
3964  s->dct_count[intra]++;
3965 
3966  for(i=0; i<64; i++){
3967  int level= block[i];
3968 
3969  if(level){
3970  if(level>0){
3971  s->dct_error_sum[intra][i] += level;
3972  level -= s->dct_offset[intra][i];
3973  if(level<0) level=0;
3974  }else{
3975  s->dct_error_sum[intra][i] -= level;
3976  level += s->dct_offset[intra][i];
3977  if(level>0) level=0;
3978  }
3979  block[i]= level;
3980  }
3981  }
3982 }
3983 
3985  int16_t *block, int n,
3986  int qscale, int *overflow){
3987  const int *qmat;
3988  const uint16_t *matrix;
3989  const uint8_t *scantable;
3990  const uint8_t *perm_scantable;
3991  int max=0;
3992  unsigned int threshold1, threshold2;
3993  int bias=0;
3994  int run_tab[65];
3995  int level_tab[65];
3996  int score_tab[65];
3997  int survivor[65];
3998  int survivor_count;
3999  int last_run=0;
4000  int last_level=0;
4001  int last_score= 0;
4002  int last_i;
4003  int coeff[2][64];
4004  int coeff_count[64];
4005  int qmul, qadd, start_i, last_non_zero, i, dc;
4006  const int esc_length= s->ac_esc_length;
4007  const uint8_t *length, *last_length;
4008  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4009  int mpeg2_qscale;
4010 
4011  s->fdsp.fdct(block);
4012 
4013  if(s->dct_error_sum)
4014  s->denoise_dct(s, block);
4015  qmul= qscale*16;
4016  qadd= ((qscale-1)|1)*8;
4017 
4018  if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4019  else mpeg2_qscale = qscale << 1;
4020 
4021  if (s->c.mb_intra) {
4022  int q;
4023  scantable = s->c.intra_scantable.scantable;
4024  perm_scantable = s->c.intra_scantable.permutated;
4025  if (!s->c.h263_aic) {
4026  if (n < 4)
4027  q = s->c.y_dc_scale;
4028  else
4029  q = s->c.c_dc_scale;
4030  q = q << 3;
4031  } else{
4032  /* For AIC we skip quant/dequant of INTRADC */
4033  q = 1 << 3;
4034  qadd=0;
4035  }
4036 
4037  /* note: block[0] is assumed to be positive */
4038  block[0] = (block[0] + (q >> 1)) / q;
4039  start_i = 1;
4040  last_non_zero = 0;
4041  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4042  matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
4043  if (s->mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
4044  bias= 1<<(QMAT_SHIFT-1);
4045 
4046  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4047  length = s->intra_chroma_ac_vlc_length;
4048  last_length= s->intra_chroma_ac_vlc_last_length;
4049  } else {
4050  length = s->intra_ac_vlc_length;
4051  last_length= s->intra_ac_vlc_last_length;
4052  }
4053  } else {
4054  scantable = s->c.inter_scantable.scantable;
4055  perm_scantable = s->c.inter_scantable.permutated;
4056  start_i = 0;
4057  last_non_zero = -1;
4058  qmat = s->q_inter_matrix[qscale];
4059  matrix = s->c.inter_matrix;
4060  length = s->inter_ac_vlc_length;
4061  last_length= s->inter_ac_vlc_last_length;
4062  }
4063  last_i= start_i;
4064 
4065  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4066  threshold2= (threshold1<<1);
4067 
4068  for(i=63; i>=start_i; i--) {
4069  const int j = scantable[i];
4070  int64_t level = (int64_t)block[j] * qmat[j];
4071 
4072  if(((uint64_t)(level+threshold1))>threshold2){
4073  last_non_zero = i;
4074  break;
4075  }
4076  }
4077 
4078  for(i=start_i; i<=last_non_zero; i++) {
4079  const int j = scantable[i];
4080  int64_t level = (int64_t)block[j] * qmat[j];
4081 
4082 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4083 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4084  if(((uint64_t)(level+threshold1))>threshold2){
4085  if(level>0){
4086  level= (bias + level)>>QMAT_SHIFT;
4087  coeff[0][i]= level;
4088  coeff[1][i]= level-1;
4089 // coeff[2][k]= level-2;
4090  }else{
4091  level= (bias - level)>>QMAT_SHIFT;
4092  coeff[0][i]= -level;
4093  coeff[1][i]= -level+1;
4094 // coeff[2][k]= -level+2;
4095  }
4096  coeff_count[i]= FFMIN(level, 2);
4097  av_assert2(coeff_count[i]);
4098  max |=level;
4099  }else{
4100  coeff[0][i]= (level>>31)|1;
4101  coeff_count[i]= 1;
4102  }
4103  }
4104 
4105  *overflow= s->max_qcoeff < max; //overflow might have happened
4106 
4107  if(last_non_zero < start_i){
4108  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4109  return last_non_zero;
4110  }
4111 
4112  score_tab[start_i]= 0;
4113  survivor[0]= start_i;
4114  survivor_count= 1;
4115 
4116  for(i=start_i; i<=last_non_zero; i++){
4117  int level_index, j, zero_distortion;
4118  int dct_coeff= FFABS(block[ scantable[i] ]);
4119  int best_score=256*256*256*120;
4120 
4121  if (s->fdsp.fdct == ff_fdct_ifast)
4122  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4123  zero_distortion= dct_coeff*dct_coeff;
4124 
4125  for(level_index=0; level_index < coeff_count[i]; level_index++){
4126  int distortion;
4127  int level= coeff[level_index][i];
4128  const int alevel= FFABS(level);
4129  int unquant_coeff;
4130 
4131  av_assert2(level);
4132 
4133  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4134  unquant_coeff= alevel*qmul + qadd;
4135  } else if (s->c.out_format == FMT_MJPEG) {
4136  j = s->c.idsp.idct_permutation[scantable[i]];
4137  unquant_coeff = alevel * matrix[j] * 8;
4138  }else{ // MPEG-1
4139  j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4140  if (s->c.mb_intra) {
4141  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4142  unquant_coeff = (unquant_coeff - 1) | 1;
4143  }else{
4144  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4145  unquant_coeff = (unquant_coeff - 1) | 1;
4146  }
4147  unquant_coeff<<= 3;
4148  }
4149 
4150  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4151  level+=64;
4152  if((level&(~127)) == 0){
4153  for(j=survivor_count-1; j>=0; j--){
4154  int run= i - survivor[j];
4155  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4156  score += score_tab[i-run];
4157 
4158  if(score < best_score){
4159  best_score= score;
4160  run_tab[i+1]= run;
4161  level_tab[i+1]= level-64;
4162  }
4163  }
4164 
4165  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4166  for(j=survivor_count-1; j>=0; j--){
4167  int run= i - survivor[j];
4168  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4169  score += score_tab[i-run];
4170  if(score < last_score){
4171  last_score= score;
4172  last_run= run;
4173  last_level= level-64;
4174  last_i= i+1;
4175  }
4176  }
4177  }
4178  }else{
4179  distortion += esc_length*lambda;
4180  for(j=survivor_count-1; j>=0; j--){
4181  int run= i - survivor[j];
4182  int score= distortion + score_tab[i-run];
4183 
4184  if(score < best_score){
4185  best_score= score;
4186  run_tab[i+1]= run;
4187  level_tab[i+1]= level-64;
4188  }
4189  }
4190 
4191  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4192  for(j=survivor_count-1; j>=0; j--){
4193  int run= i - survivor[j];
4194  int score= distortion + score_tab[i-run];
4195  if(score < last_score){
4196  last_score= score;
4197  last_run= run;
4198  last_level= level-64;
4199  last_i= i+1;
4200  }
4201  }
4202  }
4203  }
4204  }
4205 
4206  score_tab[i+1]= best_score;
4207 
4208  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4209  if(last_non_zero <= 27){
4210  for(; survivor_count; survivor_count--){
4211  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4212  break;
4213  }
4214  }else{
4215  for(; survivor_count; survivor_count--){
4216  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4217  break;
4218  }
4219  }
4220 
4221  survivor[ survivor_count++ ]= i+1;
4222  }
4223 
4224  if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
4225  last_score= 256*256*256*120;
4226  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4227  int score= score_tab[i];
4228  if (i)
4229  score += lambda * 2; // FIXME more exact?
4230 
4231  if(score < last_score){
4232  last_score= score;
4233  last_i= i;
4234  last_level= level_tab[i];
4235  last_run= run_tab[i];
4236  }
4237  }
4238  }
4239 
4240  s->coded_score[n] = last_score;
4241 
4242  dc= FFABS(block[0]);
4243  last_non_zero= last_i - 1;
4244  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4245 
4246  if(last_non_zero < start_i)
4247  return last_non_zero;
4248 
4249  if(last_non_zero == 0 && start_i == 0){
4250  int best_level= 0;
4251  int best_score= dc * dc;
4252 
4253  for(i=0; i<coeff_count[0]; i++){
4254  int level= coeff[i][0];
4255  int alevel= FFABS(level);
4256  int unquant_coeff, score, distortion;
4257 
4258  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4259  unquant_coeff= (alevel*qmul + qadd)>>3;
4260  } else{ // MPEG-1
4261  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4262  unquant_coeff = (unquant_coeff - 1) | 1;
4263  }
4264  unquant_coeff = (unquant_coeff + 4) >> 3;
4265  unquant_coeff<<= 3 + 3;
4266 
4267  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4268  level+=64;
4269  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4270  else score= distortion + esc_length*lambda;
4271 
4272  if(score < best_score){
4273  best_score= score;
4274  best_level= level - 64;
4275  }
4276  }
4277  block[0]= best_level;
4278  s->coded_score[n] = best_score - dc*dc;
4279  if(best_level == 0) return -1;
4280  else return last_non_zero;
4281  }
4282 
4283  i= last_i;
4284  av_assert2(last_level);
4285 
4286  block[ perm_scantable[last_non_zero] ]= last_level;
4287  i -= last_run + 1;
4288 
4289  for(; i>start_i; i -= run_tab[i] + 1){
4290  block[ perm_scantable[i-1] ]= level_tab[i];
4291  }
4292 
4293  return last_non_zero;
4294 }
4295 
4296 static int16_t basis[64][64];
4297 
4298 static void build_basis(uint8_t *perm){
4299  int i, j, x, y;
4300  emms_c();
4301  for(i=0; i<8; i++){
4302  for(j=0; j<8; j++){
4303  for(y=0; y<8; y++){
4304  for(x=0; x<8; x++){
4305  double s= 0.25*(1<<BASIS_SHIFT);
4306  int index= 8*i + j;
4307  int perm_index= perm[index];
4308  if(i==0) s*= sqrt(0.5);
4309  if(j==0) s*= sqrt(0.5);
4310  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4311  }
4312  }
4313  }
4314  }
4315 }
4316 
/**
 * Refine an already-quantized 8x8 block by trial-and-error noise shaping.
 *
 * Starting from the quantized coefficients in @p block, this repeatedly
 * proposes +-1 changes to individual coefficients, scores each proposal as
 * rate (VLC length tables) * lambda plus weighted reconstruction distortion
 * (via the try_8x8basis() DSP callback against the residual in rem[]), and
 * greedily applies the best-scoring change until no change improves the score.
 *
 * @param s      encoder context (quantizer mode, VLC tables, DSP callbacks)
 * @param block  quantized coefficients, updated in place
 * @param weight perceptual weights per coefficient; remapped in place to 16..63
 * @param orig   original (unquantized, spatial-domain) reference values
 * @param n      block index (selects luma/chroma DC scale and AC tables)
 * @param qscale quantizer scale for this block
 * @return index of the last non-zero coefficient after refinement
 */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                               int16_t *block, int16_t *weight, int16_t *orig,
                               int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    /* lazily build the shared DCT basis table on first use */
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if (s->mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        /* chroma may use its own AC VLC length tables when available */
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    /* rem[] = (DC reconstruction + rounding) - original, in RECON_SHIFT
     * fixed-point precision; the refinement loop measures distortion
     * against this residual */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* remap the perceptual weights into 16..63 and accumulate their energy
     * to derive the rate/distortion trade-off factor lambda */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* build the initial run-length table and subtract each dequantized
     * coefficient's basis contribution from the residual */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* main refinement loop: keep applying the single best +-1 coefficient
     * change until no change lowers the rate-distortion score */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* forward-DCT the weighted residual; its sign is later used to
             * reject coefficient changes that would push in the wrong
             * direction */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        /* intra blocks: also try +-1 on the DC coefficient (block[0]) */
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        /* try +-1 on every AC coefficient position */
        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* changing the magnitude of an existing coefficient:
                         * rate delta is the VLC length difference */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* turning a zero into +-1: a run is split, so the rate
                         * delta involves the neighboring coefficients' codes */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=   length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* turning a +-1 into zero: two runs merge */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                /* add the weighted distortion of actually making the change */
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* apply the winning change, update last_non_zero, rebuild the
             * run-length table, and fold the change into the residual */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4632 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permutated
 *                  to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* lift the coefficients covered by the scan out of the block,
     * clearing their original slots */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* drop each coefficient back in at its permuted position */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        block[permutation[pos]] = saved[pos];
    }
}
4668 
/**
 * Forward-DCT and quantize one 8x8 block (C reference implementation).
 *
 * Applies s->fdsp.fdct, optionally denoises, quantizes the DC coefficient
 * by the appropriate DC scale (intra only) and the AC coefficients with the
 * precomputed quantization matrix plus rounding bias, then permutes the
 * surviving coefficients for the IDCT if needed.
 *
 * @param s        encoder context
 * @param block    spatial-domain samples in, quantized coefficients out
 * @param n        block index (selects luma/chroma matrices and DC scale)
 * @param qscale   quantizer scale for this block
 * @param overflow set to 1 if a coefficient magnitude exceeded s->max_qcoeff
 * @return index of the last non-zero coefficient in scan order,
 *         or start_i-1 if all (AC) coefficients quantized to zero
 */
static int dct_quantize_c(MPVEncContext *const s,
                          int16_t *block, int n,
                          int qscale, int *overflow)
{
    int i, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable = s->c.inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* scan backwards to find the last coefficient that survives
     * quantization; the unsigned compare below tests
     * -threshold1-1 < level < threshold1+1 in one branch */
    for(i=63;i>=start_i;i--) {
        const int j = scantable[i];
        int64_t level = (int64_t)block[j] * qmat[j];

        if(((uint64_t)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    /* quantize all coefficients up to the last surviving one */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int64_t level = (int64_t)block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((uint64_t)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->c.idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1502
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3984
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1158
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1661
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:378
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:103
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:84
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:220
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:264
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1710
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:239
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:257
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:298
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2730
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:246
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:251
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2172
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:175
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
MAX_PB2_MB_SIZE
@ MAX_PB2_MB_SIZE
Definition: mpeg4videoenc.h:38
h263data.h
init_unquantize
static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:316
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:104
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2633
level
uint8_t level
Definition: svq3.c:208
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1493
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:46
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:526
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
ALIGN
#define ALIGN
Definition: hashtable.c:32
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:821
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:244
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:118
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1903
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2655
mem_internal.h
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:270
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2655
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:205
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1277
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3703
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1925
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:118
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2651
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3664
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:673
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:47
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2646
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:252
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:301
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:230
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4296
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:948
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1523
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:258
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:164
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2847
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:808
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:173
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2781
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:291
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:158
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:242
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:421
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:523
AVFrame::width
int width
Definition: frame.h:493
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2655
AVPacket::data
uint8_t * data
Definition: packet.h:552
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:379
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:110
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:52
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:216
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2894
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:199
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:491
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:570
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:308
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:81
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2829
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2262
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2662
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:936
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:54
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:203
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1241
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:266
mpegutils.h
pix
enum AVPixelFormat pix
Definition: ohcodec.c:55
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:575
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:607
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:862
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:75
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:248
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:302
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:442
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:172
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:51
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
update_mb_info
static void update_mb_info(MPVEncContext *const s, int startcode)
Definition: mpegvideo_enc.c:2934
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2655
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:207
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1676
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:377
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1178
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2659
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:878
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1942
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:336
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2654
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:56
init_slice_buffers
static av_cold int init_slice_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:505
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:225
faandct.h
Floating point AAN DCT.
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:198
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:53
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:830
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:182
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2652
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:722
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:268
fail
#define fail()
Definition: checkasm.h:198
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:57
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:240
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:103
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:961
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:293
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:209
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1209
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:311
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1260
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1287
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2803
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:217
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_mpeg1_encode_init
static void ff_mpeg1_encode_init(MPVEncContext *s)
Definition: mpeg12enc.h:33
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:379
AVRational::num
int num
Numerator.
Definition: rational.h:59
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:145
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:295
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:300
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:243
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1460
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:255
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:205
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:636
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:96
MpegEncContext::ac_val
int16_t(* ac_val)[16]
used for H.263 AIC, MPEG-4 AC prediction
Definition: mpegvideo.h:145
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:119
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4298
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:697
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:223
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1246
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MPVMainEncContext *const m)
Definition: rv10enc.c:34
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:122
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:265
ff_mpvenc_dct_init_mips
av_cold void ff_mpvenc_dct_init_mips(MPVEncContext *s)
Definition: mpegvideoenc_init_mips.c:26
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:226
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2657
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:210
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:202
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:267
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3629
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:835
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:261
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:50
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:254
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:228
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:204
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1067
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:270
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1270
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4644
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1505
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:279
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:296
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2647
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:829
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2872
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:55
arg
const char * arg
Definition: jacosubdec.c:67
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:272
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:447
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1255
MECmpContext
Definition: me_cmp.h:50
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:186
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:301
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
MPVEncContext::block
int16_t(* block)[64]
points into blocks below
Definition: mpegvideoenc.h:114
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:127
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:233
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:597
run
uint8_t run
Definition: svq3.c:207
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:194
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:96
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:190
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:109
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:912
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:297
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:53
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:206
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:240
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:53
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:336
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1807
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:524
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:189
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3628
MAX_AC_TEX_MB_SIZE
@ MAX_AC_TEX_MB_SIZE
Definition: mpeg4videoenc.h:39
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:868
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1122
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2650
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:937
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2957
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:120
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1165
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1305
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1326
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2658
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:256
MPVMainEncContext
Definition: mpegvideoenc.h:199
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:176
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:815
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1319
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:843
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:813
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1380
ff_h263_clean_intra_table_entries
static void ff_h263_clean_intra_table_entries(MpegEncContext *s, int xy)
Definition: h263.h:47
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1312
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:259
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:513
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:82
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2228
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:553
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1005
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:155
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:44
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:237
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:54
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2647
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:204
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:543
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:337
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:125
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:305
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:290
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:98
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:253
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:310
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3718
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:508
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:289
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:62
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:443
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:78
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:551
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:378
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:206
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:178
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:558
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:64
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:294
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:263
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:286
denoise_dct_c
static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
Definition: mpegvideo_enc.c:3959
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:303
MBBackup
Definition: mpegvideo_enc.c:2643
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:292
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:266
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:309
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:409
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2648
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:104
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:264
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2645
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:545
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *m)
Definition: rv20enc.c:37
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2991
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:271
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2659
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:105
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:971
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:129
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:284
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:194
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:497
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:44
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:281
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:521
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:256
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:153
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:239
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:879
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:494
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:232
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:38
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:285
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:260
AVCodecContext::height
int height
Definition: avcodec.h:592
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:493
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1287
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:900
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1865
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2286
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:447
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, int bits_per_raw_sample)
Definition: pixblockdsp.c:87
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
ff_h263_mpeg4_reset_dc
void ff_h263_mpeg4_reset_dc(MPVEncContext *s)
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:234
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:300
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1357
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:241
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:97
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:111
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:220
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:267
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:836
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:245
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2656
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:493
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2649
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:233
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3627
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:232
mpeg4video.h
MBBackup::c
struct MBBackup::@211 c
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1234
AVRational::den
int den
Denominator.
Definition: rational.h:60
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2655
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:822
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:271
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:244
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:85
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:939
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:892
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:287
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:769
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4317
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:559
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1284
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:236
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1300
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3636
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:224
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:171
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1021
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:938
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:529
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:957
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:238
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:253
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:228
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:121
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:466
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: packet.c:612
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2649
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:212
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:875
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4669
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2659
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:309
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:109
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:281
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:63
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:198
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:62
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:200
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2914
MpegEncContext::dc_val
int16_t * dc_val
used for H.263 AIC/MPEG-4 DC prediction and ER
Definition: mpegvideo.h:144
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2655
pixblockdsp.h
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:955
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:906
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:263
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:104
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:711
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1605
intmath.h
MPVEncContext::mpeg_quant
int mpeg_quant
Definition: mpegvideoenc.h:167