FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/emms.h"
40 #include "libavutil/internal.h"
41 #include "libavutil/intmath.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/mem_internal.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/thread.h"
47 #include "avcodec.h"
48 #include "encode.h"
49 #include "idctdsp.h"
50 #include "mpeg12codecs.h"
51 #include "mpeg12data.h"
52 #include "mpeg12enc.h"
53 #include "mpegvideo.h"
54 #include "mpegvideodata.h"
55 #include "mpegvideoenc.h"
56 #include "h261enc.h"
57 #include "h263.h"
58 #include "h263data.h"
59 #include "h263enc.h"
60 #include "mjpegenc_common.h"
61 #include "mathops.h"
62 #include "mpegutils.h"
63 #include "mpegvideo_unquantize.h"
64 #include "mjpegenc.h"
65 #include "speedhqenc.h"
66 #include "msmpeg4enc.h"
67 #include "pixblockdsp.h"
68 #include "qpeldsp.h"
69 #include "faandct.h"
70 #include "aandcttab.h"
71 #include "mpeg4video.h"
72 #include "mpeg4videodata.h"
73 #include "mpeg4videoenc.h"
74 #include "internal.h"
75 #include "bytestream.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include "libavutil/refstruct.h"
79 #include <limits.h>
80 #include "sp5x.h"
81 
82 #define QUANT_BIAS_SHIFT 8
83 
84 #define QMAT_SHIFT_MMX 16
85 #define QMAT_SHIFT 21
86 
87 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
88 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
89 static int sse_mb(MPVEncContext *const s);
90 static void denoise_dct_c(MPVEncContext *const s, int16_t *block);
91 static int dct_quantize_c(MPVEncContext *const s,
92  int16_t *block, int n,
93  int qscale, int *overflow);
94 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
95 
96 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
97 
98 static const AVOption mpv_generic_options[] = {
101  { NULL },
102 };
103 
105  .class_name = "generic mpegvideo encoder",
106  .item_name = av_default_item_name,
107  .option = mpv_generic_options,
108  .version = LIBAVUTIL_VERSION_INT,
109 };
110 
/**
 * Precompute fixed-point quantization tables for every qscale in [qmin, qmax].
 *
 * For each qscale and coefficient i, qmat[qscale][i] is a reciprocal of the
 * effective quantizer step in fixed point (QMAT_SHIFT fractional bits; the
 * ifast DCT variant uses QMAT_SHIFT + 14 because ff_aanscales is folded into
 * the denominator).  qmat16 additionally stores a 16-bit reciprocal and a
 * matching rounding-bias pair (only filled in the generic/ifast-free branch).
 *
 * @param s            encoder context (provides fdct selection, idct
 *                     permutation and q_scale_type)
 * @param qmat         output: 32-bit reciprocal table, indexed [qscale][coef]
 * @param qmat16       output: 16-bit reciprocal/bias table, or unused by the
 *                     slow/faan DCT branches
 * @param quant_matrix input quantization matrix (natural order, values 1..255)
 * @param bias         quantizer rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale to compute
 * @param qmax         last qscale to compute (inclusive)
 * @param intra        1 to skip the DC coefficient in the overflow check
 *                     (intra DC is quantized separately)
 */
void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        int qscale2;

        /* MPEG-2 style non-linear quantizer maps qscale through a table;
         * otherwise the effective step is linear: 2 * qscale. */
        if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else                   qscale2 = qscale << 1;

        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            /* Unscaled DCTs: plain reciprocal of qscale2 * matrix entry. */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 * 1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 * 4194304 >= (1 << 22) / (x) >= 146 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            /* ifast DCT outputs are pre-scaled by ff_aanscales; fold that
             * scale into the reciprocal (hence the extra 14 shift bits). */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
                 * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
                 * 1247 <= x <= 900239760
                 * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
                 * 55107840 >= (1 << 36) / (x) >= 76 */

                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            /* Generic branch: also fill the 16-bit tables used by the
             * SIMD quantizer (reciprocal in [0], rounding bias in [1]). */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 * 1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 * 4194304 >= (1 << 22) / (x) >= 146
                 *
                 * 1 <= x <= 28560
                 * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
                 * 131072 >= (1 << 17) / (x) >= 4 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                /* Clamp the 16-bit reciprocal away from 0 and from 0x8000
                 * so it stays a valid unsigned 16-bit multiplier. */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
            }
        }

        /* Check the worst-case product of a maximal DCT coefficient with the
         * reciprocal; count how many extra shift bits would be needed to
         * avoid 32-bit overflow in the quantizer. */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(s->c.avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
197 
198 static inline void update_qscale(MPVMainEncContext *const m)
199 {
200  MPVEncContext *const s = &m->s;
201 
202  if (s->c.q_scale_type == 1 && 0) {
203  int i;
204  int bestdiff=INT_MAX;
205  int best = 1;
206 
207  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
208  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
209  if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
210  (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
211  continue;
212  if (diff < bestdiff) {
213  bestdiff = diff;
214  best = i;
215  }
216  }
217  s->c.qscale = best;
218  } else {
219  s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
220  (FF_LAMBDA_SHIFT + 7);
221  s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
222  }
223 
224  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
226 }
227 
229 {
230  int i;
231 
232  if (matrix) {
233  put_bits(pb, 1, 1);
234  for (i = 0; i < 64; i++) {
235  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
236  }
237  } else
238  put_bits(pb, 1, 0);
239 }
240 
241 /**
242  * init s->c.cur_pic.qscale_table from s->lambda_table
243  */
244 static void init_qscale_tab(MPVEncContext *const s)
245 {
246  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
247 
248  for (int i = 0; i < s->c.mb_num; i++) {
249  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
250  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
251  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
252  s->c.avctx->qmax);
253  }
254 }
255 
257  const MPVEncContext *const src)
258 {
259 #define COPY(a) dst->a = src->a
260  COPY(c.pict_type);
261  COPY(f_code);
262  COPY(b_code);
263  COPY(c.qscale);
264  COPY(lambda);
265  COPY(lambda2);
266  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
267  COPY(c.progressive_frame); // FIXME don't set in encode_header
268  COPY(c.partitioned_frame); // FIXME don't set in encode_header
269 #undef COPY
270 }
271 
273 {
274  for (int i = -16; i < 16; i++)
275  default_fcode_tab[i + MAX_MV] = 1;
276 }
277 
278 /**
279  * Set the given MPVEncContext to defaults for encoding.
280  */
282 {
283  MPVEncContext *const s = &m->s;
284  static AVOnce init_static_once = AV_ONCE_INIT;
285 
287 
288  s->f_code = 1;
289  s->b_code = 1;
290 
291  if (!m->fcode_tab) {
293  ff_thread_once(&init_static_once, mpv_encode_init_static);
294  }
295  if (!s->c.y_dc_scale_table) {
296  s->c.y_dc_scale_table =
297  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
298  }
299 }
300 
302 {
303  s->dct_quantize = dct_quantize_c;
304  s->denoise_dct = denoise_dct_c;
305 
306 #if ARCH_MIPS
308 #elif ARCH_X86
310 #endif
311 
312  if (s->c.avctx->trellis)
313  s->dct_quantize = dct_quantize_trellis_c;
314 }
315 
317 {
318  MpegEncContext *const s = &s2->c;
319  MPVUnquantDSPContext unquant_dsp_ctx;
320 
321  ff_mpv_unquantize_init(&unquant_dsp_ctx,
322  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
323 
324  if (s2->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
325  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
326  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
327  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
328  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
329  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
330  } else {
331  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
332  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
333  }
334 }
335 
337 {
338  MPVEncContext *const s = &m->s;
339  MECmpContext mecc;
340  me_cmp_func me_cmp[6];
341  int ret;
342 
343  ff_me_cmp_init(&mecc, avctx);
344  ret = ff_me_init(&s->me, avctx, &mecc, 1);
345  if (ret < 0)
346  return ret;
347  ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
348  if (ret < 0)
349  return ret;
350  m->frame_skip_cmp_fn = me_cmp[1];
352  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
353  if (ret < 0)
354  return ret;
355  if (!me_cmp[0] || !me_cmp[4])
356  return AVERROR(EINVAL);
357  s->ildct_cmp[0] = me_cmp[0];
358  s->ildct_cmp[1] = me_cmp[4];
359  }
360 
361  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
362 
363  s->sse_cmp[0] = mecc.sse[0];
364  s->sse_cmp[1] = mecc.sse[1];
365  s->sad_cmp[0] = mecc.sad[0];
366  s->sad_cmp[1] = mecc.sad[1];
367  if (avctx->mb_cmp == FF_CMP_NSSE) {
368  s->n_sse_cmp[0] = mecc.nsse[0];
369  s->n_sse_cmp[1] = mecc.nsse[1];
370  } else {
371  s->n_sse_cmp[0] = mecc.sse[0];
372  s->n_sse_cmp[1] = mecc.sse[1];
373  }
374 
375  return 0;
376 }
377 
378 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
380 {
381  MPVEncContext *const s = &m->s;
382  const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
383  const uint16_t *intra_matrix, *inter_matrix;
384  int ret;
385 
386  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
387  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
388  return AVERROR(ENOMEM);
389 
390  if (s->c.out_format == FMT_MJPEG) {
391  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
392  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
393  // No need to set q_inter_matrix
395  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
396  return 0;
397  } else {
398  s->q_chroma_intra_matrix = s->q_intra_matrix;
399  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
400  }
401  if (!m->intra_only) {
402  s->q_inter_matrix = s->q_intra_matrix + 32;
403  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
404  }
405 
406  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
407  s->mpeg_quant) {
410  } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
411  intra_matrix =
413  } else {
414  /* MPEG-1/2, SpeedHQ */
417  }
418  if (avctx->intra_matrix)
420  if (avctx->inter_matrix)
422 
423  /* init q matrix */
424  for (int i = 0; i < 64; i++) {
425  int j = s->c.idsp.idct_permutation[i];
426 
427  s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
428  s->c.inter_matrix[j] = inter_matrix[i];
429  }
430 
431  /* precompute matrix */
433  if (ret < 0)
434  return ret;
435 
436  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
437  s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
438  31, 1);
439  if (s->q_inter_matrix)
440  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
441  s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
442  31, 0);
443 
444  return 0;
445 }
446 
448 {
449  MPVEncContext *const s = &m->s;
450  // Align the following per-thread buffers to avoid false sharing.
451  enum {
452 #ifndef _MSC_VER
453  /// The number is supposed to match/exceed the cache-line size.
454  ALIGN = FFMAX(128, _Alignof(max_align_t)),
455 #else
456  ALIGN = 128,
457 #endif
458  DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
459  };
460  static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
461  "Need checks for potential overflow.");
462  unsigned nb_slices = s->c.slice_context_count, mv_table_size, mb_array_size;
463  char *dct_error = NULL;
464  int has_b_frames = !!m->max_b_frames, nb_mv_tables = 1 + 5 * has_b_frames;
465  int16_t (*mv_table)[2];
466 
467  if (m->noise_reduction) {
468  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
469  return AVERROR(ENOMEM);
470  dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
471  if (!dct_error)
472  return AVERROR(ENOMEM);
474  dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
475  }
476 
477  /* Allocate MB type table */
478  mb_array_size = s->c.mb_stride * s->c.mb_height;
479  s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
480  if (!s->mb_type)
481  return AVERROR(ENOMEM);
482  if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
483  return AVERROR(ENOMEM);
484 
485  mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
486  if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
487  (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
488  nb_mv_tables += 8 * has_b_frames;
489  if (!ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * (2 + 4 * has_b_frames), mv_table_size))
490  return AVERROR(ENOMEM);
491  }
492 
493  mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
494  if (!mv_table)
495  return AVERROR(ENOMEM);
496  m->mv_table_base = mv_table;
497  mv_table += s->c.mb_stride + 1;
498 
499  for (unsigned i = 0; i < nb_slices; ++i) {
500  MPVEncContext *const s2 = s->c.enc_contexts[i];
501  int16_t (*tmp_mv_table)[2] = mv_table;
502 
503  if (dct_error) {
504  s2->dct_offset = s->dct_offset;
505  s2->dct_error_sum = (void*)dct_error;
506  dct_error += DCT_ERROR_SIZE;
507  }
508 
509  s2->mb_type = s->mb_type;
510  s2->mc_mb_var = s2->mb_type + mb_array_size;
511  s2->mb_var = s2->mc_mb_var + mb_array_size;
512  s2->mb_mean = (uint8_t*)(s2->mb_var + mb_array_size);
513  s2->lambda_table = s->lambda_table;
514 
515  s2->p_mv_table = tmp_mv_table;
516  if (has_b_frames) {
517  s2->b_forw_mv_table = tmp_mv_table += mv_table_size;
518  s2->b_back_mv_table = tmp_mv_table += mv_table_size;
519  s2->b_bidir_forw_mv_table = tmp_mv_table += mv_table_size;
520  s2->b_bidir_back_mv_table = tmp_mv_table += mv_table_size;
521  s2->b_direct_mv_table = tmp_mv_table += mv_table_size;
522  }
523 
524  if (s->p_field_select_table[0]) { // MPEG-4 or INTERLACED_ME above
525  uint8_t *field_select = s->p_field_select_table[0];
527  s2->p_field_select_table[1] = field_select += 2 * mv_table_size;
528 
529  if (has_b_frames) {
530  for (int j = 0; j < 2; j++) {
531  for (int k = 0; k < 2; k++) {
532  for (int l = 0; l < 2; l++)
533  s2->b_field_mv_table[j][k][l] = tmp_mv_table += mv_table_size;
534  s2->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
535  }
536  }
537  }
538  }
539  }
540 
541  return 0;
542 }
543 
544 /* init video encoder */
546 {
547  MPVMainEncContext *const m = avctx->priv_data;
548  MPVEncContext *const s = &m->s;
549  AVCPBProperties *cpb_props;
550  int gcd, ret;
551 
553 
554  switch (avctx->pix_fmt) {
555  case AV_PIX_FMT_YUVJ444P:
556  case AV_PIX_FMT_YUV444P:
557  s->c.chroma_format = CHROMA_444;
558  break;
559  case AV_PIX_FMT_YUVJ422P:
560  case AV_PIX_FMT_YUV422P:
561  s->c.chroma_format = CHROMA_422;
562  break;
563  default:
564  av_unreachable("Already checked via CODEC_PIXFMTS");
565  case AV_PIX_FMT_YUVJ420P:
566  case AV_PIX_FMT_YUV420P:
567  s->c.chroma_format = CHROMA_420;
568  break;
569  }
570 
572 
573  m->bit_rate = avctx->bit_rate;
574  s->c.width = avctx->width;
575  s->c.height = avctx->height;
576  if (avctx->gop_size > 600 &&
579  "keyframe interval too large!, reducing it from %d to %d\n",
580  avctx->gop_size, 600);
581  avctx->gop_size = 600;
582  }
583  m->gop_size = avctx->gop_size;
584  s->c.avctx = avctx;
586  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
587  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
589  } else if (avctx->max_b_frames < 0) {
591  "max b frames must be 0 or positive for mpegvideo based encoders\n");
592  return AVERROR(EINVAL);
593  }
595  s->c.codec_id = avctx->codec->id;
597  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
598  return AVERROR(EINVAL);
599  }
600 
601  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
602  s->rtp_mode = !!s->rtp_payload_size;
603  s->c.intra_dc_precision = avctx->intra_dc_precision;
604 
605  // workaround some differences between how applications specify dc precision
606  if (s->c.intra_dc_precision < 0) {
607  s->c.intra_dc_precision += 8;
608  } else if (s->c.intra_dc_precision >= 8)
609  s->c.intra_dc_precision -= 8;
610 
611  if (s->c.intra_dc_precision < 0) {
613  "intra dc precision must be positive, note some applications use"
614  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
615  return AVERROR(EINVAL);
616  }
617 
618  if (s->c.intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
619  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
620  return AVERROR(EINVAL);
621  }
623 
624  if (m->gop_size <= 1) {
625  m->intra_only = 1;
626  m->gop_size = 12;
627  } else {
628  m->intra_only = 0;
629  }
630 
631  /* Fixed QSCALE */
633 
634  s->adaptive_quant = (avctx->lumi_masking ||
635  avctx->dark_masking ||
638  avctx->p_masking ||
639  m->border_masking ||
640  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
641  !m->fixed_qscale;
642 
643  s->c.loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
644 
646  switch(avctx->codec_id) {
649  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
650  break;
651  case AV_CODEC_ID_MPEG4:
655  if (avctx->rc_max_rate >= 15000000) {
656  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
657  } else if(avctx->rc_max_rate >= 2000000) {
658  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
659  } else if(avctx->rc_max_rate >= 384000) {
660  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
661  } else
662  avctx->rc_buffer_size = 40;
663  avctx->rc_buffer_size *= 16384;
664  break;
665  }
666  if (avctx->rc_buffer_size) {
667  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
668  }
669  }
670 
671  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
672  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
673  return AVERROR(EINVAL);
674  }
675 
678  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
679  }
680 
682  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
683  return AVERROR(EINVAL);
684  }
685 
687  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
688  return AVERROR(EINVAL);
689  }
690 
691  if (avctx->rc_max_rate &&
695  "impossible bitrate constraints, this will fail\n");
696  }
697 
698  if (avctx->rc_buffer_size &&
701  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
702  return AVERROR(EINVAL);
703  }
704 
705  if (!m->fixed_qscale &&
708  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
710  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
711  if (nbt <= INT_MAX) {
712  avctx->bit_rate_tolerance = nbt;
713  } else
714  avctx->bit_rate_tolerance = INT_MAX;
715  }
716 
717  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
718  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
719  s->c.codec_id != AV_CODEC_ID_FLV1) {
720  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
721  return AVERROR(EINVAL);
722  }
723 
724  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
726  "OBMC is only supported with simple mb decision\n");
727  return AVERROR(EINVAL);
728  }
729 
730  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
731  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
732  return AVERROR(EINVAL);
733  }
734 
735  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
736  s->c.codec_id == AV_CODEC_ID_H263 ||
737  s->c.codec_id == AV_CODEC_ID_H263P) &&
738  (avctx->sample_aspect_ratio.num > 255 ||
739  avctx->sample_aspect_ratio.den > 255)) {
741  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
745  }
746 
747  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
748  s->c.codec_id == AV_CODEC_ID_H263P) &&
749  (avctx->width > 2048 ||
750  avctx->height > 1152 )) {
751  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
752  return AVERROR(EINVAL);
753  }
754  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
755  (avctx->width > 65535 ||
756  avctx->height > 65535 )) {
757  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
758  return AVERROR(EINVAL);
759  }
760  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
761  s->c.codec_id == AV_CODEC_ID_H263P ||
762  s->c.codec_id == AV_CODEC_ID_RV20) &&
763  ((avctx->width &3) ||
764  (avctx->height&3) )) {
765  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
766  return AVERROR(EINVAL);
767  }
768 
769  if (s->c.codec_id == AV_CODEC_ID_RV10 &&
770  (avctx->width &15 ||
771  avctx->height&15 )) {
772  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
773  return AVERROR(EINVAL);
774  }
775 
776  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
777  s->c.codec_id == AV_CODEC_ID_WMV2) &&
778  avctx->width & 1) {
779  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
780  return AVERROR(EINVAL);
781  }
782 
784  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
785  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
786  return AVERROR(EINVAL);
787  }
788 
789  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
790  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
791  return AVERROR(EINVAL);
792  }
793 
794  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
796  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
797  return AVERROR(EINVAL);
798  }
799 
800  if (m->scenechange_threshold < 1000000000 &&
803  "closed gop with scene change detection are not supported yet, "
804  "set threshold to 1000000000\n");
805  return AVERROR_PATCHWELCOME;
806  }
807 
809  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
812  "low delay forcing is only available for mpeg2, "
813  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
814  return AVERROR(EINVAL);
815  }
816  if (m->max_b_frames != 0) {
818  "B-frames cannot be used with low delay\n");
819  return AVERROR(EINVAL);
820  }
821  }
822 
823  if (avctx->slices > 1 &&
825  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
826  return AVERROR(EINVAL);
827  }
828 
831  "notice: b_frame_strategy only affects the first pass\n");
832  m->b_frame_strategy = 0;
833  }
834 
836  if (gcd > 1) {
837  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
838  avctx->time_base.den /= gcd;
839  avctx->time_base.num /= gcd;
840  //return -1;
841  }
842 
843  if (s->mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
844  // (a + x * 3 / 8) / x
845  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
846  s->inter_quant_bias = 0;
847  } else {
848  s->intra_quant_bias = 0;
849  // (a - x / 4) / x
850  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
851  }
852 
853  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
854  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
855  return AVERROR(EINVAL);
856  }
857 
858  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
859 
860  switch (avctx->codec->id) {
861 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
863  s->rtp_mode = 1;
864  /* fallthrough */
866  s->c.out_format = FMT_MPEG1;
867  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
868  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
870  break;
871 #endif
872 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
873  case AV_CODEC_ID_MJPEG:
874  case AV_CODEC_ID_AMV:
875  s->c.out_format = FMT_MJPEG;
876  m->intra_only = 1; /* force intra only for jpeg */
877  avctx->delay = 0;
878  s->c.low_delay = 1;
879  break;
880 #endif
881  case AV_CODEC_ID_SPEEDHQ:
882  s->c.out_format = FMT_SPEEDHQ;
883  m->intra_only = 1; /* force intra only for SHQ */
884  avctx->delay = 0;
885  s->c.low_delay = 1;
886  break;
887  case AV_CODEC_ID_H261:
888  s->c.out_format = FMT_H261;
889  avctx->delay = 0;
890  s->c.low_delay = 1;
891  s->rtp_mode = 0; /* Sliced encoding not supported */
892  break;
893  case AV_CODEC_ID_H263:
894  if (!CONFIG_H263_ENCODER)
897  s->c.width, s->c.height) == 8) {
899  "The specified picture size of %dx%d is not valid for "
900  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
901  "352x288, 704x576, and 1408x1152. "
902  "Try H.263+.\n", s->c.width, s->c.height);
903  return AVERROR(EINVAL);
904  }
905  s->c.out_format = FMT_H263;
906  avctx->delay = 0;
907  s->c.low_delay = 1;
908  break;
909  case AV_CODEC_ID_H263P:
910  s->c.out_format = FMT_H263;
911  /* Fx */
912  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
913  s->c.modified_quant = s->c.h263_aic;
914  s->c.loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
915  s->c.unrestricted_mv = s->c.obmc || s->c.loop_filter || s->c.umvplus;
916  s->c.flipflop_rounding = 1;
917 
918  /* /Fx */
919  /* These are just to be sure */
920  avctx->delay = 0;
921  s->c.low_delay = 1;
922  break;
923  case AV_CODEC_ID_FLV1:
924  s->c.out_format = FMT_H263;
925  s->c.h263_flv = 2; /* format = 1; 11-bit codes */
926  s->c.unrestricted_mv = 1;
927  s->rtp_mode = 0; /* don't allow GOB */
928  avctx->delay = 0;
929  s->c.low_delay = 1;
930  break;
931 #if CONFIG_RV10_ENCODER
932  case AV_CODEC_ID_RV10:
934  s->c.out_format = FMT_H263;
935  avctx->delay = 0;
936  s->c.low_delay = 1;
937  break;
938 #endif
939 #if CONFIG_RV20_ENCODER
940  case AV_CODEC_ID_RV20:
942  s->c.out_format = FMT_H263;
943  avctx->delay = 0;
944  s->c.low_delay = 1;
945  s->c.modified_quant = 1;
946  // Set here to force allocation of dc_val;
947  // will be set later on a per-frame basis.
948  s->c.h263_aic = 1;
949  s->c.loop_filter = 1;
950  s->c.unrestricted_mv = 0;
951  break;
952 #endif
953  case AV_CODEC_ID_MPEG4:
954  s->c.out_format = FMT_H263;
955  s->c.h263_pred = 1;
956  s->c.unrestricted_mv = 1;
957  s->c.flipflop_rounding = 1;
958  s->c.low_delay = m->max_b_frames ? 0 : 1;
959  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
960  break;
962  s->c.out_format = FMT_H263;
963  s->c.h263_pred = 1;
964  s->c.unrestricted_mv = 1;
965  s->c.msmpeg4_version = MSMP4_V2;
966  avctx->delay = 0;
967  s->c.low_delay = 1;
968  break;
970  s->c.out_format = FMT_H263;
971  s->c.h263_pred = 1;
972  s->c.unrestricted_mv = 1;
973  s->c.msmpeg4_version = MSMP4_V3;
974  s->c.flipflop_rounding = 1;
975  avctx->delay = 0;
976  s->c.low_delay = 1;
977  break;
978  case AV_CODEC_ID_WMV1:
979  s->c.out_format = FMT_H263;
980  s->c.h263_pred = 1;
981  s->c.unrestricted_mv = 1;
982  s->c.msmpeg4_version = MSMP4_WMV1;
983  s->c.flipflop_rounding = 1;
984  avctx->delay = 0;
985  s->c.low_delay = 1;
986  break;
987  case AV_CODEC_ID_WMV2:
988  s->c.out_format = FMT_H263;
989  s->c.h263_pred = 1;
990  s->c.unrestricted_mv = 1;
991  s->c.msmpeg4_version = MSMP4_WMV2;
992  s->c.flipflop_rounding = 1;
993  avctx->delay = 0;
994  s->c.low_delay = 1;
995  break;
996  default:
997  av_unreachable("List contains all codecs using ff_mpv_encode_init()");
998  }
999 
1000  avctx->has_b_frames = !s->c.low_delay;
1001 
1002  s->c.encoding = 1;
1003 
1004  s->c.progressive_frame =
1005  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
1007  s->c.alternate_scan);
1008 
1011  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1012  (1 << AV_PICTURE_TYPE_P) |
1013  (1 << AV_PICTURE_TYPE_B);
1014  } else if (!m->intra_only) {
1015  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1016  (1 << AV_PICTURE_TYPE_P);
1017  } else {
1018  s->frame_reconstruction_bitfield = 0;
1019  }
1020 
1021  if (m->lmin > m->lmax) {
1022  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1023  m->lmin = m->lmax;
1024  }
1025 
1026  /* ff_mpv_common_init() will copy (memdup) the contents of the main slice
1027  * to the slice contexts, so we initialize various fields of it
1028  * before calling ff_mpv_common_init(). */
1029  s->parent = m;
1030  ff_mpv_idct_init(&s->c);
1032  ff_fdctdsp_init(&s->fdsp, avctx);
1033  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1034  ff_pixblockdsp_init(&s->pdsp, 8);
1035  ret = me_cmp_init(m, avctx);
1036  if (ret < 0)
1037  return ret;
1038 
1039  if (!(avctx->stats_out = av_mallocz(256)) ||
1040  !(s->new_pic = av_frame_alloc()) ||
1041  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1042  return AVERROR(ENOMEM);
1043 
1044  ret = init_matrices(m, avctx);
1045  if (ret < 0)
1046  return ret;
1047 
1049 
1050  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1052 #if CONFIG_MSMPEG4ENC
1053  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1055 #endif
1056  }
1057 
1058  s->c.slice_ctx_size = sizeof(*s);
1059  ret = ff_mpv_common_init(&s->c);
1060  if (ret < 0)
1061  return ret;
1062 
1063  if (s->c.slice_context_count > 1) {
1064  for (int i = 0; i < s->c.slice_context_count; ++i) {
1065  s->c.enc_contexts[i]->rtp_mode = 1;
1066 
1068  s->c.enc_contexts[i]->c.h263_slice_structured = 1;
1069  }
1070  }
1071 
1072  ret = init_buffers(m, avctx);
1073  if (ret < 0)
1074  return ret;
1075 
1077  if (ret < 0)
1078  return ret;
1079 
1080  if (m->b_frame_strategy == 2) {
1081  for (int i = 0; i < m->max_b_frames + 2; i++) {
1082  m->tmp_frames[i] = av_frame_alloc();
1083  if (!m->tmp_frames[i])
1084  return AVERROR(ENOMEM);
1085 
1087  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1088  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1089 
1090  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1091  if (ret < 0)
1092  return ret;
1093  }
1094  }
1095 
1096  cpb_props = ff_encode_add_cpb_side_data(avctx);
1097  if (!cpb_props)
1098  return AVERROR(ENOMEM);
1099  cpb_props->max_bitrate = avctx->rc_max_rate;
1100  cpb_props->min_bitrate = avctx->rc_min_rate;
1101  cpb_props->avg_bitrate = avctx->bit_rate;
1102  cpb_props->buffer_size = avctx->rc_buffer_size;
1103 
1104  return 0;
1105 }
1106 
1108 {
1109  MPVMainEncContext *const m = avctx->priv_data;
1110  MPVEncContext *const s = &m->s;
1111 
1113 
1114  ff_mpv_common_end(&s->c);
1115  av_refstruct_pool_uninit(&s->c.picture_pool);
1116 
1117  for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
1120  }
1121  for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
1122  av_frame_free(&m->tmp_frames[i]);
1123 
1124  av_frame_free(&s->new_pic);
1125 
1127 
1128  av_freep(&m->mv_table_base);
1129  av_freep(&s->p_field_select_table[0]);
1131 
1132  av_freep(&s->mb_type);
1133  av_freep(&s->lambda_table);
1134 
1135  av_freep(&s->q_intra_matrix);
1136  av_freep(&s->q_intra_matrix16);
1137  av_freep(&s->dct_offset);
1138 
1139  return 0;
1140 }
1141 
1142 /* put block[] to dest[] */
1143 static inline void put_dct(MPVEncContext *const s,
1144  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1145 {
1146  s->c.dct_unquantize_intra(&s->c, block, i, qscale);
1147  s->c.idsp.idct_put(dest, line_size, block);
1148 }
1149 
1150 static inline void add_dequant_dct(MPVEncContext *const s,
1151  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1152 {
1153  if (s->c.block_last_index[i] >= 0) {
1154  s->c.dct_unquantize_inter(&s->c, block, i, qscale);
1155 
1156  s->c.idsp.idct_add(dest, line_size, block);
1157  }
1158 }
1159 
1160 /**
1161  * Performs dequantization and IDCT (if necessary)
1162  */
1163 static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
1164 {
1165  if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
1166  /* print DCT coefficients */
1167  av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
1168  for (int i = 0; i < 6; i++) {
1169  for (int j = 0; j < 64; j++) {
1170  av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
1171  block[i][s->c.idsp.idct_permutation[j]]);
1172  }
1173  av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
1174  }
1175  }
1176 
1177  if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
1178  uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
1179  int dct_linesize, dct_offset;
1180  const int linesize = s->c.cur_pic.linesize[0];
1181  const int uvlinesize = s->c.cur_pic.linesize[1];
1182  const int block_size = 8;
1183 
1184  dct_linesize = linesize << s->c.interlaced_dct;
1185  dct_offset = s->c.interlaced_dct ? linesize : linesize * block_size;
1186 
1187  if (!s->c.mb_intra) {
1188  /* No MC, as that was already done otherwise */
1189  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1190  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1191  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1192  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1193 
1194  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1195  if (s->c.chroma_y_shift) {
1196  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1197  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1198  } else {
1199  dct_linesize >>= 1;
1200  dct_offset >>= 1;
1201  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1202  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1203  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1204  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1205  }
1206  }
1207  } else {
1208  /* dct only in intra block */
1209  put_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1210  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1211  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1212  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1213 
1214  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1215  if (s->c.chroma_y_shift) {
1216  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1217  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1218  } else {
1219  dct_offset >>= 1;
1220  dct_linesize >>= 1;
1221  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1222  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1223  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1224  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1225  }
1226  }
1227  }
1228  }
1229 }
1230 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value (a flatness measure: small when the block is close to uniform).
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int acc = 0;

    for (int y = 0; y < 16; y++) {
        for (int x = 0; x < 16; x++) {
            acc += FFABS(src[x + y * stride] - ref);
        }
    }

    return acc;
}
1244 
1245 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1246  const uint8_t *ref, int stride)
1247 {
1248  int x, y, w, h;
1249  int acc = 0;
1250 
1251  w = s->c.width & ~15;
1252  h = s->c.height & ~15;
1253 
1254  for (y = 0; y < h; y += 16) {
1255  for (x = 0; x < w; x += 16) {
1256  int offset = x + y * stride;
1257  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1258  stride, 16);
1259  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1260  int sae = get_sae(src + offset, mean, stride);
1261 
1262  acc += sae + 500 < sad;
1263  }
1264  }
1265  return acc;
1266 }
1267 
1268 /**
1269  * Allocates new buffers for an AVFrame and copies the properties
1270  * from another AVFrame.
1271  */
1272 static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
1273 {
1274  AVCodecContext *avctx = s->c.avctx;
1275  int ret;
1276 
1277  f->width = avctx->width + 2 * EDGE_WIDTH;
1278  f->height = avctx->height + 2 * EDGE_WIDTH;
1279 
1281  if (ret < 0)
1282  return ret;
1283 
1284  ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
1285  if (ret < 0)
1286  return ret;
1287 
1288  for (int i = 0; f->data[i]; i++) {
1289  int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
1290  f->linesize[i] +
1291  (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
1292  f->data[i] += offset;
1293  }
1294  f->width = avctx->width;
1295  f->height = avctx->height;
1296 
1297  ret = av_frame_copy_props(f, props_frame);
1298  if (ret < 0)
1299  return ret;
1300 
1301  return 0;
1302 }
1303 
1304 static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
1305 {
1306  MPVEncContext *const s = &m->s;
1307  MPVPicture *pic = NULL;
1308  int64_t pts;
1309  int display_picture_number = 0, ret;
1310  int encoding_delay = m->max_b_frames ? m->max_b_frames
1311  : (s->c.low_delay ? 0 : 1);
1312  int flush_offset = 1;
1313  int direct = 1;
1314 
1315  av_assert1(!m->input_picture[0]);
1316 
1317  if (pic_arg) {
1318  pts = pic_arg->pts;
1319  display_picture_number = m->input_picture_number++;
1320 
1321  if (pts != AV_NOPTS_VALUE) {
1322  if (m->user_specified_pts != AV_NOPTS_VALUE) {
1323  int64_t last = m->user_specified_pts;
1324 
1325  if (pts <= last) {
1326  av_log(s->c.avctx, AV_LOG_ERROR,
1327  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1328  pts, last);
1329  return AVERROR(EINVAL);
1330  }
1331 
1332  if (!s->c.low_delay && display_picture_number == 1)
1333  m->dts_delta = pts - last;
1334  }
1335  m->user_specified_pts = pts;
1336  } else {
1337  if (m->user_specified_pts != AV_NOPTS_VALUE) {
1338  m->user_specified_pts =
1339  pts = m->user_specified_pts + 1;
1340  av_log(s->c.avctx, AV_LOG_INFO,
1341  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1342  pts);
1343  } else {
1344  pts = display_picture_number;
1345  }
1346  }
1347 
1348  if (pic_arg->linesize[0] != s->c.linesize ||
1349  pic_arg->linesize[1] != s->c.uvlinesize ||
1350  pic_arg->linesize[2] != s->c.uvlinesize)
1351  direct = 0;
1352  if ((s->c.width & 15) || (s->c.height & 15))
1353  direct = 0;
1354  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1355  direct = 0;
1356  if (s->c.linesize & (STRIDE_ALIGN-1))
1357  direct = 0;
1358 
1359  ff_dlog(s->c.avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1360  pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);
1361 
1362  pic = av_refstruct_pool_get(s->c.picture_pool);
1363  if (!pic)
1364  return AVERROR(ENOMEM);
1365 
1366  if (direct) {
1367  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1368  goto fail;
1369  pic->shared = 1;
1370  } else {
1371  ret = prepare_picture(s, pic->f, pic_arg);
1372  if (ret < 0)
1373  goto fail;
1374 
1375  for (int i = 0; i < 3; i++) {
1376  ptrdiff_t src_stride = pic_arg->linesize[i];
1377  ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
1378  int h_shift = i ? s->c.chroma_x_shift : 0;
1379  int v_shift = i ? s->c.chroma_y_shift : 0;
1380  int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
1381  int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
1382  const uint8_t *src = pic_arg->data[i];
1383  uint8_t *dst = pic->f->data[i];
1384  int vpad = 16;
1385 
1386  if ( s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
1387  && !s->c.progressive_sequence
1388  && FFALIGN(s->c.height, 32) - s->c.height > 16)
1389  vpad = 32;
1390 
1391  if (!s->c.avctx->rc_buffer_size)
1392  dst += INPLACE_OFFSET;
1393 
1394  if (src_stride == dst_stride)
1395  memcpy(dst, src, src_stride * h - src_stride + w);
1396  else {
1397  int h2 = h;
1398  uint8_t *dst2 = dst;
1399  while (h2--) {
1400  memcpy(dst2, src, w);
1401  dst2 += dst_stride;
1402  src += src_stride;
1403  }
1404  }
1405  if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
1406  s->mpvencdsp.draw_edges(dst, dst_stride,
1407  w, h,
1408  16 >> h_shift,
1409  vpad >> v_shift,
1410  EDGE_BOTTOM);
1411  }
1412  }
1413  emms_c();
1414  }
1415 
1416  pic->display_picture_number = display_picture_number;
1417  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1418  } else if (!m->reordered_input_picture[1]) {
1419  /* Flushing: When the above check is true, the encoder is about to run
1420  * out of frames to encode. Check if there are input_pictures left;
1421  * if so, ensure m->input_picture[0] contains the first picture.
1422  * A flush_offset != 1 will only happen if we did not receive enough
1423  * input frames. */
1424  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1425  if (m->input_picture[flush_offset])
1426  break;
1427 
1428  encoding_delay -= flush_offset - 1;
1429  }
1430 
1431  /* shift buffer entries */
1432  for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
1433  m->input_picture[i - flush_offset] = m->input_picture[i];
1434  for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
1435  m->input_picture[i] = NULL;
1436 
1437  m->input_picture[encoding_delay] = pic;
1438 
1439  return 0;
1440 fail:
1441  av_refstruct_unref(&pic);
1442  return ret;
1443 }
1444 
1445 static int skip_check(MPVMainEncContext *const m,
1446  const MPVPicture *p, const MPVPicture *ref)
1447 {
1448  MPVEncContext *const s = &m->s;
1449  int score = 0;
1450  int64_t score64 = 0;
1451 
1452  for (int plane = 0; plane < 3; plane++) {
1453  const int stride = p->f->linesize[plane];
1454  const int bw = plane ? 1 : 2;
1455  for (int y = 0; y < s->c.mb_height * bw; y++) {
1456  for (int x = 0; x < s->c.mb_width * bw; x++) {
1457  int off = p->shared ? 0 : 16;
1458  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1459  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1460  int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1461 
1462  switch (FFABS(m->frame_skip_exp)) {
1463  case 0: score = FFMAX(score, v); break;
1464  case 1: score += FFABS(v); break;
1465  case 2: score64 += v * (int64_t)v; break;
1466  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1467  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1468  }
1469  }
1470  }
1471  }
1472  emms_c();
1473 
1474  if (score)
1475  score64 = score;
1476  if (m->frame_skip_exp < 0)
1477  score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
1478  -1.0/m->frame_skip_exp);
1479 
1480  if (score64 < m->frame_skip_threshold)
1481  return 1;
1482  if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
1483  return 1;
1484  return 0;
1485 }
1486 
1488 {
1489  int ret;
1490  int size = 0;
1491 
1493  if (ret < 0)
1494  return ret;
1495 
1496  do {
1498  if (ret >= 0) {
1499  size += pkt->size;
1501  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1502  return ret;
1503  } while (ret >= 0);
1504 
1505  return size;
1506 }
1507 
1509 {
1510  MPVEncContext *const s = &m->s;
1511  AVPacket *pkt;
1512  const int scale = m->brd_scale;
1513  int width = s->c.width >> scale;
1514  int height = s->c.height >> scale;
1515  int out_size, p_lambda, b_lambda, lambda2;
1516  int64_t best_rd = INT64_MAX;
1517  int best_b_count = -1;
1518  int ret = 0;
1519 
1520  av_assert0(scale >= 0 && scale <= 3);
1521 
1522  pkt = av_packet_alloc();
1523  if (!pkt)
1524  return AVERROR(ENOMEM);
1525 
1526  //emms_c();
1527  p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
1528  //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
1529  b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
1530  if (!b_lambda) // FIXME we should do this somewhere else
1531  b_lambda = p_lambda;
1532  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1534 
1535  for (int i = 0; i < m->max_b_frames + 2; i++) {
1536  const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
1537  s->c.next_pic.ptr;
1538 
1539  if (pre_input_ptr) {
1540  const uint8_t *data[4];
1541  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1542 
1543  if (!pre_input_ptr->shared && i) {
1544  data[0] += INPLACE_OFFSET;
1545  data[1] += INPLACE_OFFSET;
1546  data[2] += INPLACE_OFFSET;
1547  }
1548 
1549  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
1550  m->tmp_frames[i]->linesize[0],
1551  data[0],
1552  pre_input_ptr->f->linesize[0],
1553  width, height);
1554  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
1555  m->tmp_frames[i]->linesize[1],
1556  data[1],
1557  pre_input_ptr->f->linesize[1],
1558  width >> 1, height >> 1);
1559  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
1560  m->tmp_frames[i]->linesize[2],
1561  data[2],
1562  pre_input_ptr->f->linesize[2],
1563  width >> 1, height >> 1);
1564  }
1565  }
1566 
1567  for (int j = 0; j < m->max_b_frames + 1; j++) {
1568  AVCodecContext *c;
1569  int64_t rd = 0;
1570 
1571  if (!m->input_picture[j])
1572  break;
1573 
1575  if (!c) {
1576  ret = AVERROR(ENOMEM);
1577  goto fail;
1578  }
1579 
1580  c->width = width;
1581  c->height = height;
1583  c->flags |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
1584  c->mb_decision = s->c.avctx->mb_decision;
1585  c->me_cmp = s->c.avctx->me_cmp;
1586  c->mb_cmp = s->c.avctx->mb_cmp;
1587  c->me_sub_cmp = s->c.avctx->me_sub_cmp;
1588  c->pix_fmt = AV_PIX_FMT_YUV420P;
1589  c->time_base = s->c.avctx->time_base;
1590  c->max_b_frames = m->max_b_frames;
1591 
1592  ret = avcodec_open2(c, s->c.avctx->codec, NULL);
1593  if (ret < 0)
1594  goto fail;
1595 
1596 
1598  m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1599 
1600  out_size = encode_frame(c, m->tmp_frames[0], pkt);
1601  if (out_size < 0) {
1602  ret = out_size;
1603  goto fail;
1604  }
1605 
1606  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1607 
1608  for (int i = 0; i < m->max_b_frames + 1; i++) {
1609  int is_p = i % (j + 1) == j || i == m->max_b_frames;
1610 
1611  m->tmp_frames[i + 1]->pict_type = is_p ?
1613  m->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1614 
1615  out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
1616  if (out_size < 0) {
1617  ret = out_size;
1618  goto fail;
1619  }
1620 
1621  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1622  }
1623 
1624  /* get the delayed frames */
1626  if (out_size < 0) {
1627  ret = out_size;
1628  goto fail;
1629  }
1630  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1631 
1632  rd += c->error[0] + c->error[1] + c->error[2];
1633 
1634  if (rd < best_rd) {
1635  best_rd = rd;
1636  best_b_count = j;
1637  }
1638 
1639 fail:
1642  if (ret < 0) {
1643  best_b_count = ret;
1644  break;
1645  }
1646  }
1647 
1648  av_packet_free(&pkt);
1649 
1650  return best_b_count;
1651 }
1652 
1653 /**
1654  * Determines whether an input picture is discarded or not
1655  * and if not determines the length of the next chain of B frames
1656  * and moves these pictures (including the P frame) into
1657  * reordered_input_picture.
1658  * input_picture[0] is always NULL when exiting this function, even on error;
1659  * reordered_input_picture[0] is always NULL when exiting this function on error.
1660  */
1662 {
1663  MPVEncContext *const s = &m->s;
1664 
1665  /* Either nothing to do or can't do anything */
1666  if (m->reordered_input_picture[0] || !m->input_picture[0])
1667  return 0;
1668 
1669  /* set next picture type & ordering */
1670  if (m->frame_skip_threshold || m->frame_skip_factor) {
1671  if (m->picture_in_gop_number < m->gop_size &&
1672  s->c.next_pic.ptr &&
1673  skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
1674  // FIXME check that the gop check above is +-1 correct
1676 
1677  ff_vbv_update(m, 0);
1678 
1679  return 0;
1680  }
1681  }
1682 
1683  if (/* m->picture_in_gop_number >= m->gop_size || */
1684  !s->c.next_pic.ptr || m->intra_only) {
1685  m->reordered_input_picture[0] = m->input_picture[0];
1686  m->input_picture[0] = NULL;
1689  m->coded_picture_number++;
1690  } else {
1691  int b_frames = 0;
1692 
1693  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
1694  for (int i = 0; i < m->max_b_frames + 1; i++) {
1695  int pict_num = m->input_picture[0]->display_picture_number + i;
1696 
1697  if (pict_num >= m->rc_context.num_entries)
1698  break;
1699  if (!m->input_picture[i]) {
1700  m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1701  break;
1702  }
1703 
1704  m->input_picture[i]->f->pict_type =
1705  m->rc_context.entry[pict_num].new_pict_type;
1706  }
1707  }
1708 
1709  if (m->b_frame_strategy == 0) {
1710  b_frames = m->max_b_frames;
1711  while (b_frames && !m->input_picture[b_frames])
1712  b_frames--;
1713  } else if (m->b_frame_strategy == 1) {
1714  for (int i = 1; i < m->max_b_frames + 1; i++) {
1715  if (m->input_picture[i] &&
1716  m->input_picture[i]->b_frame_score == 0) {
1719  m->input_picture[i ]->f->data[0],
1720  m->input_picture[i - 1]->f->data[0],
1721  s->c.linesize) + 1;
1722  }
1723  }
1724  for (int i = 0;; i++) {
1725  if (i >= m->max_b_frames + 1 ||
1726  !m->input_picture[i] ||
1727  m->input_picture[i]->b_frame_score - 1 >
1728  s->c.mb_num / m->b_sensitivity) {
1729  b_frames = FFMAX(0, i - 1);
1730  break;
1731  }
1732  }
1733 
1734  /* reset scores */
1735  for (int i = 0; i < b_frames + 1; i++)
1736  m->input_picture[i]->b_frame_score = 0;
1737  } else if (m->b_frame_strategy == 2) {
1738  b_frames = estimate_best_b_count(m);
1739  if (b_frames < 0) {
1741  return b_frames;
1742  }
1743  }
1744 
1745  emms_c();
1746 
1747  for (int i = b_frames - 1; i >= 0; i--) {
1748  int type = m->input_picture[i]->f->pict_type;
1749  if (type && type != AV_PICTURE_TYPE_B)
1750  b_frames = i;
1751  }
1752  if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1753  b_frames == m->max_b_frames) {
1754  av_log(s->c.avctx, AV_LOG_ERROR,
1755  "warning, too many B-frames in a row\n");
1756  }
1757 
1758  if (m->picture_in_gop_number + b_frames >= m->gop_size) {
1759  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1760  m->gop_size > m->picture_in_gop_number) {
1761  b_frames = m->gop_size - m->picture_in_gop_number - 1;
1762  } else {
1763  if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1764  b_frames = 0;
1765  m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1766  }
1767  }
1768 
1769  if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1770  m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1771  b_frames--;
1772 
1773  m->reordered_input_picture[0] = m->input_picture[b_frames];
1774  m->input_picture[b_frames] = NULL;
1778  m->coded_picture_number++;
1779  for (int i = 0; i < b_frames; i++) {
1780  m->reordered_input_picture[i + 1] = m->input_picture[i];
1781  m->input_picture[i] = NULL;
1782  m->reordered_input_picture[i + 1]->f->pict_type =
1785  m->coded_picture_number++;
1786  }
1787  }
1788 
1789  return 0;
1790 }
1791 
1793 {
1794  MPVEncContext *const s = &m->s;
1795  int ret;
1796 
1798 
1799  for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)
1802 
1804  av_assert1(!m->input_picture[0]);
1805  if (ret < 0)
1806  return ret;
1807 
1808  av_frame_unref(s->new_pic);
1809 
1810  if (m->reordered_input_picture[0]) {
1813 
1814  if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
1815  // input is a shared pix, so we can't modify it -> allocate a new
1816  // one & ensure that the shared one is reuseable
1817  av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);
1818 
1819  ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
1820  if (ret < 0)
1821  goto fail;
1822  } else {
1823  // input is not a shared pix -> reuse buffer for current_pix
1824  ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
1825  if (ret < 0)
1826  goto fail;
1827  for (int i = 0; i < MPV_MAX_PLANES; i++) {
1828  if (s->new_pic->data[i])
1829  s->new_pic->data[i] += INPLACE_OFFSET;
1830  }
1831  }
1832  s->c.cur_pic.ptr = m->reordered_input_picture[0];
1833  m->reordered_input_picture[0] = NULL;
1834  av_assert1(s->c.mb_width == s->c.buffer_pools.alloc_mb_width);
1835  av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
1836  av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
1837  ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
1838  &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
1839  if (ret < 0) {
1840  ff_mpv_unref_picture(&s->c.cur_pic);
1841  return ret;
1842  }
1843  s->c.picture_number = s->c.cur_pic.ptr->display_picture_number;
1844 
1845  }
1846  return 0;
1847 fail:
1849  return ret;
1850 }
1851 
1852 static void frame_end(MPVMainEncContext *const m)
1853 {
1854  MPVEncContext *const s = &m->s;
1855 
1856  if (s->c.unrestricted_mv &&
1857  s->c.cur_pic.reference &&
1858  !m->intra_only) {
1859  int hshift = s->c.chroma_x_shift;
1860  int vshift = s->c.chroma_y_shift;
1861  s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
1862  s->c.cur_pic.linesize[0],
1863  s->c.h_edge_pos, s->c.v_edge_pos,
1865  EDGE_TOP | EDGE_BOTTOM);
1866  s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
1867  s->c.cur_pic.linesize[1],
1868  s->c.h_edge_pos >> hshift,
1869  s->c.v_edge_pos >> vshift,
1870  EDGE_WIDTH >> hshift,
1871  EDGE_WIDTH >> vshift,
1872  EDGE_TOP | EDGE_BOTTOM);
1873  s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
1874  s->c.cur_pic.linesize[2],
1875  s->c.h_edge_pos >> hshift,
1876  s->c.v_edge_pos >> vshift,
1877  EDGE_WIDTH >> hshift,
1878  EDGE_WIDTH >> vshift,
1879  EDGE_TOP | EDGE_BOTTOM);
1880  }
1881 
1882  emms_c();
1883 
1884  m->last_pict_type = s->c.pict_type;
1885  m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
1886  if (s->c.pict_type != AV_PICTURE_TYPE_B)
1887  m->last_non_b_pict_type = s->c.pict_type;
1888 }
1889 
1891 {
1892  MPVEncContext *const s = &m->s;
1893  int intra, i;
1894 
1895  for (intra = 0; intra < 2; intra++) {
1896  if (s->dct_count[intra] > (1 << 16)) {
1897  for (i = 0; i < 64; i++) {
1898  s->dct_error_sum[intra][i] >>= 1;
1899  }
1900  s->dct_count[intra] >>= 1;
1901  }
1902 
1903  for (i = 0; i < 64; i++) {
1904  s->dct_offset[intra][i] = (m->noise_reduction *
1905  s->dct_count[intra] +
1906  s->dct_error_sum[intra][i] / 2) /
1907  (s->dct_error_sum[intra][i] + 1);
1908  }
1909  }
1910 }
1911 
1912 static void frame_start(MPVMainEncContext *const m)
1913 {
1914  MPVEncContext *const s = &m->s;
1915 
1916  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
1917 
1918  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
1919  ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
1920  ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
1921  }
1922 
1923  av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
1924  if (s->dct_error_sum) {
1926  }
1927 }
1928 
1930  const AVFrame *pic_arg, int *got_packet)
1931 {
1932  MPVMainEncContext *const m = avctx->priv_data;
1933  MPVEncContext *const s = &m->s;
1934  int stuffing_count, ret;
1935  int context_count = s->c.slice_context_count;
1936 
1937  ff_mpv_unref_picture(&s->c.cur_pic);
1938 
1939  m->vbv_ignore_qmax = 0;
1940 
1941  m->picture_in_gop_number++;
1942 
1943  ret = load_input_picture(m, pic_arg);
1944  if (ret < 0)
1945  return ret;
1946 
1948  if (ret < 0)
1949  return ret;
1950 
1951  /* output? */
1952  if (s->new_pic->data[0]) {
1953  int growing_buffer = context_count == 1 && !s->c.data_partitioning;
1954  size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
1955  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1956  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1957  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1958  if (ret < 0)
1959  return ret;
1960  }
1961  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1962  return ret;
1964  if (s->mb_info) {
1965  s->mb_info_ptr = av_packet_new_side_data(pkt,
1967  s->c.mb_width*s->c.mb_height*12);
1968  if (!s->mb_info_ptr)
1969  return AVERROR(ENOMEM);
1970  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1971  }
1972 
1973  s->c.pict_type = s->new_pic->pict_type;
1974  //emms_c();
1975  frame_start(m);
1976 vbv_retry:
1977  ret = encode_picture(m, pkt);
1978  if (growing_buffer) {
1979  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1980  pkt->data = s->pb.buf;
1982  }
1983  if (ret < 0)
1984  return -1;
1985 
1986  frame_end(m);
1987 
1988  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)
1990 
1991  if (avctx->rc_buffer_size) {
1992  RateControlContext *rcc = &m->rc_context;
1993  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1994  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1995  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1996 
1997  if (put_bits_count(&s->pb) > max_size &&
1998  s->lambda < m->lmax) {
1999  m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
2000  (s->c.qscale + 1) / s->c.qscale);
2001  if (s->adaptive_quant) {
2002  for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
2003  s->lambda_table[i] =
2004  FFMAX(s->lambda_table[i] + min_step,
2005  s->lambda_table[i] * (s->c.qscale + 1) /
2006  s->c.qscale);
2007  }
2008  s->c.mb_skipped = 0; // done in frame_start()
2009  // done in encode_picture() so we must undo it
2010  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
2011  s->c.no_rounding ^= s->c.flipflop_rounding;
2012  }
2013  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
2014  s->c.time_base = s->c.last_time_base;
2015  s->c.last_non_b_time = s->c.time - s->c.pp_time;
2016  }
2017  m->vbv_ignore_qmax = 1;
2018  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2019  goto vbv_retry;
2020  }
2021 
2023  }
2024 
2027 
2028  for (int i = 0; i < MPV_MAX_PLANES; i++)
2029  avctx->error[i] += s->encoding_error[i];
2030  ff_side_data_set_encoder_stats(pkt, s->c.cur_pic.ptr->f->quality,
2031  s->encoding_error,
2033  s->c.pict_type);
2034 
2036  assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
2037  s->misc_bits + s->i_tex_bits +
2038  s->p_tex_bits);
2039  flush_put_bits(&s->pb);
2040  m->frame_bits = put_bits_count(&s->pb);
2041 
2042  stuffing_count = ff_vbv_update(m, m->frame_bits);
2043  m->stuffing_bits = 8*stuffing_count;
2044  if (stuffing_count) {
2045  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
2046  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2047  return -1;
2048  }
2049 
2050  switch (s->c.codec_id) {
2053  while (stuffing_count--) {
2054  put_bits(&s->pb, 8, 0);
2055  }
2056  break;
2057  case AV_CODEC_ID_MPEG4:
2058  put_bits(&s->pb, 16, 0);
2059  put_bits(&s->pb, 16, 0x1C3);
2060  stuffing_count -= 4;
2061  while (stuffing_count--) {
2062  put_bits(&s->pb, 8, 0xFF);
2063  }
2064  break;
2065  default:
2066  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2067  m->stuffing_bits = 0;
2068  }
2069  flush_put_bits(&s->pb);
2070  m->frame_bits = put_bits_count(&s->pb);
2071  }
2072 
2073  /* update MPEG-1/2 vbv_delay for CBR */
2074  if (avctx->rc_max_rate &&
2076  s->c.out_format == FMT_MPEG1 &&
2077  90000LL * (avctx->rc_buffer_size - 1) <=
2078  avctx->rc_max_rate * 0xFFFFLL) {
2079  AVCPBProperties *props;
2080  size_t props_size;
2081 
2082  int vbv_delay, min_delay;
2083  double inbits = avctx->rc_max_rate *
2085  int minbits = m->frame_bits - 8 *
2086  (m->vbv_delay_pos - 1);
2087  double bits = m->rc_context.buffer_index + minbits - inbits;
2088  uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;
2089 
2090  if (bits < 0)
2092  "Internal error, negative bits\n");
2093 
2094  av_assert1(s->c.repeat_first_field == 0);
2095 
2096  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2097  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2098  avctx->rc_max_rate;
2099 
2100  vbv_delay = FFMAX(vbv_delay, min_delay);
2101 
2102  av_assert0(vbv_delay < 0xFFFF);
2103 
2104  vbv_delay_ptr[0] &= 0xF8;
2105  vbv_delay_ptr[0] |= vbv_delay >> 13;
2106  vbv_delay_ptr[1] = vbv_delay >> 5;
2107  vbv_delay_ptr[2] &= 0x07;
2108  vbv_delay_ptr[2] |= vbv_delay << 3;
2109 
2110  props = av_cpb_properties_alloc(&props_size);
2111  if (!props)
2112  return AVERROR(ENOMEM);
2113  props->vbv_delay = vbv_delay * 300;
2114 
2116  (uint8_t*)props, props_size);
2117  if (ret < 0) {
2118  av_freep(&props);
2119  return ret;
2120  }
2121  }
2122  m->total_bits += m->frame_bits;
2123 
2124  pkt->pts = s->c.cur_pic.ptr->f->pts;
2125  pkt->duration = s->c.cur_pic.ptr->f->duration;
2126  if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
2127  if (!s->c.cur_pic.ptr->coded_picture_number)
2128  pkt->dts = pkt->pts - m->dts_delta;
2129  else
2130  pkt->dts = m->reordered_pts;
2131  m->reordered_pts = pkt->pts;
2132  } else
2133  pkt->dts = pkt->pts;
2134 
2135  // the no-delay case is handled in generic code
2137  ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
2138  if (ret < 0)
2139  return ret;
2140  }
2141 
2142  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
2144  if (s->mb_info)
2146  } else {
2147  m->frame_bits = 0;
2148  }
2149 
2150  ff_mpv_unref_picture(&s->c.cur_pic);
2151 
2152  av_assert1((m->frame_bits & 7) == 0);
2153 
2154  pkt->size = m->frame_bits / 8;
2155  *got_packet = !!pkt->size;
2156  return 0;
2157 }
2158 
                                         int n, int threshold)
{
    /* Cost table indexed by the zero-run length preceding a +-1 coefficient:
     * coefficients that follow only a short run of zeros score higher (they
     * are "denser" and more likely worth coding). Runs >= 24 score 0. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->c.block[n];
    const int last_index = s->c.block_last_index[n];
    int skip_dc;

    /* A negative threshold means the DC coefficient may be eliminated too;
     * otherwise DC (index 0) is always preserved. */
    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    /* Accumulate a score over all +-1 coefficients; any coefficient with
     * |level| > 1 makes the block ineligible for elimination. */
    for (i = 0; i <= last_index; i++) {
        const int j = s->c.intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* Block judged not worth coding: zero every coefficient we may touch. */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->c.intra_scantable.permutated[i];
        block[j] = 0;
    }
    /* Keep last_index consistent with the (possibly surviving) DC value. */
    if (block[0])
        s->c.block_last_index[n] = 0;
    else
        s->c.block_last_index[n] = -1;
}
2214 
2215 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2216  int last_index)
2217 {
2218  int i;
2219  const int maxlevel = s->max_qcoeff;
2220  const int minlevel = s->min_qcoeff;
2221  int overflow = 0;
2222 
2223  if (s->c.mb_intra) {
2224  i = 1; // skip clipping of intra dc
2225  } else
2226  i = 0;
2227 
2228  for (; i <= last_index; i++) {
2229  const int j = s->c.intra_scantable.permutated[i];
2230  int level = block[j];
2231 
2232  if (level > maxlevel) {
2233  level = maxlevel;
2234  overflow++;
2235  } else if (level < minlevel) {
2236  level = minlevel;
2237  overflow++;
2238  }
2239 
2240  block[j] = level;
2241  }
2242 
2243  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2244  av_log(s->c.avctx, AV_LOG_INFO,
2245  "warning, clipping %d dct coefficients to %d..%d\n",
2246  overflow, minlevel, maxlevel);
2247 }
2248 
/**
 * Compute a per-coefficient visual weight for an 8x8 block: for each pixel,
 * a variance-like measure (scaled by 36) over its 3x3 neighbourhood, clipped
 * to the block borders. Flat areas get low weights, busy areas high ones.
 */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            /* 3x3 neighbourhood bounds, clipped to the 8x8 block. */
            const int y_lo = FFMAX(y - 1, 0), y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0), x_hi = FFMIN(8, x + 2);
            int sum = 0, sqr = 0, count = 0;

            for (int yy = y_lo; yy < y_hi; yy++) {
                for (int xx = x_lo; xx < x_hi; xx++) {
                    const int v = ptr[xx + yy * stride];

                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* sqrt(count * sum(v^2) - (sum v)^2) is proportional to the
             * neighbourhood's standard deviation. */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2272 
2274  int motion_x, int motion_y,
2275  int mb_block_height,
2276  int mb_block_width,
2277  int mb_block_count,
2278  int chroma_x_shift,
2279  int chroma_y_shift,
2280  int chroma_format)
2281 {
2282 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2283  * and neither of these encoders currently supports 444. */
2284 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2285  (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2286  int16_t weight[12][64];
2287  int16_t orig[12][64];
2288  const int mb_x = s->c.mb_x;
2289  const int mb_y = s->c.mb_y;
2290  int i;
2291  int skip_dct[12];
2292  int dct_offset = s->c.linesize * 8; // default for progressive frames
2293  int uv_dct_offset = s->c.uvlinesize * 8;
2294  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2295  ptrdiff_t wrap_y, wrap_c;
2296 
2297  for (i = 0; i < mb_block_count; i++)
2298  skip_dct[i] = s->skipdct;
2299 
2300  if (s->adaptive_quant) {
2301  const int last_qp = s->c.qscale;
2302  const int mb_xy = mb_x + mb_y * s->c.mb_stride;
2303 
2304  s->lambda = s->lambda_table[mb_xy];
2305  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2307 
2308  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2309  s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;
2310 
2311  if (s->c.out_format == FMT_H263) {
2312  s->dquant = av_clip(s->dquant, -2, 2);
2313 
2314  if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
2315  if (!s->c.mb_intra) {
2316  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
2317  if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
2318  s->dquant = 0;
2319  }
2320  if (s->c.mv_type == MV_TYPE_8X8)
2321  s->dquant = 0;
2322  }
2323  }
2324  }
2325  }
2326  ff_set_qscale(&s->c, last_qp + s->dquant);
2327  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2328  ff_set_qscale(&s->c, s->c.qscale + s->dquant);
2329 
2330  wrap_y = s->c.linesize;
2331  wrap_c = s->c.uvlinesize;
2332  ptr_y = s->new_pic->data[0] +
2333  (mb_y * 16 * wrap_y) + mb_x * 16;
2334  ptr_cb = s->new_pic->data[1] +
2335  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2336  ptr_cr = s->new_pic->data[2] +
2337  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2338 
2339  if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
2340  s->c.codec_id != AV_CODEC_ID_AMV) {
2341  uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
2342  int cw = (s->c.width + chroma_x_shift) >> chroma_x_shift;
2343  int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
2344  s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
2345  wrap_y, wrap_y,
2346  16, 16, mb_x * 16, mb_y * 16,
2347  s->c.width, s->c.height);
2348  ptr_y = ebuf;
2349  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2350  wrap_c, wrap_c,
2351  mb_block_width, mb_block_height,
2352  mb_x * mb_block_width, mb_y * mb_block_height,
2353  cw, ch);
2354  ptr_cb = ebuf + 16 * wrap_y;
2355  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2356  wrap_c, wrap_c,
2357  mb_block_width, mb_block_height,
2358  mb_x * mb_block_width, mb_y * mb_block_height,
2359  cw, ch);
2360  ptr_cr = ebuf + 16 * wrap_y + 16;
2361  }
2362 
2363  if (s->c.mb_intra) {
2364  if (INTERLACED_DCT(s)) {
2365  int progressive_score, interlaced_score;
2366 
2367  s->c.interlaced_dct = 0;
2368  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2369  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2370  NULL, wrap_y, 8) - 400;
2371 
2372  if (progressive_score > 0) {
2373  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2374  NULL, wrap_y * 2, 8) +
2375  s->ildct_cmp[1](s, ptr_y + wrap_y,
2376  NULL, wrap_y * 2, 8);
2377  if (progressive_score > interlaced_score) {
2378  s->c.interlaced_dct = 1;
2379 
2380  dct_offset = wrap_y;
2381  uv_dct_offset = wrap_c;
2382  wrap_y <<= 1;
2383  if (chroma_format == CHROMA_422 ||
2385  wrap_c <<= 1;
2386  }
2387  }
2388  }
2389 
2390  s->pdsp.get_pixels(s->c.block[0], ptr_y, wrap_y);
2391  s->pdsp.get_pixels(s->c.block[1], ptr_y + 8, wrap_y);
2392  s->pdsp.get_pixels(s->c.block[2], ptr_y + dct_offset, wrap_y);
2393  s->pdsp.get_pixels(s->c.block[3], ptr_y + dct_offset + 8, wrap_y);
2394 
2395  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2396  skip_dct[4] = 1;
2397  skip_dct[5] = 1;
2398  } else {
2399  s->pdsp.get_pixels(s->c.block[4], ptr_cb, wrap_c);
2400  s->pdsp.get_pixels(s->c.block[5], ptr_cr, wrap_c);
2401  if (chroma_format == CHROMA_422) {
2402  s->pdsp.get_pixels(s->c.block[6], ptr_cb + uv_dct_offset, wrap_c);
2403  s->pdsp.get_pixels(s->c.block[7], ptr_cr + uv_dct_offset, wrap_c);
2404  } else if (chroma_format == CHROMA_444) {
2405  s->pdsp.get_pixels(s->c.block[ 6], ptr_cb + 8, wrap_c);
2406  s->pdsp.get_pixels(s->c.block[ 7], ptr_cr + 8, wrap_c);
2407  s->pdsp.get_pixels(s->c.block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2408  s->pdsp.get_pixels(s->c.block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2409  s->pdsp.get_pixels(s->c.block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2410  s->pdsp.get_pixels(s->c.block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2411  }
2412  }
2413  } else {
2414  op_pixels_func (*op_pix)[4];
2415  qpel_mc_func (*op_qpix)[16];
2416  uint8_t *dest_y, *dest_cb, *dest_cr;
2417 
2418  dest_y = s->c.dest[0];
2419  dest_cb = s->c.dest[1];
2420  dest_cr = s->c.dest[2];
2421 
2422  if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
2423  op_pix = s->c.hdsp.put_pixels_tab;
2424  op_qpix = s->c.qdsp.put_qpel_pixels_tab;
2425  } else {
2426  op_pix = s->c.hdsp.put_no_rnd_pixels_tab;
2427  op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
2428  }
2429 
2430  if (s->c.mv_dir & MV_DIR_FORWARD) {
2431  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
2432  s->c.last_pic.data,
2433  op_pix, op_qpix);
2434  op_pix = s->c.hdsp.avg_pixels_tab;
2435  op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
2436  }
2437  if (s->c.mv_dir & MV_DIR_BACKWARD) {
2438  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
2439  s->c.next_pic.data,
2440  op_pix, op_qpix);
2441  }
2442 
2443  if (INTERLACED_DCT(s)) {
2444  int progressive_score, interlaced_score;
2445 
2446  s->c.interlaced_dct = 0;
2447  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2448  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2449  ptr_y + wrap_y * 8,
2450  wrap_y, 8) - 400;
2451 
2452  if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
2453  progressive_score -= 400;
2454 
2455  if (progressive_score > 0) {
2456  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2457  wrap_y * 2, 8) +
2458  s->ildct_cmp[0](s, dest_y + wrap_y,
2459  ptr_y + wrap_y,
2460  wrap_y * 2, 8);
2461 
2462  if (progressive_score > interlaced_score) {
2463  s->c.interlaced_dct = 1;
2464 
2465  dct_offset = wrap_y;
2466  uv_dct_offset = wrap_c;
2467  wrap_y <<= 1;
2468  if (chroma_format == CHROMA_422)
2469  wrap_c <<= 1;
2470  }
2471  }
2472  }
2473 
2474  s->pdsp.diff_pixels(s->c.block[0], ptr_y, dest_y, wrap_y);
2475  s->pdsp.diff_pixels(s->c.block[1], ptr_y + 8, dest_y + 8, wrap_y);
2476  s->pdsp.diff_pixels(s->c.block[2], ptr_y + dct_offset,
2477  dest_y + dct_offset, wrap_y);
2478  s->pdsp.diff_pixels(s->c.block[3], ptr_y + dct_offset + 8,
2479  dest_y + dct_offset + 8, wrap_y);
2480 
2481  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2482  skip_dct[4] = 1;
2483  skip_dct[5] = 1;
2484  } else {
2485  s->pdsp.diff_pixels(s->c.block[4], ptr_cb, dest_cb, wrap_c);
2486  s->pdsp.diff_pixels(s->c.block[5], ptr_cr, dest_cr, wrap_c);
2487  if (!chroma_y_shift) { /* 422 */
2488  s->pdsp.diff_pixels(s->c.block[6], ptr_cb + uv_dct_offset,
2489  dest_cb + uv_dct_offset, wrap_c);
2490  s->pdsp.diff_pixels(s->c.block[7], ptr_cr + uv_dct_offset,
2491  dest_cr + uv_dct_offset, wrap_c);
2492  }
2493  }
2494  /* pre quantization */
2495  if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
2496  // FIXME optimize
2497  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
2498  skip_dct[0] = 1;
2499  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
2500  skip_dct[1] = 1;
2501  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2502  wrap_y, 8) < 20 * s->c.qscale)
2503  skip_dct[2] = 1;
2504  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2505  wrap_y, 8) < 20 * s->c.qscale)
2506  skip_dct[3] = 1;
2507  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
2508  skip_dct[4] = 1;
2509  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
2510  skip_dct[5] = 1;
2511  if (!chroma_y_shift) { /* 422 */
2512  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2513  dest_cb + uv_dct_offset,
2514  wrap_c, 8) < 20 * s->c.qscale)
2515  skip_dct[6] = 1;
2516  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2517  dest_cr + uv_dct_offset,
2518  wrap_c, 8) < 20 * s->c.qscale)
2519  skip_dct[7] = 1;
2520  }
2521  }
2522  }
2523 
2524  if (s->quantizer_noise_shaping) {
2525  if (!skip_dct[0])
2526  get_visual_weight(weight[0], ptr_y , wrap_y);
2527  if (!skip_dct[1])
2528  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2529  if (!skip_dct[2])
2530  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2531  if (!skip_dct[3])
2532  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2533  if (!skip_dct[4])
2534  get_visual_weight(weight[4], ptr_cb , wrap_c);
2535  if (!skip_dct[5])
2536  get_visual_weight(weight[5], ptr_cr , wrap_c);
2537  if (!chroma_y_shift) { /* 422 */
2538  if (!skip_dct[6])
2539  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2540  wrap_c);
2541  if (!skip_dct[7])
2542  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2543  wrap_c);
2544  }
2545  memcpy(orig[0], s->c.block[0], sizeof(int16_t) * 64 * mb_block_count);
2546  }
2547 
2548  /* DCT & quantize */
2549  av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
2550  {
2551  for (i = 0; i < mb_block_count; i++) {
2552  if (!skip_dct[i]) {
2553  int overflow;
2554  s->c.block_last_index[i] = s->dct_quantize(s, s->c.block[i], i, s->c.qscale, &overflow);
2555  // FIXME we could decide to change to quantizer instead of
2556  // clipping
2557  // JS: I don't think that would be a good idea it could lower
2558  // quality instead of improve it. Just INTRADC clipping
2559  // deserves changes in quantizer
2560  if (overflow)
2561  clip_coeffs(s, s->c.block[i], s->c.block_last_index[i]);
2562  } else
2563  s->c.block_last_index[i] = -1;
2564  }
2565  if (s->quantizer_noise_shaping) {
2566  for (i = 0; i < mb_block_count; i++) {
2567  if (!skip_dct[i]) {
2568  s->c.block_last_index[i] =
2569  dct_quantize_refine(s, s->c.block[i], weight[i],
2570  orig[i], i, s->c.qscale);
2571  }
2572  }
2573  }
2574 
2575  if (s->luma_elim_threshold && !s->c.mb_intra)
2576  for (i = 0; i < 4; i++)
2577  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2578  if (s->chroma_elim_threshold && !s->c.mb_intra)
2579  for (i = 4; i < mb_block_count; i++)
2580  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2581 
2582  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2583  for (i = 0; i < mb_block_count; i++) {
2584  if (s->c.block_last_index[i] == -1)
2585  s->coded_score[i] = INT_MAX / 256;
2586  }
2587  }
2588  }
2589 
2590  if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
2591  s->c.block_last_index[4] =
2592  s->c.block_last_index[5] = 0;
2593  s->c.block[4][0] =
2594  s->c.block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
2595  if (!chroma_y_shift) { /* 422 / 444 */
2596  for (i=6; i<12; i++) {
2597  s->c.block_last_index[i] = 0;
2598  s->c.block[i][0] = s->c.block[4][0];
2599  }
2600  }
2601  }
2602 
2603  // non c quantize code returns incorrect block_last_index FIXME
2604  if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
2605  for (i = 0; i < mb_block_count; i++) {
2606  int j;
2607  if (s->c.block_last_index[i] > 0) {
2608  for (j = 63; j > 0; j--) {
2609  if (s->c.block[i][s->c.intra_scantable.permutated[j]])
2610  break;
2611  }
2612  s->c.block_last_index[i] = j;
2613  }
2614  }
2615  }
2616 
2617  s->encode_mb(s, s->c.block, motion_x, motion_y);
2618 }
2619 
2620 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2621 {
2622  if (s->c.chroma_format == CHROMA_420)
2623  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2624  else if (s->c.chroma_format == CHROMA_422)
2625  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2626  else
2627  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2628 }
2629 
2630 typedef struct MBBackup {
2631  struct {
2632  int mv[2][4][2];
2633  int last_mv[2][2][2];
2635  int last_dc[3];
2637  int qscale;
2640  int16_t (*block)[64];
2641  } c;
2643  int dquant;
2646 } MBBackup;
2647 
/*
 * COPY_CONTEXT() expands to a pair of state-copy helpers used by the
 * rate-distortion trial-encoding loop in encode_thread():
 *   BEFORE##_context_before_encode(d, s): copies the state a macroblock
 *     encode reads and may clobber (MV/DC prediction, bit-count statistics,
 *     qscale/dquant) so a trial can be restarted from a clean slate.
 *   AFTER##_context_after_encode(d, s, data_partitioning): copies the state
 *     a finished trial produced (MVs, mode flags, PutBitContexts, coded
 *     blocks) so the best candidate can be restored afterwards.
 * Instantiated below in both directions: MPVEncContext <-> MBBackup.
 */
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
                                                   const SRC_TYPE *const s) \
{ \
    /* FIXME is memcpy faster than a loop? */ \
    memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
 \
    /* MPEG-1 */ \
    d->c.mb_skip_run = s->c.mb_skip_run; \
    for (int i = 0; i < 3; i++) \
        d->c.last_dc[i] = s->c.last_dc[i]; \
 \
    /* statistics */ \
    d->mv_bits = s->mv_bits; \
    d->i_tex_bits = s->i_tex_bits; \
    d->p_tex_bits = s->p_tex_bits; \
    d->i_count = s->i_count; \
    d->misc_bits = s->misc_bits; \
    d->last_bits = 0; \
 \
    d->c.mb_skipped = 0; \
    d->c.qscale = s->c.qscale; \
    d->dquant = s->dquant; \
 \
    d->esc3_level_length = s->esc3_level_length; \
} \
 \
static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
                                                  const SRC_TYPE *const s, \
                                                  int data_partitioning) \
{ \
    /* FIXME is memcpy faster than a loop? */ \
    memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
    memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
 \
    /* MPEG-1 */ \
    d->c.mb_skip_run = s->c.mb_skip_run; \
    for (int i = 0; i < 3; i++) \
        d->c.last_dc[i] = s->c.last_dc[i]; \
 \
    /* statistics */ \
    d->mv_bits = s->mv_bits; \
    d->i_tex_bits = s->i_tex_bits; \
    d->p_tex_bits = s->p_tex_bits; \
    d->i_count = s->i_count; \
    d->misc_bits = s->misc_bits; \
 \
    d->c.mb_intra = s->c.mb_intra; \
    d->c.mb_skipped = s->c.mb_skipped; \
    d->c.mv_type = s->c.mv_type; \
    d->c.mv_dir = s->c.mv_dir; \
    d->pb = s->pb; \
    /* The pb2/tex_pb bit buffers only exist with data partitioning. */ \
    if (data_partitioning) { \
        d->pb2 = s->pb2; \
        d->tex_pb = s->tex_pb; \
    } \
    d->c.block = s->c.block; \
    for (int i = 0; i < 8; i++) \
        d->c.block_last_index[i] = s->c.block_last_index[i]; \
    d->c.interlaced_dct = s->c.interlaced_dct; \
    d->c.qscale = s->c.qscale; \
 \
    d->esc3_level_length = s->esc3_level_length; \
}

/* backup/save copy encoder state into an MBBackup; reset/store restore it. */
COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2715 
/**
 * Trial-encode one macroblock with the currently selected coding parameters
 * (mv_dir/mv_type/mv/mb_intra, set up by the caller) and keep the result if
 * it beats the best score so far.
 *
 * Double-buffering: index *next_block selects which of the two block sets /
 * PutBitContexts this trial writes into; the other one holds the current best
 * candidate. On a new best, *next_block is flipped and the state is saved
 * into *best via save_context_after_encode().
 *
 * @param dmin in/out: best score so far (bits, or lambda-weighted RD cost
 *             when mb_decision == FF_MB_DECISION_RD)
 */
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
                         PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                         int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    /* Restore the pre-encode state snapshotted by the caller. */
    reset_context_before_encode(s, backup);

    s->c.block = s->c.blocks[*next_block];
    s->pb = pb[*next_block];
    if (s->c.data_partitioning) {
        s->pb2 = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* Redirect reconstruction into the scratchpad so the current best
         * candidate's reconstructed pixels in s->c.dest survive this trial. */
        memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
        s->c.dest[0] = s->c.sc.rd_scratchpad;
        s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
        s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
        av_assert0(s->c.linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    /* Rate: bits consumed by this trial across all active bit buffers. */
    score= put_bits_count(&s->pb);
    if (s->c.data_partitioning) {
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
        /* Full RD: reconstruct and add the lambda-weighted distortion. */
        mpv_reconstruct_mb(s, s->c.block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        save_context_after_encode(best, s, s->c.data_partitioning);
    }
}
2766 
2767 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2768 {
2769  const uint32_t *sq = ff_square_tab + 256;
2770  int acc=0;
2771  int x,y;
2772 
2773  if(w==16 && h==16)
2774  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2775  else if(w==8 && h==8)
2776  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2777 
2778  for(y=0; y<h; y++){
2779  for(x=0; x<w; x++){
2780  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2781  }
2782  }
2783 
2784  av_assert2(acc>=0);
2785 
2786  return acc;
2787 }
2788 
2789 static int sse_mb(MPVEncContext *const s)
2790 {
2791  int w= 16;
2792  int h= 16;
2793  int chroma_mb_w = w >> s->c.chroma_x_shift;
2794  int chroma_mb_h = h >> s->c.chroma_y_shift;
2795 
2796  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2797  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2798 
2799  if(w==16 && h==16)
2800  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2801  s->c.dest[0], s->c.linesize, 16) +
2802  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2803  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2804  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2805  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2806  else
2807  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2808  s->c.dest[0], w, h, s->c.linesize) +
2809  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2810  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2811  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2812  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2813 }
2814 
2816  MPVEncContext *const s = *(void**)arg;
2817 
2818 
2819  s->me.pre_pass = 1;
2820  s->me.dia_size = s->c.avctx->pre_dia_size;
2821  s->c.first_slice_line = 1;
2822  for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
2823  for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
2824  ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2825  s->c.first_slice_line = 0;
2826  }
2827 
2828  s->me.pre_pass = 0;
2829 
2830  return 0;
2831 }
2832 
2834  MPVEncContext *const s = *(void**)arg;
2835 
2836  s->me.dia_size = s->c.avctx->dia_size;
2837  s->c.first_slice_line = 1;
2838  for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
2839  s->c.mb_x = 0; //for block init below
2840  ff_init_block_index(&s->c);
2841  for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
2842  s->c.block_index[0] += 2;
2843  s->c.block_index[1] += 2;
2844  s->c.block_index[2] += 2;
2845  s->c.block_index[3] += 2;
2846 
2847  /* compute motion vector & mb_type and store in context */
2848  if (s->c.pict_type == AV_PICTURE_TYPE_B)
2849  ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
2850  else
2851  ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2852  }
2853  s->c.first_slice_line = 0;
2854  }
2855  return 0;
2856 }
2857 
2858 static int mb_var_thread(AVCodecContext *c, void *arg){
2859  MPVEncContext *const s = *(void**)arg;
2860 
2861  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2862  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2863  int xx = mb_x * 16;
2864  int yy = mb_y * 16;
2865  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2866  int varc;
2867  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2868 
2869  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2870  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2871 
2872  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2873  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2874  s->me.mb_var_sum_temp += varc;
2875  }
2876  }
2877  return 0;
2878 }
2879 
2880 static void write_slice_end(MPVEncContext *const s)
2881 {
2882  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
2883  if (s->c.partitioned_frame)
2885 
2886  ff_mpeg4_stuffing(&s->pb);
2887  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2888  s->c.out_format == FMT_MJPEG) {
2890  } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
2892  }
2893 
2894  flush_put_bits(&s->pb);
2895 
2896  if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->c.partitioned_frame)
2897  s->misc_bits+= get_bits_diff(s);
2898 }
2899 
/* Fill in the most recently reserved 12-byte mb_info record (the slot was
 * sized by update_mb_info()): bit offset of the macroblock in the bitstream,
 * quantizer, GOB number, macroblock address, and the H.263 motion-vector
 * predictor. The second MV pair is always 0 (4MV not implemented). */
static void write_mb_info(MPVEncContext *const s)
{
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb); /* position in bits */
    int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->c.gob_index);
    int gobn = s->c.mb_y / s->c.gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->c.qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2919 
/* Maintain the mb_info records: roughly every s->mb_info bytes of bitstream
 * a new 12-byte slot is reserved. Called with startcode=1 just before a
 * startcode is written (only reserves/advances, writes nothing) and with
 * startcode=0 afterwards (fills the slot via write_mb_info()). No-op when
 * mb_info export is disabled. */
static void update_mb_info(MPVEncContext *const s, int startcode)
{
    if (!s->mb_info)
        return;
    /* Another s->mb_info bytes have been emitted: reserve the next slot. */
    if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bytes_count(&s->pb, 0);
        /* This might have incremented mb_info_size above, and we return
         * without actually writing any info into that slot yet. But in that
         * case, this function will be called again after the start code has
         * been written, and will then actually write the mb info. */
        return;
    }

    s->last_mb_info = put_bytes_count(&s->pb, 0);
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
2942 
/**
 * Grow the bitstream buffer when fewer than threshold bytes remain.
 * Growing is only possible when there is a single slice context and the
 * PutBitContext writes directly into avctx->internal->byte_buffer.
 *
 * @param threshold     minimum number of bytes that must remain writable
 * @param size_increase how many bytes to add to the buffer when growing
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure or overflow,
 *         AVERROR(EINVAL) if the buffer could not be grown and is still
 *         below the threshold.
 */
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
{
    if (put_bytes_left(&s->pb, 0) < threshold
        && s->c.slice_context_count == 1
        && s->pb.buf == s->c.avctx->internal->byte_buffer) {
        /* Remember the last-GOB pointer as an offset; the buffer may move. */
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);
        }

        emms_c();

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->c.avctx->internal->byte_buffer_size + size_increase);
        if (!new_buffer)
            return AVERROR(ENOMEM);

        /* Copy the already-written bitstream over, then swap the buffers. */
        memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
        av_free(s->c.avctx->internal->byte_buffer);
        s->c.avctx->internal->byte_buffer = new_buffer;
        s->c.avctx->internal->byte_buffer_size = new_buffer_size;
        /* Re-point the PutBitContext and the last-GOB marker at the new buffer. */
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        s->ptr_lastgob = s->pb.buf + lastgob_pos;
    }
    if (put_bytes_left(&s->pb, 0) < threshold)
        return AVERROR(EINVAL);
    return 0;
}
2976 
2977 static int encode_thread(AVCodecContext *c, void *arg){
2978  MPVEncContext *const s = *(void**)arg;
2979  int chr_h = 16 >> s->c.chroma_y_shift;
2980  int i;
2981  MBBackup best_s = { 0 }, backup_s;
2982  uint8_t bit_buf[2][MAX_MB_BYTES];
2983  // + 2 because ff_copy_bits() overreads
2984  uint8_t bit_buf2[2][MAX_PB2_MB_SIZE + 2];
2985  uint8_t bit_buf_tex[2][MAX_AC_TEX_MB_SIZE + 2];
2986  PutBitContext pb[2], pb2[2], tex_pb[2];
2987 
2988  for(i=0; i<2; i++){
2989  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2990  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_PB2_MB_SIZE);
2991  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_AC_TEX_MB_SIZE);
2992  }
2993 
2994  s->last_bits= put_bits_count(&s->pb);
2995  s->mv_bits=0;
2996  s->misc_bits=0;
2997  s->i_tex_bits=0;
2998  s->p_tex_bits=0;
2999  s->i_count=0;
3000 
3001  for(i=0; i<3; i++){
3002  /* init last dc values */
3003  /* note: quant matrix value (8) is implied here */
3004  s->c.last_dc[i] = 128 << s->c.intra_dc_precision;
3005 
3006  s->encoding_error[i] = 0;
3007  }
3008  if (s->c.codec_id == AV_CODEC_ID_AMV) {
3009  s->c.last_dc[0] = 128 * 8 / 13;
3010  s->c.last_dc[1] = 128 * 8 / 14;
3011  s->c.last_dc[2] = 128 * 8 / 14;
3012 #if CONFIG_MPEG4_ENCODER
3013  } else if (s->c.partitioned_frame) {
3014  av_assert1(s->c.codec_id == AV_CODEC_ID_MPEG4);
3016 #endif
3017  }
3018  s->c.mb_skip_run = 0;
3019  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
3020 
3021  s->last_mv_dir = 0;
3022 
3023  s->c.resync_mb_x = 0;
3024  s->c.resync_mb_y = 0;
3025  s->c.first_slice_line = 1;
3026  s->ptr_lastgob = s->pb.buf;
3027  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
3028  int mb_y;
3029  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
3030  int first_in_slice;
3031  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
3032  if (first_in_slice && mb_y_order != s->c.start_mb_y)
3034  s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 1024 << s->c.intra_dc_precision;
3035  } else {
3036  mb_y = mb_y_order;
3037  }
3038  s->c.mb_x = 0;
3039  s->c.mb_y = mb_y;
3040 
3041  ff_set_qscale(&s->c, s->c.qscale);
3042  ff_init_block_index(&s->c);
3043 
3044  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3045  int mb_type, xy;
3046 // int d;
3047  int dmin= INT_MAX;
3048  int dir;
3049  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3050  + s->c.mb_width*MAX_MB_BYTES;
3051 
3053  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3054  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3055  return -1;
3056  }
3057  if (s->c.data_partitioning) {
3058  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3059  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3060  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3061  return -1;
3062  }
3063  }
3064 
3065  s->c.mb_x = mb_x;
3066  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3067  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3068 
3069  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3071  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3072  mb_type = s->mb_type[xy];
3073 
3074  /* write gob / video packet header */
3075  if(s->rtp_mode){
3076  int current_packet_size, is_gob_start;
3077 
3078  current_packet_size = put_bytes_count(&s->pb, 1)
3079  - (s->ptr_lastgob - s->pb.buf);
3080 
3081  is_gob_start = s->rtp_payload_size &&
3082  current_packet_size >= s->rtp_payload_size &&
3083  mb_y + mb_x > 0;
3084 
3085  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3086 
3087  switch (s->c.codec_id) {
3088  case AV_CODEC_ID_H263:
3089  case AV_CODEC_ID_H263P:
3090  if (!s->c.h263_slice_structured)
3091  if (s->c.mb_x || s->c.mb_y % s->c.gob_index) is_gob_start = 0;
3092  break;
3094  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3096  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3097  s->c.mb_skip_run)
3098  is_gob_start=0;
3099  break;
3100  case AV_CODEC_ID_MJPEG:
3101  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3102  break;
3103  }
3104 
3105  if(is_gob_start){
3106  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3107  write_slice_end(s);
3108 
3109  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->c.partitioned_frame)
3111  }
3112 
3113  av_assert2((put_bits_count(&s->pb)&7) == 0);
3114  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3115 
3116  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3117  int r = put_bytes_count(&s->pb, 0) + s->c.picture_number + 16 + s->c.mb_x + s->c.mb_y;
3118  int d = 100 / s->error_rate;
3119  if(r % d == 0){
3120  current_packet_size=0;
3121  s->pb.buf_ptr= s->ptr_lastgob;
3122  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3123  }
3124  }
3125 
3126  switch (s->c.codec_id) {
3127  case AV_CODEC_ID_MPEG4:
3128  if (CONFIG_MPEG4_ENCODER) {
3131  }
3132  break;
3135  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3138  }
3139  break;
3140  case AV_CODEC_ID_H263:
3141  case AV_CODEC_ID_H263P:
3142  if (CONFIG_H263_ENCODER) {
3143  update_mb_info(s, 1);
3145  }
3146  break;
3147  }
3148 
3149  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3150  int bits= put_bits_count(&s->pb);
3151  s->misc_bits+= bits - s->last_bits;
3152  s->last_bits= bits;
3153  }
3154 
3155  s->ptr_lastgob += current_packet_size;
3156  s->c.first_slice_line = 1;
3157  s->c.resync_mb_x = mb_x;
3158  s->c.resync_mb_y = mb_y;
3159  }
3160  }
3161 
3162  if (s->c.resync_mb_x == s->c.mb_x &&
3163  s->c.resync_mb_y+1 == s->c.mb_y)
3164  s->c.first_slice_line = 0;
3165 
3166  s->c.mb_skipped = 0;
3167  s->dquant=0; //only for QP_RD
3168 
3169  update_mb_info(s, 0);
3170 
3171  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3172  int next_block=0;
3173  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3174 
3175  backup_context_before_encode(&backup_s, s);
3176  backup_s.pb= s->pb;
3177  if (s->c.data_partitioning) {
3178  backup_s.pb2= s->pb2;
3179  backup_s.tex_pb= s->tex_pb;
3180  }
3181 
3182  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3183  s->c.mv_dir = MV_DIR_FORWARD;
3184  s->c.mv_type = MV_TYPE_16X16;
3185  s->c.mb_intra = 0;
3186  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3187  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3188  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3189  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3190  }
3191  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3192  s->c.mv_dir = MV_DIR_FORWARD;
3193  s->c.mv_type = MV_TYPE_FIELD;
3194  s->c.mb_intra = 0;
3195  for(i=0; i<2; i++){
3196  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3197  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3198  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3199  }
3200  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3201  &dmin, &next_block, 0, 0);
3202  }
3203  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3204  s->c.mv_dir = MV_DIR_FORWARD;
3205  s->c.mv_type = MV_TYPE_16X16;
3206  s->c.mb_intra = 0;
3207  s->c.mv[0][0][0] = 0;
3208  s->c.mv[0][0][1] = 0;
3209  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3210  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3211  }
3212  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3213  s->c.mv_dir = MV_DIR_FORWARD;
3214  s->c.mv_type = MV_TYPE_8X8;
3215  s->c.mb_intra = 0;
3216  for(i=0; i<4; i++){
3217  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3218  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3219  }
3220  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3221  &dmin, &next_block, 0, 0);
3222  }
3223  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3224  s->c.mv_dir = MV_DIR_FORWARD;
3225  s->c.mv_type = MV_TYPE_16X16;
3226  s->c.mb_intra = 0;
3227  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3228  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3229  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3230  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3231  }
3232  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3233  s->c.mv_dir = MV_DIR_BACKWARD;
3234  s->c.mv_type = MV_TYPE_16X16;
3235  s->c.mb_intra = 0;
3236  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3237  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3238  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3239  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3240  }
3241  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3242  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3243  s->c.mv_type = MV_TYPE_16X16;
3244  s->c.mb_intra = 0;
3245  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3246  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3247  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3248  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3249  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3250  &dmin, &next_block, 0, 0);
3251  }
3252  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3253  s->c.mv_dir = MV_DIR_FORWARD;
3254  s->c.mv_type = MV_TYPE_FIELD;
3255  s->c.mb_intra = 0;
3256  for(i=0; i<2; i++){
3257  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3258  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3259  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3260  }
3261  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3262  &dmin, &next_block, 0, 0);
3263  }
3264  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3265  s->c.mv_dir = MV_DIR_BACKWARD;
3266  s->c.mv_type = MV_TYPE_FIELD;
3267  s->c.mb_intra = 0;
3268  for(i=0; i<2; i++){
3269  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3270  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3271  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3272  }
3273  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3274  &dmin, &next_block, 0, 0);
3275  }
3276  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3277  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3278  s->c.mv_type = MV_TYPE_FIELD;
3279  s->c.mb_intra = 0;
3280  for(dir=0; dir<2; dir++){
3281  for(i=0; i<2; i++){
3282  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3283  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3284  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3285  }
3286  }
3287  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3288  &dmin, &next_block, 0, 0);
3289  }
3290  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3291  s->c.mv_dir = 0;
3292  s->c.mv_type = MV_TYPE_16X16;
3293  s->c.mb_intra = 1;
3294  s->c.mv[0][0][0] = 0;
3295  s->c.mv[0][0][1] = 0;
3296  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3297  &dmin, &next_block, 0, 0);
3298  s->c.mbintra_table[xy] = 1;
3299  }
3300 
3301  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3302  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3303  const int last_qp = backup_s.c.qscale;
3304  int qpi, qp, dc[6];
3305  int16_t ac[6][16];
3306  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3307  static const int dquant_tab[4]={-1,1,-2,2};
3308  int storecoefs = s->c.mb_intra && s->c.dc_val[0];
3309 
3310  av_assert2(backup_s.dquant == 0);
3311 
3312  //FIXME intra
3313  s->c.mv_dir = best_s.c.mv_dir;
3314  s->c.mv_type = MV_TYPE_16X16;
3315  s->c.mb_intra = best_s.c.mb_intra;
3316  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3317  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3318  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3319  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3320 
3321  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3322  for(; qpi<4; qpi++){
3323  int dquant= dquant_tab[qpi];
3324  qp= last_qp + dquant;
3325  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3326  continue;
3327  backup_s.dquant= dquant;
3328  if(storecoefs){
3329  for(i=0; i<6; i++){
3330  dc[i] = s->c.dc_val[0][s->c.block_index[i]];
3331  memcpy(ac[i], s->c.ac_val[0][s->c.block_index[i]], sizeof(int16_t)*16);
3332  }
3333  }
3334 
3335  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3336  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3337  if (best_s.c.qscale != qp) {
3338  if(storecoefs){
3339  for(i=0; i<6; i++){
3340  s->c.dc_val[0][s->c.block_index[i]] = dc[i];
3341  memcpy(s->c.ac_val[0][s->c.block_index[i]], ac[i], sizeof(int16_t)*16);
3342  }
3343  }
3344  }
3345  }
3346  }
3347  }
3348  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3349  int mx= s->b_direct_mv_table[xy][0];
3350  int my= s->b_direct_mv_table[xy][1];
3351 
3352  backup_s.dquant = 0;
3353  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3354  s->c.mb_intra = 0;
3355  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3356  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3357  &dmin, &next_block, mx, my);
3358  }
3359  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3360  backup_s.dquant = 0;
3361  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3362  s->c.mb_intra = 0;
3363  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3364  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3365  &dmin, &next_block, 0, 0);
3366  }
3367  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3368  int coded=0;
3369  for(i=0; i<6; i++)
3370  coded |= s->c.block_last_index[i];
3371  if(coded){
3372  int mx,my;
3373  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3374  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3375  mx=my=0; //FIXME find the one we actually used
3376  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3377  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3378  mx = s->c.mv[1][0][0];
3379  my = s->c.mv[1][0][1];
3380  }else{
3381  mx = s->c.mv[0][0][0];
3382  my = s->c.mv[0][0][1];
3383  }
3384 
3385  s->c.mv_dir = best_s.c.mv_dir;
3386  s->c.mv_type = best_s.c.mv_type;
3387  s->c.mb_intra = 0;
3388 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3389  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3390  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3391  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3392  backup_s.dquant= 0;
3393  s->skipdct=1;
3394  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3395  &dmin, &next_block, mx, my);
3396  s->skipdct=0;
3397  }
3398  }
3399 
3400  store_context_after_encode(s, &best_s, s->c.data_partitioning);
3401 
3402  pb_bits_count= put_bits_count(&s->pb);
3403  flush_put_bits(&s->pb);
3404  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3405  s->pb= backup_s.pb;
3406 
3407  if (s->c.data_partitioning) {
3408  pb2_bits_count= put_bits_count(&s->pb2);
3409  flush_put_bits(&s->pb2);
3410  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3411  s->pb2= backup_s.pb2;
3412 
3413  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3414  flush_put_bits(&s->tex_pb);
3415  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3416  s->tex_pb= backup_s.tex_pb;
3417  }
3418  s->last_bits= put_bits_count(&s->pb);
3419 
3420  if (CONFIG_H263_ENCODER &&
3421  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3423 
3424  if(next_block==0){ //FIXME 16 vs linesize16
3425  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3426  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3427  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3428  }
3429 
3430  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3431  mpv_reconstruct_mb(s, s->c.block);
3432  } else {
3433  int motion_x = 0, motion_y = 0;
3434  s->c.mv_type = MV_TYPE_16X16;
3435  // only one MB-Type possible
3436 
3437  switch(mb_type){
3439  s->c.mv_dir = 0;
3440  s->c.mb_intra = 1;
3441  motion_x= s->c.mv[0][0][0] = 0;
3442  motion_y= s->c.mv[0][0][1] = 0;
3443  s->c.mbintra_table[xy] = 1;
3444  break;
3446  s->c.mv_dir = MV_DIR_FORWARD;
3447  s->c.mb_intra = 0;
3448  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3449  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3450  break;
3452  s->c.mv_dir = MV_DIR_FORWARD;
3453  s->c.mv_type = MV_TYPE_FIELD;
3454  s->c.mb_intra = 0;
3455  for(i=0; i<2; i++){
3456  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3457  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3458  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3459  }
3460  break;
3462  s->c.mv_dir = MV_DIR_FORWARD;
3463  s->c.mv_type = MV_TYPE_8X8;
3464  s->c.mb_intra = 0;
3465  for(i=0; i<4; i++){
3466  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3467  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3468  }
3469  break;
3471  if (CONFIG_MPEG4_ENCODER) {
3473  s->c.mb_intra = 0;
3474  motion_x=s->b_direct_mv_table[xy][0];
3475  motion_y=s->b_direct_mv_table[xy][1];
3476  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3477  }
3478  break;
3480  if (CONFIG_MPEG4_ENCODER) {
3482  s->c.mb_intra = 0;
3483  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3484  }
3485  break;
3487  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3488  s->c.mb_intra = 0;
3489  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3490  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3491  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3492  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3493  break;
3495  s->c.mv_dir = MV_DIR_BACKWARD;
3496  s->c.mb_intra = 0;
3497  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3498  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3499  break;
3501  s->c.mv_dir = MV_DIR_FORWARD;
3502  s->c.mb_intra = 0;
3503  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3504  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3505  break;
3507  s->c.mv_dir = MV_DIR_FORWARD;
3508  s->c.mv_type = MV_TYPE_FIELD;
3509  s->c.mb_intra = 0;
3510  for(i=0; i<2; i++){
3511  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3512  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3513  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3514  }
3515  break;
3517  s->c.mv_dir = MV_DIR_BACKWARD;
3518  s->c.mv_type = MV_TYPE_FIELD;
3519  s->c.mb_intra = 0;
3520  for(i=0; i<2; i++){
3521  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3522  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3523  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3524  }
3525  break;
3527  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3528  s->c.mv_type = MV_TYPE_FIELD;
3529  s->c.mb_intra = 0;
3530  for(dir=0; dir<2; dir++){
3531  for(i=0; i<2; i++){
3532  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3533  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3534  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3535  }
3536  }
3537  break;
3538  default:
3539  av_unreachable("There is a case for every CANDIDATE_MB_TYPE_* "
3540  "except CANDIDATE_MB_TYPE_SKIPPED which is never "
3541  "the only candidate (always coupled with INTER) "
3542  "so that it never reaches this switch");
3543  }
3544 
3545  encode_mb(s, motion_x, motion_y);
3546 
3547  // RAL: Update last macroblock type
3548  s->last_mv_dir = s->c.mv_dir;
3549 
3550  if (CONFIG_H263_ENCODER &&
3551  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3553 
3554  mpv_reconstruct_mb(s, s->c.block);
3555  }
3556 
3557  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3558 
3559  /* clean the MV table in IPS frames for direct mode in B-frames */
3560  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3561  s->p_mv_table[xy][0]=0;
3562  s->p_mv_table[xy][1]=0;
3563  } else if ((s->c.h263_pred || s->c.h263_aic) && s->c.mbintra_table[xy])
3565 
3566  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3567  int w= 16;
3568  int h= 16;
3569 
3570  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3571  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3572 
3573  s->encoding_error[0] += sse(
3574  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3575  s->c.dest[0], w, h, s->c.linesize);
3576  s->encoding_error[1] += sse(
3577  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3578  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3579  s->encoding_error[2] += sse(
3580  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3581  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3582  }
3583  if (s->c.loop_filter) {
3584  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3585  ff_h263_loop_filter(&s->c);
3586  }
3587  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3588  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3589  }
3590  }
3591 
3592 #if CONFIG_MSMPEG4ENC
3593  //not beautiful here but we must write it before flushing so it has to be here
3594  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3595  s->c.pict_type == AV_PICTURE_TYPE_I)
3597 #endif
3598 
3599  write_slice_end(s);
3600 
3601  return 0;
3602 }
3603 
/* Helpers for folding a per-slice encoder context (src) into the main
 * context (dst) after threaded encoding: ADD accumulates a field from src
 * into dst; MERGE additionally zeroes the field in src so a later merge
 * cannot double-count it.
 * NOTE(review): MERGE expands to two statements with no do{}while(0)
 * wrapper, so it must only be used as a full statement (as it is below). */
3604 #define ADD(field) dst->field += src->field;
3605 #define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the signature line (original 3606) was dropped by the HTML
 * extraction -- only the body remains.  Judging by the call site in
 * encode_picture below, this is merge_context_after_me(dst, src): it folds a
 * slice context's motion-estimation statistics into the main context using
 * the ADD helper defined above.  TODO confirm against the repository. */
3607 {
3608  ADD(me.scene_change_score);
3609  ADD(me.mc_mb_var_sum_temp);
3610  ADD(me.mb_var_sum_temp);
3611 }
3612 
/* NOTE(review): the signature line (original 3613) was dropped by the HTML
 * extraction.  Judging by the call site in encode_picture below, this is
 * merge_context_after_encode(dst, src): it merges a slice context's
 * bit-accounting statistics into the main context and then appends the
 * slice's (byte-aligned) bitstream onto the main PutBitContext.
 * TODO confirm against the repository. */
3614 {
3615  int i;
3616 
3617  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3618  MERGE(dct_count[1]);
3619  ADD(mv_bits);
3620  ADD(i_tex_bits);
3621  ADD(p_tex_bits);
3622  ADD(i_count);
3623  ADD(misc_bits);
3624  ADD(encoding_error[0]);
3625  ADD(encoding_error[1]);
3626  ADD(encoding_error[2]);
3627 
     /* dct_error_sum is the DCT noise-reduction accumulator; presumably it
      * is only allocated when noise reduction is enabled -- TODO confirm. */
3628  if (dst->dct_error_sum) {
3629  for(i=0; i<64; i++){
3630  MERGE(dct_error_sum[0][i]);
3631  MERGE(dct_error_sum[1][i]);
3632  }
3633  }
3634 
     /* Both streams must be byte-aligned before concatenation. */
3635  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3636  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3637  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3638  flush_put_bits(&dst->pb);
3639 }
3640 
/**
 * Choose the quantiser/lambda for the current picture.
 *
 * Priority: an explicit next_lambda override from rate control, otherwise
 * the rate estimator (unless fixed qscale).  With adaptive quantisation the
 * per-MB qscale table is initialised and lambda is taken from it.
 *
 * @param m        main encoder context
 * @param dry_run  nonzero for a trial run (the next_lambda override is only
 *                 consumed when dry_run is 0)
 * @return 0 on success, -1 if the rate estimator produced a negative quality
 */
3641 static int estimate_qp(MPVMainEncContext *const m, int dry_run)
3642 {
3643  MPVEncContext *const s = &m->s;
3644 
3645  if (m->next_lambda){
3646  s->c.cur_pic.ptr->f->quality = m->next_lambda;
3647  if(!dry_run) m->next_lambda= 0;
3648  } else if (!m->fixed_qscale) {
3649  int quality = ff_rate_estimate_qscale(m, dry_run);
3650  s->c.cur_pic.ptr->f->quality = quality;
3651  if (s->c.cur_pic.ptr->f->quality < 0)
3652  return -1;
3653  }
3654 
3655  if(s->adaptive_quant){
3656  init_qscale_tab(s);
3657 
3658  switch (s->c.codec_id) {
3659  case AV_CODEC_ID_MPEG4:
3660  if (CONFIG_MPEG4_ENCODER)
     /* NOTE(review): the guarded call on original line 3661 (a hyperlinked
      * line) was dropped by extraction; as shown the `break` reads as the
      * body of the `if`, which is not the intended structure. */
3662  break;
3663  case AV_CODEC_ID_H263:
3664  case AV_CODEC_ID_H263P:
3665  case AV_CODEC_ID_FLV1:
3666  if (CONFIG_H263_ENCODER)
     /* NOTE(review): the guarded call on original line 3667 was likewise
      * dropped by extraction. */
3668  break;
3669  }
3670 
3671  s->lambda = s->lambda_table[0];
3672  //FIXME broken
3673  }else
3674  s->lambda = s->c.cur_pic.ptr->f->quality;
3675  update_qscale(m);
3676  return 0;
3677 }
3678 
3679 /* must be called before writing the header */
/* NOTE(review): the signature line (original 3680) was dropped by the HTML
 * extraction -- only the body remains.  It updates the temporal distances
 * pp_time (between the two most recent reference frames) and pb_time
 * (previous reference to the current B-frame) used by B-frame prediction.
 * TODO confirm the function name against the repository. */
3681 {
3682  av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
     /* Frame time expressed in time_base.num units. */
3683  s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;
3684 
3685  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
     /* Distance from the previous reference frame to this B-frame. */
3686  s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
3687  av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
3688  }else{
     /* Reference frame: record the gap to the previous reference. */
3689  s->c.pp_time = s->c.time - s->c.last_non_b_time;
3690  s->c.last_non_b_time = s->c.time;
3691  av_assert1(s->c.picture_number == 0 || s->c.pp_time > 0);
3692  }
3693 }
3694 
/**
 * Top-level per-frame encoding driver.
 *
 * Sets frame timing state, runs rate control (two-pass or last-lambda
 * heuristics), splits the output packet among the slice contexts, estimates
 * motion in parallel, detects scene changes, chooses f_code/b_code and
 * clamps long motion vectors, builds the (M)JPEG quant matrices when needed,
 * writes the picture header, encodes all slices in parallel and finally
 * merges the slice contexts and their bitstreams back into the main one.
 *
 * @param m    main encoder context
 * @param pkt  output packet whose buffer is partitioned among slice contexts
 * @return 0 on success, a negative error code on failure
 *
 * NOTE(review): this listing is a Doxygen text dump; the leading number on
 * each line is the original source line, and several hyperlinked statement
 * lines were dropped by the extraction (marked inline below).
 */
3695 static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
3696 {
3697  MPVEncContext *const s = &m->s;
3698  int i, ret;
3699  int bits;
3700  int context_count = s->c.slice_context_count;
3701 
3702  /* we need to initialize some time vars before we can encode B-frames */
3703  // RAL: Condition added for MPEG1VIDEO
3704  if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
     /* NOTE(review): the guarded call on original line 3705 (presumably the
      * frame-distance setup documented above) was dropped by extraction. */
3706  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)
     /* NOTE(review): the guarded call on original line 3707 was likewise
      * dropped by extraction. */
3708 
3709 // s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3710 
3711  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3712  s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
3713  } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3714  s->c.no_rounding ^= s->c.flipflop_rounding;
3715  }
3716 
     /* Rate control: two-pass reads back stored stats; otherwise reuse the
      * last lambda for this picture type unless a fixed qscale was forced. */
3717  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
3718  ret = estimate_qp(m, 1);
3719  if (ret < 0)
3720  return ret;
3721  ff_get_2pass_fcode(m);
3722  } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3723  if (s->c.pict_type == AV_PICTURE_TYPE_B)
3724  s->lambda = m->last_lambda_for[s->c.pict_type];
3725  else
3726  s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
3727  update_qscale(m);
3728  }
3729 
3730  s->c.mb_intra = 0; //for the rate distortion & bit compare functions
     /* Give each slice context a proportional chunk of the packet buffer. */
3731  for (int i = 0; i < context_count; i++) {
3732  MPVEncContext *const slice = s->c.enc_contexts[i];
3733  int h = s->c.mb_height;
3734  uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
3735  uint8_t *end = pkt->data + (int64_t)pkt->size * slice->c. end_mb_y / h;
3736 
3737  init_put_bits(&slice->pb, start, end - start);
3738 
3739  if (i) {
3740  ret = ff_update_duplicate_context(&slice->c, &s->c);
3741  if (ret < 0)
3742  return ret;
3743  slice->lambda = s->lambda;
3744  slice->lambda2 = s->lambda2;
3745  }
3746  slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
3747  ff_me_init_pic(slice);
3748  }
3749 
3750  /* Estimate motion for every MB */
3751  if (s->c.pict_type != AV_PICTURE_TYPE_I) {
3752  s->lambda = (s->lambda * m->me_penalty_compensation + 128) >> 8;
3753  s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
3754  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3755  if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3756  m->me_pre == 2) {
3757  s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
3758  &s->c.enc_contexts[0], NULL,
3759  context_count, sizeof(void*));
3760  }
3761  }
3762 
3763  s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
3764  NULL, context_count, sizeof(void*));
3765  }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
3766  /* I-Frame */
3767  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3768  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3769 
3770  if (!m->fixed_qscale) {
3771  /* finding spatial complexity for I-frame rate control */
3772  s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
3773  NULL, context_count, sizeof(void*));
3774  }
3775  }
3776  for(i=1; i<context_count; i++){
3777  merge_context_after_me(s, s->c.enc_contexts[i]);
3778  }
3779  m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3780  m->mb_var_sum = s->me. mb_var_sum_temp;
3781  emms_c();
3782 
     /* Scene change: promote a P-frame to I and mark all MBs intra. */
3783  if (s->me.scene_change_score > m->scenechange_threshold &&
3784  s->c.pict_type == AV_PICTURE_TYPE_P) {
3785  s->c.pict_type = AV_PICTURE_TYPE_I;
3786  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3787  s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
3788  if (s->c.msmpeg4_version >= MSMP4_V3)
3789  s->c.no_rounding = 1;
3790  ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3791  m->mb_var_sum, m->mc_mb_var_sum);
3792  }
3793 
     /* Pick f_code/b_code from the MV statistics, then clamp out-of-range
      * vectors (ff_fix_long_mvs) so they fit the chosen codes. */
3794  if (!s->c.umvplus) {
3795  if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
3796  s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3797 
3798  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3799  int a,b;
3800  a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3801  b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3802  s->f_code = FFMAX3(s->f_code, a, b);
3803  }
3804 
     /* NOTE(review): original line 3805 (a hyperlinked call line) was
      * dropped by extraction. */
3806  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3807  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3808  int j;
3809  for(i=0; i<2; i++){
3810  for(j=0; j<2; j++)
3811  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3812  s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3813  }
3814  }
3815  } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3816  int a, b;
3817 
3818  a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3819  b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3820  s->f_code = FFMAX(a, b);
3821 
3822  a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3823  b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3824  s->b_code = FFMAX(a, b);
3825 
3826  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3827  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3828  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3829  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3830  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3831  int dir, j;
3832  for(dir=0; dir<2; dir++){
3833  for(i=0; i<2; i++){
3834  for(j=0; j<2; j++){
     /* NOTE(review): original lines 3835-3836, which compute the candidate
      * MB `type` used just below, were dropped by extraction. */
3837  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3838  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3839  }
3840  }
3841  }
3842  }
3843  }
3844  }
3845 
3846  ret = estimate_qp(m, 0);
3847  if (ret < 0)
3848  return ret;
3849 
3850  if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
3851  s->c.pict_type == AV_PICTURE_TYPE_I &&
3852  !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
3853  s->c.qscale = 3; //reduce clipping problems
3854 
     /* MJPEG/AMV: bake qscale into the intra matrices and requantise. */
3855  if (s->c.out_format == FMT_MJPEG) {
     /* NOTE(review): original line 3856 (the start of the call whose
      * argument list continues on the next line) was dropped by
      * extraction. */
3857  (7 + s->c.qscale) / s->c.qscale, 65535);
3858  if (ret < 0)
3859  return ret;
3860 
3861  if (s->c.codec_id != AV_CODEC_ID_AMV) {
3862  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3863  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3864 
3865  if (s->c.avctx->intra_matrix) {
3866  chroma_matrix =
3867  luma_matrix = s->c.avctx->intra_matrix;
3868  }
3869  if (s->c.avctx->chroma_intra_matrix)
3870  chroma_matrix = s->c.avctx->chroma_intra_matrix;
3871 
3872  /* for mjpeg, we do include qscale in the matrix */
3873  for (int i = 1; i < 64; i++) {
3874  int j = s->c.idsp.idct_permutation[i];
3875 
3876  s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
3877  s->c. intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->c.qscale) >> 3);
3878  }
3879  s->c.y_dc_scale_table =
3880  s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision];
3881  s->c.chroma_intra_matrix[0] =
3882  s->c.intra_matrix[0] = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision][8];
3883  } else {
3884  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3885  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3886  for (int i = 1; i < 64; i++) {
3887  int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];
3888 
3889  s->c.intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3890  s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3891  }
3892  s->c.y_dc_scale_table = y;
3893  s->c.c_dc_scale_table = c;
3894  s->c.intra_matrix[0] = 13;
3895  s->c.chroma_intra_matrix[0] = 14;
3896  }
3897  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3898  s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
3899  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3900  s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3901  s->c.qscale = 8;
3902  }
3903 
3904  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3905  s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3906  } else {
3907  s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3908  }
3909  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
3910 
3911  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3912  m->picture_in_gop_number = 0;
3913 
     /* Write the picture header and account its bit cost separately. */
3914  s->c.mb_x = s->c.mb_y = 0;
3915  s->last_bits= put_bits_count(&s->pb);
3916  ret = m->encode_picture_header(m);
3917  if (ret < 0)
3918  return ret;
3919  bits= put_bits_count(&s->pb);
3920  m->header_bits = bits - s->last_bits;
3921 
     /* Encode all slices in parallel, then merge stats and bitstreams. */
3922  for(i=1; i<context_count; i++){
3923  update_duplicate_context_after_me(s->c.enc_contexts[i], s);
3924  }
3925  s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
3926  NULL, context_count, sizeof(void*));
3927  for(i=1; i<context_count; i++){
3928  if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
3929  set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3930  merge_context_after_encode(s, s->c.enc_contexts[i]);
3931  }
3932  emms_c();
3933  return 0;
3934 }
3935 
3936 static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
3937 {
3938  const int intra = s->c.mb_intra;
3939  int i;
3940 
3941  s->dct_count[intra]++;
3942 
3943  for(i=0; i<64; i++){
3944  int level= block[i];
3945 
3946  if(level){
3947  if(level>0){
3948  s->dct_error_sum[intra][i] += level;
3949  level -= s->dct_offset[intra][i];
3950  if(level<0) level=0;
3951  }else{
3952  s->dct_error_sum[intra][i] -= level;
3953  level += s->dct_offset[intra][i];
3954  if(level>0) level=0;
3955  }
3956  block[i]= level;
3957  }
3958  }
3959 }
3960 
3962  int16_t *block, int n,
3963  int qscale, int *overflow){
3964  const int *qmat;
3965  const uint16_t *matrix;
3966  const uint8_t *scantable;
3967  const uint8_t *perm_scantable;
3968  int max=0;
3969  unsigned int threshold1, threshold2;
3970  int bias=0;
3971  int run_tab[65];
3972  int level_tab[65];
3973  int score_tab[65];
3974  int survivor[65];
3975  int survivor_count;
3976  int last_run=0;
3977  int last_level=0;
3978  int last_score= 0;
3979  int last_i;
3980  int coeff[2][64];
3981  int coeff_count[64];
3982  int qmul, qadd, start_i, last_non_zero, i, dc;
3983  const int esc_length= s->ac_esc_length;
3984  const uint8_t *length, *last_length;
3985  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3986  int mpeg2_qscale;
3987 
3988  s->fdsp.fdct(block);
3989 
3990  if(s->dct_error_sum)
3991  s->denoise_dct(s, block);
3992  qmul= qscale*16;
3993  qadd= ((qscale-1)|1)*8;
3994 
3995  if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3996  else mpeg2_qscale = qscale << 1;
3997 
3998  if (s->c.mb_intra) {
3999  int q;
4000  scantable = s->c.intra_scantable.scantable;
4001  perm_scantable = s->c.intra_scantable.permutated;
4002  if (!s->c.h263_aic) {
4003  if (n < 4)
4004  q = s->c.y_dc_scale;
4005  else
4006  q = s->c.c_dc_scale;
4007  q = q << 3;
4008  } else{
4009  /* For AIC we skip quant/dequant of INTRADC */
4010  q = 1 << 3;
4011  qadd=0;
4012  }
4013 
4014  /* note: block[0] is assumed to be positive */
4015  block[0] = (block[0] + (q >> 1)) / q;
4016  start_i = 1;
4017  last_non_zero = 0;
4018  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4019  matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
4020  if (s->mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
4021  bias= 1<<(QMAT_SHIFT-1);
4022 
4023  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4024  length = s->intra_chroma_ac_vlc_length;
4025  last_length= s->intra_chroma_ac_vlc_last_length;
4026  } else {
4027  length = s->intra_ac_vlc_length;
4028  last_length= s->intra_ac_vlc_last_length;
4029  }
4030  } else {
4031  scantable = s->c.inter_scantable.scantable;
4032  perm_scantable = s->c.inter_scantable.permutated;
4033  start_i = 0;
4034  last_non_zero = -1;
4035  qmat = s->q_inter_matrix[qscale];
4036  matrix = s->c.inter_matrix;
4037  length = s->inter_ac_vlc_length;
4038  last_length= s->inter_ac_vlc_last_length;
4039  }
4040  last_i= start_i;
4041 
4042  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4043  threshold2= (threshold1<<1);
4044 
4045  for(i=63; i>=start_i; i--) {
4046  const int j = scantable[i];
4047  int64_t level = (int64_t)block[j] * qmat[j];
4048 
4049  if(((uint64_t)(level+threshold1))>threshold2){
4050  last_non_zero = i;
4051  break;
4052  }
4053  }
4054 
4055  for(i=start_i; i<=last_non_zero; i++) {
4056  const int j = scantable[i];
4057  int64_t level = (int64_t)block[j] * qmat[j];
4058 
4059 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4060 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4061  if(((uint64_t)(level+threshold1))>threshold2){
4062  if(level>0){
4063  level= (bias + level)>>QMAT_SHIFT;
4064  coeff[0][i]= level;
4065  coeff[1][i]= level-1;
4066 // coeff[2][k]= level-2;
4067  }else{
4068  level= (bias - level)>>QMAT_SHIFT;
4069  coeff[0][i]= -level;
4070  coeff[1][i]= -level+1;
4071 // coeff[2][k]= -level+2;
4072  }
4073  coeff_count[i]= FFMIN(level, 2);
4074  av_assert2(coeff_count[i]);
4075  max |=level;
4076  }else{
4077  coeff[0][i]= (level>>31)|1;
4078  coeff_count[i]= 1;
4079  }
4080  }
4081 
4082  *overflow= s->max_qcoeff < max; //overflow might have happened
4083 
4084  if(last_non_zero < start_i){
4085  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4086  return last_non_zero;
4087  }
4088 
4089  score_tab[start_i]= 0;
4090  survivor[0]= start_i;
4091  survivor_count= 1;
4092 
4093  for(i=start_i; i<=last_non_zero; i++){
4094  int level_index, j, zero_distortion;
4095  int dct_coeff= FFABS(block[ scantable[i] ]);
4096  int best_score=256*256*256*120;
4097 
4098  if (s->fdsp.fdct == ff_fdct_ifast)
4099  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4100  zero_distortion= dct_coeff*dct_coeff;
4101 
4102  for(level_index=0; level_index < coeff_count[i]; level_index++){
4103  int distortion;
4104  int level= coeff[level_index][i];
4105  const int alevel= FFABS(level);
4106  int unquant_coeff;
4107 
4108  av_assert2(level);
4109 
4110  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4111  unquant_coeff= alevel*qmul + qadd;
4112  } else if (s->c.out_format == FMT_MJPEG) {
4113  j = s->c.idsp.idct_permutation[scantable[i]];
4114  unquant_coeff = alevel * matrix[j] * 8;
4115  }else{ // MPEG-1
4116  j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4117  if (s->c.mb_intra) {
4118  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4119  unquant_coeff = (unquant_coeff - 1) | 1;
4120  }else{
4121  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4122  unquant_coeff = (unquant_coeff - 1) | 1;
4123  }
4124  unquant_coeff<<= 3;
4125  }
4126 
4127  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4128  level+=64;
4129  if((level&(~127)) == 0){
4130  for(j=survivor_count-1; j>=0; j--){
4131  int run= i - survivor[j];
4132  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4133  score += score_tab[i-run];
4134 
4135  if(score < best_score){
4136  best_score= score;
4137  run_tab[i+1]= run;
4138  level_tab[i+1]= level-64;
4139  }
4140  }
4141 
4142  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4143  for(j=survivor_count-1; j>=0; j--){
4144  int run= i - survivor[j];
4145  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4146  score += score_tab[i-run];
4147  if(score < last_score){
4148  last_score= score;
4149  last_run= run;
4150  last_level= level-64;
4151  last_i= i+1;
4152  }
4153  }
4154  }
4155  }else{
4156  distortion += esc_length*lambda;
4157  for(j=survivor_count-1; j>=0; j--){
4158  int run= i - survivor[j];
4159  int score= distortion + score_tab[i-run];
4160 
4161  if(score < best_score){
4162  best_score= score;
4163  run_tab[i+1]= run;
4164  level_tab[i+1]= level-64;
4165  }
4166  }
4167 
4168  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4169  for(j=survivor_count-1; j>=0; j--){
4170  int run= i - survivor[j];
4171  int score= distortion + score_tab[i-run];
4172  if(score < last_score){
4173  last_score= score;
4174  last_run= run;
4175  last_level= level-64;
4176  last_i= i+1;
4177  }
4178  }
4179  }
4180  }
4181  }
4182 
4183  score_tab[i+1]= best_score;
4184 
4185  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4186  if(last_non_zero <= 27){
4187  for(; survivor_count; survivor_count--){
4188  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4189  break;
4190  }
4191  }else{
4192  for(; survivor_count; survivor_count--){
4193  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4194  break;
4195  }
4196  }
4197 
4198  survivor[ survivor_count++ ]= i+1;
4199  }
4200 
4201  if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
4202  last_score= 256*256*256*120;
4203  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4204  int score= score_tab[i];
4205  if (i)
4206  score += lambda * 2; // FIXME more exact?
4207 
4208  if(score < last_score){
4209  last_score= score;
4210  last_i= i;
4211  last_level= level_tab[i];
4212  last_run= run_tab[i];
4213  }
4214  }
4215  }
4216 
4217  s->coded_score[n] = last_score;
4218 
4219  dc= FFABS(block[0]);
4220  last_non_zero= last_i - 1;
4221  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4222 
4223  if(last_non_zero < start_i)
4224  return last_non_zero;
4225 
4226  if(last_non_zero == 0 && start_i == 0){
4227  int best_level= 0;
4228  int best_score= dc * dc;
4229 
4230  for(i=0; i<coeff_count[0]; i++){
4231  int level= coeff[i][0];
4232  int alevel= FFABS(level);
4233  int unquant_coeff, score, distortion;
4234 
4235  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4236  unquant_coeff= (alevel*qmul + qadd)>>3;
4237  } else{ // MPEG-1
4238  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4239  unquant_coeff = (unquant_coeff - 1) | 1;
4240  }
4241  unquant_coeff = (unquant_coeff + 4) >> 3;
4242  unquant_coeff<<= 3 + 3;
4243 
4244  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4245  level+=64;
4246  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4247  else score= distortion + esc_length*lambda;
4248 
4249  if(score < best_score){
4250  best_score= score;
4251  best_level= level - 64;
4252  }
4253  }
4254  block[0]= best_level;
4255  s->coded_score[n] = best_score - dc*dc;
4256  if(best_level == 0) return -1;
4257  else return last_non_zero;
4258  }
4259 
4260  i= last_i;
4261  av_assert2(last_level);
4262 
4263  block[ perm_scantable[last_non_zero] ]= last_level;
4264  i -= last_run + 1;
4265 
4266  for(; i>start_i; i -= run_tab[i] + 1){
4267  block[ perm_scantable[i-1] ]= level_tab[i];
4268  }
4269 
4270  return last_non_zero;
4271 }
4272 
4273 static int16_t basis[64][64];
4274 
4275 static void build_basis(uint8_t *perm){
4276  int i, j, x, y;
4277  emms_c();
4278  for(i=0; i<8; i++){
4279  for(j=0; j<8; j++){
4280  for(y=0; y<8; y++){
4281  for(x=0; x<8; x++){
4282  double s= 0.25*(1<<BASIS_SHIFT);
4283  int index= 8*i + j;
4284  int perm_index= perm[index];
4285  if(i==0) s*= sqrt(0.5);
4286  if(j==0) s*= sqrt(0.5);
4287  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4288  }
4289  }
4290  }
4291  }
4292 }
4293 
/**
 * Noise-shaping quantization refinement: iteratively tweak individual
 * quantized coefficients of one 8x8 block by +/-1, accepting a change
 * whenever it lowers a combined (weighted reconstruction error +
 * lambda * VLC bit cost) score. Terminates when no single-coefficient
 * change improves the score.
 *
 * @param block  quantized coefficients (in IDCT-permuted order), updated in place
 * @param weight perceptual weights per coefficient; rescaled in place to 16..63
 * @param orig   original (pre-quantization) spatial-domain reference samples
 * @param n      block index (n < 4 selects luma DC scale for intra)
 * @param qscale quantizer scale
 * @return index of the new last nonzero coefficient (scan order)
 */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                               int16_t *block, int16_t *weight, int16_t *orig,
                               int n, int qscale)
{
    int16_t rem[64];                      // residual: weighted reconstruction error, updated incrementally
    LOCAL_ALIGNED_16(int16_t, d1, [64]);  // gradient of the error in DCT domain (when analyzed)
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];                      // zero-run lengths before each nonzero coefficient
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;                // VLC bit lengths for non-last coefficients
    const uint8_t *last_length;           // VLC bit lengths for the last coefficient
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    // lazily build the static DCT basis table on first use
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;                      // DC handled separately for intra
//        if (s->mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    // initialize the residual with the (rounded) DC minus the original samples
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    // remap the perceptual weights into the range 16..63 and accumulate
    // their energy to derive the rate/distortion lambda
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    // build the initial run-length table and subtract each dequantized
    // coefficient's basis contribution from the residual
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    // greedy refinement: each pass finds the single best +/-1 change
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            // project the weighted residual back into DCT domain so new
            // coefficients are only introduced against the error gradient
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            // intra: try adjusting the DC coefficient (no VLC cost term here)
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                // DC must stay within the representable range
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];       // run2 counts down to the next nonzero coefficient
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            // without strong noise shaping, only coefficients up to just past
            // the current last nonzero one are considered
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                // noise_shaping < 2: never grow a coefficient's magnitude
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                    continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        // changing an existing nonzero coefficient:
                        // bit-cost delta of the new level at the same run
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        // introducing a new +/-1 coefficient
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            // only add it if the sign opposes the error gradient
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            // the new coefficient splits an existing run in two
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;   // outside VLC table: escape handled elsewhere

                            if(next_i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            // appended past the end: it becomes the new "last",
                            // and the previous last coefficient becomes non-last
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    // removing a +/-1 coefficient: merge its surrounding runs
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=  length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=  last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        // removing the last coefficient: previous one becomes last
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                // add the distortion change of this coefficient tweak
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            // apply the winning change and update all derived state
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                // the change may have zeroed the last coefficient: rescan backwards
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            // rebuild the run-length table from scratch
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            // fold the dequantized delta into the residual
            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;  // no improving change found: converged
        }
    }

    return last_non_zero;
}
4609 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 * the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 * speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 * permutation up, the block is not (inverse) permutated
 * to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t temp[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Gather the first last+1 scan positions into temp and clear them
     * in the block, then scatter them back to their permuted slots.
     * Only nonzero-candidate positions are touched, which keeps this fast. */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        temp[pos]  = block[pos];
        block[pos] = 0;
    }

    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        block[permutation[pos]] = temp[pos];
    }
}
4645 
4646 static int dct_quantize_c(MPVEncContext *const s,
4647  int16_t *block, int n,
4648  int qscale, int *overflow)
4649 {
4650  int i, last_non_zero, q, start_i;
4651  const int *qmat;
4652  const uint8_t *scantable;
4653  int bias;
4654  int max=0;
4655  unsigned int threshold1, threshold2;
4656 
4657  s->fdsp.fdct(block);
4658 
4659  if(s->dct_error_sum)
4660  s->denoise_dct(s, block);
4661 
4662  if (s->c.mb_intra) {
4663  scantable = s->c.intra_scantable.scantable;
4664  if (!s->c.h263_aic) {
4665  if (n < 4)
4666  q = s->c.y_dc_scale;
4667  else
4668  q = s->c.c_dc_scale;
4669  q = q << 3;
4670  } else
4671  /* For AIC we skip quant/dequant of INTRADC */
4672  q = 1 << 3;
4673 
4674  /* note: block[0] is assumed to be positive */
4675  block[0] = (block[0] + (q >> 1)) / q;
4676  start_i = 1;
4677  last_non_zero = 0;
4678  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4679  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4680  } else {
4681  scantable = s->c.inter_scantable.scantable;
4682  start_i = 0;
4683  last_non_zero = -1;
4684  qmat = s->q_inter_matrix[qscale];
4685  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4686  }
4687  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4688  threshold2= (threshold1<<1);
4689  for(i=63;i>=start_i;i--) {
4690  const int j = scantable[i];
4691  int64_t level = (int64_t)block[j] * qmat[j];
4692 
4693  if(((uint64_t)(level+threshold1))>threshold2){
4694  last_non_zero = i;
4695  break;
4696  }else{
4697  block[j]=0;
4698  }
4699  }
4700  for(i=start_i; i<=last_non_zero; i++) {
4701  const int j = scantable[i];
4702  int64_t level = (int64_t)block[j] * qmat[j];
4703 
4704 // if( bias+level >= (1<<QMAT_SHIFT)
4705 // || bias-level >= (1<<QMAT_SHIFT)){
4706  if(((uint64_t)(level+threshold1))>threshold2){
4707  if(level>0){
4708  level= (bias + level)>>QMAT_SHIFT;
4709  block[j]= level;
4710  }else{
4711  level= (bias - level)>>QMAT_SHIFT;
4712  block[j]= -level;
4713  }
4714  max |=level;
4715  }else{
4716  block[j]=0;
4717  }
4718  }
4719  *overflow= s->max_qcoeff < max; //overflow might have happened
4720 
4721  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4722  if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
4723  ff_block_permute(block, s->c.idsp.idct_permutation,
4724  scantable, last_non_zero);
4725 
4726  return last_non_zero;
4727 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1487
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3961
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1143
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1661
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:386
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:103
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:84
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:220
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:243
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1710
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:430
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:218
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:236
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:277
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2716
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:225
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:230
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2159
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:185
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
h263data.h
init_unquantize
static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:316
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:104
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2620
level
uint8_t level
Definition: svq3.c:208
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1493
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:45
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:526
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
ALIGN
#define ALIGN
Definition: hashtable.c:32
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
MAX_PB2_MB_SIZE
@ MAX_PB2_MB_SIZE
Definition: mpeg4videoenc.h:38
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:821
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:244
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1890
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2642
mem_internal.h
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:249
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2642
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:205
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1277
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3680
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1912
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:117
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2638
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3641
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:673
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:46
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2633
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:231
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:301
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:209
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4273
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:948
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1508
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:237
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:164
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2833
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:808
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:183
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2767
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:270
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:204
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:221
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:512
AVFrame::width
int width
Definition: frame.h:482
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2642
AVPacket::data
uint8_t * data
Definition: packet.h:535
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:377
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:110
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:51
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:195
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2880
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac for the current non-intra MB.
Definition: mpegvideo.c:490
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
MPVEncContext::b_field_select_table
uint8_t *[2][2] b_field_select_table
allocated jointly with p_field_select_table
Definition: mpegvideoenc.h:90
MPVEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types.
Definition: mpegvideoenc.h:92
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:210
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:514
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:553
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:287
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:82
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
MBBackup::c
struct MBBackup::@208 c
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2815
MPVEncContext::lambda_table
int * lambda_table
Definition: mpegvideoenc.h:53
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2249
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2648
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:936
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:55
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:182
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1241
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:245
mpegutils.h
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:575
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:590
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:862
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:75
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:227
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:281
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:431
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:182
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:56
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
update_mb_info
static void update_mb_info(MPVEncContext *const s, int startcode)
Definition: mpegvideo_enc.c:2920
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2642
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:186
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1661
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:356
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1163
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2645
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:878
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1929
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:315
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2636
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:57
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:204
faandct.h
Floating point AAN DCT.
MPVEncContext::mb_mean
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegvideoenc.h:95
MPVEncContext::b_bidir_forw_mv_table
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame.
Definition: mpegvideoenc.h:85
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:198
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:54
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:829
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:179
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2639
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:722
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:305
fail
#define fail()
Definition: checkasm.h:196
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:58
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:109
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:996
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:272
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:188
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1209
MPVEncContext::mb_var
uint16_t * mb_var
Table for MB variances.
Definition: mpegvideoenc.h:93
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:290
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1245
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1256
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2789
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:216
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_mpeg1_encode_init
static void ff_mpeg1_encode_init(MPVEncContext *s)
Definition: mpeg12enc.h:33
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:379
AVRational::num
int num
Numerator.
Definition: rational.h:59
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:145
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:274
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:279
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:222
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1445
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:234
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:184
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:273
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:50
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:96
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:116
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4275
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:697
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:202
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1231
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MPVMainEncContext *const m)
Definition: rv10enc.c:34
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:122
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:302
ff_mpvenc_dct_init_mips
av_cold void ff_mpvenc_dct_init_mips(MPVEncContext *s)
Definition: mpegvideoenc_init_mips.c:26
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:205
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2644
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:189
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:181
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:246
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3606
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:804
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:240
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:49
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:233
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:228
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:183
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1067
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:239
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1270
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4621
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1505
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:279
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:275
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2634
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:829
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2858
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:56
arg
const char * arg
Definition: jacosubdec.c:67
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:272
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:471
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1255
MECmpContext
Definition: me_cmp.h:55
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:196
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:280
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:124
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:212
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:601
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:272
run
uint8_t run
Definition: svq3.c:207
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:204
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:95
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:228
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:109
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:912
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:276
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:58
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:217
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:240
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:52
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:336
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1792
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:447
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:547
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:183
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3605
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:868
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1107
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2637
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:937
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2943
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:120
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:143
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1150
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1305
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1295
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2640
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:256
MPVMainEncContext
Definition: mpegvideoenc.h:178
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:186
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:815
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1304
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:812
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:818
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1380
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1312
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:238
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:502
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:82
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2215
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:536
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1005
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:161
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:44
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:216
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:59
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2634
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:215
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:532
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:382
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:125
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:284
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:269
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:275
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:98
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:232
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:289
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3695
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:289
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:67
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:478
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:77
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:534
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:378
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:185
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:188
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:541
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:64
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:294
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:242
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:265
denoise_dct_c
static void denoise_dct_c(MPVEncContext *const s, int16_t *block)
Definition: mpegvideo_enc.c:3936
MPVEncContext::p_field_select_table
uint8_t *[2] p_field_select_table
Only the first element is allocated.
Definition: mpegvideoenc.h:89
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:282
MBBackup
Definition: mpegvideo_enc.c:2630
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:271
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:303
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:309
MPVEncContext::b_field_mv_table
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame.
Definition: mpegvideoenc.h:88
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:388
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2635
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:110
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:301
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2632
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:528
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *m)
Definition: rv20enc.c:37
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2977
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:250
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2645
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:37
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:105
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:971
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:126
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:284
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:204
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:497
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:274
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:43
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:281
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:525
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:235
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:159
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:879
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:498
MPVEncContext::b_bidir_back_mv_table
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame.
Definition: mpegvideoenc.h:86
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:211
MPVEncContext::mc_mb_var
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegvideoenc.h:94
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:38
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:285
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:239
AVCodecContext::height
int height
Definition: avcodec.h:592
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:493
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1272
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:900
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1852
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2273
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, int bits_per_raw_sample)
Definition: pixblockdsp.c:87
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:213
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:300
MAX_AC_TEX_MB_SIZE
@ MAX_AC_TEX_MB_SIZE
Definition: mpeg4videoenc.h:39
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1357
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:94
MPVEncContext::b_direct_mv_table
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame.
Definition: mpegvideoenc.h:87
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:111
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:199
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:304
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:836
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:224
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2643
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:482
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2636
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:212
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3604
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:232
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1234
AVRational::den
int den
Denominator.
Definition: rational.h:60
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2642
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:822
MPVEncContext::p_mv_table
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame.
Definition: mpegvideoenc.h:82
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:263
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:270
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:223
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:85
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:939
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:892
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:266
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:769
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4294
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:545
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1284
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1269
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3613
MPVEncContext::b_forw_mv_table
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame.
Definition: mpegvideoenc.h:83
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:203
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:181
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1021
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:938
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:512
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:957
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:253
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:207
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:122
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:455
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: packet.c:610
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2636
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:191
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:880
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4646
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2645
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:288
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:115
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:281
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:198
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:62
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:179
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2900
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2642
pixblockdsp.h
MPVEncContext::b_back_mv_table
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame.
Definition: mpegvideoenc.h:84
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:954
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:911
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:300
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:104
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:711
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1605
intmath.h
MPVEncContext::mpeg_quant
int mpeg_quant
Definition: mpegvideoenc.h:150