mpeg4videoenc.c
1 /*
2  * MPEG-4 encoder
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "libavutil/attributes.h"
24 #include "libavutil/log.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/thread.h"
28 #include "codec_internal.h"
29 #include "mpegvideo.h"
30 #include "h263.h"
31 #include "h263enc.h"
32 #include "mpeg4video.h"
33 #include "mpeg4videodata.h"
34 #include "mpeg4videodefs.h"
35 #include "mpeg4videoenc.h"
36 #include "mpegvideoenc.h"
37 #include "profiles.h"
38 #include "put_bits.h"
39 #include "version.h"
40 
41 /**
42  * Minimal fcode that a motion vector component would need.
43  */
44 static uint8_t fcode_tab[MAX_MV*2+1];
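A minimal sketch of the relation this table encodes, with a hypothetical helper name: for a given f_code a motion vector component must lie in [-(16 << f_code), (16 << f_code) - 1], so the table holds the smallest covering f_code per component, matching how mpeg4_encode_init_static() fills it near the end of the file (assuming MAX_FCODE is 7).

    static int min_fcode_for_mv_component(int mv)
    {
        for (int f_code = 1; f_code <= 7; f_code++)         /* assumed MAX_FCODE */
            if (mv >= -(16 << f_code) && mv < (16 << f_code))
                return f_code;
        return -1;                                           /* component out of range */
    }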
45 
46 /* The uni_DCtab_* tables below contain unified bits+length tables to encode DC
47  * differences in MPEG-4. Unified in the sense that the specification specifies
48  * this encoding in several steps. */
49 static uint8_t uni_DCtab_lum_len[512];
50 static uint8_t uni_DCtab_chrom_len[512];
51 static uint16_t uni_DCtab_lum_bits[512];
52 static uint16_t uni_DCtab_chrom_bits[512];
53 
54 /* Unified encoding tables for run length encoding of coefficients.
55  * Unified in the sense that the specification specifies the encoding in several steps. */
56 static uint32_t uni_mpeg4_intra_rl_bits[64 * 64 * 2 * 2];
57 static uint8_t uni_mpeg4_intra_rl_len[64 * 64 * 2 * 2];
58 static uint32_t uni_mpeg4_inter_rl_bits[64 * 64 * 2 * 2];
59 static uint8_t uni_mpeg4_inter_rl_len[64 * 64 * 2 * 2];
60 
61 //#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 + (run) * 256 + (level))
62 //#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) + (level) * 64)
63 #define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))
64 
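The active UNI_MPEG4_ENC_INDEX() variant above lays the 2*64*128-entry tables out as last-major, then run, with a 128-wide slot for the level; the encoder adds a +64 bias to the signed level before the lookup (see mpeg4_encode_ac_coeffs() below), while init_uni_mpeg4_rl_tab() gets the same effect by advancing the table pointers by 64. A small sketch of the addressing; the helper name is hypothetical.

    static int uni_index(int last, int run, int signed_level)
    {
        int level = signed_level + 64;              /* bias: -64..63 -> 0..127 */
        return last * 128 * 64 + run * 128 + level;
    }
    /* e.g. uni_index(0, 0, 1) == 65, uni_index(1, 2, -3) == 8192 + 256 + 61 */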
65 /* MPEG-4
66  * inter
67  * max level: 24/6
68  * max run: 53/63
69  *
70  * intra
71  * max level: 53/16
72  * max run: 29/41
73  */
74 
75 typedef struct Mpeg4EncContext {
76  MPVMainEncContext m;
77  /// number of bits to represent the fractional part of time
78  int time_increment_bits;
79 } Mpeg4EncContext;
80 
81 static Mpeg4EncContext *mainctx_to_mpeg4(MPVMainEncContext *m)
82 {
83  return (Mpeg4EncContext*)m;
84 }
85 
86 /**
87  * Return the number of bits needed to encode the given 8x8 block.
88  * @param[in] block_last_index last index in scantable order that refers to a non-zero element in block.
89  */
90 static inline int get_block_rate(MPVEncContext *const s, int16_t block[64],
91  int block_last_index, const uint8_t scantable[64])
92 {
93  int last = 0;
94  int j;
95  int rate = 0;
96 
97  for (j = 1; j <= block_last_index; j++) {
98  const int index = scantable[j];
99  int level = block[index];
100  if (level) {
101  level += 64;
102  if ((level & (~127)) == 0) {
103  if (j < block_last_index)
104  rate += s->intra_ac_vlc_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
105  else
106  rate += s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
107  } else
108  rate += s->ac_esc_length;
109 
110  last = j;
111  }
112  }
113 
114  return rate;
115 }
116 
117 /**
118  * Restore the ac coefficients in block that have been changed by decide_ac_pred().
119  * This function also restores s->c.block_last_index.
120  * @param[in,out] block MB coefficients, these will be restored
121  * @param[in] dir ac prediction direction for each 8x8 block
122  * @param[out] st scantable for each 8x8 block
123  * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
124  */
125 static inline void restore_ac_coeffs(MPVEncContext *const s, int16_t block[6][64],
126  const int dir[6], const uint8_t *st[6],
127  const int zigzag_last_index[6])
128 {
129  int i, n;
130  memcpy(s->c.block_last_index, zigzag_last_index, sizeof(int) * 6);
131 
132  for (n = 0; n < 6; n++) {
133  int16_t *ac_val = &s->c.ac_val[0][0][0] + s->c.block_index[n] * 16;
134 
135  st[n] = s->c.intra_scantable.permutated;
136  if (dir[n]) {
137  /* top prediction */
138  for (i = 1; i < 8; i++)
139  block[n][s->c.idsp.idct_permutation[i]] = ac_val[i + 8];
140  } else {
141  /* left prediction */
142  for (i = 1; i < 8; i++)
143  block[n][s->c.idsp.idct_permutation[i << 3]] = ac_val[i];
144  }
145  }
146 }
147 
148 /**
149  * Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
150  * This function will also update s->c.block_last_index and s->c.ac_val.
151  * @param[in,out] block MB coefficients, these will be updated if 1 is returned
152  * @param[in] dir ac prediction direction for each 8x8 block
153  * @param[out] st scantable for each 8x8 block
154  * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
155  */
156 static inline int decide_ac_pred(MPVEncContext *const s, int16_t block[6][64],
157  const int dir[6], const uint8_t *st[6],
158  int zigzag_last_index[6])
159 {
160  int score = 0;
161  int i, n;
162  const int8_t *const qscale_table = s->c.cur_pic.qscale_table;
163 
164  memcpy(zigzag_last_index, s->c.block_last_index, sizeof(int) * 6);
165 
166  for (n = 0; n < 6; n++) {
167  int16_t *ac_val, *ac_val1;
168 
169  score -= get_block_rate(s, block[n], s->c.block_last_index[n],
170  s->c.intra_scantable.permutated);
171 
172  ac_val = &s->c.ac_val[0][0][0] + s->c.block_index[n] * 16;
173  ac_val1 = ac_val;
174  if (dir[n]) {
175  const int xy = s->c.mb_x + s->c.mb_y * s->c.mb_stride - s->c.mb_stride;
176  /* top prediction */
177  ac_val -= s->c.block_wrap[n] * 16;
178  if (s->c.mb_y == 0 || s->c.qscale == qscale_table[xy] || n == 2 || n == 3) {
179  /* same qscale */
180  for (i = 1; i < 8; i++) {
181  const int level = block[n][s->c.idsp.idct_permutation[i]];
182  block[n][s->c.idsp.idct_permutation[i]] = level - ac_val[i + 8];
183  ac_val1[i] = block[n][s->c.idsp.idct_permutation[i << 3]];
184  ac_val1[i + 8] = level;
185  }
186  } else {
187  /* different qscale, we must rescale */
188  for (i = 1; i < 8; i++) {
189  const int level = block[n][s->c.idsp.idct_permutation[i]];
190  block[n][s->c.idsp.idct_permutation[i]] = level - ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->c.qscale);
191  ac_val1[i] = block[n][s->c.idsp.idct_permutation[i << 3]];
192  ac_val1[i + 8] = level;
193  }
194  }
195  st[n] = s->c.permutated_intra_h_scantable;
196  } else {
197  const int xy = s->c.mb_x - 1 + s->c.mb_y * s->c.mb_stride;
198  /* left prediction */
199  ac_val -= 16;
200  if (s->c.mb_x == 0 || s->c.qscale == qscale_table[xy] || n == 1 || n == 3) {
201  /* same qscale */
202  for (i = 1; i < 8; i++) {
203  const int level = block[n][s->c.idsp.idct_permutation[i << 3]];
204  block[n][s->c.idsp.idct_permutation[i << 3]] = level - ac_val[i];
205  ac_val1[i] = level;
206  ac_val1[i + 8] = block[n][s->c.idsp.idct_permutation[i]];
207  }
208  } else {
209  /* different qscale, we must rescale */
210  for (i = 1; i < 8; i++) {
211  const int level = block[n][s->c.idsp.idct_permutation[i << 3]];
212  block[n][s->c.idsp.idct_permutation[i << 3]] = level - ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->c.qscale);
213  ac_val1[i] = level;
214  ac_val1[i + 8] = block[n][s->c.idsp.idct_permutation[i]];
215  }
216  }
217  st[n] = s->c.permutated_intra_v_scantable;
218  }
219 
220  for (i = 63; i > 0; i--) // FIXME optimize
221  if (block[n][st[n][i]])
222  break;
223  s->c.block_last_index[n] = i;
224 
225  score += get_block_rate(s, block[n], s->c.block_last_index[n], st[n]);
226  }
227 
228  if (score < 0) {
229  return 1;
230  } else {
231  restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
232  return 0;
233  }
234 }
235 
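decide_ac_pred() above keeps AC prediction only when it pays for itself: it subtracts the cost of coding each block unpredicted and adds the cost after prediction, so a negative total means fewer bits with prediction; otherwise restore_ac_coeffs() undoes the change. A reduced sketch of that decision rule with hypothetical names:

    static int ac_pred_wins(const int bits_without_pred[6], const int bits_with_pred[6])
    {
        int score = 0;
        for (int n = 0; n < 6; n++)
            score += bits_with_pred[n] - bits_without_pred[n];
        return score < 0;       /* negative: prediction saves bits, keep ac_pred */
    }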
236 /**
237  * modify mb_type & qscale so that encoding is actually possible in MPEG-4
238  */
239 void ff_clean_mpeg4_qscales(MPVEncContext *const s)
240 {
241  ff_clean_h263_qscales(s);
242 
243  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
244  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
245  int odd = 0;
246  /* ok, come on, this isn't funny anymore, there's more code for
247  * handling this MPEG-4 mess than for the actual adaptive quantization */
248 
249  for (int i = 0; i < s->c.mb_num; i++) {
250  int mb_xy = s->c.mb_index2xy[i];
251  odd += qscale_table[mb_xy] & 1;
252  }
253 
254  if (2 * odd > s->c.mb_num)
255  odd = 1;
256  else
257  odd = 0;
258 
259  for (int i = 0; i < s->c.mb_num; i++) {
260  int mb_xy = s->c.mb_index2xy[i];
261  if ((qscale_table[mb_xy] & 1) != odd)
262  qscale_table[mb_xy]++;
263  if (qscale_table[mb_xy] > 31)
264  qscale_table[mb_xy] = 31;
265  }
266 
267  for (int i = 1; i < s->c.mb_num; i++) {
268  int mb_xy = s->c.mb_index2xy[i];
269  if (qscale_table[mb_xy] != qscale_table[s->c.mb_index2xy[i - 1]] &&
270  (s->mb_type[mb_xy] & CANDIDATE_MB_TYPE_DIRECT)) {
271  s->mb_type[mb_xy] |= CANDIDATE_MB_TYPE_BIDIR;
272  }
273  }
274  }
275 }
276 
277 /**
278  * Encode the dc value.
279  * @param n block index (0-3 are luma, 4-5 are chroma)
280  */
281 static inline void mpeg4_encode_dc(PutBitContext *s, int level, int n)
282 {
283  /* DC will overflow if level is outside the [-255,255] range. */
284  level += 256;
285  if (n < 4) {
286  /* luminance */
287  put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
288  } else {
289  /* chrominance */
290  put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
291  }
292 }
293 
294 /**
295  * Encode the AC coefficients of an 8x8 block.
296  */
297 static inline void mpeg4_encode_ac_coeffs(const int16_t block[64],
298  const int last_index, int i,
299  const uint8_t *const scan_table,
300  PutBitContext *const ac_pb,
301  const uint32_t *const bits_tab,
302  const uint8_t *const len_tab)
303 {
304  int last_non_zero = i - 1;
305 
306  /* AC coefs */
307  for (; i < last_index; i++) {
308  int level = block[scan_table[i]];
309  if (level) {
310  int run = i - last_non_zero - 1;
311  level += 64;
312  if ((level & (~127)) == 0) {
313  const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
314  put_bits(ac_pb, len_tab[index], bits_tab[index]);
315  } else { // ESC3
316  put_bits(ac_pb,
317  7 + 2 + 1 + 6 + 1 + 12 + 1,
318  (3 << 23) + (3 << 21) + (0 << 20) + (run << 14) +
319  (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
320  }
321  last_non_zero = i;
322  }
323  }
324  /* if (i <= last_index) */ {
325  int level = block[scan_table[i]];
326  int run = i - last_non_zero - 1;
327  level += 64;
328  if ((level & (~127)) == 0) {
329  const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
330  put_bits(ac_pb, len_tab[index], bits_tab[index]);
331  } else { // ESC3
332  put_bits(ac_pb,
333  7 + 2 + 1 + 6 + 1 + 12 + 1,
334  (3 << 23) + (3 << 21) + (1 << 20) + (run << 14) +
335  (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
336  }
337  }
338 }
339 
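Both escape branches above pack one 30-bit type-3 escape word. Its layout, from the most significant bit, is: the 7-bit escape prefix (value 3), 2 bits selecting escape type 3, the LAST bit, 6 RUN bits, a marker bit, 12 bits of signed level (after removing the +64 bias), and a closing marker bit. A sketch of that packing with a hypothetical helper:

    static unsigned esc3_codeword(int last, int run, int signed_level)
    {
        return (3u << 23) | (3u << 21) |                 /* escape prefix + "type 3" */
               ((unsigned)last << 20) |
               ((unsigned)run  << 14) |
               (1u << 13) |                              /* marker */
               (((unsigned)signed_level & 0xfff) << 1) |
               1u;                                       /* marker */
    }
    /* 7 + 2 + 1 + 6 + 1 + 12 + 1 = 30 bits, the length passed to put_bits() above */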
340 static void mpeg4_encode_blocks_inter(MPVEncContext *const s,
341  const int16_t block[6][64],
342  PutBitContext *ac_pb)
343 {
344  /* encode each block */
345  for (int n = 0; n < 6; ++n) {
346  const int last_index = s->c.block_last_index[n];
347  if (last_index < 0)
348  continue;
349 
350  mpeg4_encode_ac_coeffs(block[n], last_index, 0,
351  s->c.intra_scantable.permutated, ac_pb,
352  uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
353  }
354 }
355 
356 static void mpeg4_encode_blocks_intra(MPVEncContext *const s,
357  const int16_t block[6][64],
358  const int intra_dc[6],
359  const uint8_t * const *scan_table,
360  PutBitContext *dc_pb,
361  PutBitContext *ac_pb)
362 {
363  /* encode each block */
364  for (int n = 0; n < 6; ++n) {
365  mpeg4_encode_dc(dc_pb, intra_dc[n], n);
366 
367  const int last_index = s->c.block_last_index[n];
368  if (last_index <= 0)
369  continue;
370 
371  mpeg4_encode_ac_coeffs(block[n], last_index, 1,
372  scan_table[n], ac_pb,
373  uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
374  }
375 }
376 
377 static inline int get_b_cbp(MPVEncContext *const s, int16_t block[6][64],
378  int motion_x, int motion_y, int mb_type)
379 {
380  int cbp = 0, i;
381 
382  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
383  int score = 0;
384  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
385 
386  for (i = 0; i < 6; i++) {
387  if (s->coded_score[i] < 0) {
388  score += s->coded_score[i];
389  cbp |= 1 << (5 - i);
390  }
391  }
392 
393  if (cbp) {
394  int zero_score = -6;
395  if ((motion_x | motion_y | s->dquant | mb_type) == 0)
396  zero_score -= 4; // 2 * MV + mb_type + cbp bit
397 
398  zero_score *= lambda;
399  if (zero_score <= score)
400  cbp = 0;
401  }
402 
403  for (i = 0; i < 6; i++) {
404  if (s->c.block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
405  s->c.block_last_index[i] = -1;
406  s->c.bdsp.clear_block(s->c.block[i]);
407  }
408  }
409  } else {
410  for (i = 0; i < 6; i++) {
411  if (s->c.block_last_index[i] >= 0)
412  cbp |= 1 << (5 - i);
413  }
414  }
415  return cbp;
416 }
417 
418 // FIXME this is duplicated to h263.c
419 static const int dquant_code[5] = { 1, 0, 9, 2, 3 };
420 
421 static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64],
422  int motion_x, int motion_y)
423 {
424  int cbpc, cbpy, pred_x, pred_y;
425  PutBitContext *const pb2 = s->c.data_partitioning ? &s->pb2 : &s->pb;
426  PutBitContext *const tex_pb = s->c.data_partitioning && s->c.pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
427  PutBitContext *const dc_pb = s->c.data_partitioning && s->c.pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
428  const int interleaved_stats = (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->c.data_partitioning ? 1 : 0;
429 
430  if (!s->c.mb_intra) {
431  int i, cbp;
432 
433  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
434  /* convert from mv_dir to type */
435  static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
436  int mb_type = mb_type_table[s->c.mv_dir];
437 
438  if (s->c.mb_x == 0) {
439  for (i = 0; i < 2; i++)
440  s->c.last_mv[i][0][0] =
441  s->c.last_mv[i][0][1] =
442  s->c.last_mv[i][1][0] =
443  s->c.last_mv[i][1][1] = 0;
444  }
445 
446  av_assert2(s->dquant >= -2 && s->dquant <= 2);
447  av_assert2((s->dquant & 1) == 0);
448  av_assert2(mb_type >= 0);
449 
450  /* nothing to do if this MB was skipped in the next P-frame */
451  if (s->c.next_pic.mbskip_table[s->c.mb_y * s->c.mb_stride + s->c.mb_x]) { // FIXME avoid DCT & ...
452  s->c.mv[0][0][0] =
453  s->c.mv[0][0][1] =
454  s->c.mv[1][0][0] =
455  s->c.mv[1][0][1] = 0;
456  s->c.mv_dir = MV_DIR_FORWARD; // doesn't matter
457  s->c.qscale -= s->dquant;
458 // s->c.mb_skipped = 1;
459 
460  return;
461  }
462 
463  cbp = get_b_cbp(s, block, motion_x, motion_y, mb_type);
464 
465  if ((cbp | motion_x | motion_y | mb_type) == 0) {
466  /* direct MB with MV={0,0} */
467  av_assert2(s->dquant == 0);
468 
469  put_bits(&s->pb, 1, 1); /* mb not coded modb1=1 */
470 
471  if (interleaved_stats) {
472  s->misc_bits++;
473  s->last_bits++;
474  }
475  return;
476  }
477 
478  put_bits(&s->pb, 1, 0); /* mb coded modb1=0 */
479  put_bits(&s->pb, 1, cbp ? 0 : 1); /* modb2 */ // FIXME merge
480  put_bits(&s->pb, mb_type + 1, 1); // this table is so simple that we don't need it :)
481  if (cbp)
482  put_bits(&s->pb, 6, cbp);
483 
484  if (cbp && mb_type) {
485  if (s->dquant)
486  put_bits(&s->pb, 2, (s->dquant >> 2) + 3);
487  else
488  put_bits(&s->pb, 1, 0);
489  } else
490  s->c.qscale -= s->dquant;
491 
492  if (!s->c.progressive_sequence) {
493  if (cbp)
494  put_bits(&s->pb, 1, s->c.interlaced_dct);
495  if (mb_type) // not direct mode
496  put_bits(&s->pb, 1, s->c.mv_type == MV_TYPE_FIELD);
497  }
498 
499  if (interleaved_stats)
500  s->misc_bits += get_bits_diff(s);
501 
502  if (!mb_type) {
503  av_assert2(s->c.mv_dir & MV_DIRECT);
504  ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
505  } else {
506  av_assert2(mb_type > 0 && mb_type < 4);
507  if (s->c.mv_type != MV_TYPE_FIELD) {
508  if (s->c.mv_dir & MV_DIR_FORWARD) {
509  ff_h263_encode_motion_vector(s,
510  s->c.mv[0][0][0] - s->c.last_mv[0][0][0],
511  s->c.mv[0][0][1] - s->c.last_mv[0][0][1],
512  s->f_code);
513  s->c.last_mv[0][0][0] =
514  s->c.last_mv[0][1][0] = s->c.mv[0][0][0];
515  s->c.last_mv[0][0][1] =
516  s->c.last_mv[0][1][1] = s->c.mv[0][0][1];
517  }
518  if (s->c.mv_dir & MV_DIR_BACKWARD) {
519  ff_h263_encode_motion_vector(s,
520  s->c.mv[1][0][0] - s->c.last_mv[1][0][0],
521  s->c.mv[1][0][1] - s->c.last_mv[1][0][1],
522  s->b_code);
523  s->c.last_mv[1][0][0] =
524  s->c.last_mv[1][1][0] = s->c.mv[1][0][0];
525  s->c.last_mv[1][0][1] =
526  s->c.last_mv[1][1][1] = s->c.mv[1][0][1];
527  }
528  } else {
529  if (s->c.mv_dir & MV_DIR_FORWARD) {
530  put_bits(&s->pb, 1, s->c.field_select[0][0]);
531  put_bits(&s->pb, 1, s->c.field_select[0][1]);
532  }
533  if (s->c.mv_dir & MV_DIR_BACKWARD) {
534  put_bits(&s->pb, 1, s->c.field_select[1][0]);
535  put_bits(&s->pb, 1, s->c.field_select[1][1]);
536  }
537  if (s->c.mv_dir & MV_DIR_FORWARD) {
538  for (i = 0; i < 2; i++) {
539  ff_h263_encode_motion_vector(s,
540  s->c.mv[0][i][0] - s->c.last_mv[0][i][0],
541  s->c.mv[0][i][1] - s->c.last_mv[0][i][1] / 2,
542  s->f_code);
543  s->c.last_mv[0][i][0] = s->c.mv[0][i][0];
544  s->c.last_mv[0][i][1] = s->c.mv[0][i][1] * 2;
545  }
546  }
547  if (s->c.mv_dir & MV_DIR_BACKWARD) {
548  for (i = 0; i < 2; i++) {
549  ff_h263_encode_motion_vector(s,
550  s->c.mv[1][i][0] - s->c.last_mv[1][i][0],
551  s->c.mv[1][i][1] - s->c.last_mv[1][i][1] / 2,
552  s->b_code);
553  s->c.last_mv[1][i][0] = s->c.mv[1][i][0];
554  s->c.last_mv[1][i][1] = s->c.mv[1][i][1] * 2;
555  }
556  }
557  }
558  }
559 
560  if (interleaved_stats)
561  s->mv_bits += get_bits_diff(s);
562 
563  mpeg4_encode_blocks_inter(s, block, &s->pb);
564 
565  if (interleaved_stats)
566  s->p_tex_bits += get_bits_diff(s);
567  } else { /* s->c.pict_type == AV_PICTURE_TYPE_B */
568  cbp = get_p_cbp(s, block, motion_x, motion_y);
569 
570  if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
571  s->c.mv_type == MV_TYPE_16X16) {
572  const MPVMainEncContext *const m = slice_to_mainenc(s);
573  /* Check if the B-frames can skip it too, as we must skip it
574  * if we skip here; why didn't they just compress
575  * the skip-mb bits instead of reusing them?! */
576  if (m->max_b_frames > 0) {
577  int x, y, offset;
578  const uint8_t *p_pic;
579 
580  x = s->c.mb_x * 16;
581  y = s->c.mb_y * 16;
582 
583  offset = x + y * s->c.linesize;
584  p_pic = s->new_pic->data[0] + offset;
585 
586  s->c.mb_skipped = 1;
587  for (int i = 0; i < m->max_b_frames; i++) {
588  const uint8_t *b_pic;
589  int diff;
590  const MPVPicture *pic = m->reordered_input_picture[i + 1];
591 
592  if (!pic || pic->f->pict_type != AV_PICTURE_TYPE_B)
593  break;
594 
595  b_pic = pic->f->data[0] + offset;
596  if (!pic->shared)
597  b_pic += INPLACE_OFFSET;
598 
599  if (x + 16 > s->c.width || y + 16 > s->c.height) {
600  int x1, y1;
601  int xe = FFMIN(16, s->c.width - x);
602  int ye = FFMIN(16, s->c.height - y);
603  diff = 0;
604  for (y1 = 0; y1 < ye; y1++) {
605  for (x1 = 0; x1 < xe; x1++) {
606  diff += FFABS(p_pic[x1 + y1 * s->c.linesize] - b_pic[x1 + y1 * s->c.linesize]);
607  }
608  }
609  diff = diff * 256 / (xe * ye);
610  } else {
611  diff = s->sad_cmp[0](NULL, p_pic, b_pic, s->c.linesize, 16);
612  }
613  if (diff > s->c.qscale * 70) { // FIXME check that 70 is optimal
614  s->c.mb_skipped = 0;
615  break;
616  }
617  }
618  } else
619  s->c.mb_skipped = 1;
620 
621  if (s->c.mb_skipped == 1) {
622  /* skip macroblock */
623  put_bits(&s->pb, 1, 1);
624 
625  if (interleaved_stats) {
626  s->misc_bits++;
627  s->last_bits++;
628  }
629 
630  return;
631  }
632  }
633 
634  put_bits(&s->pb, 1, 0); /* mb coded */
635  cbpc = cbp & 3;
636  cbpy = cbp >> 2;
637  cbpy ^= 0xf;
638  if (s->c.mv_type == MV_TYPE_16X16) {
639  if (s->dquant)
640  cbpc += 8;
641  put_bits(&s->pb,
642  ff_h263_inter_MCBPC_bits[cbpc],
643  ff_h263_inter_MCBPC_code[cbpc]);
644 
645  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
646  if (s->dquant)
647  put_bits(pb2, 2, dquant_code[s->dquant + 2]);
648 
649  if (!s->c.progressive_sequence) {
650  if (cbp)
651  put_bits(pb2, 1, s->c.interlaced_dct);
652  put_bits(pb2, 1, 0);
653  }
654 
655  if (interleaved_stats)
656  s->misc_bits += get_bits_diff(s);
657 
658  /* motion vectors: 16x16 mode */
659  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
660 
661  ff_h263_encode_motion_vector(s,
662  motion_x - pred_x,
663  motion_y - pred_y,
664  s->f_code);
665  } else if (s->c.mv_type == MV_TYPE_FIELD) {
666  if (s->dquant)
667  cbpc += 8;
668  put_bits(&s->pb,
669  ff_h263_inter_MCBPC_bits[cbpc],
670  ff_h263_inter_MCBPC_code[cbpc]);
671 
672  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
673  if (s->dquant)
674  put_bits(pb2, 2, dquant_code[s->dquant + 2]);
675 
676  av_assert2(!s->c.progressive_sequence);
677  if (cbp)
678  put_bits(pb2, 1, s->c.interlaced_dct);
679  put_bits(pb2, 1, 1);
680 
681  if (interleaved_stats)
682  s->misc_bits += get_bits_diff(s);
683 
684  /* motion vectors: 16x8 interlaced mode */
685  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
686  pred_y /= 2;
687 
688  put_bits(&s->pb, 1, s->c.field_select[0][0]);
689  put_bits(&s->pb, 1, s->c.field_select[0][1]);
690 
691  ff_h263_encode_motion_vector(s,
692  s->c.mv[0][0][0] - pred_x,
693  s->c.mv[0][0][1] - pred_y,
694  s->f_code);
695  ff_h263_encode_motion_vector(s,
696  s->c.mv[0][1][0] - pred_x,
697  s->c.mv[0][1][1] - pred_y,
698  s->f_code);
699  } else {
700  av_assert2(s->c.mv_type == MV_TYPE_8X8);
701  put_bits(&s->pb,
702  ff_h263_inter_MCBPC_bits[cbpc + 16],
703  ff_h263_inter_MCBPC_code[cbpc + 16]);
704  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
705 
706  if (!s->c.progressive_sequence && cbp)
707  put_bits(pb2, 1, s->c.interlaced_dct);
708 
709  if (interleaved_stats)
710  s->misc_bits += get_bits_diff(s);
711 
712  for (i = 0; i < 4; i++) {
713  /* motion vectors: 8x8 mode*/
714  ff_h263_pred_motion(&s->c, i, 0, &pred_x, &pred_y);
715 
716  ff_h263_encode_motion_vector(s,
717  s->c.cur_pic.motion_val[0][s->c.block_index[i]][0] - pred_x,
718  s->c.cur_pic.motion_val[0][s->c.block_index[i]][1] - pred_y,
719  s->f_code);
720  }
721  }
722 
723  if (interleaved_stats)
724  s->mv_bits += get_bits_diff(s);
725 
726  mpeg4_encode_blocks_inter(s, block, tex_pb);
727 
728  if (interleaved_stats)
729  s->p_tex_bits += get_bits_diff(s);
730  }
731  } else {
732  int cbp;
733  int dc_diff[6]; // dc values with the dc prediction subtracted
734  int dir[6]; // prediction direction
735  int zigzag_last_index[6];
736  const uint8_t *scan_table[6];
737  int i;
738 
739  for (int i = 0; i < 6; i++) {
740  int pred = ff_mpeg4_pred_dc(&s->c, i, &dir[i]);
741  int scale = i < 4 ? s->c.y_dc_scale : s->c.c_dc_scale;
742 
743  pred = FASTDIV((pred + (scale >> 1)), scale);
744  dc_diff[i] = block[i][0] - pred;
745  s->c.dc_val[0][s->c.block_index[i]] = av_clip_uintp2(block[i][0] * scale, 11);
746  }
747 
748  if (s->c.avctx->flags & AV_CODEC_FLAG_AC_PRED) {
749  s->c.ac_pred = decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
750  } else {
751  for (i = 0; i < 6; i++)
752  scan_table[i] = s->c.intra_scantable.permutated;
753  }
754 
755  /* compute cbp */
756  cbp = 0;
757  for (i = 0; i < 6; i++)
758  if (s->c.block_last_index[i] >= 1)
759  cbp |= 1 << (5 - i);
760 
761  cbpc = cbp & 3;
762  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
763  if (s->dquant)
764  cbpc += 4;
765  put_bits(&s->pb,
766  ff_h263_intra_MCBPC_bits[cbpc],
767  ff_h263_intra_MCBPC_code[cbpc]);
768  } else {
769  if (s->dquant)
770  cbpc += 8;
771  put_bits(&s->pb, 1, 0); /* mb coded */
772  put_bits(&s->pb,
773  ff_h263_inter_MCBPC_bits[cbpc + 4],
774  ff_h263_inter_MCBPC_code[cbpc + 4]);
775  }
776  put_bits(pb2, 1, s->c.ac_pred);
777  cbpy = cbp >> 2;
778  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
779  if (s->dquant)
780  put_bits(dc_pb, 2, dquant_code[s->dquant + 2]);
781 
782  if (!s->c.progressive_sequence)
783  put_bits(dc_pb, 1, s->c.interlaced_dct);
784 
785  if (interleaved_stats)
786  s->misc_bits += get_bits_diff(s);
787 
788  mpeg4_encode_blocks_intra(s, block, dc_diff, scan_table, dc_pb, tex_pb);
789 
790  if (interleaved_stats)
791  s->i_tex_bits += get_bits_diff(s);
792  s->i_count++;
793 
794  /* restore ac coeffs & last_index stuff
795  * if we messed them up with the prediction */
796  if (s->c.ac_pred)
797  restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
798  }
799 }
800 
801 /**
802  * add MPEG-4 stuffing bits (01...1)
803  */
804 void ff_mpeg4_stuffing(PutBitContext *pbc)
805 {
806  int length = 8 - (put_bits_count(pbc) & 7);
807 
808  put_bits(pbc, length, (1 << (length - 1)) - 1);
809 }
810 
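The stuffing written above is a single '0' followed by ones, sized to land on the next byte boundary; when the stream is already aligned, a full byte (0x7f) is emitted. A sketch with a hypothetical helper:

    static unsigned stuffing_pattern(int bits_already_in_byte, int *length)
    {
        *length = 8 - (bits_already_in_byte & 7);   /* 1..8 stuffing bits */
        return (1u << (*length - 1)) - 1;           /* '0' followed by ones */
    }
    /* e.g. 5 bits used in the byte -> write 3 bits 0b011; aligned -> 8 bits 0x7f */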
811 /* must be called before writing the header */
812 void ff_set_mpeg4_time(MPVEncContext *const s)
813 {
814  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
815  ff_mpeg4_init_direct_mv(&s->c);
816  } else {
817  s->c.last_time_base = s->c.time_base;
818  s->c.time_base = FFUDIV(s->c.time, s->c.avctx->time_base.den);
819  }
820 }
821 
822 static void mpeg4_encode_gop_header(MPVMainEncContext *const m)
823 {
824  MPVEncContext *const s = &m->s;
825  int64_t hours, minutes, seconds;
826  int64_t time;
827 
828  put_bits32(&s->pb, GOP_STARTCODE);
829 
830  time = s->c.cur_pic.ptr->f->pts;
831  if (m->reordered_input_picture[1])
832  time = FFMIN(time, m->reordered_input_picture[1]->f->pts);
833  time = time * s->c.avctx->time_base.num;
834  s->c.last_time_base = FFUDIV(time, s->c.avctx->time_base.den);
835 
836  seconds = FFUDIV(time, s->c.avctx->time_base.den);
837  minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60);
838  hours = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60);
839  hours = FFUMOD(hours , 24);
840 
841  put_bits(&s->pb, 5, hours);
842  put_bits(&s->pb, 6, minutes);
843  put_bits(&s->pb, 1, 1);
844  put_bits(&s->pb, 6, seconds);
845 
846  put_bits(&s->pb, 1, !!(s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
847  put_bits(&s->pb, 1, 0); // broken link == NO
848 
849  ff_mpeg4_stuffing(&s->pb);
850 }
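The GOP header above carries a wall-clock style time code derived from the pts of the upcoming pictures. A sketch of the split into hours/minutes/seconds; the helper is hypothetical and, unlike the FFUDIV/FFUMOD used above, plain / and % are only correct for non-negative timestamps:

    static void gop_time_code(long long ticks, int time_base_den,
                              int *hours, int *minutes, int *seconds)
    {
        long long sec = ticks / time_base_den;   /* ticks = pts * time_base.num */
        *seconds = (int)(sec % 60);
        *minutes = (int)((sec / 60) % 60);
        *hours   = (int)((sec / 3600) % 24);
    }
    /* e.g. time_base 1/25, pts 93025 -> 3721 s -> 01:02:01 */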
851 
852 static void mpeg4_encode_visual_object_header(MPVMainEncContext *const m)
853 {
854  MPVEncContext *const s = &m->s;
855  int profile_and_level_indication;
856  int vo_ver_id;
857 
858  if (s->c.avctx->profile != AV_PROFILE_UNKNOWN) {
859  profile_and_level_indication = s->c.avctx->profile << 4;
860  } else if (m->max_b_frames || s->c.quarter_sample) {
861  profile_and_level_indication = 0xF0; // adv simple
862  } else {
863  profile_and_level_indication = 0x00; // simple
864  }
865 
866  if (s->c.avctx->level != AV_LEVEL_UNKNOWN)
867  profile_and_level_indication |= s->c.avctx->level;
868  else
869  profile_and_level_indication |= 1; // level 1
870 
871  if (profile_and_level_indication >> 4 == 0xF)
872  vo_ver_id = 5;
873  else
874  vo_ver_id = 1;
875 
876  // FIXME levels
877 
878  put_bits32(&s->pb, VOS_STARTCODE);
879 
880  put_bits(&s->pb, 8, profile_and_level_indication);
881 
882  put_bits32(&s->pb, VISUAL_OBJ_STARTCODE);
883 
884  put_bits(&s->pb, 1, 1);
885  put_bits(&s->pb, 4, vo_ver_id);
886  put_bits(&s->pb, 3, 1); // priority
887 
888  put_bits(&s->pb, 4, 1); // visual obj type== video obj
889 
890  put_bits(&s->pb, 1, 0); // video signal type == no clue // FIXME
891 
892  ff_mpeg4_stuffing(&s->pb);
893 }
894 
895 static void mpeg4_encode_vol_header(Mpeg4EncContext *const m4,
896  int vo_number,
897  int vol_number)
898 {
899  MPVEncContext *const s = &m4->m.s;
900  int vo_ver_id, vo_type, aspect_ratio_info;
901 
902  if (m4->m.max_b_frames || s->c.quarter_sample) {
903  vo_ver_id = 5;
904  vo_type = ADV_SIMPLE_VO_TYPE;
905  } else {
906  vo_ver_id = 1;
907  vo_type = SIMPLE_VO_TYPE;
908  }
909 
910  put_bits32(&s->pb, 0x100 + vo_number); /* video obj */
911  put_bits32(&s->pb, 0x120 + vol_number); /* video obj layer */
912 
913  put_bits(&s->pb, 1, 0); /* random access vol */
914  put_bits(&s->pb, 8, vo_type); /* video obj type indication */
915  put_bits(&s->pb, 1, 1); /* is obj layer id= yes */
916  put_bits(&s->pb, 4, vo_ver_id); /* is obj layer ver id */
917  put_bits(&s->pb, 3, 1); /* is obj layer priority */
918 
919  aspect_ratio_info = ff_h263_aspect_to_info(s->c.avctx->sample_aspect_ratio);
920 
921  put_bits(&s->pb, 4, aspect_ratio_info); /* aspect ratio info */
922  if (aspect_ratio_info == FF_ASPECT_EXTENDED) {
923  av_reduce(&s->c.avctx->sample_aspect_ratio.num, &s->c.avctx->sample_aspect_ratio.den,
924  s->c.avctx->sample_aspect_ratio.num, s->c.avctx->sample_aspect_ratio.den, 255);
925  put_bits(&s->pb, 8, s->c.avctx->sample_aspect_ratio.num);
926  put_bits(&s->pb, 8, s->c.avctx->sample_aspect_ratio.den);
927  }
928 
929  put_bits(&s->pb, 1, 1); /* vol control parameters= yes */
930  put_bits(&s->pb, 2, 1); /* chroma format YUV 420/YV12 */
931  put_bits(&s->pb, 1, s->c.low_delay);
932  put_bits(&s->pb, 1, 0); /* vbv parameters= no */
933 
934  put_bits(&s->pb, 2, RECT_SHAPE); /* vol shape= rectangle */
935  put_bits(&s->pb, 1, 1); /* marker bit */
936 
937  put_bits(&s->pb, 16, s->c.avctx->time_base.den);
938  if (m4->time_increment_bits < 1)
939  m4->time_increment_bits = 1;
940  put_bits(&s->pb, 1, 1); /* marker bit */
941  put_bits(&s->pb, 1, 0); /* fixed vop rate=no */
942  put_bits(&s->pb, 1, 1); /* marker bit */
943  put_bits(&s->pb, 13, s->c.width); /* vol width */
944  put_bits(&s->pb, 1, 1); /* marker bit */
945  put_bits(&s->pb, 13, s->c.height); /* vol height */
946  put_bits(&s->pb, 1, 1); /* marker bit */
947  put_bits(&s->pb, 1, s->c.progressive_sequence ? 0 : 1);
948  put_bits(&s->pb, 1, 1); /* obmc disable */
949  if (vo_ver_id == 1)
950  put_bits(&s->pb, 1, 0); /* sprite enable */
951  else
952  put_bits(&s->pb, 2, 0); /* sprite enable */
953 
954  put_bits(&s->pb, 1, 0); /* not 8 bit == false */
955  put_bits(&s->pb, 1, s->mpeg_quant); /* quant type = (0 = H.263 style) */
956 
957  if (s->mpeg_quant) {
958  ff_write_quant_matrix(&s->pb, s->c.avctx->intra_matrix);
959  ff_write_quant_matrix(&s->pb, s->c.avctx->inter_matrix);
960  }
961 
962  if (vo_ver_id != 1)
963  put_bits(&s->pb, 1, s->c.quarter_sample);
964  put_bits(&s->pb, 1, 1); /* complexity estimation disable */
965  put_bits(&s->pb, 1, s->rtp_mode ? 0 : 1); /* resync marker disable */
966  put_bits(&s->pb, 1, s->c.data_partitioning ? 1 : 0);
967  if (s->c.data_partitioning)
968  put_bits(&s->pb, 1, 0); /* no rvlc */
969 
970  if (vo_ver_id != 1) {
971  put_bits(&s->pb, 1, 0); /* newpred */
972  put_bits(&s->pb, 1, 0); /* reduced res vop */
973  }
974  put_bits(&s->pb, 1, 0); /* scalability */
975 
976  ff_mpeg4_stuffing(&s->pb);
977 
978  /* user data */
979  if (!(s->c.avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
980  put_bits32(&s->pb, USER_DATA_STARTCODE);
981  ff_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
982  }
983 }
984 
985 /* write MPEG-4 VOP header */
986 static int mpeg4_encode_picture_header(MPVMainEncContext *const m)
987 {
988  Mpeg4EncContext *const m4 = mainctx_to_mpeg4(m);
989  MPVEncContext *const s = &m->s;
990  uint64_t time_incr;
991  int64_t time_div, time_mod;
992 
993  put_bits_assume_flushed(&s->pb);
994 
995  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
996  if (!(s->c.avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
997  if (s->c.avctx->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) // HACK, the reference sw is buggy
998  mpeg4_encode_visual_object_header(m);
999  if (s->c.avctx->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || s->c.picture_number == 0) // HACK, the reference sw is buggy
1000  mpeg4_encode_vol_header(m4, 0, 0);
1001  }
1002  mpeg4_encode_gop_header(m);
1003  }
1004 
1005  s->c.partitioned_frame = s->c.data_partitioning && s->c.pict_type != AV_PICTURE_TYPE_B;
1006 
1007  put_bits32(&s->pb, VOP_STARTCODE); /* vop header */
1008  put_bits(&s->pb, 2, s->c.pict_type - 1); /* pict type: I = 0 , P = 1 */
1009 
1010  time_div = FFUDIV(s->c.time, s->c.avctx->time_base.den);
1011  time_mod = FFUMOD(s->c.time, s->c.avctx->time_base.den);
1012  time_incr = time_div - s->c.last_time_base;
1013 
1014  // This limits the frame duration to max 1 day
1015  if (time_incr > 3600*24) {
1016  av_log(s->c.avctx, AV_LOG_ERROR, "time_incr %"PRIu64" too large\n", time_incr);
1017  return AVERROR(EINVAL);
1018  }
1019  while (time_incr--)
1020  put_bits(&s->pb, 1, 1);
1021 
1022  put_bits(&s->pb, 1, 0);
1023 
1024  put_bits(&s->pb, 1, 1); /* marker */
1025  put_bits(&s->pb, m4->time_increment_bits, time_mod); /* time increment */
1026  put_bits(&s->pb, 1, 1); /* marker */
1027  put_bits(&s->pb, 1, 1); /* vop coded */
1028  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
1029  put_bits(&s->pb, 1, s->c.no_rounding); /* rounding type */
1030  }
1031  put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
1032  if (!s->c.progressive_sequence) {
1033  put_bits(&s->pb, 1, !!(s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST));
1034  put_bits(&s->pb, 1, s->c.alternate_scan);
1035  }
1036  // FIXME sprite stuff
1037 
1038  put_bits(&s->pb, 5, s->c.qscale);
1039 
1040  if (s->c.pict_type != AV_PICTURE_TYPE_I)
1041  put_bits(&s->pb, 3, s->f_code); /* fcode_for */
1042  if (s->c.pict_type == AV_PICTURE_TYPE_B)
1043  put_bits(&s->pb, 3, s->b_code); /* fcode_back */
1044 
1045  return 0;
1046 }
1047 
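The VOP header splits the timestamp into modulo_time_base (one '1' bit per whole second elapsed since the last time base, closed by a '0') and vop_time_increment (the sub-second remainder, written in time_increment_bits bits). A sketch of that split with hypothetical names, ignoring the negative-value handling that FFUDIV/FFUMOD provide:

    static void vop_time_fields(long long ticks, int den, long long last_time_base,
                                long long *modulo_ones, long long *time_increment)
    {
        long long time_div = ticks / den;             /* whole seconds */
        long long time_mod = ticks % den;             /* remainder, 0..den-1 */
        *modulo_ones    = time_div - last_time_base;  /* number of '1' bits to write */
        *time_increment = time_mod;                   /* value for the increment field */
    }
    /* e.g. den = 25, ticks = 77, last_time_base = 2 -> one '1' bit, increment 2 */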
1048 static av_cold void init_uni_dc_tab(void)
1049 {
1050  int level, uni_code, uni_len;
1051 
1052  for (level = -256; level < 256; level++) {
1053  int size, v, l;
1054  /* find number of bits */
1055  size = 0;
1056  v = abs(level);
1057  while (v) {
1058  v >>= 1;
1059  size++;
1060  }
1061 
1062  if (level < 0)
1063  l = (-level) ^ ((1 << size) - 1);
1064  else
1065  l = level;
1066 
1067  /* luminance */
1068  uni_code = ff_mpeg4_DCtab_lum[size][0];
1069  uni_len = ff_mpeg4_DCtab_lum[size][1];
1070 
1071  if (size > 0) {
1072  uni_code <<= size;
1073  uni_code |= l;
1074  uni_len += size;
1075  if (size > 8) {
1076  uni_code <<= 1;
1077  uni_code |= 1;
1078  uni_len++;
1079  }
1080  }
1081  uni_DCtab_lum_bits[level + 256] = uni_code;
1082  uni_DCtab_lum_len[level + 256] = uni_len;
1083 
1084  /* chrominance */
1085  uni_code = ff_mpeg4_DCtab_chrom[size][0];
1086  uni_len = ff_mpeg4_DCtab_chrom[size][1];
1087 
1088  if (size > 0) {
1089  uni_code <<= size;
1090  uni_code |= l;
1091  uni_len += size;
1092  if (size > 8) {
1093  uni_code <<= 1;
1094  uni_code |= 1;
1095  uni_len++;
1096  }
1097  }
1098  uni_DCtab_chrom_bits[level + 256] = uni_code;
1099  uni_DCtab_chrom_len[level + 256] = uni_len;
1100  }
1101 }
1102 
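init_uni_dc_tab() above folds the specification's two-step DC coding into one lookup: a size category (number of bits of |level|) coded with ff_mpeg4_DCtab_lum/chrom, then size additional bits holding the magnitude, negatives as one's complement, plus a marker bit when size > 8. A sketch of the per-level step with a hypothetical helper:

    static void dc_size_and_extra_bits(int level, int *size, int *extra)
    {
        int v = level < 0 ? -level : level;
        *size = 0;
        while (v) { v >>= 1; (*size)++; }                   /* bits needed for |level| */
        *extra = level < 0 ? (-level) ^ ((1 << *size) - 1)  /* one's complement */
                           : level;
    }
    /* e.g. level = -5 -> size 3, extra 0b010; level = 13 -> size 4, extra 0b1101 */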
1103 static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab,
1104  uint8_t *len_tab)
1105 {
1106  // Type 3 escape method. The escape code is the same for both VLCs
1107  // (0x3, seven bits), so it is hardcoded.
1108  memset(len_tab, 30, 2 * 2 * 64 * 64);
1109  len_tab += 64;
1110  bits_tab += 64;
1111  for (int run = 0; run < 64; ++run) {
1112  for (int level = 1;; ++level) {
1113  // Escape code type 3 not last run (6 bits) marker marker
1114  unsigned code = (3 << 23) | (3 << 21) | (0 << 20) | (run << 14) | (1 << 13) | 1;
1115  // first the negative levels
1116  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, -level)] = code | (-level & 0xfff) << 1;
1117  bits_tab[UNI_MPEG4_ENC_INDEX(1, run, -level)] =
1118  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, -level)] | (1 << 20) /* last */;
1119 
1120  if (level == 64) // positive levels have a range of 1..63
1121  break;
1122  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, level)] = code | level << 1;
1123  bits_tab[UNI_MPEG4_ENC_INDEX(1, run, level)] =
1124  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, level)] | (1 << 20) /* last */;
1125  }
1126  // Is this needed at all?
1127  len_tab[UNI_MPEG4_ENC_INDEX(0, run, 0)] =
1128  len_tab[UNI_MPEG4_ENC_INDEX(1, run, 0)] = 0;
1129  }
1130 
1131  uint8_t max_run[2][32] = { 0 };
1132 
1133 #define VLC_NUM_CODES 102 // excluding the escape
1134  av_assert2(rl->n == VLC_NUM_CODES);
1135  for (int i = VLC_NUM_CODES - 1, max_level, cur_run = 0; i >= 0; --i) {
1136  int run = rl->table_run[i], level = rl->table_level[i];
1137  int last = i >= rl->last;
1138  unsigned code = rl->table_vlc[i][0] << 1;
1139  int len = rl->table_vlc[i][1] + 1;
1140 
1141  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, level)] = code;
1142  len_tab [UNI_MPEG4_ENC_INDEX(last, run, level)] = len;
1143  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, -level)] = code | 1;
1144  len_tab [UNI_MPEG4_ENC_INDEX(last, run, -level)] = len;
1145 
1146  if (!max_run[last][level])
1147  max_run[last][level] = run + 1;
1148  av_assert2(run + 1 <= max_run[last][level]);
1149 
1150  int run3 = run + max_run[last][level];
1151  int len3 = len + 7 + 2;
1152 
1153  if (run3 < 64 && len3 < len_tab[UNI_MPEG4_ENC_INDEX(last, run3, level)]) {
1154  unsigned code3 = code | (0x3 << 2 | 0x2) << len;
1155  bits_tab[UNI_MPEG4_ENC_INDEX(last, run3, level)] = code3;
1156  len_tab [UNI_MPEG4_ENC_INDEX(last, run3, level)] = len3;
1157  bits_tab[UNI_MPEG4_ENC_INDEX(last, run3, -level)] = code3 | 1;
1158  len_tab [UNI_MPEG4_ENC_INDEX(last, run3, -level)] = len3;
1159  }
1160  // table_run and table_level are ordered so that all the entries
1161  // with the same last and run are consecutive and level is ascending
1162  // among these entries. By traversing downwards we therefore automatically
1163  // encounter max_level of a given run first, needed for escape method 1.
1164  if (run != cur_run) {
1165  max_level = level;
1166  cur_run = run;
1167  } else
1168  av_assert2(max_level > level);
1169 
1170  code |= 0x3 << (len + 1);
1171  len += 7 + 1;
1172  level += max_level;
1173  av_assert2(len_tab [UNI_MPEG4_ENC_INDEX(last, run, level)] >= len);
1174  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, level)] = code;
1175  len_tab [UNI_MPEG4_ENC_INDEX(last, run, level)] = len;
1176  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, -level)] = code | 1;
1177  len_tab [UNI_MPEG4_ENC_INDEX(last, run, -level)] = len;
1178  }
1179 }
1180 
1181 static av_cold void mpeg4_encode_init_static(void)
1182 {
1183  init_uni_dc_tab();
1184 
1185  init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
1186  init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
1187 
1188  for (int f_code = MAX_FCODE; f_code > 0; f_code--) {
1189  for (int mv = -(16 << f_code); mv < (16 << f_code); mv++)
1190  fcode_tab[mv + MAX_MV] = f_code;
1191  }
1192 }
1193 
1194 static av_cold int encode_init(AVCodecContext *avctx)
1195 {
1196  static AVOnce init_static_once = AV_ONCE_INIT;
1197  Mpeg4EncContext *const m4 = avctx->priv_data;
1198  MPVMainEncContext *const m = &m4->m;
1199  MPVEncContext *const s = &m->s;
1200  int ret;
1201 
1202  if (avctx->width >= (1<<13) || avctx->height >= (1<<13)) {
1203  av_log(avctx, AV_LOG_ERROR, "dimensions too large for MPEG-4\n");
1204  return AVERROR(EINVAL);
1205  }
1206 
1207  m->encode_picture_header = mpeg4_encode_picture_header;
1208  s->encode_mb = mpeg4_encode_mb;
1209 
1210  m->fcode_tab = fcode_tab + MAX_MV;
1211 
1212  s->min_qcoeff = -2048;
1213  s->max_qcoeff = 2047;
1214  s->intra_ac_vlc_length = uni_mpeg4_intra_rl_len;
1215  s->intra_ac_vlc_last_length = uni_mpeg4_intra_rl_len + 128 * 64;
1216  s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len;
1217  s->inter_ac_vlc_last_length = uni_mpeg4_inter_rl_len + 128 * 64;
1218  s->luma_dc_vlc_length = uni_DCtab_lum_len;
1219  s->ac_esc_length = 7 + 2 + 1 + 6 + 1 + 12 + 1;
1220  s->c.y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
1221  s->c.c_dc_scale_table = ff_mpeg4_c_dc_scale_table;
1222 
1223  ff_qpeldsp_init(&s->c.qdsp);
1224  if ((ret = ff_mpv_encode_init(avctx)) < 0)
1225  return ret;
1226 
1227  ff_thread_once(&init_static_once, mpeg4_encode_init_static);
1228 
1229  if (avctx->time_base.den > (1 << 16) - 1) {
1230  av_log(avctx, AV_LOG_ERROR,
1231  "timebase %d/%d not supported by MPEG 4 standard, "
1232  "the maximum admitted value for the timebase denominator "
1233  "is %d\n", avctx->time_base.num, avctx->time_base.den,
1234  (1 << 16) - 1);
1235  return AVERROR(EINVAL);
1236  }
1237 
1238  m4->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
1239 
1240  if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1241  avctx->extradata = av_malloc(1024);
1242  if (!avctx->extradata)
1243  return AVERROR(ENOMEM);
1244  init_put_bits(&s->pb, avctx->extradata, 1024);
1245 
1246  mpeg4_encode_visual_object_header(m);
1247  mpeg4_encode_vol_header(m4, 0, 0);
1248 
1249 // ff_mpeg4_stuffing(&s->pb); ?
1250  flush_put_bits(&s->pb);
1251  avctx->extradata_size = put_bytes_output(&s->pb);
1252  }
1253  return 0;
1254 }
1255 
1256 void ff_mpeg4_init_partitions(MPVEncContext *const s)
1257 {
1258  uint8_t *start = put_bits_ptr(&s->pb);
1259  uint8_t *end = s->pb.buf_end;
1260  int size = end - start;
1261  int pb_size = (((intptr_t)start + size / 3) & (~3)) - (intptr_t)start;
1262  int tex_size = (size - 2 * pb_size) & (~3);
1263 
1264  set_put_bits_buffer_size(&s->pb, pb_size);
1265  init_put_bits(&s->tex_pb, start + pb_size, tex_size);
1266  init_put_bits(&s->pb2, start + pb_size + tex_size, pb_size);
1267 }
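ff_mpeg4_init_partitions() above carves the remaining output buffer into three pieces: roughly a third for the first partition (s->pb), the bulk for texture data (s->tex_pb) and the rest for the second partition (s->pb2). A simplified sketch of the arithmetic; it aligns the size rather than the end pointer as the code above does, and the names are hypothetical:

    static void partition_sizes(int remaining, int *pb_size, int *tex_size)
    {
        *pb_size  = (remaining / 3) & ~3;               /* about 1/3, 4-byte aligned */
        *tex_size = (remaining - 2 * *pb_size) & ~3;    /* what the texture partition gets */
    }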
1268 
1269 void ff_mpeg4_merge_partitions(MPVEncContext *const s)
1270 {
1271  const int pb2_len = put_bits_count(&s->pb2);
1272  const int tex_pb_len = put_bits_count(&s->tex_pb);
1273  const int bits = put_bits_count(&s->pb);
1274 
1275  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
1276  put_bits(&s->pb, 19, DC_MARKER);
1277  s->misc_bits += 19 + pb2_len + bits - s->last_bits;
1278  s->i_tex_bits += tex_pb_len;
1279  } else {
1280  put_bits(&s->pb, 17, MOTION_MARKER);
1281  s->misc_bits += 17 + pb2_len;
1282  s->mv_bits += bits - s->last_bits;
1283  s->p_tex_bits += tex_pb_len;
1284  }
1285 
1286  flush_put_bits(&s->pb2);
1287  flush_put_bits(&s->tex_pb);
1288 
1289  set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf);
1290  ff_copy_bits(&s->pb, s->pb2.buf, pb2_len);
1291  ff_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len);
1292  s->last_bits = put_bits_count(&s->pb);
1293 }
1294 
1295 void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
1296 {
1297  int mb_num_bits = av_log2(s->c.mb_num - 1) + 1;
1298 
1299  put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s->c.pict_type, s->f_code, s->b_code), 0);
1300  put_bits(&s->pb, 1, 1);
1301 
1302  put_bits(&s->pb, mb_num_bits, s->c.mb_x + s->c.mb_y * s->c.mb_width);
1303  put_bits(&s->pb, 5 /* quant_precision */, s->c.qscale);
1304  put_bits(&s->pb, 1, 0); /* no HEC */
1305 }
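The macroblock-number field in the video packet header uses just enough bits to address every macroblock, i.e. av_log2(mb_num - 1) + 1. A sketch of the same count without av_log2 (hypothetical helper; for the degenerate mb_num == 1 case the code above still writes one bit):

    static int mb_num_field_bits(int mb_num)
    {
        int bits = 0;
        for (int v = mb_num - 1; v; v >>= 1)
            bits++;
        return bits;            /* e.g. 396 MBs (CIF) -> 9 bits */
    }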
1306 
1307 #define OFFSET(x) offsetof(MPVEncContext, x)
1308 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1309 static const AVOption options[] = {
1310  { "data_partitioning", "Use data partitioning.", OFFSET(c.data_partitioning), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
1311  { "alternate_scan", "Enable alternate scantable.", OFFSET(c.alternate_scan), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
1312  { "mpeg_quant", "Use MPEG quantizers instead of H.263",
1313  OFFSET(mpeg_quant), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, VE },
1314  FF_MPV_COMMON_BFRAME_OPTS
1315  FF_MPV_COMMON_OPTS
1316  FF_MPV_COMMON_MOTION_EST_OPTS
1317  FF_MPEG4_PROFILE_OPTS
1318  { NULL },
1319 };
1320 
1321 static const AVClass mpeg4enc_class = {
1322  .class_name = "MPEG4 encoder",
1323  .item_name = av_default_item_name,
1324  .option = options,
1325  .version = LIBAVUTIL_VERSION_INT,
1326 };
1327 
1328 const FFCodec ff_mpeg4_encoder = {
1329  .p.name = "mpeg4",
1330  CODEC_LONG_NAME("MPEG-4 part 2"),
1331  .p.type = AVMEDIA_TYPE_VIDEO,
1332  .p.id = AV_CODEC_ID_MPEG4,
1333  .priv_data_size = sizeof(Mpeg4EncContext),
1334  .init = encode_init,
1335  FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
1336  .close = ff_mpv_encode_end,
1337  CODEC_PIXFMTS(AV_PIX_FMT_YUV420P),
1338  .color_ranges = AVCOL_RANGE_MPEG,
1339  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
1340  AV_CODEC_CAP_SLICE_THREADS |
1341  AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
1342  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1343  .p.priv_class = &mpeg4enc_class,
1344 };
SIMPLE_VO_TYPE
#define SIMPLE_VO_TYPE
Definition: mpeg4videodefs.h:32
mpeg4_encode_init_static
static av_cold void mpeg4_encode_init_static(void)
Definition: mpeg4videoenc.c:1181
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:218
CODEC_PIXFMTS
#define CODEC_PIXFMTS(...)
Definition: codec_internal.h:386
FFUMOD
#define FFUMOD(a, b)
Definition: common.h:66
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:277
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:185
mpeg4_encode_ac_coeffs
static void mpeg4_encode_ac_coeffs(const int16_t block[64], const int last_index, int i, const uint8_t *const scan_table, PutBitContext *const ac_pb, const uint32_t *const bits_tab, const uint8_t *const len_tab)
Encode the AC coefficients of an 8x8 block.
Definition: mpeg4videoenc.c:297
FF_ASPECT_EXTENDED
#define FF_ASPECT_EXTENDED
Definition: h263.h:26
level
uint8_t level
Definition: svq3.c:208
MPVEncContext
Definition: mpegvideoenc.h:45
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
put_bits32
static void av_unused put_bits32(PutBitContext *s, uint32_t value)
Write exactly 32 bits into a bitstream.
Definition: put_bits.h:301
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:43
put_bytes_output
static int put_bytes_output(const PutBitContext *s)
Definition: put_bits.h:99
MAX_FCODE
#define MAX_FCODE
Definition: mpegvideoenc.h:264
thread.h
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
mv
static const int8_t mv[256][2]
Definition: 4xm.c:81
mpeg4_encode_gop_header
static void mpeg4_encode_gop_header(MPVMainEncContext *const m)
Definition: mpeg4videoenc.c:822
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_qpeldsp_init
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:784
h263enc.h
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:183
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
OFFSET
#define OFFSET(x)
Definition: mpeg4videoenc.c:1307
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:512
VOS_STARTCODE
#define VOS_STARTCODE
Definition: mpeg4videodefs.h:55
AVOption
AVOption.
Definition: opt.h:429
init_uni_dc_tab
static av_cold void init_uni_dc_tab(void)
Definition: mpeg4videoenc.c:1048
FFCodec
Definition: codec_internal.h:127
version.h
mpegvideo.h
Mpeg4EncContext::time_increment_bits
int time_increment_bits
number of bits to represent the fractional part of time
Definition: mpeg4videoenc.c:78
ff_mpeg4_get_video_packet_prefix_length
int ff_mpeg4_get_video_packet_prefix_length(enum AVPictureType pict_type, int f_code, int b_code)
Definition: mpeg4video.c:28
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
mpeg4_encode_mb
static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:421
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:227
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:431
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:182
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AV_CODEC_FLAG_GLOBAL_HEADER
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
Definition: avcodec.h:318
uni_mpeg4_intra_rl_bits
static uint32_t uni_mpeg4_intra_rl_bits[64 *64 *2 *2]
Definition: mpeg4videoenc.c:56
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:638
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:356
mpeg4videoenc.h
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1929
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:315
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
uni_mpeg4_intra_rl_len
static uint8_t uni_mpeg4_intra_rl_len[64 *64 *2 *2]
Definition: mpeg4videoenc.c:57
ff_mpeg4_DCtab_chrom
const uint8_t ff_mpeg4_DCtab_chrom[13][2]
Definition: mpeg4data.h:40
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
mainctx_to_mpeg4
static Mpeg4EncContext * mainctx_to_mpeg4(MPVMainEncContext *m)
Definition: mpeg4videoenc.c:81
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:179
VOP_STARTCODE
#define VOP_STARTCODE
Definition: mpeg4videodefs.h:59
RLTable
RLTable.
Definition: rl.h:39
mpeg4_encode_visual_object_header
static void mpeg4_encode_visual_object_header(MPVMainEncContext *const m)
Definition: mpeg4videoenc.c:852
uni_mpeg4_inter_rl_bits
static uint32_t uni_mpeg4_inter_rl_bits[64 *64 *2 *2]
Definition: mpeg4videoenc.c:58
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
uni_DCtab_chrom_len
static uint8_t uni_DCtab_chrom_len[512]
Definition: mpeg4videoenc.c:50
FFUDIV
#define FFUDIV(a, b)
Definition: common.h:65
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:290
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1256
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:353
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
dquant_code
static const int dquant_code[5]
Definition: mpeg4videoenc.c:419
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:274
RLTable::n
int n
number of entries of table_vlc minus 1
Definition: rl.h:40
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:90
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
AV_PROFILE_UNKNOWN
#define AV_PROFILE_UNKNOWN
Definition: defs.h:65
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
s
#define s(width, name)
Definition: cbs_vp9.c:198
uni_mpeg4_inter_rl_len
static uint8_t uni_mpeg4_inter_rl_len[64 *64 *2 *2]
Definition: mpeg4videoenc.c:59
VLC_NUM_CODES
#define VLC_NUM_CODES
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:189
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:804
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:144
ff_mpeg4_rl_intra
RLTable ff_mpeg4_rl_intra
Definition: mpeg4data.h:108
uni_DCtab_chrom_bits
static uint16_t uni_DCtab_chrom_bits[512]
Definition: mpeg4videoenc.c:52
bits
uint8_t bits
Definition: vp3data.h:128
UNI_MPEG4_ENC_INDEX
#define UNI_MPEG4_ENC_INDEX(last, run, level)
Definition: mpeg4videoenc.c:63
uni_DCtab_lum_bits
static uint16_t uni_DCtab_lum_bits[512]
Definition: mpeg4videoenc.c:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:228
DC_MARKER
#define DC_MARKER
Definition: mpeg4videodefs.h:53
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:183
ff_put_string
void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
Put the string string in the bitstream.
Definition: bitstream.c:39
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:239
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
mpeg4_encode_vol_header
static void mpeg4_encode_vol_header(Mpeg4EncContext *const m4, int vo_number, int vol_number)
Definition: mpeg4videoenc.c:895
init_uni_mpeg4_rl_tab
static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab)
Definition: mpeg4videoenc.c:1103
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpeg4_DCtab_lum
const uint8_t ff_mpeg4_DCtab_lum[13][2]
Definition: mpeg4data.h:34
get_block_rate
static int get_block_rate(MPVEncContext *const s, int16_t block[64], int block_last_index, const uint8_t scantable[64])
Return the number of bits that encoding the 8x8 block in block would need.
Definition: mpeg4videoenc.c:90
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
NULL
#define NULL
Definition: coverity.c:32
FF_COMPLIANCE_VERY_STRICT
#define FF_COMPLIANCE_VERY_STRICT
Strictly conform to an older more strict version of the spec or reference software.
Definition: defs.h:58
run
uint8_t run
Definition: svq3.c:207
RLTable::table_vlc
const uint16_t(* table_vlc)[2]
Definition: rl.h:42
ff_mpeg4_pred_dc
static int ff_mpeg4_pred_dc(MpegEncContext *s, int n, int *dir_ptr)
Predict the dc.
Definition: mpeg4video.h:45
AV_LEVEL_UNKNOWN
#define AV_LEVEL_UNKNOWN
Definition: defs.h:206
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:240
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
options
Definition: swscale.c:43
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
Mpeg4EncContext
Definition: mpeg4videoenc.c:75
MOTION_MARKER
#define MOTION_MARKER
Definition: mpeg4videodefs.h:52
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1107
abs
#define abs(x)
Definition: cuda_runtime.h:35
FASTDIV
#define FASTDIV(a, b)
Definition: mathops.h:212
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1295
get_p_cbp
static int get_p_cbp(MPVEncContext *const s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h263enc.h:45
mpeg4_encode_picture_header
static int mpeg4_encode_picture_header(MPVMainEncContext *const m)
Definition: mpeg4videoenc.c:986
MPVMainEncContext
Definition: mpegvideoenc.h:178
VISUAL_OBJ_STARTCODE
#define VISUAL_OBJ_STARTCODE
Definition: mpeg4videodefs.h:58
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:186
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:812
ADV_SIMPLE_VO_TYPE
#define ADV_SIMPLE_VO_TYPE
Definition: mpeg4videodefs.h:40
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
RLTable::table_level
const int8_t * table_level
Definition: rl.h:44
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:502
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ff_h263_rl_inter
RLTable ff_h263_rl_inter
Definition: h263data.c:159
ff_mpeg4_y_dc_scale_table
const uint8_t ff_mpeg4_y_dc_scale_table[32]
Definition: mpeg4data.h:356
codec_internal.h
Mpeg4EncContext::m
MPVMainEncContext m
Definition: mpeg4videoenc.c:76
put_bits_assume_flushed
static void put_bits_assume_flushed(const PutBitContext *s)
Inform the compiler that a PutBitContext is flushed (i.e. …).
Definition: put_bits.h:82
ff_h263_cbpy_tab
const uint8_t ff_h263_cbpy_tab[16][2]
Definition: h263data.c:82
size
int size
Definition: twinvq_data.h:10344
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
fcode_tab
static uint8_t fcode_tab[MAX_MV *2+1]
Minimal fcode that a motion vector component would need.
Definition: mpeg4videoenc.c:44
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
offset
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it …
Definition: writing_filters.txt:86
attributes.h
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:188
ff_h263_inter_MCBPC_bits
const uint8_t ff_h263_inter_MCBPC_bits[28]
Definition: h263data.c:47
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:265
FF_MPEG4_PROFILE_OPTS
#define FF_MPEG4_PROFILE_OPTS
Definition: profiles.h:42
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:388
VE
#define VE
Definition: mpeg4videoenc.c:1308
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
RECT_SHAPE
#define RECT_SHAPE
Definition: mpeg4videodefs.h:27
log.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
… test the status of the outputs and forward it to the corresponding input, or return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some input, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code:
Definition: filter_design.txt:178
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
uni_DCtab_lum_len
static uint8_t uni_DCtab_lum_len[512]
Definition: mpeg4videoenc.c:49
restore_ac_coeffs
static void restore_ac_coeffs(MPVEncContext *const s, int16_t block[6][64], const int dir[6], const uint8_t *st[6], const int zigzag_last_index[6])
Restore the ac coefficients in block that have been changed by decide_ac_pred().
Definition: mpeg4videoenc.c:125
mpeg4enc_class
static const AVClass mpeg4enc_class
Definition: mpeg4videoenc.c:1321
options
static const AVOption options[]
Definition: mpeg4videoenc.c:1309
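Options declared in a table like this are reached through the generic AVOption API. The sketch below is hedged: "mpeg_quant" is used only as an illustrative option name, and `ffmpeg -h encoder=mpeg4` should be consulted for the options actually present in a given build.

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Sketch: AV_OPT_SEARCH_CHILDREN lets the lookup descend into priv_data,
 * where the encoder's private option table lives. */
static int set_private_option(AVCodecContext *enc)
{
    return av_opt_set(enc, "mpeg_quant", "1", AV_OPT_SEARCH_CHILDREN);
}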
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_mpeg4_encoder
const FFCodec ff_mpeg4_encoder
Definition: mpeg4videoenc.c:1328
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
ff_h263_aspect_to_info
av_const int ff_h263_aspect_to_info(AVRational aspect)
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:733
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
mpeg4videodefs.h
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:81
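Several entries here (mpeg4enc_class, av_default_item_name, class_name) belong to the AVClass/AVOption machinery. The minimal sketch below shows the usual shape of such a declaration; MyContext, my_options and my_class are hypothetical names, not FFmpeg symbols.

#include <stddef.h>
#include <libavutil/log.h>
#include <libavutil/opt.h>
#include <libavutil/version.h>

/* Hypothetical context whose first member is the AVClass pointer, as
 * expected by av_log() and the AVOption API. */
typedef struct MyContext {
    const AVClass *av_class;
    int level;
} MyContext;

static const AVOption my_options[] = {
    { "level", "illustrative integer option", offsetof(MyContext, level),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 10, 0 },
    { NULL }
};

static const AVClass my_class = {
    .class_name = "myctx",
    .item_name  = av_default_item_name, /* log prefix comes from class_name */
    .option     = my_options,
    .version    = LIBAVUTIL_VERSION_INT,
};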
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: mpeg4videoenc.c:1194
ff_mpeg4_init_direct_mv
void ff_mpeg4_init_direct_mv(MpegEncContext *s)
Definition: mpeg4video.c:70
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
ff_h263_intra_MCBPC_bits
const uint8_t ff_h263_intra_MCBPC_bits[9]
Definition: h263data.c:33
AVCodecContext
main external API structure.
Definition: avcodec.h:431
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
ff_h263_intra_MCBPC_code
const uint8_t ff_h263_intra_MCBPC_code[9]
Definition: h263data.c:32
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
mpeg4video.h
AVRational::den
int den
Denominator.
Definition: rational.h:60
mpeg4_encode_dc
static void mpeg4_encode_dc(PutBitContext *s, int level, int n)
Encode the dc value.
Definition: mpeg4videoenc.c:281
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
RLTable::last
int last
number of values for last = 0
Definition: rl.h:41
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
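A minimal draining sketch for an encoder with this capability (as the MPEG-4 encoder is when B-frames make it buffer input) is shown below; `enc` is assumed to be an opened encoder context and `pkt` an allocated AVPacket.

#include <libavcodec/avcodec.h>

/* Sketch: send a NULL frame to enter drain mode, then collect the
 * remaining packets until AVERROR_EOF. */
static int drain_encoder(AVCodecContext *enc, AVPacket *pkt)
{
    int ret = avcodec_send_frame(enc, NULL);
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        /* ... write or inspect pkt here ... */
        av_packet_unref(pkt);
    }
    return ret == AVERROR_EOF ? 0 : ret;  /* EOF means fully drained */
}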
FF_MPV_COMMON_BFRAME_OPTS
#define FF_MPV_COMMON_BFRAME_OPTS
Definition: mpegvideoenc.h:351
USER_DATA_STARTCODE
#define USER_DATA_STARTCODE
Definition: mpeg4videodefs.h:56
ff_h263_inter_MCBPC_code
const uint8_t ff_h263_inter_MCBPC_code[28]
Definition: h263data.c:38
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:266
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:545
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
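Several entries here (put_bits_count, put_bits_ptr, set_put_bits_buffer_size, flush_put_bits) belong to the internal bitstream writer. The sketch below shows how it is typically driven; note that put_bits.h is not an installed header, so this only builds inside the FFmpeg source tree.

#include <stdint.h>
#include <stdio.h>
#include "libavcodec/put_bits.h"

int main(void)
{
    uint8_t buf[16];
    PutBitContext pb;

    init_put_bits(&pb, buf, sizeof(buf));
    put_bits(&pb, 3, 0x5);                              /* bits 101            */
    put_bits(&pb, 5, 0x1f);                             /* bits 11111          */
    printf("bits written: %d\n", put_bits_count(&pb));  /* 8                   */
    flush_put_bits(&pb);                                /* zero-pad to a byte  */
    printf("first byte: 0x%02x\n", buf[0]);             /* 0xbf (10111111)     */
    return 0;
}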
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1269
ff_mpeg4_c_dc_scale_table
const uint8_t ff_mpeg4_c_dc_scale_table[32]
Definition: mpeg4data.h:360
decide_ac_pred
static int decide_ac_pred(MPVEncContext *const s, int16_t block[6][64], const int dir[6], const uint8_t *st[6], int zigzag_last_index[6])
Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
Definition: mpeg4videoenc.c:156
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:181
mpeg4_encode_blocks_intra
static void mpeg4_encode_blocks_intra(MPVEncContext *const s, const int16_t block[6][64], const int intra_dc[6], const uint8_t *const *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb)
Definition: mpeg4videoenc.c:356
slice_to_mainenc
static const MPVMainEncContext * slice_to_mainenc(const MPVEncContext *s)
Definition: mpegvideoenc.h:253
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
mpeg4videodata.h
GOP_STARTCODE
#define GOP_STARTCODE
Definition: mpeg4videodefs.h:57
ff_h263_encode_motion_vector
static void ff_h263_encode_motion_vector(MPVEncContext *s, int x, int y, int f_code)
Definition: h263enc.h:38
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
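A trivial stand-alone use of the logging entry point, with a NULL context so no AVClass prefix is printed, might look like this:

#include <libavutil/log.h>

int main(void)
{
    /* Passing an AVClass-carrying struct instead of NULL would prepend
     * its item_name() to the message. */
    av_log(NULL, AV_LOG_INFO, "encoded %d frames\n", 25);
    return 0;
}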
get_b_cbp
static int get_b_cbp(MPVEncContext *const s, int16_t block[6][64], int motion_x, int motion_y, int mb_type)
Definition: mpeg4videoenc.c:377
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
put_bits.h
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
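av_log2(v) returns the index of the highest set bit, i.e. floor(log2(v)) for v > 0, which is the kind of helper that minimal-bit-size computations (such as choosing an fcode) typically build on. A small check, assuming the declaration is reachable through libavutil/common.h:

#include <stdio.h>
#include <libavutil/common.h>

int main(void)
{
    /* 0, 7 and 8: floor(log2(1)), floor(log2(255)), floor(log2(256)) */
    printf("%d %d %d\n", av_log2(1), av_log2(255), av_log2(256));
    return 0;
}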
MPVMainEncContext::s
MPVEncContext s
The main slice context.
Definition: mpegvideoenc.h:179
mpeg4_encode_blocks_inter
static void mpeg4_encode_blocks_inter(MPVEncContext *const s, const int16_t block[6][64], PutBitContext *ac_pb)
Definition: mpeg4videoenc.c:340
RLTable::table_run
const int8_t * table_run
Definition: rl.h:43
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
h263.h