mpeg4videoenc.c
1 /*
2  * MPEG-4 encoder
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "libavutil/attributes.h"
24 #include "libavutil/log.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/thread.h"
28 #include "codec_internal.h"
29 #include "mpegvideo.h"
30 #include "h263.h"
31 #include "h263enc.h"
32 #include "mathops.h"
33 #include "mpeg4video.h"
34 #include "mpeg4videodata.h"
35 #include "mpeg4videodefs.h"
36 #include "mpeg4videoenc.h"
37 #include "mpegvideoenc.h"
38 #include "profiles.h"
39 #include "put_bits.h"
40 #include "version.h"
41 
42 /**
43  * Minimal fcode that a motion vector component would need.
44  */
45 static uint8_t fcode_tab[MAX_MV*2+1];
46 
47 /* The uni_DCtab_* tables below contain unified bits+length tables to encode DC
48  * differences in MPEG-4. Unified in the sense that the specification specifies
49  * this encoding in several steps. */
50 static uint8_t uni_DCtab_lum_len[512];
51 static uint8_t uni_DCtab_chrom_len[512];
52 static uint16_t uni_DCtab_lum_bits[512];
53 static uint16_t uni_DCtab_chrom_bits[512];
54 
55 /* Unified encoding tables for run length encoding of coefficients.
56  * Unified in the sense that the specification specifies the encoding in several steps. */
57 static uint32_t uni_mpeg4_intra_rl_bits[64 * 64 * 2 * 2];
58 static uint8_t uni_mpeg4_intra_rl_len[64 * 64 * 2 * 2];
59 static uint32_t uni_mpeg4_inter_rl_bits[64 * 64 * 2 * 2];
60 static uint8_t uni_mpeg4_inter_rl_len[64 * 64 * 2 * 2];
61 
62 //#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 + (run) * 256 + (level))
63 //#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) + (level) * 64)
64 #define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))
65 
66 /* MPEG-4
67  * inter
68  * max level: 24/6
69  * max run: 53/63
70  *
71  * intra
72  * max level: 53/16
73  * max run: 29/41
74  */
75 
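
The unified RL tables above are addressed through UNI_MPEG4_ENC_INDEX(): last selects one of two 8192-entry halves, run (0..63) selects a 128-entry row, and the level is stored with a +64 bias so that -64..63 maps onto columns 0..127. The following standalone sketch shows that layout; uni_index() and the asserts are illustration only, not part of this file.

#include <assert.h>

#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))

/* Illustrative only: map a (last, run, signed level) triple to the unified
 * table index the way the encoder does it (level is biased by +64). */
static int uni_index(int last, int run, int signed_level)
{
    return UNI_MPEG4_ENC_INDEX(last, run, signed_level + 64);
}

int main(void)
{
    assert(uni_index(0, 0, -64) == 0);        /* first entry of the "not last" half */
    assert(uni_index(0, 1, -64) == 128);      /* the next run starts 128 entries on */
    assert(uni_index(1, 0, -64) == 128 * 64); /* the "last" half starts at 8192     */
    return 0;
}
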
76 typedef struct Mpeg4EncContext {
77  MPVMainEncContext m;
78  /// number of bits to represent the fractional part of time
79  int time_increment_bits;
80 } Mpeg4EncContext;
81 
82 static Mpeg4EncContext *mainctx_to_mpeg4(MPVMainEncContext *m)
83 {
84  return (Mpeg4EncContext*)m;
85 }
86 
87 /**
88  * Return the number of bits needed to encode the 8x8 block in block.
89  * @param[in] block_last_index last index in scantable order that refers to a non-zero element in block.
90  */
91 static inline int get_block_rate(MPVEncContext *const s, int16_t block[64],
92  int block_last_index, const uint8_t scantable[64])
93 {
94  int last = 0;
95  int j;
96  int rate = 0;
97 
98  for (j = 1; j <= block_last_index; j++) {
99  const int index = scantable[j];
100  int level = block[index];
101  if (level) {
102  level += 64;
103  if ((level & (~127)) == 0) {
104  if (j < block_last_index)
105  rate += s->intra_ac_vlc_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
106  else
107  rate += s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
108  } else
109  rate += s->ac_esc_length;
110 
111  last = j;
112  }
113  }
114 
115  return rate;
116 }
117 
118 /**
119  * Restore the ac coefficients in block that have been changed by decide_ac_pred().
120  * This function also restores s->c.block_last_index.
121  * @param[in,out] block MB coefficients, these will be restored
122  * @param[in] dir ac prediction direction for each 8x8 block
123  * @param[out] st scantable for each 8x8 block
124  * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
125  */
126 static inline void restore_ac_coeffs(MPVEncContext *const s, int16_t block[6][64],
127  const int dir[6], const uint8_t *st[6],
128  const int zigzag_last_index[6])
129 {
130  int i, n;
131  memcpy(s->c.block_last_index, zigzag_last_index, sizeof(int) * 6);
132 
133  for (n = 0; n < 6; n++) {
134  int16_t *ac_val = &s->c.ac_val[0][0] + s->c.block_index[n] * 16;
135 
136  st[n] = s->c.intra_scantable.permutated;
137  if (dir[n]) {
138  /* top prediction */
139  for (i = 1; i < 8; i++)
140  block[n][s->c.idsp.idct_permutation[i]] = ac_val[i + 8];
141  } else {
142  /* left prediction */
143  for (i = 1; i < 8; i++)
144  block[n][s->c.idsp.idct_permutation[i << 3]] = ac_val[i];
145  }
146  }
147 }
148 
149 /**
150  * Predict the dc.
151  * @param n block index (0-3 are luma, 4-5 are chroma)
152  * @param dir_ptr pointer to an integer where the prediction direction will be stored
153  */
154 static int mpeg4_pred_dc(MpegEncContext *s, int n, int *dir_ptr)
155 {
156  const int16_t *const dc_val = s->dc_val + s->block_index[n];
157  const int wrap = s->block_wrap[n];
158 
159  /* B C
160  * A X
161  */
162  const int a = dc_val[-1];
163  const int b = dc_val[-1 - wrap];
164  const int c = dc_val[-wrap];
165  int pred;
166 
167  // There is no need for out-of-slice handling here, as all values are set
168  // appropriately when a new slice is opened.
169  if (abs(a - b) < abs(b - c)) {
170  pred = c;
171  *dir_ptr = 1; /* top */
172  } else {
173  pred = a;
174  *dir_ptr = 0; /* left */
175  }
176  return pred;
177 }
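
mpeg4_pred_dc() applies the MPEG-4 DC prediction gradient rule: given the reconstructed DC values of the left (A), top-left (B) and top (C) neighbours, the predictor is C when |A - B| < |B - C| and A otherwise. A minimal restatement of just that decision; pred_dc_direction() is a hypothetical helper used only for illustration.

#include <assert.h>
#include <stdlib.h>

/* Illustration only: the DC prediction decision used in mpeg4_pred_dc().
 * Neighbour layout:  B C
 *                    A X   (X is the block being predicted) */
static int pred_dc_direction(int a, int b, int c, int *pred)
{
    if (abs(a - b) < abs(b - c)) {
        *pred = c;
        return 1;  /* predict from the top block  */
    } else {
        *pred = a;
        return 0;  /* predict from the left block */
    }
}

int main(void)
{
    int pred;
    assert(pred_dc_direction(100, 100, 50, &pred) == 1 && pred == 50);
    assert(pred_dc_direction(100,  50, 50, &pred) == 0 && pred == 100);
    return 0;
}
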
178 
179 /**
180  * Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
181  * This function will also update s->c.block_last_index and s->c.ac_val.
182  * @param[in,out] block MB coefficients, these will be updated if 1 is returned
183  * @param[in] dir ac prediction direction for each 8x8 block
184  * @param[out] st scantable for each 8x8 block
185  * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
186  */
187 static inline int decide_ac_pred(MPVEncContext *const s, int16_t block[6][64],
188  const int dir[6], const uint8_t *st[6],
189  int zigzag_last_index[6])
190 {
191  int score = 0;
192  int i, n;
193  const int8_t *const qscale_table = s->c.cur_pic.qscale_table;
194 
195  memcpy(zigzag_last_index, s->c.block_last_index, sizeof(int) * 6);
196 
197  for (n = 0; n < 6; n++) {
198  int16_t *ac_val, *ac_val1;
199 
200  score -= get_block_rate(s, block[n], s->c.block_last_index[n],
201  s->c.intra_scantable.permutated);
202 
203  ac_val = &s->c.ac_val[0][0] + s->c.block_index[n] * 16;
204  ac_val1 = ac_val;
205  if (dir[n]) {
206  const int xy = s->c.mb_x + s->c.mb_y * s->c.mb_stride - s->c.mb_stride;
207  /* top prediction */
208  ac_val -= s->c.block_wrap[n] * 16;
209  if (s->c.first_slice_line || s->c.qscale == qscale_table[xy] || n == 2 || n == 3) {
210  /* same qscale */
211  for (i = 1; i < 8; i++) {
212  const int level = block[n][s->c.idsp.idct_permutation[i]];
213  block[n][s->c.idsp.idct_permutation[i]] = level - ac_val[i + 8];
214  ac_val1[i] = block[n][s->c.idsp.idct_permutation[i << 3]];
215  ac_val1[i + 8] = level;
216  }
217  } else {
218  /* different qscale, we must rescale */
219  for (i = 1; i < 8; i++) {
220  const int level = block[n][s->c.idsp.idct_permutation[i]];
221  block[n][s->c.idsp.idct_permutation[i]] = level - ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->c.qscale);
222  ac_val1[i] = block[n][s->c.idsp.idct_permutation[i << 3]];
223  ac_val1[i + 8] = level;
224  }
225  }
226  st[n] = s->c.permutated_intra_h_scantable;
227  } else {
228  const int xy = s->c.mb_x - 1 + s->c.mb_y * s->c.mb_stride;
229  /* left prediction */
230  ac_val -= 16;
231  if (s->c.mb_x == 0 || s->c.qscale == qscale_table[xy] || n == 1 || n == 3) {
232  /* same qscale */
233  for (i = 1; i < 8; i++) {
234  const int level = block[n][s->c.idsp.idct_permutation[i << 3]];
235  block[n][s->c.idsp.idct_permutation[i << 3]] = level - ac_val[i];
236  ac_val1[i] = level;
237  ac_val1[i + 8] = block[n][s->c.idsp.idct_permutation[i]];
238  }
239  } else {
240  /* different qscale, we must rescale */
241  for (i = 1; i < 8; i++) {
242  const int level = block[n][s->c.idsp.idct_permutation[i << 3]];
243  block[n][s->c.idsp.idct_permutation[i << 3]] = level - ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->c.qscale);
244  ac_val1[i] = level;
245  ac_val1[i + 8] = block[n][s->c.idsp.idct_permutation[i]];
246  }
247  }
248  st[n] = s->c.permutated_intra_v_scantable;
249  }
250 
251  for (i = 63; i > 0; i--) // FIXME optimize
252  if (block[n][st[n][i]])
253  break;
254  s->c.block_last_index[n] = i;
255 
256  score += get_block_rate(s, block[n], s->c.block_last_index[n], st[n]);
257  }
258 
259  if (score < 0) {
260  return 1;
261  } else {
262  restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
263  return 0;
264  }
265 }
266 
267 /**
268  * modify mb_type & qscale so that encoding is actually possible in MPEG-4
269  */
270 void ff_clean_mpeg4_qscales(MPVEncContext *const s)
271 {
272  ff_clean_h263_qscales(s);
273 
274  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
275  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
276  int odd = 0;
277  /* ok, come on, this isn't funny anymore, there's more code for
278  * handling this MPEG-4 mess than for the actual adaptive quantization */
279 
280  for (int i = 0; i < s->c.mb_num; i++) {
281  int mb_xy = s->c.mb_index2xy[i];
282  odd += qscale_table[mb_xy] & 1;
283  }
284 
285  if (2 * odd > s->c.mb_num)
286  odd = 1;
287  else
288  odd = 0;
289 
290  for (int i = 0; i < s->c.mb_num; i++) {
291  int mb_xy = s->c.mb_index2xy[i];
292  if ((qscale_table[mb_xy] & 1) != odd)
293  qscale_table[mb_xy]++;
294  if (qscale_table[mb_xy] > 31)
295  qscale_table[mb_xy] = 31;
296  }
297 
298  for (int i = 1; i < s->c.mb_num; i++) {
299  int mb_xy = s->c.mb_index2xy[i];
300  if (qscale_table[mb_xy] != qscale_table[s->c.mb_index2xy[i - 1]] &&
301  (s->mb_type[mb_xy] & CANDIDATE_MB_TYPE_DIRECT)) {
302  s->mb_type[mb_xy] |= CANDIDATE_MB_TYPE_BIDIR;
303  }
304  }
305  }
306 }
307 
308 /**
309  * Encode the dc value.
310  * @param n block index (0-3 are luma, 4-5 are chroma)
311  */
312 static inline void mpeg4_encode_dc(PutBitContext *s, int level, int n)
313 {
314  /* DC will overflow if level is outside the [-255,255] range. */
315  level += 256;
316  if (n < 4) {
317  /* luminance */
318  put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
319  } else {
320  /* chrominance */
321  put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
322  }
323 }
324 
325 /**
326  * Encode the AC coefficients of an 8x8 block.
327  */
328 static inline void mpeg4_encode_ac_coeffs(const int16_t block[64],
329  const int last_index, int i,
330  const uint8_t *const scan_table,
331  PutBitContext *const ac_pb,
332  const uint32_t *const bits_tab,
333  const uint8_t *const len_tab)
334 {
335  int last_non_zero = i - 1;
336 
337  /* AC coefs */
338  for (; i < last_index; i++) {
339  int level = block[scan_table[i]];
340  if (level) {
341  int run = i - last_non_zero - 1;
342  level += 64;
343  if ((level & (~127)) == 0) {
344  const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
345  put_bits(ac_pb, len_tab[index], bits_tab[index]);
346  } else { // ESC3
347  put_bits(ac_pb,
348  7 + 2 + 1 + 6 + 1 + 12 + 1,
349  (3 << 23) + (3 << 21) + (0 << 20) + (run << 14) +
350  (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
351  }
352  last_non_zero = i;
353  }
354  }
355  /* if (i <= last_index) */ {
356  int level = block[scan_table[i]];
357  int run = i - last_non_zero - 1;
358  level += 64;
359  if ((level & (~127)) == 0) {
360  const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
361  put_bits(ac_pb, len_tab[index], bits_tab[index]);
362  } else { // ESC3
363  put_bits(ac_pb,
364  7 + 2 + 1 + 6 + 1 + 12 + 1,
365  (3 << 23) + (3 << 21) + (1 << 20) + (run << 14) +
366  (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
367  }
368  }
369 }
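
When a (last, run, level) triple falls outside the range covered by the VLC tables, mpeg4_encode_ac_coeffs() falls back to the fixed 30-bit type-3 escape: a 7-bit escape prefix, a 2-bit mode selector, the LAST flag, a 6-bit run, a marker bit, a 12-bit signed level and a final marker bit. Below is a standalone sketch of that packing; esc3_word() is a hypothetical helper, and it takes the true signed level, whereas the function above still carries the +64 bias and therefore subtracts 64 first.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: assemble the 30-bit type-3 escape word the same way
 * mpeg4_encode_ac_coeffs() does. */
static uint32_t esc3_word(int last, int run, int signed_level)
{
    return (3u << 23) | (3u << 21) | ((uint32_t)last << 20) |
           ((uint32_t)run << 14) | (1u << 13) |
           (((uint32_t)signed_level & 0xfff) << 1) | 1u;
}

int main(void)
{
    /* last=0, run=2, level=-300 does not fit any VLC, so it costs 30 bits */
    printf("0x%08x (30 bits)\n", (unsigned)esc3_word(0, 2, -300));
    return 0;
}
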
370 
371 static void mpeg4_encode_blocks_inter(MPVEncContext *const s,
372  const int16_t block[6][64],
373  PutBitContext *ac_pb)
374 {
375  /* encode each block */
376  for (int n = 0; n < 6; ++n) {
377  const int last_index = s->c.block_last_index[n];
378  if (last_index < 0)
379  continue;
380 
381  mpeg4_encode_ac_coeffs(block[n], last_index, 0,
382  s->c.intra_scantable.permutated, ac_pb,
383  uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
384  }
385 }
386 
387 static void mpeg4_encode_blocks_intra(MPVEncContext *const s,
388  const int16_t block[6][64],
389  const int intra_dc[6],
390  const uint8_t * const *scan_table,
391  PutBitContext *dc_pb,
392  PutBitContext *ac_pb)
393 {
394  /* encode each block */
395  for (int n = 0; n < 6; ++n) {
396  mpeg4_encode_dc(dc_pb, intra_dc[n], n);
397 
398  const int last_index = s->c.block_last_index[n];
399  if (last_index <= 0)
400  continue;
401 
402  mpeg4_encode_ac_coeffs(block[n], last_index, 1,
403  scan_table[n], ac_pb,
404  uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
405  }
406 }
407 
408 static inline int get_b_cbp(MPVEncContext *const s, int16_t block[6][64],
409  int motion_x, int motion_y, int mb_type)
410 {
411  int cbp = 0, i;
412 
413  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
414  int score = 0;
415  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
416 
417  for (i = 0; i < 6; i++) {
418  if (s->coded_score[i] < 0) {
419  score += s->coded_score[i];
420  cbp |= 1 << (5 - i);
421  }
422  }
423 
424  if (cbp) {
425  int zero_score = -6;
426  if ((motion_x | motion_y | s->dquant | mb_type) == 0)
427  zero_score -= 4; // 2 * MV + mb_type + cbp bit
428 
429  zero_score *= lambda;
430  if (zero_score <= score)
431  cbp = 0;
432  }
433 
434  for (i = 0; i < 6; i++) {
435  if (s->c.block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
436  s->c.block_last_index[i] = -1;
437  s->c.bdsp.clear_block(s->block[i]);
438  }
439  }
440  } else {
441  for (i = 0; i < 6; i++) {
442  if (s->c.block_last_index[i] >= 0)
443  cbp |= 1 << (5 - i);
444  }
445  }
446  return cbp;
447 }
448 
449 // FIXME this is duplicated in h263.c
450 static const int dquant_code[5] = { 1, 0, 9, 2, 3 };
451 
452 static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64],
453  int motion_x, int motion_y)
454 {
455  int cbpc, cbpy, pred_x, pred_y;
456  PutBitContext *const pb2 = s->data_partitioning ? &s->pb2 : &s->pb;
457  PutBitContext *const tex_pb = s->data_partitioning && s->c.pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
458  PutBitContext *const dc_pb = s->data_partitioning && s->c.pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
459  const int interleaved_stats = (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->data_partitioning;
460 
461  if (!s->c.mb_intra) {
462  int i, cbp;
463 
464  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
465  /* convert from mv_dir to type */
466  static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
467  int mb_type = mb_type_table[s->c.mv_dir];
468 
469  if (s->c.mb_x == 0) {
470  for (i = 0; i < 2; i++)
471  s->c.last_mv[i][0][0] =
472  s->c.last_mv[i][0][1] =
473  s->c.last_mv[i][1][0] =
474  s->c.last_mv[i][1][1] = 0;
475  }
476 
477  av_assert2(s->dquant >= -2 && s->dquant <= 2);
478  av_assert2((s->dquant & 1) == 0);
479  av_assert2(mb_type >= 0);
480 
481  /* nothing to do if this MB was skipped in the next P-frame */
482  if (s->c.next_pic.mbskip_table[s->c.mb_y * s->c.mb_stride + s->c.mb_x]) { // FIXME avoid DCT & ...
483  s->c.mv[0][0][0] =
484  s->c.mv[0][0][1] =
485  s->c.mv[1][0][0] =
486  s->c.mv[1][0][1] = 0;
487  s->c.mv_dir = MV_DIR_FORWARD; // doesn't matter
488  s->c.qscale -= s->dquant;
489 // s->c.mb_skipped = 1;
490 
491  return;
492  }
493 
494  cbp = get_b_cbp(s, block, motion_x, motion_y, mb_type);
495 
496  if ((cbp | motion_x | motion_y | mb_type) == 0) {
497  /* direct MB with MV={0,0} */
498  av_assert2(s->dquant == 0);
499 
500  put_bits(&s->pb, 1, 1); /* mb not coded modb1=1 */
501 
502  if (interleaved_stats) {
503  s->misc_bits++;
504  s->last_bits++;
505  }
506  return;
507  }
508 
509  put_bits(&s->pb, 1, 0); /* mb coded modb1=0 */
510  put_bits(&s->pb, 1, cbp ? 0 : 1); /* modb2 */ // FIXME merge
511  put_bits(&s->pb, mb_type + 1, 1); // this table is so simple that we don't need it :)
512  if (cbp)
513  put_bits(&s->pb, 6, cbp);
514 
515  if (cbp && mb_type) {
516  if (s->dquant)
517  put_bits(&s->pb, 2, (s->dquant >> 2) + 3);
518  else
519  put_bits(&s->pb, 1, 0);
520  } else
521  s->c.qscale -= s->dquant;
522 
523  if (!s->c.progressive_sequence) {
524  if (cbp)
525  put_bits(&s->pb, 1, s->c.interlaced_dct);
526  if (mb_type) // not direct mode
527  put_bits(&s->pb, 1, s->c.mv_type == MV_TYPE_FIELD);
528  }
529 
530  if (interleaved_stats)
531  s->misc_bits += get_bits_diff(s);
532 
533  if (!mb_type) {
534  av_assert2(s->c.mv_dir & MV_DIRECT);
535  ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
536  } else {
537  av_assert2(mb_type > 0 && mb_type < 4);
538  if (s->c.mv_type != MV_TYPE_FIELD) {
539  if (s->c.mv_dir & MV_DIR_FORWARD) {
540  ff_h263_encode_motion_vector(s,
541  s->c.mv[0][0][0] - s->c.last_mv[0][0][0],
542  s->c.mv[0][0][1] - s->c.last_mv[0][0][1],
543  s->f_code);
544  s->c.last_mv[0][0][0] =
545  s->c.last_mv[0][1][0] = s->c.mv[0][0][0];
546  s->c.last_mv[0][0][1] =
547  s->c.last_mv[0][1][1] = s->c.mv[0][0][1];
548  }
549  if (s->c.mv_dir & MV_DIR_BACKWARD) {
550  ff_h263_encode_motion_vector(s,
551  s->c.mv[1][0][0] - s->c.last_mv[1][0][0],
552  s->c.mv[1][0][1] - s->c.last_mv[1][0][1],
553  s->b_code);
554  s->c.last_mv[1][0][0] =
555  s->c.last_mv[1][1][0] = s->c.mv[1][0][0];
556  s->c.last_mv[1][0][1] =
557  s->c.last_mv[1][1][1] = s->c.mv[1][0][1];
558  }
559  } else {
560  if (s->c.mv_dir & MV_DIR_FORWARD) {
561  put_bits(&s->pb, 1, s->c.field_select[0][0]);
562  put_bits(&s->pb, 1, s->c.field_select[0][1]);
563  }
564  if (s->c.mv_dir & MV_DIR_BACKWARD) {
565  put_bits(&s->pb, 1, s->c.field_select[1][0]);
566  put_bits(&s->pb, 1, s->c.field_select[1][1]);
567  }
568  if (s->c.mv_dir & MV_DIR_FORWARD) {
569  for (i = 0; i < 2; i++) {
570  ff_h263_encode_motion_vector(s,
571  s->c.mv[0][i][0] - s->c.last_mv[0][i][0],
572  s->c.mv[0][i][1] - s->c.last_mv[0][i][1] / 2,
573  s->f_code);
574  s->c.last_mv[0][i][0] = s->c.mv[0][i][0];
575  s->c.last_mv[0][i][1] = s->c.mv[0][i][1] * 2;
576  }
577  }
578  if (s->c.mv_dir & MV_DIR_BACKWARD) {
579  for (i = 0; i < 2; i++) {
580  ff_h263_encode_motion_vector(s,
581  s->c.mv[1][i][0] - s->c.last_mv[1][i][0],
582  s->c.mv[1][i][1] - s->c.last_mv[1][i][1] / 2,
583  s->b_code);
584  s->c.last_mv[1][i][0] = s->c.mv[1][i][0];
585  s->c.last_mv[1][i][1] = s->c.mv[1][i][1] * 2;
586  }
587  }
588  }
589  }
590 
591  if (interleaved_stats)
592  s->mv_bits += get_bits_diff(s);
593 
594  mpeg4_encode_blocks_inter(s, block, &s->pb);
595 
596  if (interleaved_stats)
597  s->p_tex_bits += get_bits_diff(s);
598  } else { /* s->c.pict_type == AV_PICTURE_TYPE_B */
599  cbp = get_p_cbp(s, block, motion_x, motion_y);
600 
601  if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
602  s->c.mv_type == MV_TYPE_16X16) {
603  const MPVMainEncContext *const m = slice_to_mainenc(s);
604  /* Check if the B-frames can skip it too, as we must skip it
605  * if we skip here. Why didn't they just compress
606  * the skip-mb bits instead of reusing them?! */
607  if (m->max_b_frames > 0) {
608  int x, y, offset;
609  const uint8_t *p_pic;
610 
611  x = s->c.mb_x * 16;
612  y = s->c.mb_y * 16;
613 
614  offset = x + y * s->c.linesize;
615  p_pic = s->new_pic->data[0] + offset;
616 
617  s->c.mb_skipped = 1;
618  for (int i = 0; i < m->max_b_frames; i++) {
619  const uint8_t *b_pic;
620  int diff;
621  const MPVPicture *pic = m->reordered_input_picture[i + 1];
622 
623  if (!pic || pic->f->pict_type != AV_PICTURE_TYPE_B)
624  break;
625 
626  b_pic = pic->f->data[0] + offset;
627  if (!pic->shared)
628  b_pic += INPLACE_OFFSET;
629 
630  if (x + 16 > s->c.width || y + 16 > s->c.height) {
631  int x1, y1;
632  int xe = FFMIN(16, s->c.width - x);
633  int ye = FFMIN(16, s->c.height - y);
634  diff = 0;
635  for (y1 = 0; y1 < ye; y1++) {
636  for (x1 = 0; x1 < xe; x1++) {
637  diff += FFABS(p_pic[x1 + y1 * s->c.linesize] - b_pic[x1 + y1 * s->c.linesize]);
638  }
639  }
640  diff = diff * 256 / (xe * ye);
641  } else {
642  diff = s->sad_cmp[0](NULL, p_pic, b_pic, s->c.linesize, 16);
643  }
644  if (diff > s->c.qscale * 70) { // FIXME check that 70 is optimal
645  s->c.mb_skipped = 0;
646  break;
647  }
648  }
649  } else
650  s->c.mb_skipped = 1;
651 
652  if (s->c.mb_skipped == 1) {
653  /* skip macroblock */
654  put_bits(&s->pb, 1, 1);
655 
656  if (interleaved_stats) {
657  s->misc_bits++;
658  s->last_bits++;
659  }
660 
661  return;
662  }
663  }
664 
665  put_bits(&s->pb, 1, 0); /* mb coded */
666  cbpc = cbp & 3;
667  cbpy = cbp >> 2;
668  cbpy ^= 0xf;
669  if (s->c.mv_type == MV_TYPE_16X16) {
670  if (s->dquant)
671  cbpc += 8;
672  put_bits(&s->pb,
673  ff_h263_inter_MCBPC_bits[cbpc],
674  ff_h263_inter_MCBPC_code[cbpc]);
675 
676  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
677  if (s->dquant)
678  put_bits(pb2, 2, dquant_code[s->dquant + 2]);
679 
680  if (!s->c.progressive_sequence) {
681  if (cbp)
682  put_bits(pb2, 1, s->c.interlaced_dct);
683  put_bits(pb2, 1, 0);
684  }
685 
686  if (interleaved_stats)
687  s->misc_bits += get_bits_diff(s);
688 
689  /* motion vectors: 16x16 mode */
690  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
691 
692  ff_h263_encode_motion_vector(s,
693  motion_x - pred_x,
694  motion_y - pred_y,
695  s->f_code);
696  } else if (s->c.mv_type == MV_TYPE_FIELD) {
697  if (s->dquant)
698  cbpc += 8;
699  put_bits(&s->pb,
700  ff_h263_inter_MCBPC_bits[cbpc],
701  ff_h263_inter_MCBPC_code[cbpc]);
702 
703  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
704  if (s->dquant)
705  put_bits(pb2, 2, dquant_code[s->dquant + 2]);
706 
707  av_assert2(!s->c.progressive_sequence);
708  if (cbp)
709  put_bits(pb2, 1, s->c.interlaced_dct);
710  put_bits(pb2, 1, 1);
711 
712  if (interleaved_stats)
713  s->misc_bits += get_bits_diff(s);
714 
715  /* motion vectors: 16x8 interlaced mode */
716  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
717  pred_y /= 2;
718 
719  put_bits(&s->pb, 1, s->c.field_select[0][0]);
720  put_bits(&s->pb, 1, s->c.field_select[0][1]);
721 
722  ff_h263_encode_motion_vector(s,
723  s->c.mv[0][0][0] - pred_x,
724  s->c.mv[0][0][1] - pred_y,
725  s->f_code);
726  ff_h263_encode_motion_vector(s,
727  s->c.mv[0][1][0] - pred_x,
728  s->c.mv[0][1][1] - pred_y,
729  s->f_code);
730  } else {
731  av_assert2(s->c.mv_type == MV_TYPE_8X8);
732  put_bits(&s->pb,
733  ff_h263_inter_MCBPC_bits[cbpc + 16],
734  ff_h263_inter_MCBPC_code[cbpc + 16]);
735  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
736 
737  if (!s->c.progressive_sequence && cbp)
738  put_bits(pb2, 1, s->c.interlaced_dct);
739 
740  if (interleaved_stats)
741  s->misc_bits += get_bits_diff(s);
742 
743  for (i = 0; i < 4; i++) {
744  /* motion vectors: 8x8 mode*/
745  ff_h263_pred_motion(&s->c, i, 0, &pred_x, &pred_y);
746 
747  ff_h263_encode_motion_vector(s,
748  s->c.cur_pic.motion_val[0][s->c.block_index[i]][0] - pred_x,
749  s->c.cur_pic.motion_val[0][s->c.block_index[i]][1] - pred_y,
750  s->f_code);
751  }
752  }
753 
754  if (interleaved_stats)
755  s->mv_bits += get_bits_diff(s);
756 
757  mpeg4_encode_blocks_inter(s, block, tex_pb);
758 
759  if (interleaved_stats)
760  s->p_tex_bits += get_bits_diff(s);
761  }
762  } else {
763  int cbp;
764  int dc_diff[6]; // dc values with the dc prediction subtracted
765  int dir[6]; // prediction direction
766  int zigzag_last_index[6];
767  const uint8_t *scan_table[6];
768  int i;
769 
770  for (int i = 0; i < 6; i++) {
771  int pred = mpeg4_pred_dc(&s->c, i, &dir[i]);
772  int scale = i < 4 ? s->c.y_dc_scale : s->c.c_dc_scale;
773 
774  pred = FASTDIV((pred + (scale >> 1)), scale);
775  dc_diff[i] = block[i][0] - pred;
776  s->c.dc_val[s->c.block_index[i]] = av_clip_uintp2(block[i][0] * scale, 11);
777  }
778 
779  if (s->c.avctx->flags & AV_CODEC_FLAG_AC_PRED) {
780  s->c.ac_pred = decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
781  } else {
782  for (i = 0; i < 6; i++)
783  scan_table[i] = s->c.intra_scantable.permutated;
784  }
785 
786  /* compute cbp */
787  cbp = 0;
788  for (i = 0; i < 6; i++)
789  if (s->c.block_last_index[i] >= 1)
790  cbp |= 1 << (5 - i);
791 
792  cbpc = cbp & 3;
793  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
794  if (s->dquant)
795  cbpc += 4;
796  put_bits(&s->pb,
797  ff_h263_intra_MCBPC_bits[cbpc],
798  ff_h263_intra_MCBPC_code[cbpc]);
799  } else {
800  if (s->dquant)
801  cbpc += 8;
802  put_bits(&s->pb, 1, 0); /* mb coded */
803  put_bits(&s->pb,
804  ff_h263_inter_MCBPC_bits[cbpc + 4],
805  ff_h263_inter_MCBPC_code[cbpc + 4]);
806  }
807  put_bits(pb2, 1, s->c.ac_pred);
808  cbpy = cbp >> 2;
809  put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
810  if (s->dquant)
811  put_bits(dc_pb, 2, dquant_code[s->dquant + 2]);
812 
813  if (!s->c.progressive_sequence)
814  put_bits(dc_pb, 1, s->c.interlaced_dct);
815 
816  if (interleaved_stats)
817  s->misc_bits += get_bits_diff(s);
818 
819  mpeg4_encode_blocks_intra(s, block, dc_diff, scan_table, dc_pb, tex_pb);
820 
821  if (interleaved_stats)
822  s->i_tex_bits += get_bits_diff(s);
823  s->i_count++;
824 
825  /* restore ac coeffs & last_index stuff
826  * if we messed them up with the prediction */
827  if (s->c.ac_pred)
828  restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
829  }
830 }
831 
832 /**
833  * add MPEG-4 stuffing bits (01...1)
834  */
835 void ff_mpeg4_stuffing(PutBitContext *pbc)
836 {
837  int length = 8 - (put_bits_count(pbc) & 7);
838 
839  put_bits(pbc, length, (1 << (length - 1)) - 1);
840 }
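
The stuffing pattern is a single 0 followed by 1 bits up to the next byte boundary, so between one and eight bits are written. A minimal sketch of the arithmetic; stuffing_value() is illustrative only.

#include <assert.h>

/* Illustrative only: the stuffing pattern ff_mpeg4_stuffing() emits is a 0
 * followed by ones up to the next byte boundary. */
static unsigned stuffing_value(unsigned bits_in_buffer, unsigned *length)
{
    *length = 8 - (bits_in_buffer & 7);   /* 1..8 stuffing bits */
    return (1u << (*length - 1)) - 1;     /* binary 0 1 1 ... 1 */
}

int main(void)
{
    unsigned len, val = stuffing_value(19, &len); /* 19 bits written so far */
    assert(len == 5 && val == 0x0f);              /* emits 01111            */
    return 0;
}
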
841 
842 /* must be called before writing the header */
843 void ff_set_mpeg4_time(MPVEncContext *const s)
844 {
845  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
846  ff_mpeg4_init_direct_mv(&s->c);
847  } else {
848  s->c.last_time_base = s->c.time_base;
849  s->c.time_base = FFUDIV(s->c.time, s->c.avctx->time_base.den);
850  }
851 }
852 
853 static void mpeg4_encode_gop_header(MPVMainEncContext *const m)
854 {
855  MPVEncContext *const s = &m->s;
856  int64_t hours, minutes, seconds;
857  int64_t time;
858 
859  put_bits32(&s->pb, GOP_STARTCODE);
860 
861  time = s->c.cur_pic.ptr->f->pts;
862  if (m->reordered_input_picture[1])
863  time = FFMIN(time, m->reordered_input_picture[1]->f->pts);
864  time = time * s->c.avctx->time_base.num;
865  s->c.last_time_base = FFUDIV(time, s->c.avctx->time_base.den);
866 
867  seconds = FFUDIV(time, s->c.avctx->time_base.den);
868  minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60);
869  hours = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60);
870  hours = FFUMOD(hours , 24);
871 
872  put_bits(&s->pb, 5, hours);
873  put_bits(&s->pb, 6, minutes);
874  put_bits(&s->pb, 1, 1);
875  put_bits(&s->pb, 6, seconds);
876 
877  put_bits(&s->pb, 1, !!(s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
878  put_bits(&s->pb, 1, 0); // broken link == NO
879 
880  ff_mpeg4_stuffing(&s->pb);
881 }
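
The GOP header carries a wall-clock style time code: the function above scales the earliest pending pts by time_base.num and then splits it into hours, minutes and seconds with FFUDIV/FFUMOD. A minimal sketch of that split for non-negative times; gop_time_code() is illustrative, not an FFmpeg API.

#include <assert.h>
#include <stdint.h>

/* Illustrative only: derive the hours/minutes/seconds fields of the MPEG-4
 * GOP header. 'time' is in avctx->time_base units, 'den' is time_base.den. */
static void gop_time_code(int64_t time, int64_t den,
                          int64_t *hours, int64_t *minutes, int64_t *seconds)
{
    *seconds = time / den;            /* FFUDIV for non-negative input */
    *minutes = *seconds / 60; *seconds %= 60;
    *hours   = *minutes / 60; *minutes %= 60;
    *hours  %= 24;                    /* the field is only 5 bits wide */
}

int main(void)
{
    int64_t h, m, s;
    gop_time_code(3725 * 25, 25, &h, &m, &s); /* 3725 s in 1/25 s units */
    assert(h == 1 && m == 2 && s == 5);
    return 0;
}
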
882 
883 static void mpeg4_encode_visual_object_header(MPVMainEncContext *const m)
884 {
885  MPVEncContext *const s = &m->s;
886  int profile_and_level_indication;
887  int vo_ver_id;
888 
889  if (s->c.avctx->profile != AV_PROFILE_UNKNOWN) {
890  profile_and_level_indication = s->c.avctx->profile << 4;
891  } else if (m->max_b_frames || s->c.quarter_sample) {
892  profile_and_level_indication = 0xF0; // adv simple
893  } else {
894  profile_and_level_indication = 0x00; // simple
895  }
896 
897  if (s->c.avctx->level != AV_LEVEL_UNKNOWN)
898  profile_and_level_indication |= s->c.avctx->level;
899  else
900  profile_and_level_indication |= 1; // level 1
901 
902  if (profile_and_level_indication >> 4 == 0xF)
903  vo_ver_id = 5;
904  else
905  vo_ver_id = 1;
906 
907  // FIXME levels
908 
909  put_bits32(&s->pb, VOS_STARTCODE);
910 
911  put_bits(&s->pb, 8, profile_and_level_indication);
912 
913  put_bits32(&s->pb, VISUAL_OBJ_STARTCODE);
914 
915  put_bits(&s->pb, 1, 1);
916  put_bits(&s->pb, 4, vo_ver_id);
917  put_bits(&s->pb, 3, 1); // priority
918 
919  put_bits(&s->pb, 4, 1); // visual obj type== video obj
920 
921  put_bits(&s->pb, 1, 0); // video signal type == no clue // FIXME
922 
923  ff_mpeg4_stuffing(&s->pb);
924 }
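
profile_and_level_indication is a single byte with the profile in the high nibble and the level in the low nibble; 0xF0 therefore selects Advanced Simple and 0x00 Simple, with the level OR-ed in afterwards. A trivial sketch; profile_and_level() is only an illustration.

#include <assert.h>

/* Illustrative only: how mpeg4_encode_visual_object_header() packs the byte. */
static int profile_and_level(int profile, int level)
{
    return (profile << 4) | level;
}

int main(void)
{
    assert(profile_and_level(0xF, 5) == 0xF5); /* Advanced Simple @ L5 */
    assert(profile_and_level(0x0, 1) == 0x01); /* Simple @ L1          */
    return 0;
}
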
925 
926 static void mpeg4_encode_vol_header(Mpeg4EncContext *const m4,
927  int vo_number,
928  int vol_number)
929 {
930  MPVEncContext *const s = &m4->m.s;
931  int vo_ver_id, vo_type, aspect_ratio_info;
932 
933  if (m4->m.max_b_frames || s->c.quarter_sample) {
934  vo_ver_id = 5;
935  vo_type = ADV_SIMPLE_VO_TYPE;
936  } else {
937  vo_ver_id = 1;
938  vo_type = SIMPLE_VO_TYPE;
939  }
940 
941  put_bits32(&s->pb, 0x100 + vo_number); /* video obj */
942  put_bits32(&s->pb, 0x120 + vol_number); /* video obj layer */
943 
944  put_bits(&s->pb, 1, 0); /* random access vol */
945  put_bits(&s->pb, 8, vo_type); /* video obj type indication */
946  put_bits(&s->pb, 1, 1); /* is obj layer id= yes */
947  put_bits(&s->pb, 4, vo_ver_id); /* is obj layer ver id */
948  put_bits(&s->pb, 3, 1); /* is obj layer priority */
949 
950  aspect_ratio_info = ff_h263_aspect_to_info(s->c.avctx->sample_aspect_ratio);
951 
952  put_bits(&s->pb, 4, aspect_ratio_info); /* aspect ratio info */
953  if (aspect_ratio_info == FF_ASPECT_EXTENDED) {
954  av_reduce(&s->c.avctx->sample_aspect_ratio.num, &s->c.avctx->sample_aspect_ratio.den,
955  s->c.avctx->sample_aspect_ratio.num, s->c.avctx->sample_aspect_ratio.den, 255);
956  put_bits(&s->pb, 8, s->c.avctx->sample_aspect_ratio.num);
957  put_bits(&s->pb, 8, s->c.avctx->sample_aspect_ratio.den);
958  }
959 
960  put_bits(&s->pb, 1, 1); /* vol control parameters= yes */
961  put_bits(&s->pb, 2, 1); /* chroma format YUV 420/YV12 */
962  put_bits(&s->pb, 1, s->c.low_delay);
963  put_bits(&s->pb, 1, 0); /* vbv parameters= no */
964 
965  put_bits(&s->pb, 2, RECT_SHAPE); /* vol shape= rectangle */
966  put_bits(&s->pb, 1, 1); /* marker bit */
967 
968  put_bits(&s->pb, 16, s->c.avctx->time_base.den);
969  if (m4->time_increment_bits < 1)
970  m4->time_increment_bits = 1;
971  put_bits(&s->pb, 1, 1); /* marker bit */
972  put_bits(&s->pb, 1, 0); /* fixed vop rate=no */
973  put_bits(&s->pb, 1, 1); /* marker bit */
974  put_bits(&s->pb, 13, s->c.width); /* vol width */
975  put_bits(&s->pb, 1, 1); /* marker bit */
976  put_bits(&s->pb, 13, s->c.height); /* vol height */
977  put_bits(&s->pb, 1, 1); /* marker bit */
978  put_bits(&s->pb, 1, s->c.progressive_sequence ? 0 : 1);
979  put_bits(&s->pb, 1, 1); /* obmc disable */
980  if (vo_ver_id == 1)
981  put_bits(&s->pb, 1, 0); /* sprite enable */
982  else
983  put_bits(&s->pb, 2, 0); /* sprite enable */
984 
985  put_bits(&s->pb, 1, 0); /* not 8 bit == false */
986  put_bits(&s->pb, 1, s->mpeg_quant); /* quant type = (0 = H.263 style) */
987 
988  if (s->mpeg_quant) {
989  ff_write_quant_matrix(&s->pb, s->c.avctx->intra_matrix);
990  ff_write_quant_matrix(&s->pb, s->c.avctx->inter_matrix);
991  }
992 
993  if (vo_ver_id != 1)
994  put_bits(&s->pb, 1, s->c.quarter_sample);
995  put_bits(&s->pb, 1, 1); /* complexity estimation disable */
996  put_bits(&s->pb, 1, s->rtp_mode ? 0 : 1); /* resync marker disable */
997  put_bits(&s->pb, 1, s->data_partitioning);
998  if (s->data_partitioning)
999  put_bits(&s->pb, 1, 0); /* no rvlc */
1000 
1001  if (vo_ver_id != 1) {
1002  put_bits(&s->pb, 1, 0); /* newpred */
1003  put_bits(&s->pb, 1, 0); /* reduced res vop */
1004  }
1005  put_bits(&s->pb, 1, 0); /* scalability */
1006 
1007  ff_mpeg4_stuffing(&s->pb);
1008 
1009  /* user data */
1010  if (!(s->c.avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
1011  put_bits32(&s->pb, USER_DATA_STARTCODE);
1012  ff_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
1013  }
1014 }
1015 
1016 /* write MPEG-4 VOP header */
1017 static int mpeg4_encode_picture_header(MPVMainEncContext *const m)
1018 {
1019  Mpeg4EncContext *const m4 = mainctx_to_mpeg4(m);
1020  MPVEncContext *const s = &m->s;
1021  uint64_t time_incr;
1022  int64_t time_div, time_mod;
1023 
1024  put_bits_assume_flushed(&s->pb);
1025 
1026  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
1027  if (!(s->c.avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
1028  if (s->c.avctx->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) // HACK, the reference sw is buggy
1029  mpeg4_encode_visual_object_header(m);
1030  if (s->c.avctx->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || s->picture_number == 0) // HACK, the reference sw is buggy
1031  mpeg4_encode_vol_header(m4, 0, 0);
1032  }
1033  mpeg4_encode_gop_header(m);
1034  }
1035 
1036  s->partitioned_frame = s->data_partitioning && s->c.pict_type != AV_PICTURE_TYPE_B;
1037 
1038  put_bits32(&s->pb, VOP_STARTCODE); /* vop header */
1039  put_bits(&s->pb, 2, s->c.pict_type - 1); /* pict type: I = 0 , P = 1 */
1040 
1041  time_div = FFUDIV(s->c.time, s->c.avctx->time_base.den);
1042  time_mod = FFUMOD(s->c.time, s->c.avctx->time_base.den);
1043  time_incr = time_div - s->c.last_time_base;
1044 
1045  // This limits the frame duration to max 1 day
1046  if (time_incr > 3600*24) {
1047  av_log(s->c.avctx, AV_LOG_ERROR, "time_incr %"PRIu64" too large\n", time_incr);
1048  return AVERROR(EINVAL);
1049  }
1050  while (time_incr--)
1051  put_bits(&s->pb, 1, 1);
1052 
1053  put_bits(&s->pb, 1, 0);
1054 
1055  put_bits(&s->pb, 1, 1); /* marker */
1056  put_bits(&s->pb, m4->time_increment_bits, time_mod); /* time increment */
1057  put_bits(&s->pb, 1, 1); /* marker */
1058  put_bits(&s->pb, 1, 1); /* vop coded */
1059  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
1060  put_bits(&s->pb, 1, s->c.no_rounding); /* rounding type */
1061  }
1062  put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
1063  if (!s->c.progressive_sequence) {
1064  put_bits(&s->pb, 1, !!(s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST));
1065  put_bits(&s->pb, 1, s->c.alternate_scan);
1066  }
1067  // FIXME sprite stuff
1068 
1069  put_bits(&s->pb, 5, s->c.qscale);
1070 
1071  if (s->c.pict_type != AV_PICTURE_TYPE_I)
1072  put_bits(&s->pb, 3, s->f_code); /* fcode_for */
1073  if (s->c.pict_type == AV_PICTURE_TYPE_B)
1074  put_bits(&s->pb, 3, s->b_code); /* fcode_back */
1075 
1076  return 0;
1077 }
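
The VOP time stamp is written in two parts: modulo_time_base, a unary run of 1 bits counting the whole seconds elapsed since the last time base (terminated by a 0), and vop_time_increment, the fractional remainder written with time_increment_bits bits. A minimal sketch of how the two fields are derived; vop_time_fields() is illustrative and assumes non-negative times.

#include <assert.h>
#include <stdint.h>

/* Illustrative only: the two time fields written by mpeg4_encode_picture_header(). */
static void vop_time_fields(int64_t time, int den, int64_t last_time_base,
                            int64_t *num_one_bits, int64_t *time_increment)
{
    *num_one_bits   = time / den - last_time_base; /* modulo_time_base            */
    *time_increment = time % den;                  /* fixed-width vop_time_increment */
}

int main(void)
{
    int64_t ones, incr;
    vop_time_fields(77, 30, 1, &ones, &incr); /* 77/30 = 2 s, remainder 17 */
    assert(ones == 1 && incr == 17);
    return 0;
}
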
1078 
1079 static av_cold void init_uni_dc_tab(void)
1080 {
1081  int level, uni_code, uni_len;
1082 
1083  for (level = -256; level < 256; level++) {
1084  int size, v, l;
1085  /* find number of bits */
1086  size = 0;
1087  v = abs(level);
1088  while (v) {
1089  v >>= 1;
1090  size++;
1091  }
1092 
1093  if (level < 0)
1094  l = (-level) ^ ((1 << size) - 1);
1095  else
1096  l = level;
1097 
1098  /* luminance */
1099  uni_code = ff_mpeg4_DCtab_lum[size][0];
1100  uni_len = ff_mpeg4_DCtab_lum[size][1];
1101 
1102  if (size > 0) {
1103  uni_code <<= size;
1104  uni_code |= l;
1105  uni_len += size;
1106  if (size > 8) {
1107  uni_code <<= 1;
1108  uni_code |= 1;
1109  uni_len++;
1110  }
1111  }
1112  uni_DCtab_lum_bits[level + 256] = uni_code;
1113  uni_DCtab_lum_len[level + 256] = uni_len;
1114 
1115  /* chrominance */
1116  uni_code = ff_mpeg4_DCtab_chrom[size][0];
1117  uni_len = ff_mpeg4_DCtab_chrom[size][1];
1118 
1119  if (size > 0) {
1120  uni_code <<= size;
1121  uni_code |= l;
1122  uni_len += size;
1123  if (size > 8) {
1124  uni_code <<= 1;
1125  uni_code |= 1;
1126  uni_len++;
1127  }
1128  }
1129  uni_DCtab_chrom_bits[level + 256] = uni_code;
1130  uni_DCtab_chrom_len[level + 256] = uni_len;
1131  }
1132 }
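
For each DC difference the table construction above first derives a size (the number of magnitude bits) and the additional bits, which are the magnitude itself for positive values and its one's complement within size bits for negative values; the size VLC from ff_mpeg4_DCtab_* and the additional bits are then concatenated into one table entry. A standalone sketch of that first step; dc_size_and_bits() is illustrative only.

#include <assert.h>
#include <stdlib.h>

/* Illustrative only: the "size + additional bits" step folded into one
 * table by init_uni_dc_tab(). */
static int dc_size_and_bits(int level, int *additional_bits)
{
    int size = 0, v = abs(level);
    while (v) { v >>= 1; size++; }  /* number of magnitude bits */
    *additional_bits = level < 0 ? (-level) ^ ((1 << size) - 1) /* 1's complement */
                                 : level;
    return size;
}

int main(void)
{
    int bits, size = dc_size_and_bits(-3, &bits);
    assert(size == 2 && bits == 0); /* -3 -> size 2, additional bits 00  */
    size = dc_size_and_bits(5, &bits);
    assert(size == 3 && bits == 5); /*  5 -> size 3, additional bits 101 */
    return 0;
}
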
1133 
1134 static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab,
1135  uint8_t *len_tab)
1136 {
1137  // Type 3 escape method. The escape code is the same for both VLCs
1138  // (0x3, seven bits), so it is hardcoded.
1139  memset(len_tab, 30, 2 * 2 * 64 * 64);
1140  len_tab += 64;
1141  bits_tab += 64;
1142  for (int run = 0; run < 64; ++run) {
1143  for (int level = 1;; ++level) {
1144  // Escape code type 3 not last run (6 bits) marker marker
1145  unsigned code = (3 << 23) | (3 << 21) | (0 << 20) | (run << 14) | (1 << 13) | 1;
1146  // first the negative levels
1147  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, -level)] = code | (-level & 0xfff) << 1;
1148  bits_tab[UNI_MPEG4_ENC_INDEX(1, run, -level)] =
1149  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, -level)] | (1 << 20) /* last */;
1150 
1151  if (level == 64) // positive levels have a range of 1..63
1152  break;
1153  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, level)] = code | level << 1;
1154  bits_tab[UNI_MPEG4_ENC_INDEX(1, run, level)] =
1155  bits_tab[UNI_MPEG4_ENC_INDEX(0, run, level)] | (1 << 20) /* last */;
1156  }
1157  // Is this needed at all?
1158  len_tab[UNI_MPEG4_ENC_INDEX(0, run, 0)] =
1159  len_tab[UNI_MPEG4_ENC_INDEX(1, run, 0)] = 0;
1160  }
1161 
1162  uint8_t max_run[2][32] = { 0 };
1163 
1164 #define VLC_NUM_CODES 102 // excluding the escape
1165  av_assert2(rl->n == VLC_NUM_CODES);
1166  for (int i = VLC_NUM_CODES - 1, max_level, cur_run = 0; i >= 0; --i) {
1167  int run = rl->table_run[i], level = rl->table_level[i];
1168  int last = i >= rl->last;
1169  unsigned code = rl->table_vlc[i][0] << 1;
1170  int len = rl->table_vlc[i][1] + 1;
1171 
1172  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, level)] = code;
1173  len_tab [UNI_MPEG4_ENC_INDEX(last, run, level)] = len;
1174  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, -level)] = code | 1;
1175  len_tab [UNI_MPEG4_ENC_INDEX(last, run, -level)] = len;
1176 
1177  if (!max_run[last][level])
1178  max_run[last][level] = run + 1;
1179  av_assert2(run + 1 <= max_run[last][level]);
1180 
1181  int run3 = run + max_run[last][level];
1182  int len3 = len + 7 + 2;
1183 
1184  if (run3 < 64 && len3 < len_tab[UNI_MPEG4_ENC_INDEX(last, run3, level)]) {
1185  unsigned code3 = code | (0x3 << 2 | 0x2) << len;
1186  bits_tab[UNI_MPEG4_ENC_INDEX(last, run3, level)] = code3;
1187  len_tab [UNI_MPEG4_ENC_INDEX(last, run3, level)] = len3;
1188  bits_tab[UNI_MPEG4_ENC_INDEX(last, run3, -level)] = code3 | 1;
1189  len_tab [UNI_MPEG4_ENC_INDEX(last, run3, -level)] = len3;
1190  }
1191  // table_run and table_level are ordered so that all the entries
1192  // with the same last and run are consecutive and level is ascending
1193  // among these entries. By traversing downwards we therefore automatically
1194  // encounter max_level of a given run first, needed for escape method 1.
1195  if (run != cur_run) {
1196  max_level = level;
1197  cur_run = run;
1198  } else
1199  av_assert2(max_level > level);
1200 
1201  code |= 0x3 << (len + 1);
1202  len += 7 + 1;
1203  level += max_level;
1204  av_assert2(len_tab [UNI_MPEG4_ENC_INDEX(last, run, level)] >= len);
1205  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, level)] = code;
1206  len_tab [UNI_MPEG4_ENC_INDEX(last, run, level)] = len;
1207  bits_tab[UNI_MPEG4_ENC_INDEX(last, run, -level)] = code | 1;
1208  len_tab [UNI_MPEG4_ENC_INDEX(last, run, -level)] = len;
1209  }
1210 }
1211 
1212 static av_cold void mpeg4_encode_init_static(void)
1213 {
1214  init_uni_dc_tab();
1215 
1216  init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
1217  init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
1218 
1219  for (int f_code = MAX_FCODE; f_code > 0; f_code--) {
1220  for (int mv = -(16 << f_code); mv < (16 << f_code); mv++)
1221  fcode_tab[mv + MAX_MV] = f_code;
1222  }
1223 }
1224 
1225 static av_cold int encode_init(AVCodecContext *avctx)
1226 {
1227  static AVOnce init_static_once = AV_ONCE_INIT;
1228  Mpeg4EncContext *const m4 = avctx->priv_data;
1229  MPVMainEncContext *const m = &m4->m;
1230  MPVEncContext *const s = &m->s;
1231  int ret;
1232 
1233  if (avctx->width >= (1<<13) || avctx->height >= (1<<13)) {
1234  av_log(avctx, AV_LOG_ERROR, "dimensions too large for MPEG-4\n");
1235  return AVERROR(EINVAL);
1236  }
1237 
1238  m->encode_picture_header = mpeg4_encode_picture_header;
1239  s->encode_mb = mpeg4_encode_mb;
1240 
1241  m->fcode_tab = fcode_tab + MAX_MV;
1242 
1243  s->min_qcoeff = -2048;
1244  s->max_qcoeff = 2047;
1245  s->intra_ac_vlc_length = uni_mpeg4_intra_rl_len;
1246  s->intra_ac_vlc_last_length = uni_mpeg4_intra_rl_len + 128 * 64;
1247  s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len;
1248  s->inter_ac_vlc_last_length = uni_mpeg4_inter_rl_len + 128 * 64;
1249  s->luma_dc_vlc_length = uni_DCtab_lum_len;
1250  s->ac_esc_length = 7 + 2 + 1 + 6 + 1 + 12 + 1;
1251  s->c.y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
1252  s->c.c_dc_scale_table = ff_mpeg4_c_dc_scale_table;
1253 
1254  ff_qpeldsp_init(&s->c.qdsp);
1255  if ((ret = ff_mpv_encode_init(avctx)) < 0)
1256  return ret;
1257 
1258  ff_thread_once(&init_static_once, mpeg4_encode_init_static);
1259 
1260  if (avctx->time_base.den > (1 << 16) - 1) {
1261  av_log(avctx, AV_LOG_ERROR,
1262  "timebase %d/%d not supported by MPEG 4 standard, "
1263  "the maximum admitted value for the timebase denominator "
1264  "is %d\n", avctx->time_base.num, avctx->time_base.den,
1265  (1 << 16) - 1);
1266  return AVERROR(EINVAL);
1267  }
1268 
1269  m4->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
1270 
1271  if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1272  avctx->extradata = av_malloc(1024);
1273  if (!avctx->extradata)
1274  return AVERROR(ENOMEM);
1275  init_put_bits(&s->pb, avctx->extradata, 1024);
1276 
1277  mpeg4_encode_visual_object_header(m);
1278  mpeg4_encode_vol_header(m4, 0, 0);
1279 
1280 // ff_mpeg4_stuffing(&s->pb); ?
1281  flush_put_bits(&s->pb);
1282  avctx->extradata_size = put_bytes_output(&s->pb);
1283  }
1284  return 0;
1285 }
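
time_increment_bits, computed in encode_init() as av_log2(avctx->time_base.den - 1) + 1, is simply the smallest bit width that can hold every vop_time_increment value 0 .. den-1. An equivalent standalone computation; time_increment_bits() here is a hypothetical helper, not the FFmpeg field.

#include <assert.h>

/* Illustrative only: smallest bit width that represents 0 .. den-1,
 * matching av_log2(den - 1) + 1 for den >= 1. */
static int time_increment_bits(unsigned den)
{
    int bits = 1;
    while ((1u << bits) < den)
        bits++;
    return bits;
}

int main(void)
{
    assert(time_increment_bits(25)    == 5);  /* 25 fps: increments 0..24 */
    assert(time_increment_bits(30000) == 15); /* 29.97 fps: 0..29999      */
    return 0;
}
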
1286 
1287 void ff_mpeg4_init_partitions(MPVEncContext *const s)
1288 {
1289  uint8_t *start = put_bits_ptr(&s->pb);
1290  uint8_t *end = s->pb.buf_end;
1291  int size = end - start;
1292  int pb_size = (((intptr_t)start + size / 3) & (~3)) - (intptr_t)start;
1293  int tex_size = (size - 2 * pb_size) & (~3);
1294 
1295  set_put_bits_buffer_size(&s->pb, pb_size);
1296  init_put_bits(&s->tex_pb, start + pb_size, tex_size);
1297  init_put_bits(&s->pb2, start + pb_size + tex_size, pb_size);
1298 }
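
For data partitioning the remaining output buffer is carved into three put-bit contexts: roughly one third for s->pb, the bulk of the rest for the texture partition s->tex_pb, and the final part for s->pb2, each trimmed to a 4-byte multiple. A simplified sketch of the size arithmetic; split_sizes() is illustrative, and unlike the real code above it aligns sizes rather than absolute buffer addresses.

#include <assert.h>

/* Illustrative only: approximate partition sizes, in bytes. */
static void split_sizes(int remaining, int *pb_size, int *tex_size)
{
    *pb_size  = (remaining / 3) & ~3;            /* about one third, 4-byte aligned */
    *tex_size = (remaining - 2 * *pb_size) & ~3;
}

int main(void)
{
    int pb, tex;
    split_sizes(1000, &pb, &tex);
    assert(pb == 332 && tex == 336 && 2 * pb + tex <= 1000);
    return 0;
}
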
1299 
1300 void ff_mpeg4_merge_partitions(MPVEncContext *const s)
1301 {
1302  const int pb2_len = put_bits_count(&s->pb2);
1303  const int tex_pb_len = put_bits_count(&s->tex_pb);
1304  const int bits = put_bits_count(&s->pb);
1305 
1306  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
1307  put_bits(&s->pb, 19, DC_MARKER);
1308  s->misc_bits += 19 + pb2_len + bits - s->last_bits;
1309  s->i_tex_bits += tex_pb_len;
1310  } else {
1311  put_bits(&s->pb, 17, MOTION_MARKER);
1312  s->misc_bits += 17 + pb2_len;
1313  s->mv_bits += bits - s->last_bits;
1314  s->p_tex_bits += tex_pb_len;
1315  }
1316 
1317  flush_put_bits(&s->pb2);
1318  flush_put_bits(&s->tex_pb);
1319 
1320  set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf);
1321  ff_copy_bits(&s->pb, s->pb2.buf, pb2_len);
1322  ff_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len);
1323  s->last_bits = put_bits_count(&s->pb);
1324 }
1325 
1326 void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
1327 {
1328  int mb_num_bits = av_log2(s->c.mb_num - 1) + 1;
1329 
1330  put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s->c.pict_type, s->f_code, s->b_code), 0);
1331  put_bits(&s->pb, 1, 1);
1332 
1333  put_bits(&s->pb, mb_num_bits, s->c.mb_x + s->c.mb_y * s->c.mb_width);
1334  put_bits(&s->pb, 5 /* quant_precision */, s->c.qscale);
1335  put_bits(&s->pb, 1, 0); /* no HEC */
1336 }
1337 
1338 #define OFFSET(x) offsetof(MPVEncContext, x)
1339 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1340 static const AVOption options[] = {
1341  { "data_partitioning", "Use data partitioning.", FF_MPV_OFFSET(data_partitioning), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
1342  { "alternate_scan", "Enable alternate scantable.", OFFSET(c.alternate_scan), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
1343  { "mpeg_quant", "Use MPEG quantizers instead of H.263",
1344  OFFSET(mpeg_quant), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, VE },
1345  FF_MPV_COMMON_BFRAME_OPTS
1346  FF_MPV_COMMON_OPTS
1347  FF_MPV_COMMON_MOTION_EST_OPTS
1348  FF_MPEG4_PROFILE_OPTS
1349  { NULL },
1350 };
1351 
1352 static const AVClass mpeg4enc_class = {
1353  .class_name = "MPEG4 encoder",
1354  .item_name = av_default_item_name,
1355  .option = options,
1356  .version = LIBAVUTIL_VERSION_INT,
1357 };
1358 
1359 const FFCodec ff_mpeg4_encoder = {
1360  .p.name = "mpeg4",
1361  CODEC_LONG_NAME("MPEG-4 part 2"),
1362  .p.type = AVMEDIA_TYPE_VIDEO,
1363  .p.id = AV_CODEC_ID_MPEG4,
1364  .priv_data_size = sizeof(Mpeg4EncContext),
1365  .init = encode_init,
1366  FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
1367  .close = ff_mpv_encode_end,
1368  CODEC_PIXFMTS(AV_PIX_FMT_YUV420P),
1369  .color_ranges = AVCOL_RANGE_MPEG,
1370  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
1371  AV_CODEC_CAP_SLICE_THREADS |
1372  AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
1373  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1374  .p.priv_class = &mpeg4enc_class,
1375 };
SIMPLE_VO_TYPE
#define SIMPLE_VO_TYPE
Definition: mpeg4videodefs.h:32
mpeg4_encode_init_static
static av_cold void mpeg4_encode_init_static(void)
Definition: mpeg4videoenc.c:1212
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:239
CODEC_PIXFMTS
#define CODEC_PIXFMTS(...)
Definition: codec_internal.h:386
FFUMOD
#define FFUMOD(a, b)
Definition: common.h:66
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:298
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:175
mpeg4_encode_ac_coeffs
static void mpeg4_encode_ac_coeffs(const int16_t block[64], const int last_index, int i, const uint8_t *const scan_table, PutBitContext *const ac_pb, const uint32_t *const bits_tab, const uint8_t *const len_tab)
Encode the AC coefficients of an 8x8 block.
Definition: mpeg4videoenc.c:328
FF_ASPECT_EXTENDED
#define FF_ASPECT_EXTENDED
Definition: h263.h:26
level
uint8_t level
Definition: svq3.c:208
MPVEncContext
Definition: mpegvideoenc.h:46
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
put_bits32
static void av_unused put_bits32(PutBitContext *s, uint32_t value)
Write exactly 32 bits into a bitstream.
Definition: put_bits.h:301
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:43
put_bytes_output
static int put_bytes_output(const PutBitContext *s)
Definition: put_bits.h:99
MAX_FCODE
#define MAX_FCODE
Definition: mpegvideoenc.h:285
thread.h
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
mv
static const int8_t mv[256][2]
Definition: 4xm.c:81
mpeg4_encode_gop_header
static void mpeg4_encode_gop_header(MPVMainEncContext *const m)
Definition: mpeg4videoenc.c:853
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_qpeldsp_init
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:784
h263enc.h
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:173
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
OFFSET
#define OFFSET(x)
Definition: mpeg4videoenc.c:1338
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:523
VOS_STARTCODE
#define VOS_STARTCODE
Definition: mpeg4videodefs.h:55
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
init_uni_dc_tab
static av_cold void init_uni_dc_tab(void)
Definition: mpeg4videoenc.c:1079
FFCodec
Definition: codec_internal.h:127
version.h
mpegvideo.h
Mpeg4EncContext::time_increment_bits
int time_increment_bits
number of bits to represent the fractional part of time
Definition: mpeg4videoenc.c:79
ff_mpeg4_get_video_packet_prefix_length
int ff_mpeg4_get_video_packet_prefix_length(enum AVPictureType pict_type, int f_code, int b_code)
Definition: mpeg4video.c:28
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
mpeg4_encode_mb
static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:452
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:248
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:442
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:172
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AV_CODEC_FLAG_GLOBAL_HEADER
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
Definition: avcodec.h:318
uni_mpeg4_intra_rl_bits
static uint32_t uni_mpeg4_intra_rl_bits[64 *64 *2 *2]
Definition: mpeg4videoenc.c:57
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:649
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:377
mpeg4videoenc.h
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1942
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:336
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
uni_mpeg4_intra_rl_len
static uint8_t uni_mpeg4_intra_rl_len[64 *64 *2 *2]
Definition: mpeg4videoenc.c:58
ff_mpeg4_DCtab_chrom
const uint8_t ff_mpeg4_DCtab_chrom[13][2]
Definition: mpeg4data.h:40
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
mainctx_to_mpeg4
static Mpeg4EncContext * mainctx_to_mpeg4(MPVMainEncContext *m)
Definition: mpeg4videoenc.c:82
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:182
wrap
#define wrap(func)
Definition: neontest.h:65
VOP_STARTCODE
#define VOP_STARTCODE
Definition: mpeg4videodefs.h:59
RLTable
RLTable.
Definition: rl.h:39
mpeg4_encode_visual_object_header
static void mpeg4_encode_visual_object_header(MPVMainEncContext *const m)
Definition: mpeg4videoenc.c:883
uni_mpeg4_inter_rl_bits
static uint32_t uni_mpeg4_inter_rl_bits[64 *64 *2 *2]
Definition: mpeg4videoenc.c:59
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
uni_DCtab_chrom_len
static uint8_t uni_DCtab_chrom_len[512]
Definition: mpeg4videoenc.c:51
FFUDIV
#define FFUDIV(a, b)
Definition: common.h:65
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:311
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1287
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:353
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
dquant_code
static const int dquant_code[5]
Definition: mpeg4videoenc.c:450
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:295
RLTable::n
int n
number of entries of table_vlc minus 1
Definition: rl.h:40
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:90
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
AV_PROFILE_UNKNOWN
#define AV_PROFILE_UNKNOWN
Definition: defs.h:65
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
s
#define s(width, name)
Definition: cbs_vp9.c:198
uni_mpeg4_inter_rl_len
static uint8_t uni_mpeg4_inter_rl_len[64 *64 *2 *2]
Definition: mpeg4videoenc.c:60
VLC_NUM_CODES
#define VLC_NUM_CODES
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:210
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:835
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:144
ff_mpeg4_rl_intra
RLTable ff_mpeg4_rl_intra
Definition: mpeg4data.h:108
uni_DCtab_chrom_bits
static uint16_t uni_DCtab_chrom_bits[512]
Definition: mpeg4videoenc.c:53
bits
uint8_t bits
Definition: vp3data.h:128
UNI_MPEG4_ENC_INDEX
#define UNI_MPEG4_ENC_INDEX(last, run, level)
Definition: mpeg4videoenc.c:64
uni_DCtab_lum_bits
static uint16_t uni_DCtab_lum_bits[512]
Definition: mpeg4videoenc.c:52
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:228
DC_MARKER
#define DC_MARKER
Definition: mpeg4videodefs.h:53
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:204
FF_MPV_OFFSET
#define FF_MPV_OFFSET(x)
Definition: mpegvideoenc.h:332
ff_put_string
void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
Put the string string in the bitstream.
Definition: bitstream.c:39
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:270
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
mpeg4_encode_vol_header
static void mpeg4_encode_vol_header(Mpeg4EncContext *const m4, int vo_number, int vol_number)
Definition: mpeg4videoenc.c:926
init_uni_mpeg4_rl_tab
static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab)
Definition: mpeg4videoenc.c:1134
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpeg4_DCtab_lum
const uint8_t ff_mpeg4_DCtab_lum[13][2]
Definition: mpeg4data.h:34
get_block_rate
static int get_block_rate(MPVEncContext *const s, int16_t block[64], int block_last_index, const uint8_t scantable[64])
Return the number of bits that encoding the 8x8 block in block would need.
Definition: mpeg4videoenc.c:91
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
NULL
#define NULL
Definition: coverity.c:32
FF_COMPLIANCE_VERY_STRICT
#define FF_COMPLIANCE_VERY_STRICT
Strictly conform to an older more strict version of the spec or reference software.
Definition: defs.h:58
run
uint8_t run
Definition: svq3.c:207
RLTable::table_vlc
const uint16_t(* table_vlc)[2]
Definition: rl.h:42
AV_LEVEL_UNKNOWN
#define AV_LEVEL_UNKNOWN
Definition: defs.h:206
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:240
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
mathops.h
options
Definition: swscale.c:43
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
Mpeg4EncContext
Definition: mpeg4videoenc.c:76
MOTION_MARKER
#define MOTION_MARKER
Definition: mpeg4videodefs.h:52
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1122
abs
#define abs(x)
Definition: cuda_runtime.h:35
FASTDIV
#define FASTDIV(a, b)
Definition: mathops.h:213
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1326
get_p_cbp
static int get_p_cbp(MPVEncContext *const s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h263enc.h:46
mpeg4_encode_picture_header
static int mpeg4_encode_picture_header(MPVMainEncContext *const m)
Definition: mpeg4videoenc.c:1017
MPVMainEncContext
Definition: mpegvideoenc.h:199
VISUAL_OBJ_STARTCODE
#define VISUAL_OBJ_STARTCODE
Definition: mpeg4videodefs.h:58
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:176
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:843
ADV_SIMPLE_VO_TYPE
#define ADV_SIMPLE_VO_TYPE
Definition: mpeg4videodefs.h:40
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
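An illustrative way to set this field when opening an encoder; the 25 fps values are examples only:

#include "libavcodec/avcodec.h"

static void sketch_configure_timing(AVCodecContext *avctx)
{
    /* 25 fps content: one timestamp tick equals 1/25 of a second */
    avctx->time_base = (AVRational){ 1, 25 };
    avctx->framerate = (AVRational){ 25, 1 };
}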
RLTable::table_level
const int8_t * table_level
Definition: rl.h:44
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:513
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ff_h263_rl_inter
RLTable ff_h263_rl_inter
Definition: h263data.c:159
ff_mpeg4_y_dc_scale_table
const uint8_t ff_mpeg4_y_dc_scale_table[32]
Definition: mpeg4data.h:356
codec_internal.h
Mpeg4EncContext::m
MPVMainEncContext m
Definition: mpeg4videoenc.c:77
put_bits_assume_flushed
static void put_bits_assume_flushed(const PutBitContext *s)
Inform the compiler that a PutBitContext is flushed (i.e. that it has no bits pending in its internal bit buffer).
Definition: put_bits.h:82
ff_h263_cbpy_tab
const uint8_t ff_h263_cbpy_tab[16][2]
Definition: h263data.c:82
size
int size
Definition: twinvq_data.h:10344
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
a
The reader does not expect b to be semantically unsigned here, and if the code is later changed, for example by adding a division or another operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int (to use the same example, SUINT a).
Definition: undefined.txt:41
fcode_tab
static uint8_t fcode_tab[MAX_MV *2+1]
Minimal fcode that a motion vector component would need.
Definition: mpeg4videoenc.c:45
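A sketch of how such a minimal-fcode table can be filled; the bounds SKETCH_MAX_FCODE and SKETCH_MAX_MV are stand-ins, and the exact initialization in this file may differ in detail. Iterating from the largest f_code downwards lets smaller (cheaper) f_codes overwrite larger ones, leaving the minimum for each vector component.

#include <stdint.h>

#define SKETCH_MAX_FCODE 7
#define SKETCH_MAX_MV    4096

static uint8_t sketch_fcode_tab[SKETCH_MAX_MV * 2 + 1];

static void sketch_init_fcode_tab(void)
{
    for (int f_code = SKETCH_MAX_FCODE; f_code > 0; f_code--)
        for (int mv = -(16 << f_code); mv < (16 << f_code); mv++)
            if (mv >= -SKETCH_MAX_MV && mv <= SKETCH_MAX_MV)
                sketch_fcode_tab[mv + SKETCH_MAX_MV] = f_code;
}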
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
offset
It is the only field you need to keep, assuming you have a context. There is some magic around this that you don't need to care about; just let it be.
Definition: writing_filters.txt:86
attributes.h
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:178
ff_h263_inter_MCBPC_bits
const uint8_t ff_h263_inter_MCBPC_bits[28]
Definition: h263data.c:47
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:286
FF_MPEG4_PROFILE_OPTS
#define FF_MPEG4_PROFILE_OPTS
Definition: profiles.h:42
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:409
VE
#define VE
Definition: mpeg4videoenc.c:1339
av_assert2
#define av_assert2(cond)
assert() equivalent that may be placed in speed-critical code (disabled unless a high assert level is configured).
Definition: avassert.h:68
RECT_SHAPE
#define RECT_SHAPE
Definition: mpeg4videodefs.h:27
log.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Test the status of the outputs and forward it to the corresponding inputs, or return FFERROR_NOT_READY. If the filter stores one or a few frames internally, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code:
Definition: filter_design.txt:178
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
uni_DCtab_lum_len
static uint8_t uni_DCtab_lum_len[512]
Definition: mpeg4videoenc.c:50
restore_ac_coeffs
static void restore_ac_coeffs(MPVEncContext *const s, int16_t block[6][64], const int dir[6], const uint8_t *st[6], const int zigzag_last_index[6])
Restore the ac coefficients in block that have been changed by decide_ac_pred().
Definition: mpeg4videoenc.c:126
mpeg4enc_class
static const AVClass mpeg4enc_class
Definition: mpeg4videoenc.c:1352
options
static const AVOption options[]
Definition: mpeg4videoenc.c:1340
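For context, this is roughly what an entry in such a private-option table looks like; the SketchContext type and the "example_flag" option below are purely illustrative and not part of this encoder:

#include <stddef.h>
#include "libavutil/opt.h"

typedef struct SketchContext {
    int example_flag;
} SketchContext;

#define SKETCH_OFFSET(x) offsetof(SketchContext, x)
#define SKETCH_VE (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)

static const AVOption sketch_options[] = {
    { "example_flag", "toggle an illustrative feature",
      SKETCH_OFFSET(example_flag), AV_OPT_TYPE_BOOL,
      { .i64 = 0 }, 0, 1, SKETCH_VE },
    { NULL },
};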
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_mpeg4_encoder
const FFCodec ff_mpeg4_encoder
Definition: mpeg4videoenc.c:1359
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
ff_h263_aspect_to_info
av_const int ff_h263_aspect_to_info(AVRational aspect)
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:750
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
mpeg4videodefs.h
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:81
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: mpeg4videoenc.c:1225
ff_mpeg4_init_direct_mv
void ff_mpeg4_init_direct_mv(MpegEncContext *s)
Definition: mpeg4video.c:73
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
ff_h263_intra_MCBPC_bits
const uint8_t ff_h263_intra_MCBPC_bits[9]
Definition: h263data.c:33
AVCodecContext
main external API structure.
Definition: avcodec.h:431
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
ff_h263_intra_MCBPC_code
const uint8_t ff_h263_intra_MCBPC_code[9]
Definition: h263data.c:32
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
mpeg4video.h
AVRational::den
int den
Denominator.
Definition: rational.h:60
mpeg4_encode_dc
static void mpeg4_encode_dc(PutBitContext *s, int level, int n)
Encode the dc value.
Definition: mpeg4videoenc.c:312
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
RLTable::last
int last
number of values for last = 0
Definition: rl.h:41
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
FF_MPV_COMMON_BFRAME_OPTS
#define FF_MPV_COMMON_BFRAME_OPTS
Definition: mpegvideoenc.h:372
USER_DATA_STARTCODE
#define USER_DATA_STARTCODE
Definition: mpeg4videodefs.h:56
ff_h263_inter_MCBPC_code
const uint8_t ff_h263_inter_MCBPC_code[28]
Definition: h263data.c:38
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:287
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:559
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
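A minimal sketch of the bitstream-writer pattern that the functions above belong to, assuming the in-tree libavcodec/put_bits.h header; buffer and values are arbitrary:

#include <stdint.h>
#include "libavcodec/put_bits.h"

static int sketch_write_bits(uint8_t *buf, int buf_size)
{
    PutBitContext pb;

    init_put_bits(&pb, buf, buf_size);
    put_bits(&pb, 3, 5);          /* write the 3-bit value 0b101        */
    put_bits(&pb, 13, 0x1abc);    /* write a 13-bit value               */
    flush_put_bits(&pb);          /* pad the final byte with zero bits  */
    return put_bits_count(&pb);   /* bits written, a multiple of 8 now  */
}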
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1300
ff_mpeg4_c_dc_scale_table
const uint8_t ff_mpeg4_c_dc_scale_table[32]
Definition: mpeg4data.h:360
decide_ac_pred
static int decide_ac_pred(MPVEncContext *const s, int16_t block[6][64], const int dir[6], const uint8_t *st[6], int zigzag_last_index[6])
Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
Definition: mpeg4videoenc.c:187
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:171
mpeg4_encode_blocks_intra
static void mpeg4_encode_blocks_intra(MPVEncContext *const s, const int16_t block[6][64], const int intra_dc[6], const uint8_t *const *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb)
Definition: mpeg4videoenc.c:387
slice_to_mainenc
static const MPVMainEncContext * slice_to_mainenc(const MPVEncContext *s)
Definition: mpegvideoenc.h:274
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
mpeg4videodata.h
GOP_STARTCODE
#define GOP_STARTCODE
Definition: mpeg4videodefs.h:57
ff_h263_encode_motion_vector
static void ff_h263_encode_motion_vector(MPVEncContext *s, int x, int y, int f_code)
Definition: h263enc.h:39
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
get_b_cbp
static int get_b_cbp(MPVEncContext *const s, int16_t block[6][64], int motion_x, int motion_y, int mb_type)
Definition: mpeg4videoenc.c:408
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
mpeg4_pred_dc
static int mpeg4_pred_dc(MpegEncContext *s, int n, int *dir_ptr)
Predict the dc.
Definition: mpeg4videoenc.c:154
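The direction decision behind this kind of DC predictor follows the MPEG-4 gradient rule. A standalone sketch (a = DC of the block to the left, b = top-left, c = top), without the scaling and boundary handling the real function performs:

#include <stdlib.h>

static int sketch_pred_dc(int a, int b, int c, int *dir_ptr)
{
    if (abs(a - b) < abs(b - c)) {
        *dir_ptr = 1;   /* vertical: predict from the block above */
        return c;
    } else {
        *dir_ptr = 0;   /* horizontal: predict from the block to the left */
        return a;
    }
}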
put_bits.h
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:63
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
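av_log2() returns the index of the highest set bit, i.e. floor(log2(v)) for v > 0. A quick usage sketch:

#include <stdio.h>
#include "libavutil/common.h"   /* av_log2() */

int main(void)
{
    printf("%d %d %d\n", av_log2(1), av_log2(16), av_log2(255)); /* 0 4 7 */
    /* e.g. the number of bits needed for a positive value v is av_log2(v) + 1 */
    return 0;
}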
MPVMainEncContext::s
MPVEncContext s
The main slice context.
Definition: mpegvideoenc.h:200
mpeg4_encode_blocks_inter
static void mpeg4_encode_blocks_inter(MPVEncContext *const s, const int16_t block[6][64], PutBitContext *ac_pb)
Definition: mpeg4videoenc.c:371
RLTable::table_run
const int8_t * table_run
Definition: rl.h:43
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
h263.h