mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
30 #define UNCHECKED_BITSTREAM_READER 1
31 #include <inttypes.h>
32 
33 #include "libavutil/attributes.h"
34 #include "libavutil/emms.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/mem_internal.h"
38 #include "libavutil/reverse.h"
39 #include "libavutil/stereo3d.h"
40 #include "libavutil/timecode.h"
41 
42 #include "avcodec.h"
43 #include "codec_internal.h"
44 #include "decode.h"
45 #include "error_resilience.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "mpeg_er.h"
51 #include "mpeg12.h"
52 #include "mpeg12codecs.h"
53 #include "mpeg12data.h"
54 #include "mpeg12dec.h"
55 #include "mpegutils.h"
56 #include "mpegvideo.h"
57 #include "mpegvideodata.h"
58 #include "mpegvideodec.h"
59 #include "profiles.h"
60 #include "startcode.h"
61 #include "thread.h"
62 
63 #define A53_MAX_CC_COUNT 2000
64 
65 enum Mpeg2ClosedCaptionsFormat {
66  CC_FORMAT_AUTO,
67  CC_FORMAT_A53_PART4,
68  CC_FORMAT_SCTE20,
69  CC_FORMAT_DVD
70 };
71 
72 typedef struct Mpeg1Context {
73  MpegEncContext mpeg_enc_ctx;
74  int repeat_field; /* true if we must repeat the field */
75  AVPanScan pan_scan; /* some temporary storage for the panscan */
76  enum AVStereo3DType stereo3d_type;
77  int has_stereo3d;
78  AVBufferRef *a53_buf_ref;
79  enum Mpeg2ClosedCaptionsFormat cc_format;
80  uint8_t afd;
81  int has_afd;
82  int slice_count;
83  unsigned aspect_ratio_info;
84  AVRational save_aspect;
85  int save_width, save_height, save_progressive_seq;
86  AVRational frame_rate_ext; /* MPEG-2 specific framerate modifier */
87  unsigned frame_rate_index;
88  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
89  int closed_gop;
90  int tmpgexs;
91  int first_slice;
92  int extradata_decoded;
93  int64_t timecode_frame_start; /*< GOP timecode frame start number, in non drop frame format */
94 } Mpeg1Context;
95 
96 /* as H.263, but only 17 codes */
97 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
98 {
99  int code, sign, val, shift;
100 
101  code = get_vlc2(&s->gb, ff_mv_vlc, MV_VLC_BITS, 2);
102  if (code == 0)
103  return pred;
104  if (code < 0)
105  return 0xffff;
106 
107  sign = get_bits1(&s->gb);
108  shift = fcode - 1;
109  val = code;
110  if (shift) {
111  val = (val - 1) << shift;
112  val |= get_bits(&s->gb, shift);
113  val++;
114  }
115  if (sign)
116  val = -val;
117  val += pred;
118 
119  /* modulo decoding */
120  return sign_extend(val, 5 + shift);
121 }
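
/*
 * Illustrative sketch (simplified; the helper and its parameter names are
 * stand-ins, not part of libavcodec): how mpeg_decode_motion() above turns an
 * MV VLC code, its f_code-dependent residual bits and the sign bit into a
 * delta, adds the prediction, and wraps the result into the legal
 * (5 + f_code - 1)-bit signed range ("modulo decoding", the sign_extend()
 * call above).
 */
static int mv_reconstruct_sketch(int code, int residual, int sign,
                                 int pred, int fcode)
{
    const int shift = fcode - 1;
    const int half  = 16 << shift;          /* 2^(4 + shift) */
    int val = code;

    if (shift)                              /* append the residual bits */
        val = ((val - 1) << shift) + residual + 1;
    if (sign)
        val = -val;
    val += pred;

    /* modulo decoding: one wrap suffices for the ranges involved here */
    if (val < -half)
        val += 2 * half;
    else if (val >= half)
        val -= 2 * half;
    return val;
}
/* Example: fcode = 2, code = 3, residual = 1, sign = 0, pred = 10
 * -> delta = ((3 - 1) << 1) + 1 + 1 = 6, result = 16 (no wrap needed). */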
122 
123 #define MAX_INDEX (64 - 1)
124 #define check_scantable_index(ctx, x) \
125  do { \
126  if ((x) > MAX_INDEX) { \
127  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
128  ctx->mb_x, ctx->mb_y); \
129  return AVERROR_INVALIDDATA; \
130  } \
131  } while (0)
132 
 133 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
 134  int16_t *block, int n)
135 {
136  int level, i, j, run;
137  const uint8_t *const scantable = s->intra_scantable.permutated;
138  const uint16_t *quant_matrix = s->inter_matrix;
139  const int qscale = s->qscale;
140 
141  {
142  OPEN_READER(re, &s->gb);
143  i = -1;
144  // special case for first coefficient, no need to add second VLC table
145  UPDATE_CACHE(re, &s->gb);
146  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
147  level = (3 * qscale * quant_matrix[0]) >> 5;
148  level = (level - 1) | 1;
149  if (GET_CACHE(re, &s->gb) & 0x40000000)
150  level = -level;
151  block[0] = level;
152  i++;
153  SKIP_BITS(re, &s->gb, 2);
154  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
155  goto end;
156  }
 157  /* now dequantize & decode the AC coefficients */
158  for (;;) {
159  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
160  TEX_VLC_BITS, 2, 0);
161 
162  if (level != 0) {
163  i += run;
164  if (i > MAX_INDEX)
165  break;
166  j = scantable[i];
167  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
168  level = (level - 1) | 1;
169  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
170  SHOW_SBITS(re, &s->gb, 1);
171  SKIP_BITS(re, &s->gb, 1);
172  } else {
173  /* escape */
174  run = SHOW_UBITS(re, &s->gb, 6) + 1;
175  LAST_SKIP_BITS(re, &s->gb, 6);
176  UPDATE_CACHE(re, &s->gb);
177  level = SHOW_SBITS(re, &s->gb, 8);
178  SKIP_BITS(re, &s->gb, 8);
179  if (level == -128) {
180  level = SHOW_UBITS(re, &s->gb, 8) - 256;
181  SKIP_BITS(re, &s->gb, 8);
182  } else if (level == 0) {
183  level = SHOW_UBITS(re, &s->gb, 8);
184  SKIP_BITS(re, &s->gb, 8);
185  }
186  i += run;
187  if (i > MAX_INDEX)
188  break;
189  j = scantable[i];
190  if (level < 0) {
191  level = -level;
192  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
193  level = (level - 1) | 1;
194  level = -level;
195  } else {
196  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
197  level = (level - 1) | 1;
198  }
199  }
200 
201  block[j] = level;
202  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
203  break;
204  UPDATE_CACHE(re, &s->gb);
205  }
206 end:
207  LAST_SKIP_BITS(re, &s->gb, 2);
208  CLOSE_READER(re, &s->gb);
209  }
 210 
 211  check_scantable_index(s, i);
 212 
213  s->block_last_index[n] = i;
214  return 0;
215 }
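
/*
 * Illustrative sketch (simplified; the helper name is a stand-in): the MPEG-1
 * inter dequantisation applied to every non-zero coefficient above: scale by
 * (2*|level| + 1) * qscale * matrix[j] / 32, force the magnitude odd
 * ("oddification", MPEG-1's mismatch control), then restore the sign. The
 * MPEG-2 routine that follows uses the same scaling but drops the
 * oddification and relies on the block[63] mismatch control instead.
 */
static int mpeg1_dequant_inter_sketch(int level, int qscale, int matrix_j)
{
    int neg = level < 0;
    int mag = neg ? -level : level;

    mag = ((mag * 2 + 1) * qscale * matrix_j) >> 5;
    mag = (mag - 1) | 1;                    /* force the result odd */
    return neg ? -mag : mag;
}
/* Example: level = -2, qscale = 8, matrix_j = 16 -> (5*8*16)>>5 = 20 -> 19 -> -19 */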
216 
 217 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
 218  int16_t *block, int n)
219 {
220  int level, i, j, run;
221  const uint8_t *const scantable = s->intra_scantable.permutated;
222  const uint16_t *quant_matrix;
223  const int qscale = s->qscale;
224  int mismatch;
225 
226  mismatch = 1;
227 
228  {
229  OPEN_READER(re, &s->gb);
230  i = -1;
231  if (n < 4)
232  quant_matrix = s->inter_matrix;
233  else
234  quant_matrix = s->chroma_inter_matrix;
235 
236  // Special case for first coefficient, no need to add second VLC table.
237  UPDATE_CACHE(re, &s->gb);
238  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
239  level = (3 * qscale * quant_matrix[0]) >> 5;
240  if (GET_CACHE(re, &s->gb) & 0x40000000)
241  level = -level;
242  block[0] = level;
243  mismatch ^= level;
244  i++;
245  SKIP_BITS(re, &s->gb, 2);
246  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
247  goto end;
248  }
249 
 250  /* now dequantize & decode the AC coefficients */
251  for (;;) {
252  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
253  TEX_VLC_BITS, 2, 0);
254 
255  if (level != 0) {
256  i += run;
257  if (i > MAX_INDEX)
258  break;
259  j = scantable[i];
260  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
261  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
262  SHOW_SBITS(re, &s->gb, 1);
263  SKIP_BITS(re, &s->gb, 1);
264  } else {
265  /* escape */
266  run = SHOW_UBITS(re, &s->gb, 6) + 1;
267  LAST_SKIP_BITS(re, &s->gb, 6);
268  UPDATE_CACHE(re, &s->gb);
269  level = SHOW_SBITS(re, &s->gb, 12);
270  SKIP_BITS(re, &s->gb, 12);
271 
272  i += run;
273  if (i > MAX_INDEX)
274  break;
275  j = scantable[i];
276  if (level < 0) {
277  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
278  level = -level;
279  } else {
280  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
281  }
282  }
283 
284  mismatch ^= level;
285  block[j] = level;
286  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
287  break;
288  UPDATE_CACHE(re, &s->gb);
289  }
290 end:
291  LAST_SKIP_BITS(re, &s->gb, 2);
292  CLOSE_READER(re, &s->gb);
293  }
294  block[63] ^= (mismatch & 1);
 295 
 296  check_scantable_index(s, i);
 297 
298  s->block_last_index[n] = i;
299  return 0;
300 }
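
/*
 * Illustrative sketch (simplified; the helper name is a stand-in): the MPEG-2
 * mismatch control finishing step seen above. The sum of all 64 reconstructed
 * coefficients must be odd; the decoder tracks the parity by XOR-ing every
 * stored level into "mismatch" and, if the sum came out even, toggles the LSB
 * of coefficient 63.
 */
static void mpeg2_mismatch_control_sketch(int16_t block[64])
{
    int parity = 0;
    for (int i = 0; i < 64; i++)
        parity ^= block[i];
    if (!(parity & 1))          /* even sum -> make it odd */
        block[63] ^= 1;
}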
301 
 302 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
 303  int16_t *block, int n)
304 {
305  int level, dc, diff, i, j, run;
306  int component;
307  const RL_VLC_ELEM *rl_vlc;
308  const uint8_t *const scantable = s->intra_scantable.permutated;
309  const uint16_t *quant_matrix;
310  const int qscale = s->qscale;
311  int mismatch;
312 
313  /* DC coefficient */
314  if (n < 4) {
315  quant_matrix = s->intra_matrix;
316  component = 0;
317  } else {
318  quant_matrix = s->chroma_intra_matrix;
319  component = (n & 1) + 1;
320  }
321  diff = decode_dc(&s->gb, component);
322  dc = s->last_dc[component];
323  dc += diff;
324  s->last_dc[component] = dc;
325  block[0] = dc * (1 << (3 - s->intra_dc_precision));
326  ff_tlog(s->avctx, "dc=%d\n", block[0]);
327  mismatch = block[0] ^ 1;
328  i = 0;
329  if (s->intra_vlc_format)
 330  rl_vlc = ff_mpeg2_rl_vlc;
 331  else
 332  rl_vlc = ff_mpeg1_rl_vlc;
 333 
334  {
335  OPEN_READER(re, &s->gb);
 336  /* now dequantize & decode the AC coefficients */
337  for (;;) {
338  UPDATE_CACHE(re, &s->gb);
339  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
340  TEX_VLC_BITS, 2, 0);
341 
342  if (level == 127) {
343  break;
344  } else if (level != 0) {
345  i += run;
346  if (i > MAX_INDEX)
347  break;
348  j = scantable[i];
349  level = (level * qscale * quant_matrix[j]) >> 4;
350  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
351  SHOW_SBITS(re, &s->gb, 1);
352  LAST_SKIP_BITS(re, &s->gb, 1);
353  } else {
354  /* escape */
355  run = SHOW_UBITS(re, &s->gb, 6) + 1;
356  SKIP_BITS(re, &s->gb, 6);
357  level = SHOW_SBITS(re, &s->gb, 12);
358  LAST_SKIP_BITS(re, &s->gb, 12);
359  i += run;
360  if (i > MAX_INDEX)
361  break;
362  j = scantable[i];
363  if (level < 0) {
364  level = (-level * qscale * quant_matrix[j]) >> 4;
365  level = -level;
366  } else {
367  level = (level * qscale * quant_matrix[j]) >> 4;
368  }
369  }
370 
371  mismatch ^= level;
372  block[j] = level;
373  }
374  CLOSE_READER(re, &s->gb);
375  }
376  block[63] ^= mismatch & 1;
 377 
 378  check_scantable_index(s, i);
 379 
380  s->block_last_index[n] = i;
381  return 0;
382 }
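
/*
 * Illustrative sketch (simplified; the helper name is a stand-in): the intra
 * DC reconstruction done at the top of the function above. The coded DC is a
 * difference against the previous DC of the same component, and the result is
 * scaled according to intra_dc_precision (0..3 selects 8..11-bit DC).
 */
static int intra_dc_reconstruct_sketch(int *last_dc, int diff,
                                       int intra_dc_precision)
{
    int dc = *last_dc + diff;               /* DC prediction */
    *last_dc = dc;                          /* predictor for the next block */
    return dc * (1 << (3 - intra_dc_precision));
}
/* Example: last_dc = 128, diff = 4, precision = 0 -> block[0] = 132 * 8 = 1056 */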
383 
384 /******************************************/
385 /* decoding */
386 
387 static inline int get_dmv(MpegEncContext *s)
388 {
389  if (get_bits1(&s->gb))
390  return 1 - (get_bits1(&s->gb) << 1);
391  else
392  return 0;
393 }
394 
395 /* motion type (for MPEG-2) */
396 #define MT_FIELD 1
397 #define MT_FRAME 2
398 #define MT_16X8 2
399 #define MT_DMV 3
400 
401 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
402 {
403  int i, j, k, cbp, val, mb_type, motion_type;
404  const int mb_block_count = 4 + (1 << s->chroma_format);
405  int ret;
406 
407  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
408 
409  av_assert2(s->mb_skipped == 0);
410 
411  if (s->mb_skip_run-- != 0) {
412  if (s->pict_type == AV_PICTURE_TYPE_P) {
413  s->mb_skipped = 1;
414  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
 415  MB_TYPE_SKIP | MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
 416  } else {
417  int mb_type;
418 
419  if (s->mb_x)
420  mb_type = s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
421  else
422  // FIXME not sure if this is allowed in MPEG at all
423  mb_type = s->cur_pic.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
424  if (IS_INTRA(mb_type)) {
425  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
426  return AVERROR_INVALIDDATA;
427  }
428  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
429  mb_type | MB_TYPE_SKIP;
430 
431  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
432  s->mb_skipped = 1;
433  }
434 
435  return 0;
436  }
437 
438  switch (s->pict_type) {
439  default:
440  case AV_PICTURE_TYPE_I:
441  if (get_bits1(&s->gb) == 0) {
442  if (get_bits1(&s->gb) == 0) {
443  av_log(s->avctx, AV_LOG_ERROR,
444  "Invalid mb type in I-frame at %d %d\n",
445  s->mb_x, s->mb_y);
446  return AVERROR_INVALIDDATA;
447  }
448  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
449  } else {
450  mb_type = MB_TYPE_INTRA;
451  }
452  break;
453  case AV_PICTURE_TYPE_P:
454  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc, MB_PTYPE_VLC_BITS, 1);
455  if (mb_type < 0) {
456  av_log(s->avctx, AV_LOG_ERROR,
457  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
458  return AVERROR_INVALIDDATA;
459  }
460  break;
461  case AV_PICTURE_TYPE_B:
462  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc, MB_BTYPE_VLC_BITS, 1);
463  if (mb_type < 0) {
464  av_log(s->avctx, AV_LOG_ERROR,
465  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
466  return AVERROR_INVALIDDATA;
467  }
468  break;
469  }
470  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
471 // motion_type = 0; /* avoid warning */
472  if (IS_INTRA(mb_type)) {
473  s->bdsp.clear_blocks(s->block[0]);
474 
475  if (!s->chroma_y_shift)
476  s->bdsp.clear_blocks(s->block[6]);
477 
478  /* compute DCT type */
479  // FIXME: add an interlaced_dct coded var?
480  if (s->picture_structure == PICT_FRAME &&
481  !s->frame_pred_frame_dct)
482  s->interlaced_dct = get_bits1(&s->gb);
483 
484  if (IS_QUANT(mb_type))
485  s->qscale = mpeg_get_qscale(s);
486 
487  if (s->concealment_motion_vectors) {
488  /* just parse them */
489  if (s->picture_structure != PICT_FRAME)
490  skip_bits1(&s->gb); /* field select */
491 
492  s->mv[0][0][0] =
493  s->last_mv[0][0][0] =
494  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
495  s->last_mv[0][0][0]);
496  s->mv[0][0][1] =
497  s->last_mv[0][0][1] =
498  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
499  s->last_mv[0][0][1]);
500 
501  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
502  } else {
503  /* reset mv prediction */
504  memset(s->last_mv, 0, sizeof(s->last_mv));
505  }
506  s->mb_intra = 1;
507 
508  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
509  for (i = 0; i < mb_block_count; i++)
510  if ((ret = mpeg2_decode_block_intra(s, s->block[i], i)) < 0)
511  return ret;
512  } else {
513  for (i = 0; i < 6; i++) {
 514  ret = ff_mpeg1_decode_block_intra(&s->gb,
 515  s->intra_matrix,
516  s->intra_scantable.permutated,
517  s->last_dc, s->block[i],
518  i, s->qscale);
519  if (ret < 0) {
520  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
521  s->mb_x, s->mb_y);
522  return ret;
523  }
524 
525  s->block_last_index[i] = ret;
526  }
527  }
528  } else {
529  if (mb_type & MB_TYPE_ZERO_MV) {
530  av_assert2(mb_type & MB_TYPE_CBP);
531 
532  s->mv_dir = MV_DIR_FORWARD;
533  if (s->picture_structure == PICT_FRAME) {
534  if (s->picture_structure == PICT_FRAME
535  && !s->frame_pred_frame_dct)
536  s->interlaced_dct = get_bits1(&s->gb);
537  s->mv_type = MV_TYPE_16X16;
538  } else {
539  s->mv_type = MV_TYPE_FIELD;
540  mb_type |= MB_TYPE_INTERLACED;
541  s->field_select[0][0] = s->picture_structure - 1;
542  }
543 
544  if (IS_QUANT(mb_type))
545  s->qscale = mpeg_get_qscale(s);
546 
547  s->last_mv[0][0][0] = 0;
548  s->last_mv[0][0][1] = 0;
549  s->last_mv[0][1][0] = 0;
550  s->last_mv[0][1][1] = 0;
551  s->mv[0][0][0] = 0;
552  s->mv[0][0][1] = 0;
553  } else {
554  av_assert2(mb_type & MB_TYPE_BIDIR_MV);
555  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
556  /* get additional motion vector type */
557  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
558  motion_type = MT_FRAME;
559  } else {
560  motion_type = get_bits(&s->gb, 2);
561  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
562  s->interlaced_dct = get_bits1(&s->gb);
563  }
564 
565  if (IS_QUANT(mb_type))
566  s->qscale = mpeg_get_qscale(s);
567 
568  /* motion vectors */
569  s->mv_dir = MB_TYPE_MV_2_MV_DIR(mb_type);
570  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
571  switch (motion_type) {
572  case MT_FRAME: /* or MT_16X8 */
573  if (s->picture_structure == PICT_FRAME) {
574  mb_type |= MB_TYPE_16x16;
575  s->mv_type = MV_TYPE_16X16;
576  for (i = 0; i < 2; i++) {
577  if (HAS_MV(mb_type, i)) {
578  /* MT_FRAME */
579  s->mv[i][0][0] =
580  s->last_mv[i][0][0] =
581  s->last_mv[i][1][0] =
582  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
583  s->last_mv[i][0][0]);
584  s->mv[i][0][1] =
585  s->last_mv[i][0][1] =
586  s->last_mv[i][1][1] =
587  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
588  s->last_mv[i][0][1]);
589  /* full_pel: only for MPEG-1 */
590  if (s->full_pel[i]) {
591  s->mv[i][0][0] *= 2;
592  s->mv[i][0][1] *= 2;
593  }
594  }
595  }
596  } else {
597  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
598  s->mv_type = MV_TYPE_16X8;
599  for (i = 0; i < 2; i++) {
600  if (HAS_MV(mb_type, i)) {
601  /* MT_16X8 */
602  for (j = 0; j < 2; j++) {
603  s->field_select[i][j] = get_bits1(&s->gb);
604  for (k = 0; k < 2; k++) {
605  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
606  s->last_mv[i][j][k]);
607  s->last_mv[i][j][k] = val;
608  s->mv[i][j][k] = val;
609  }
610  }
611  }
612  }
613  }
614  break;
615  case MT_FIELD:
616  s->mv_type = MV_TYPE_FIELD;
617  if (s->picture_structure == PICT_FRAME) {
618  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
619  for (i = 0; i < 2; i++) {
620  if (HAS_MV(mb_type, i)) {
621  for (j = 0; j < 2; j++) {
622  s->field_select[i][j] = get_bits1(&s->gb);
623  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
624  s->last_mv[i][j][0]);
625  s->last_mv[i][j][0] = val;
626  s->mv[i][j][0] = val;
627  ff_tlog(s->avctx, "fmx=%d\n", val);
628  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
629  s->last_mv[i][j][1] >> 1);
630  s->last_mv[i][j][1] = 2 * val;
631  s->mv[i][j][1] = val;
632  ff_tlog(s->avctx, "fmy=%d\n", val);
633  }
634  }
635  }
636  } else {
637  av_assert0(!s->progressive_sequence);
638  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
639  for (i = 0; i < 2; i++) {
640  if (HAS_MV(mb_type, i)) {
641  s->field_select[i][0] = get_bits1(&s->gb);
642  for (k = 0; k < 2; k++) {
643  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
644  s->last_mv[i][0][k]);
645  s->last_mv[i][0][k] = val;
646  s->last_mv[i][1][k] = val;
647  s->mv[i][0][k] = val;
648  }
649  }
650  }
651  }
652  break;
653  case MT_DMV:
654  if (s->progressive_sequence){
655  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
656  return AVERROR_INVALIDDATA;
657  }
658  s->mv_type = MV_TYPE_DMV;
659  for (i = 0; i < 2; i++) {
660  if (HAS_MV(mb_type, i)) {
661  int dmx, dmy, mx, my, m;
662  const int my_shift = s->picture_structure == PICT_FRAME;
663 
664  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
665  s->last_mv[i][0][0]);
666  s->last_mv[i][0][0] = mx;
667  s->last_mv[i][1][0] = mx;
668  dmx = get_dmv(s);
669  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
670  s->last_mv[i][0][1] >> my_shift);
671  dmy = get_dmv(s);
672 
673 
674  s->last_mv[i][0][1] = my * (1 << my_shift);
675  s->last_mv[i][1][1] = my * (1 << my_shift);
676 
677  s->mv[i][0][0] = mx;
678  s->mv[i][0][1] = my;
679  s->mv[i][1][0] = mx; // not used
680  s->mv[i][1][1] = my; // not used
681 
682  if (s->picture_structure == PICT_FRAME) {
683  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
684 
685  // m = 1 + 2 * s->top_field_first;
686  m = s->top_field_first ? 1 : 3;
687 
688  /* top -> top pred */
689  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
690  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
691  m = 4 - m;
692  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
693  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
694  } else {
695  mb_type |= MB_TYPE_16x16;
696 
697  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
698  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
699  if (s->picture_structure == PICT_TOP_FIELD)
700  s->mv[i][2][1]--;
701  else
702  s->mv[i][2][1]++;
703  }
704  }
705  }
706  break;
707  default:
708  av_log(s->avctx, AV_LOG_ERROR,
709  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
710  return AVERROR_INVALIDDATA;
711  }
712  }
713 
714  s->mb_intra = 0;
715  if (HAS_CBP(mb_type)) {
716  s->bdsp.clear_blocks(s->block[0]);
717 
718  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc, MB_PAT_VLC_BITS, 1);
719  if (mb_block_count > 6) {
720  cbp *= 1 << mb_block_count - 6;
721  cbp |= get_bits(&s->gb, mb_block_count - 6);
722  s->bdsp.clear_blocks(s->block[6]);
723  }
724  if (cbp <= 0) {
725  av_log(s->avctx, AV_LOG_ERROR,
726  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
727  return AVERROR_INVALIDDATA;
728  }
729 
730  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
731  cbp <<= 12 - mb_block_count;
732 
733  for (i = 0; i < mb_block_count; i++) {
734  if (cbp & (1 << 11)) {
735  if ((ret = mpeg2_decode_block_non_intra(s, s->block[i], i)) < 0)
736  return ret;
737  } else {
738  s->block_last_index[i] = -1;
739  }
740  cbp += cbp;
741  }
742  } else {
743  for (i = 0; i < 6; i++) {
744  if (cbp & 32) {
745  if ((ret = mpeg1_decode_block_inter(s, s->block[i], i)) < 0)
746  return ret;
747  } else {
748  s->block_last_index[i] = -1;
749  }
750  cbp += cbp;
751  }
752  }
753  } else {
754  for (i = 0; i < 12; i++)
755  s->block_last_index[i] = -1;
756  }
757  }
758 
759  s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
760 
761  return 0;
762 }
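
/*
 * Illustrative sketch (simplified; the helper name is a stand-in): how the
 * coded_block_pattern walk in mpeg_decode_mb() above maps CBP bits to blocks.
 * Mirroring the MPEG-2 branch, the pattern is left-aligned so that bit 11
 * corresponds to block 0 and is then consumed by doubling (cbp += cbp); the
 * MPEG-1 branch tests bit 5 directly, which is equivalent for its fixed six
 * blocks. mb_block_count is 6/8/12 for 4:2:0/4:2:2/4:4:4.
 */
static void cbp_to_coded_flags_sketch(int cbp, int mb_block_count, int coded[12])
{
    cbp <<= 12 - mb_block_count;            /* bit 11 now belongs to block 0 */
    for (int i = 0; i < mb_block_count; i++) {
        coded[i] = (cbp >> 11) & 1;
        cbp += cbp;                         /* advance to the next block's bit */
    }
}
/* Example: cbp = 0x3C with mb_block_count = 6 marks blocks 0..3 coded, 4..5 not. */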
763 
 764 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
 765 {
766  Mpeg1Context *s = avctx->priv_data;
767  MpegEncContext *s2 = &s->mpeg_enc_ctx;
768  int ret;
769 
770  s2->out_format = FMT_MPEG1;
771 
772  if ( avctx->codec_tag != AV_RL32("VCR2")
773  && avctx->codec_tag != AV_RL32("BW10"))
774  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
775  ret = ff_mpv_decode_init(s2, avctx);
776  if (ret < 0)
777  return ret;
778 
 779  ff_mpeg12_init_vlcs();
 780 
781  s2->chroma_format = 1;
782  s->repeat_field = 0;
783  avctx->color_range = AVCOL_RANGE_MPEG;
784  return 0;
785 }
786 
787 #if HAVE_THREADS
788 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
789  const AVCodecContext *avctx_from)
790 {
791  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
792  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
793  int err;
794 
795  if (avctx == avctx_from || !s1->context_initialized)
796  return 0;
797 
798  err = ff_mpeg_update_thread_context(avctx, avctx_from);
799  if (err)
800  return err;
801 
802  if (!s->context_initialized)
803  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
804 
805  return 0;
806 }
807 #endif
808 
 809 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
 810 #if CONFIG_MPEG1_NVDEC_HWACCEL
 811  AV_PIX_FMT_CUDA,
 812 #endif
 813 #if CONFIG_MPEG1_VDPAU_HWACCEL
 814  AV_PIX_FMT_VDPAU,
 815 #endif
 816  AV_PIX_FMT_YUV420P,
 817  AV_PIX_FMT_NONE
 818 };
819 
 820 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
 821 #if CONFIG_MPEG2_NVDEC_HWACCEL
 822  AV_PIX_FMT_CUDA,
 823 #endif
 824 #if CONFIG_MPEG2_VDPAU_HWACCEL
 825  AV_PIX_FMT_VDPAU,
 826 #endif
 827 #if CONFIG_MPEG2_DXVA2_HWACCEL
 828  AV_PIX_FMT_DXVA2_VLD,
 829 #endif
 830 #if CONFIG_MPEG2_D3D11VA_HWACCEL
 831  AV_PIX_FMT_D3D11VA_VLD,
 832  AV_PIX_FMT_D3D11,
 833 #endif
 834 #if CONFIG_MPEG2_D3D12VA_HWACCEL
 835  AV_PIX_FMT_D3D12,
 836 #endif
 837 #if CONFIG_MPEG2_VAAPI_HWACCEL
 838  AV_PIX_FMT_VAAPI,
 839 #endif
 840 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
 841  AV_PIX_FMT_VIDEOTOOLBOX,
 842 #endif
 843  AV_PIX_FMT_YUV420P,
 844  AV_PIX_FMT_NONE
 845 };
846 
847 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
 848  AV_PIX_FMT_YUV422P,
 849  AV_PIX_FMT_NONE
 850 };
851 
852 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
 853  AV_PIX_FMT_YUV444P,
 854  AV_PIX_FMT_NONE
 855 };
856 
 857 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
 858 {
859  Mpeg1Context *s1 = avctx->priv_data;
860  MpegEncContext *s = &s1->mpeg_enc_ctx;
861  const enum AVPixelFormat *pix_fmts;
862 
863  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
864  return AV_PIX_FMT_GRAY8;
865 
 866  if (s->chroma_format < 2)
 867  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
 868  mpeg1_hwaccel_pixfmt_list_420 :
 869  mpeg2_hwaccel_pixfmt_list_420;
 870  else if (s->chroma_format == 2)
 871  pix_fmts = mpeg12_pixfmt_list_422;
 872  else
 873  pix_fmts = mpeg12_pixfmt_list_444;
 874 
875  return ff_get_format(avctx, pix_fmts);
876 }
877 
878 /* Call this function when we know all parameters.
879  * It may be called in different places for MPEG-1 and MPEG-2. */
 880 static int mpeg_decode_postinit(AVCodecContext *avctx)
 881 {
882  Mpeg1Context *s1 = avctx->priv_data;
883  MpegEncContext *s = &s1->mpeg_enc_ctx;
884  int ret;
885 
886  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
887  // MPEG-1 aspect
888  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
889  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
890  } else { // MPEG-2
891  // MPEG-2 aspect
892  if (s1->aspect_ratio_info > 1) {
893  AVRational dar =
 894  av_mul_q(av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
 895  (AVRational) { s1->pan_scan.width,
896  s1->pan_scan.height }),
897  (AVRational) { s->width, s->height });
898 
899  /* We ignore the spec here and guess a bit as reality does not
900  * match the spec, see for example res_change_ffmpeg_aspect.ts
901  * and sequence-display-aspect.mpg.
902  * issue1613, 621, 562 */
903  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
904  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
905  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
906  s->avctx->sample_aspect_ratio =
 907  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
 908  (AVRational) { s->width, s->height });
909  } else {
910  s->avctx->sample_aspect_ratio =
 911  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
 912  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
913 // issue1613 4/3 16/9 -> 16/9
914 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
915 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
916 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
917  ff_dlog(avctx, "aspect A %d/%d\n",
 918  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
 919  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
 920  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
921  s->avctx->sample_aspect_ratio.den);
922  }
923  } else {
924  s->avctx->sample_aspect_ratio =
 925  ff_mpeg2_aspect[s1->aspect_ratio_info];
 926  }
927  } // MPEG-2
928 
929  if (av_image_check_sar(s->width, s->height,
930  avctx->sample_aspect_ratio) < 0) {
931  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
932  avctx->sample_aspect_ratio.num,
933  avctx->sample_aspect_ratio.den);
934  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
935  }
936 
937  if (!s->context_initialized ||
938  avctx->coded_width != s->width ||
939  avctx->coded_height != s->height ||
940  s1->save_width != s->width ||
941  s1->save_height != s->height ||
942  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
943  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
944  0) {
945  if (s->context_initialized)
 946  ff_mpv_common_end(s);
 947 
948  ret = ff_set_dimensions(avctx, s->width, s->height);
949  if (ret < 0)
950  return ret;
951 
952  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate &&
953  (s->bit_rate != 0x3FFFF*400)) {
954  avctx->rc_max_rate = s->bit_rate;
955  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
956  (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
957  avctx->bit_rate = s->bit_rate;
958  }
959  s1->save_aspect = s->avctx->sample_aspect_ratio;
960  s1->save_width = s->width;
961  s1->save_height = s->height;
962  s1->save_progressive_seq = s->progressive_sequence;
963 
964  /* low_delay may be forced, in this case we will have B-frames
965  * that behave like P-frames. */
966  avctx->has_b_frames = !s->low_delay;
967 
968  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
 969  // MPEG-1 fps
 970  avctx->framerate = ff_mpeg12_frame_rate_tab[s1->frame_rate_index];
 971 #if FF_API_TICKS_PER_FRAME
 972 FF_DISABLE_DEPRECATION_WARNINGS
 973  avctx->ticks_per_frame = 1;
 974 FF_ENABLE_DEPRECATION_WARNINGS
 975 #endif
 976 
 977  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
978  } else { // MPEG-2
979  // MPEG-2 fps
980  av_reduce(&s->avctx->framerate.num,
 981  &s->avctx->framerate.den,
 982  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
 983  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
 984  1 << 30);
 985 #if FF_API_TICKS_PER_FRAME
 986 FF_DISABLE_DEPRECATION_WARNINGS
 987  avctx->ticks_per_frame = 2;
 988 FF_ENABLE_DEPRECATION_WARNINGS
 989 #endif
990 
991  switch (s->chroma_format) {
992  case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
993  case 2:
994  case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
995  default: av_assert0(0);
996  }
997  } // MPEG-2
998 
999  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1000 
1001  if ((ret = ff_mpv_common_init(s)) < 0)
1002  return ret;
1003  if (!s->avctx->lowres)
1005  }
1006  return 0;
1007 }
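
/*
 * Illustrative sketch (standalone and simplified; the Ratio type and helper
 * names are stand-ins for AVRational, av_div_q() and ff_mpeg2_aspect[]): the
 * MPEG-2 aspect handling above derives the sample aspect ratio from the
 * signalled display aspect ratio as SAR = DAR / (width / height), using exact
 * rational arithmetic.
 */
typedef struct RatioSketch { int num, den; } RatioSketch;

static int gcd_sketch(int a, int b) { return b ? gcd_sketch(b, a % b) : a; }

static RatioSketch sar_from_dar_sketch(RatioSketch dar, int width, int height)
{
    RatioSketch r = { dar.num * height, dar.den * width };     /* DAR * h / w */
    int g = gcd_sketch(r.num, r.den);
    if (g) { r.num /= g; r.den /= g; }
    return r;
}
/* Example: DAR 16:9 at 720x576 -> { 16*576, 9*720 } = { 9216, 6480 } = 64:45 */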
1008 
1009 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1010  int buf_size)
1011 {
1012  Mpeg1Context *s1 = avctx->priv_data;
1013  MpegEncContext *s = &s1->mpeg_enc_ctx;
1014  int ref, f_code, vbv_delay, ret;
1015 
1016  ret = init_get_bits8(&s->gb, buf, buf_size);
1017  if (ret < 0)
1018  return ret;
1019 
1020  ref = get_bits(&s->gb, 10); /* temporal ref */
1021  s->pict_type = get_bits(&s->gb, 3);
1022  if (s->pict_type == 0 || s->pict_type > 3)
1023  return AVERROR_INVALIDDATA;
1024 
1025  vbv_delay = get_bits(&s->gb, 16);
1026  s->vbv_delay = vbv_delay;
1027  if (s->pict_type == AV_PICTURE_TYPE_P ||
1028  s->pict_type == AV_PICTURE_TYPE_B) {
1029  s->full_pel[0] = get_bits1(&s->gb);
1030  f_code = get_bits(&s->gb, 3);
1031  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1032  return AVERROR_INVALIDDATA;
1033  f_code += !f_code;
1034  s->mpeg_f_code[0][0] = f_code;
1035  s->mpeg_f_code[0][1] = f_code;
1036  }
1037  if (s->pict_type == AV_PICTURE_TYPE_B) {
1038  s->full_pel[1] = get_bits1(&s->gb);
1039  f_code = get_bits(&s->gb, 3);
1040  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1041  return AVERROR_INVALIDDATA;
1042  f_code += !f_code;
1043  s->mpeg_f_code[1][0] = f_code;
1044  s->mpeg_f_code[1][1] = f_code;
1045  }
1046 
1047  if (avctx->debug & FF_DEBUG_PICT_INFO)
1048  av_log(avctx, AV_LOG_DEBUG,
1049  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1050 
1051  s->y_dc_scale = 8;
1052  s->c_dc_scale = 8;
1053  return 0;
1054 }
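
/*
 * Illustrative sketch (layout only; the struct is a stand-in, not a type used
 * by the decoder): the MPEG-1 picture header fields parsed by
 * mpeg1_decode_picture() above, in bitstream order.
 */
struct mpeg1_picture_header_sketch {
    unsigned temporal_reference;        /* 10 bits */
    unsigned picture_coding_type;       /*  3 bits: 1 = I, 2 = P, 3 = B */
    unsigned vbv_delay;                 /* 16 bits */
    /* present for P and B pictures: */
    unsigned full_pel_forward_vector;   /*  1 bit  */
    unsigned forward_f_code;            /*  3 bits, 0 is forbidden */
    /* additionally present for B pictures: */
    unsigned full_pel_backward_vector;  /*  1 bit  */
    unsigned backward_f_code;           /*  3 bits, 0 is forbidden */
};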
1055 
1056 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
1057 {
1058  MpegEncContext *s = &s1->mpeg_enc_ctx;
1059  int horiz_size_ext, vert_size_ext;
1060  int bit_rate_ext;
1061 
1062  skip_bits(&s->gb, 1); /* profile and level esc*/
1063  s->avctx->profile = get_bits(&s->gb, 3);
1064  s->avctx->level = get_bits(&s->gb, 4);
1065  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1066  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1067 
1068  if (!s->chroma_format) {
1069  s->chroma_format = 1;
1070  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1071  }
1072 
1073  horiz_size_ext = get_bits(&s->gb, 2);
1074  vert_size_ext = get_bits(&s->gb, 2);
1075  s->width |= (horiz_size_ext << 12);
1076  s->height |= (vert_size_ext << 12);
1077  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1078  s->bit_rate += (bit_rate_ext << 18) * 400LL;
1079  check_marker(s->avctx, &s->gb, "after bit rate extension");
1080  s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1081 
1082  s->low_delay = get_bits1(&s->gb);
1083  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1084  s->low_delay = 1;
1085 
1086  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1087  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
1088 
1089  ff_dlog(s->avctx, "sequence extension\n");
1090  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1091 
1092  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1093  av_log(s->avctx, AV_LOG_DEBUG,
1094  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1095  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1096  s->avctx->rc_buffer_size, s->bit_rate);
1097 }
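
/*
 * Illustrative sketch (simplified; the helper name is a stand-in): the
 * frame_rate_extension fields read above scale the nominal rate from the
 * frame_rate_code table as
 *   frame_rate = tab[frame_rate_index] * (ext_n + 1) / (ext_d + 1)
 * which is what the av_reduce() call in mpeg_decode_postinit() evaluates for
 * MPEG-2 streams.
 */
static void mpeg2_effective_framerate_sketch(int tab_num, int tab_den,
                                             int ext_n_plus1, int ext_d_plus1,
                                             int *out_num, int *out_den)
{
    *out_num = tab_num * ext_n_plus1;
    *out_den = tab_den * ext_d_plus1;   /* caller reduces the fraction */
}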
1098 
1099 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
1100 {
1101  MpegEncContext *s = &s1->mpeg_enc_ctx;
1102  int color_description, w, h;
1103 
1104  skip_bits(&s->gb, 3); /* video format */
1105  color_description = get_bits1(&s->gb);
1106  if (color_description) {
1107  s->avctx->color_primaries = get_bits(&s->gb, 8);
1108  s->avctx->color_trc = get_bits(&s->gb, 8);
1109  s->avctx->colorspace = get_bits(&s->gb, 8);
1110  }
1111  w = get_bits(&s->gb, 14);
1112  skip_bits(&s->gb, 1); // marker
1113  h = get_bits(&s->gb, 14);
1114  // remaining 3 bits are zero padding
1115 
1116  s1->pan_scan.width = 16 * w;
1117  s1->pan_scan.height = 16 * h;
1118 
1119  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1120  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1121 }
1122 
1123 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
1124 {
1125  MpegEncContext *s = &s1->mpeg_enc_ctx;
1126  int i, nofco;
1127 
1128  nofco = 1;
1129  if (s->progressive_sequence) {
1130  if (s->repeat_first_field) {
1131  nofco++;
1132  if (s->top_field_first)
1133  nofco++;
1134  }
1135  } else {
1136  if (s->picture_structure == PICT_FRAME) {
1137  nofco++;
1138  if (s->repeat_first_field)
1139  nofco++;
1140  }
1141  }
1142  for (i = 0; i < nofco; i++) {
1143  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1144  skip_bits(&s->gb, 1); // marker
1145  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1146  skip_bits(&s->gb, 1); // marker
1147  }
1148 
1149  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1150  av_log(s->avctx, AV_LOG_DEBUG,
1151  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1152  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1153  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1154  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1155 }
1156 
1157 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1158  uint16_t matrix1[64], int intra)
1159 {
1160  int i;
1161 
1162  for (i = 0; i < 64; i++) {
1163  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1164  int v = get_bits(&s->gb, 8);
1165  if (v == 0) {
1166  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1167  return AVERROR_INVALIDDATA;
1168  }
1169  if (intra && i == 0 && v != 8) {
1170  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1171  v = 8; // needed by pink.mpg / issue1046
1172  }
1173  matrix0[j] = v;
1174  if (matrix1)
1175  matrix1[j] = v;
1176  }
1177  return 0;
1178 }
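
/*
 * Illustrative sketch (simplified; the helper and array names are stand-ins
 * for ff_zigzag_direct and idsp.idct_permutation): load_matrix() above stores
 * the 64 quantiser values, which arrive in zigzag order, at the IDCT-permuted
 * position of each zigzag index.
 */
static void store_quant_matrix_sketch(uint16_t matrix[64],
                                      const uint8_t values[64],
                                      const uint8_t zigzag[64],
                                      const uint8_t perm[64])
{
    for (int i = 0; i < 64; i++)
        matrix[perm[zigzag[i]]] = values[i];    /* i-th transmitted value */
}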
1179 
1180 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
1181 {
1182  ff_dlog(s->avctx, "matrix extension\n");
1183 
1184  if (get_bits1(&s->gb))
1185  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1186  if (get_bits1(&s->gb))
1187  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1188  if (get_bits1(&s->gb))
1189  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1190  if (get_bits1(&s->gb))
1191  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1192 }
1193 
1194 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
1195 {
1196  MpegEncContext *s = &s1->mpeg_enc_ctx;
1197 
1198  s->full_pel[0] = s->full_pel[1] = 0;
1199  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1200  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1201  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1202  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1203  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1204  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1205  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1206  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1207  if (!s->pict_type && s->context_initialized) {
1208  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1209  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1210  return AVERROR_INVALIDDATA;
1211  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1212  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1213  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1214  s->pict_type = AV_PICTURE_TYPE_I;
1215  else
1216  s->pict_type = AV_PICTURE_TYPE_P;
1217  } else
1218  s->pict_type = AV_PICTURE_TYPE_B;
1219  }
1220 
1221  s->intra_dc_precision = get_bits(&s->gb, 2);
1222  s->picture_structure = get_bits(&s->gb, 2);
1223  s->top_field_first = get_bits1(&s->gb);
1224  s->frame_pred_frame_dct = get_bits1(&s->gb);
1225  s->concealment_motion_vectors = get_bits1(&s->gb);
1226  s->q_scale_type = get_bits1(&s->gb);
1227  s->intra_vlc_format = get_bits1(&s->gb);
1228  s->alternate_scan = get_bits1(&s->gb);
1229  s->repeat_first_field = get_bits1(&s->gb);
1230  s->chroma_420_type = get_bits1(&s->gb);
1231  s->progressive_frame = get_bits1(&s->gb);
1232 
1233  // We only initialize intra_scantable, as both scantables always coincide
1234  // and all code therefore only uses the intra one.
1235  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable,
1236  s->alternate_scan ? ff_alternate_vertical_scan : ff_zigzag_direct);
1237 
1238  /* composite display not parsed */
1239  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1240  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1241  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1242  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1243  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1244  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1245  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1246  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1247  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1248 
1249  return 0;
1250 }
1251 
1252 static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
1253 {
1254  MpegEncContext *s = &s1->mpeg_enc_ctx;
1255  AVCodecContext *avctx = s->avctx;
1256  int second_field = 0;
1257  int ret;
1258 
1259  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1260  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1261  return AVERROR_INVALIDDATA;
1262  }
1263 
1264  /* start frame decoding */
1265  if (s->first_field || s->picture_structure == PICT_FRAME) {
1266  AVFrameSideData *pan_scan;
1267 
1268  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1269  return ret;
1270 
1271  if (s->picture_structure != PICT_FRAME) {
1272  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
1273  (s->picture_structure == PICT_TOP_FIELD);
1274 
1275  for (int i = 0; i < 3; i++) {
1276  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1277  s->cur_pic.data[i] = FF_PTR_ADD(s->cur_pic.data[i],
1278  s->cur_pic.linesize[i]);
1279  }
1280  s->cur_pic.linesize[i] *= 2;
1281  }
1282  }
1283 
1284  ff_mpeg_er_frame_start(s);
1285 
1286  /* first check if we must repeat the frame */
1287  s->cur_pic.ptr->f->repeat_pict = 0;
1288  if (s->repeat_first_field) {
1289  if (s->progressive_sequence) {
1290  if (s->top_field_first)
1291  s->cur_pic.ptr->f->repeat_pict = 4;
1292  else
1293  s->cur_pic.ptr->f->repeat_pict = 2;
1294  } else if (s->progressive_frame) {
1295  s->cur_pic.ptr->f->repeat_pict = 1;
1296  }
1297  }
1298 
1299  ret = ff_frame_new_side_data(s->avctx, s->cur_pic.ptr->f,
1300  AV_FRAME_DATA_PANSCAN, sizeof(s1->pan_scan),
1301  &pan_scan);
1302  if (ret < 0)
1303  return ret;
1304  if (pan_scan)
1305  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1306 
1307  if (s1->a53_buf_ref) {
1308  ret = ff_frame_new_side_data_from_buf(
1309  s->avctx, s->cur_pic.ptr->f, AV_FRAME_DATA_A53_CC,
1310  &s1->a53_buf_ref);
1311  if (ret < 0)
1312  return ret;
1313  }
1314 
1315  if (s1->has_stereo3d) {
1316  AVStereo3D *stereo = av_stereo3d_create_side_data(s->cur_pic.ptr->f);
1317  if (!stereo)
1318  return AVERROR(ENOMEM);
1319 
1320  stereo->type = s1->stereo3d_type;
1321  s1->has_stereo3d = 0;
1322  }
1323 
1324  if (s1->has_afd) {
1325  AVFrameSideData *sd;
1326  ret = ff_frame_new_side_data(s->avctx, s->cur_pic.ptr->f,
1327  AV_FRAME_DATA_AFD, 1, &sd);
1328  if (ret < 0)
1329  return ret;
1330  if (sd)
1331  *sd->data = s1->afd;
1332  s1->has_afd = 0;
1333  }
1334 
1335  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1336  ff_thread_finish_setup(avctx);
1337  } else { // second field
1338  second_field = 1;
1339  if (!s->cur_pic.ptr) {
1340  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1341  return AVERROR_INVALIDDATA;
1342  }
1343 
1344  if (s->avctx->hwaccel) {
1345  if ((ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame)) < 0) {
1346  av_log(avctx, AV_LOG_ERROR,
1347  "hardware accelerator failed to decode first field\n");
1348  return ret;
1349  }
1350  }
1352  if (ret < 0)
1353  return ret;
1354 
1355  for (int i = 0; i < 3; i++) {
1356  s->cur_pic.data[i] = s->cur_pic.ptr->f->data[i];
1357  if (s->picture_structure == PICT_BOTTOM_FIELD)
1358  s->cur_pic.data[i] +=
1359  s->cur_pic.ptr->f->linesize[i];
1360  }
1361  }
1362 
1363  if (avctx->hwaccel) {
1364  if ((ret = FF_HW_CALL(avctx, start_frame, buf, buf_size)) < 0)
1365  return ret;
1366  } else if (s->codec_tag == MKTAG('V', 'C', 'R', '2')) {
1367  // Exchange UV
1368  FFSWAP(uint8_t*, s->cur_pic.data[1], s->cur_pic.data[2]);
1369  FFSWAP(ptrdiff_t, s->cur_pic.linesize[1], s->cur_pic.linesize[2]);
1370  if (!second_field) {
1371  FFSWAP(uint8_t*, s->next_pic.data[1], s->next_pic.data[2]);
1372  FFSWAP(ptrdiff_t, s->next_pic.linesize[1], s->next_pic.linesize[2]);
1373  FFSWAP(uint8_t*, s->last_pic.data[1], s->last_pic.data[2]);
1374  FFSWAP(ptrdiff_t, s->last_pic.linesize[1], s->last_pic.linesize[2]);
1375  }
1376  }
1377 
1378  return 0;
1379 }
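
/*
 * Illustrative sketch (simplified; the helper name is a stand-in): the
 * repeat_pict values set in mpeg_field_start() above. AVFrame.repeat_pict
 * counts extra fields: with repeat_first_field set, a progressive frame in an
 * interlaced sequence repeats one field, while in a progressive sequence the
 * frame is shown for two or three frame periods (2 or 4 extra fields,
 * selected by top_field_first).
 */
static int mpeg2_repeat_pict_sketch(int repeat_first_field,
                                    int progressive_sequence,
                                    int progressive_frame,
                                    int top_field_first)
{
    if (!repeat_first_field)
        return 0;
    if (progressive_sequence)
        return top_field_first ? 4 : 2;     /* frame tripling resp. doubling */
    return progressive_frame ? 1 : 0;       /* one extra field */
}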
1380 
1381 #define DECODE_SLICE_ERROR -1
1382 #define DECODE_SLICE_OK 0
1383 
1384 /**
1385  * Decode a slice.
1386  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1387  * @return DECODE_SLICE_ERROR if the slice is damaged,
1388  * DECODE_SLICE_OK if this slice is OK
1389  */
1390 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1391  const uint8_t **buf, int buf_size)
1392 {
1393  AVCodecContext *avctx = s->avctx;
1394  const int lowres = s->avctx->lowres;
1395  const int field_pic = s->picture_structure != PICT_FRAME;
1396  int ret;
1397 
1398  s->resync_mb_x =
1399  s->resync_mb_y = -1;
1400 
1401  av_assert0(mb_y < s->mb_height);
1402 
1403  ret = init_get_bits8(&s->gb, *buf, buf_size);
1404  if (ret < 0)
1405  return ret;
1406 
1407  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1408  skip_bits(&s->gb, 3);
1409 
1410  ff_mpeg1_clean_buffers(s);
1411  s->interlaced_dct = 0;
1412 
1413  s->qscale = mpeg_get_qscale(s);
1414 
1415  if (s->qscale == 0) {
1416  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1417  return AVERROR_INVALIDDATA;
1418  }
1419 
1420  /* extra slice info */
1421  if (skip_1stop_8data_bits(&s->gb) < 0)
1422  return AVERROR_INVALIDDATA;
1423 
1424  s->mb_x = 0;
1425 
1426  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1427  skip_bits1(&s->gb);
1428  } else {
1429  while (get_bits_left(&s->gb) > 0) {
1430  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1431  MBINCR_VLC_BITS, 2);
1432  if (code < 0) {
1433  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1434  return AVERROR_INVALIDDATA;
1435  }
1436  if (code >= 33) {
1437  if (code == 33)
1438  s->mb_x += 33;
1439  /* otherwise, stuffing, nothing to do */
1440  } else {
1441  s->mb_x += code;
1442  break;
1443  }
1444  }
1445  }
1446 
1447  if (s->mb_x >= (unsigned) s->mb_width) {
1448  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1449  return AVERROR_INVALIDDATA;
1450  }
1451 
1452  if (avctx->hwaccel) {
1453  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1454  int start_code = -1;
1455  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1456  if (buf_end < *buf + buf_size)
1457  buf_end -= 4;
1458  s->mb_y = mb_y;
1459  if (FF_HW_CALL(avctx, decode_slice, buf_start, buf_end - buf_start) < 0)
1460  return DECODE_SLICE_ERROR;
1461  *buf = buf_end;
1462  return DECODE_SLICE_OK;
1463  }
1464 
1465  s->resync_mb_x = s->mb_x;
1466  s->resync_mb_y = s->mb_y = mb_y;
1467  s->mb_skip_run = 0;
1468  ff_init_block_index(s);
1469 
1470  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1471  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1472  av_log(s->avctx, AV_LOG_DEBUG,
1473  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1474  s->qscale,
1475  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1476  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1477  s->pict_type == AV_PICTURE_TYPE_I ? 'I' :
1478  (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
1479  (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1480  s->progressive_sequence ? "ps" : "",
1481  s->progressive_frame ? "pf" : "",
1482  s->alternate_scan ? "alt" : "",
1483  s->top_field_first ? "top" : "",
1484  s->intra_dc_precision, s->picture_structure,
1485  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1486  s->q_scale_type, s->intra_vlc_format,
1487  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1488  }
1489  }
1490 
1491  for (;;) {
1492  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1493  return ret;
1494 
1495  // Note motion_val is normally NULL unless we want to extract the MVs.
1496  if (s->cur_pic.motion_val[0]) {
1497  const int wrap = s->b8_stride;
1498  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1499  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1500  int motion_x, motion_y, dir, i;
1501 
1502  for (i = 0; i < 2; i++) {
1503  for (dir = 0; dir < 2; dir++) {
1504  if (s->mb_intra ||
1505  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1506  motion_x = motion_y = 0;
1507  } else if (s->mv_type == MV_TYPE_16X16 ||
1508  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1509  motion_x = s->mv[dir][0][0];
1510  motion_y = s->mv[dir][0][1];
1511  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1512  motion_x = s->mv[dir][i][0];
1513  motion_y = s->mv[dir][i][1];
1514  }
1515 
1516  s->cur_pic.motion_val[dir][xy][0] = motion_x;
1517  s->cur_pic.motion_val[dir][xy][1] = motion_y;
1518  s->cur_pic.motion_val[dir][xy + 1][0] = motion_x;
1519  s->cur_pic.motion_val[dir][xy + 1][1] = motion_y;
1520  s->cur_pic.ref_index [dir][b8_xy] =
1521  s->cur_pic.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1522  av_assert2(s->field_select[dir][i] == 0 ||
1523  s->field_select[dir][i] == 1);
1524  }
1525  xy += wrap;
1526  b8_xy += 2;
1527  }
1528  }
1529 
1530  s->dest[0] += 16 >> lowres;
1531  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1532  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1533 
1534  ff_mpv_reconstruct_mb(s, s->block);
1535 
1536  if (++s->mb_x >= s->mb_width) {
1537  const int mb_size = 16 >> s->avctx->lowres;
1538  int left;
1539 
1540  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
1541  ff_mpv_report_decode_progress(s);
1542 
1543  s->mb_x = 0;
1544  s->mb_y += 1 << field_pic;
1545 
1546  if (s->mb_y >= s->mb_height) {
1547  int left = get_bits_left(&s->gb);
1548  int is_d10 = s->chroma_format == 2 &&
1549  s->pict_type == AV_PICTURE_TYPE_I &&
1550  avctx->profile == 0 && avctx->level == 5 &&
1551  s->intra_dc_precision == 2 &&
1552  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1553  s->progressive_frame == 0
1554  /* vbv_delay == 0xBBB || 0xE10 */;
1555 
1556  if (left >= 32 && !is_d10) {
1557  GetBitContext gb = s->gb;
1558  align_get_bits(&gb);
1559  if (show_bits(&gb, 24) == 0x060E2B) {
1560  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1561  is_d10 = 1;
1562  }
1563  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1564  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1565  goto eos;
1566  }
1567  }
1568 
1569  if (left < 0 ||
1570  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1571  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1572  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1573  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1574  return AVERROR_INVALIDDATA;
1575  } else
1576  goto eos;
1577  }
1578  // There are some files out there which are missing the last slice;
1579  // when that slice lies completely outside the visible area, we
1580  // detect it here instead of running into the end expecting
1581  // more data.
1582  left = get_bits_left(&s->gb);
1583  if (s->mb_y >= ((s->height + 15) >> 4) &&
1584  !s->progressive_sequence &&
1585  left <= 25 &&
1586  left >= 0 &&
1587  s->mb_skip_run == -1 &&
1588  (!left || show_bits(&s->gb, left) == 0))
1589  goto eos;
1590 
1591  ff_init_block_index(s);
1592  }
1593 
1594  /* skip mb handling */
1595  if (s->mb_skip_run == -1) {
1596  /* read increment again */
1597  s->mb_skip_run = 0;
1598  for (;;) {
1599  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1600  MBINCR_VLC_BITS, 2);
1601  if (code < 0) {
1602  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1603  return AVERROR_INVALIDDATA;
1604  }
1605  if (code >= 33) {
1606  if (code == 33) {
1607  s->mb_skip_run += 33;
1608  } else if (code == 35) {
1609  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1610  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1611  return AVERROR_INVALIDDATA;
1612  }
1613  goto eos; /* end of slice */
1614  }
1615  /* otherwise, stuffing, nothing to do */
1616  } else {
1617  s->mb_skip_run += code;
1618  break;
1619  }
1620  }
1621  if (s->mb_skip_run) {
1622  int i;
1623  if (s->pict_type == AV_PICTURE_TYPE_I) {
1624  av_log(s->avctx, AV_LOG_ERROR,
1625  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1626  return AVERROR_INVALIDDATA;
1627  }
1628 
1629  /* skip mb */
1630  s->mb_intra = 0;
1631  for (i = 0; i < 12; i++)
1632  s->block_last_index[i] = -1;
1633  if (s->picture_structure == PICT_FRAME)
1634  s->mv_type = MV_TYPE_16X16;
1635  else
1636  s->mv_type = MV_TYPE_FIELD;
1637  if (s->pict_type == AV_PICTURE_TYPE_P) {
1638  /* if P type, zero motion vector is implied */
1639  s->mv_dir = MV_DIR_FORWARD;
1640  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1641  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1642  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1643  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1644  } else {
1645  /* if B type, reuse previous vectors and directions */
1646  s->mv[0][0][0] = s->last_mv[0][0][0];
1647  s->mv[0][0][1] = s->last_mv[0][0][1];
1648  s->mv[1][0][0] = s->last_mv[1][0][0];
1649  s->mv[1][0][1] = s->last_mv[1][0][1];
1650  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1651  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1652  }
1653  }
1654  }
1655  }
1656 eos: // end of slice
1657  if (get_bits_left(&s->gb) < 0) {
1658  av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1659  return AVERROR_INVALIDDATA;
1660  }
1661  *buf += (get_bits_count(&s->gb) - 1) / 8;
1662  ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1663  return 0;
1664 }
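
/*
 * Illustrative sketch (simplified; the helper name is a stand-in): how the
 * macroblock row passed in as mb_y relates to the slice start code. The low
 * byte of the start code (0x01..0xAF) gives the row directly, and pictures
 * taller than 2800 lines carry a 3-bit slice_vertical_position_extension,
 * which is what the skip_bits(&s->gb, 3) near the top of this function and
 * the "(*buf & 0xE0) << 2" adjustment in slice_decode_thread() account for.
 */
static int slice_mb_row_sketch(int start_code_low_byte, int vertical_extension,
                               int tall_picture /* mb_height > 2800/16 */)
{
    int mb_row = start_code_low_byte - 1;   /* 0x01 -> row 0 */
    if (tall_picture)
        mb_row += vertical_extension << 7;  /* extension counts blocks of 128 rows */
    return mb_row;
}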
1665 
1666 static int slice_decode_thread(AVCodecContext *c, void *arg)
1667 {
1668  MpegEncContext *s = *(void **) arg;
1669  const uint8_t *buf = s->gb.buffer;
1670  int mb_y = s->start_mb_y;
1671  const int field_pic = s->picture_structure != PICT_FRAME;
1672 
1673  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1674 
1675  for (;;) {
1676  uint32_t start_code;
1677  int ret;
1678 
1679  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1680  emms_c();
1681  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1682  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1683  s->start_mb_y, s->end_mb_y, s->er.error_count);
1684  if (ret < 0) {
1685  if (c->err_recognition & AV_EF_EXPLODE)
1686  return ret;
1687  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
1688  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1689  s->mb_x, s->mb_y,
1690  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1691  } else {
1692  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1693  s->mb_x - 1, s->mb_y,
1694  ER_AC_END | ER_DC_END | ER_MV_END);
1695  }
1696 
1697  if (s->mb_y == s->end_mb_y)
1698  return 0;
1699 
1700  start_code = -1;
1701  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
1702  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
1703  return AVERROR_INVALIDDATA;
1704  mb_y = start_code - SLICE_MIN_START_CODE;
1705  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1706  mb_y += (*buf&0xE0)<<2;
1707  mb_y <<= field_pic;
1708  if (s->picture_structure == PICT_BOTTOM_FIELD)
1709  mb_y++;
1710  if (mb_y >= s->end_mb_y)
1711  return AVERROR_INVALIDDATA;
1712  }
1713 }
1714 
1715 /**
1716  * Handle slice ends.
1717  * @return 1 if it seems to be the last slice
1718  */
1719 static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
1720 {
1721  Mpeg1Context *s1 = avctx->priv_data;
1722  MpegEncContext *s = &s1->mpeg_enc_ctx;
1723 
1724  if (!s->context_initialized || !s->cur_pic.ptr)
1725  return 0;
1726 
1727  if (s->avctx->hwaccel) {
1728  int ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame);
1729  if (ret < 0) {
1730  av_log(avctx, AV_LOG_ERROR,
1731  "hardware accelerator failed to decode picture\n");
1732  return ret;
1733  }
1734  }
1735 
1736  /* end of slice reached */
1737  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
1738  /* end of image */
1739 
1740  ff_er_frame_end(&s->er, NULL);
1741 
1742  ff_mpv_frame_end(s);
1743 
1744  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1745  int ret = av_frame_ref(pict, s->cur_pic.ptr->f);
1746  if (ret < 0)
1747  return ret;
1748  ff_print_debug_info(s, s->cur_pic.ptr, pict);
1749  ff_mpv_export_qp_table(s, pict, s->cur_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1750  *got_output = 1;
1751  } else {
1752  /* latency of 1 frame for I- and P-frames */
1753  if (s->last_pic.ptr && !s->last_pic.ptr->dummy) {
1754  int ret = av_frame_ref(pict, s->last_pic.ptr->f);
1755  if (ret < 0)
1756  return ret;
1757  ff_print_debug_info(s, s->last_pic.ptr, pict);
1758  ff_mpv_export_qp_table(s, pict, s->last_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1759  *got_output = 1;
1760  }
1761  }
1762 
1763  return 1;
1764  } else {
1765  return 0;
1766  }
1767 }
1768 
1769 static int mpeg1_decode_sequence(AVCodecContext *avctx,
1770  const uint8_t *buf, int buf_size)
1771 {
1772  Mpeg1Context *s1 = avctx->priv_data;
1773  MpegEncContext *s = &s1->mpeg_enc_ctx;
1774  int width, height;
1775  int i, v, j;
1776 
1777  int ret = init_get_bits8(&s->gb, buf, buf_size);
1778  if (ret < 0)
1779  return ret;
1780 
1781  width = get_bits(&s->gb, 12);
1782  height = get_bits(&s->gb, 12);
1783  if (width == 0 || height == 0) {
1784  av_log(avctx, AV_LOG_WARNING,
1785  "Invalid horizontal or vertical size value.\n");
1786  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1787  return AVERROR_INVALIDDATA;
1788  }
1789  s1->aspect_ratio_info = get_bits(&s->gb, 4);
1790  if (s1->aspect_ratio_info == 0) {
1791  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
1792  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1793  return AVERROR_INVALIDDATA;
1794  }
1795  s1->frame_rate_index = get_bits(&s->gb, 4);
1796  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
1797  av_log(avctx, AV_LOG_WARNING,
1798  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
1799  s1->frame_rate_index = 1;
1800  }
1801  s->bit_rate = get_bits(&s->gb, 18) * 400LL;
1802  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
1803  return AVERROR_INVALIDDATA;
1804  }
1805 
1806  s->avctx->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
1807  skip_bits(&s->gb, 1);
1808 
1809  /* get matrix */
1810  if (get_bits1(&s->gb)) {
1811  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1812  } else {
1813  for (i = 0; i < 64; i++) {
1814  j = s->idsp.idct_permutation[i];
1815  v = ff_mpeg1_default_intra_matrix[i];
1816  s->intra_matrix[j] = v;
1817  s->chroma_intra_matrix[j] = v;
1818  }
1819  }
1820  if (get_bits1(&s->gb)) {
1821  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1822  } else {
1823  for (i = 0; i < 64; i++) {
1824  int j = s->idsp.idct_permutation[i];
1825  v = ff_mpeg1_default_non_intra_matrix[i];
1826  s->inter_matrix[j] = v;
1827  s->chroma_inter_matrix[j] = v;
1828  }
1829  }
1830 
1831  if (show_bits(&s->gb, 23) != 0) {
1832  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
1833  return AVERROR_INVALIDDATA;
1834  }
1835 
1836  s->width = width;
1837  s->height = height;
1838 
1839  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
1840  s->progressive_sequence = 1;
1841  s->progressive_frame = 1;
1842  s->picture_structure = PICT_FRAME;
1843  s->first_field = 0;
1844  s->frame_pred_frame_dct = 1;
1845  s->chroma_format = 1;
1846  s->codec_id =
1847  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1848  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1849  s->low_delay = 1;
1850 
1851  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1852  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
1853  s->avctx->rc_buffer_size, s->bit_rate, s1->aspect_ratio_info);
1854 
1855  return 0;
1856 }
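
/*
 * Illustrative sketch (layout only; the struct is a stand-in, not a type used
 * by the decoder): the sequence header fields parsed by
 * mpeg1_decode_sequence() above, in bitstream order.
 */
struct mpeg1_sequence_header_sketch {
    unsigned horizontal_size_value;           /* 12 bits */
    unsigned vertical_size_value;             /* 12 bits */
    unsigned aspect_ratio_information;        /*  4 bits, 0 is forbidden */
    unsigned frame_rate_code;                 /*  4 bits, only 1..8 defined by the spec */
    unsigned bit_rate_value;                  /* 18 bits, units of 400 bit/s */
    unsigned marker_bit;                      /*  1 bit */
    unsigned vbv_buffer_size_value;           /* 10 bits, units of 16 * 1024 bits */
    unsigned constrained_parameters_flag;     /*  1 bit */
    unsigned load_intra_quantiser_matrix;     /*  1 bit, then 64 * 8 bits if set */
    unsigned load_non_intra_quantiser_matrix; /*  1 bit, then 64 * 8 bits if set */
};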
1857 
1858 static int vcr2_init_sequence(AVCodecContext *avctx)
1859 {
1860  Mpeg1Context *s1 = avctx->priv_data;
1861  MpegEncContext *s = &s1->mpeg_enc_ctx;
1862  int i, v, ret;
1863 
1864  /* start new MPEG-1 context decoding */
1865  if (s->context_initialized)
1866  ff_mpv_common_end(s);
1867 
1868  s->width = avctx->coded_width;
1869  s->height = avctx->coded_height;
1870  avctx->has_b_frames = 0; // true?
1871  s->low_delay = 1;
1872 
1873  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1874 
1875  if ((ret = ff_mpv_common_init(s)) < 0)
1876  return ret;
1877  if (!s->avctx->lowres)
1879 
1880  for (i = 0; i < 64; i++) {
1881  int j = s->idsp.idct_permutation[i];
1882  v = ff_mpeg1_default_intra_matrix[i];
1883  s->intra_matrix[j] = v;
1884  s->chroma_intra_matrix[j] = v;
1885 
1886  v = ff_mpeg1_default_non_intra_matrix[i];
1887  s->inter_matrix[j] = v;
1888  s->chroma_inter_matrix[j] = v;
1889  }
1890 
1891  s->progressive_sequence = 1;
1892  s->progressive_frame = 1;
1893  s->picture_structure = PICT_FRAME;
1894  s->first_field = 0;
1895  s->frame_pred_frame_dct = 1;
1896  s->chroma_format = 1;
1897  if (s->codec_tag == AV_RL32("BW10")) {
1898  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1899  } else {
1900  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1901  }
1902  s1->save_width = s->width;
1903  s1->save_height = s->height;
1904  s1->save_progressive_seq = s->progressive_sequence;
1905  return 0;
1906 }
1907 
1908 static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format,
1909  const char *label)
1910 {
1911  Mpeg1Context *s1 = avctx->priv_data;
1912 
1913  av_assert2(format != CC_FORMAT_AUTO);
1914 
1915  if (!s1->cc_format) {
1916  s1->cc_format = format;
1917 
1918  av_log(avctx, AV_LOG_DEBUG, "CC: first seen substream is %s format\n", label);
1919  }
1920 }
1921 
1922 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
1923  const uint8_t *p, int buf_size)
1924 {
1925  Mpeg1Context *s1 = avctx->priv_data;
1926 
1927  if ((!s1->cc_format || s1->cc_format == CC_FORMAT_A53_PART4) &&
1928  buf_size >= 6 &&
1929  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
1930  p[4] == 3 && (p[5] & 0x40)) {
1931  /* extract A53 Part 4 CC data */
1932  int cc_count = p[5] & 0x1f;
1933  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
1934  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1935  const uint64_t new_size = (old_size + cc_count
1936  * UINT64_C(3));
1937  int ret;
1938 
1939  if (new_size > 3*A53_MAX_CC_COUNT)
1940  return AVERROR(EINVAL);
1941 
1942  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1943  if (ret >= 0)
1944  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
1945 
1946  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1947  mpeg_set_cc_format(avctx, CC_FORMAT_A53_PART4, "A/53 Part 4");
1948  }
1949  return 1;
1950  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_SCTE20) &&
1951  buf_size >= 2 &&
1952  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
1953  /* extract SCTE-20 CC data */
1954  GetBitContext gb;
1955  int cc_count = 0;
1956  int i, ret;
1957 
1958  ret = init_get_bits8(&gb, p + 2, buf_size - 2);
1959  if (ret < 0)
1960  return ret;
1961  cc_count = get_bits(&gb, 5);
1962  if (cc_count > 0) {
1963  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1964  const uint64_t new_size = (old_size + cc_count
1965  * UINT64_C(3));
1966  if (new_size > 3*A53_MAX_CC_COUNT)
1967  return AVERROR(EINVAL);
1968 
1969  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1970  if (ret >= 0) {
1971  uint8_t field, cc1, cc2;
1972  uint8_t *cap = s1->a53_buf_ref->data;
1973 
1974  memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
1975  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
1976  skip_bits(&gb, 2); // priority
1977  field = get_bits(&gb, 2);
1978  skip_bits(&gb, 5); // line_offset
1979  cc1 = get_bits(&gb, 8);
1980  cc2 = get_bits(&gb, 8);
1981  skip_bits(&gb, 1); // marker
1982 
1983  if (!field) { // forbidden
1984  cap[0] = cap[1] = cap[2] = 0x00;
1985  } else {
1986  field = (field == 2 ? 1 : 0);
1987  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
1988  cap[0] = 0x04 | field;
1989  cap[1] = ff_reverse[cc1];
1990  cap[2] = ff_reverse[cc2];
1991  }
1992  cap += 3;
1993  }
1994  }
1995 
1996  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1997  mpeg_set_cc_format(avctx, CC_FORMAT_SCTE20, "SCTE-20");
1998  }
1999  return 1;
2000  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_DVD) &&
2001  buf_size >= 11 &&
2002  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
2003  /* extract DVD CC data
2004  *
2005  * uint32_t user_data_start_code 0x000001B2 (big endian)
2006  * uint16_t user_identifier 0x4343 "CC"
2007  * uint8_t user_data_type_code 0x01
2008  * uint8_t caption_block_size 0xF8
2009  * uint8_t
2010  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
2011  * bit 6 caption_filler 0
2012  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
2013  * bit 0 caption_extra_field_added 1=one additional caption word
2014  *
2015  * struct caption_field_block {
2016  * uint8_t
2017  * bit 7:1 caption_filler 0x7F (all 1s)
2018  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
2019  * uint8_t caption_first_byte
2020  * uint8_t caption_second_byte
2021  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
2022  *
2023  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
2024  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2025  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2026  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2027  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
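/* Worked example with illustrative values (not taken from a real disc): a
 * header byte p[4] = 0x9E means caption_odd_field_first = 1, caption_filler = 0,
 * caption_block_count = 15 and caption_extra_field_added = 0, so 15 * 2 = 30
 * caption_field_blocks of 3 bytes each follow; the loop below simply counts
 * the entries whose first byte matches 0xFE/0xFF instead of trusting that field. */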
2028  int cc_count = 0;
2029  int i, ret;
2030  // There is a caption count field in the data, but it is often
2031  // incorrect. So count the number of captions present.
2032  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2033  cc_count++;
2034  // Transform the DVD format into A53 Part 4 format
2035  if (cc_count > 0) {
2036  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2037  const uint64_t new_size = (old_size + cc_count
2038  * UINT64_C(6));
2039  if (new_size > 3*A53_MAX_CC_COUNT)
2040  return AVERROR(EINVAL);
2041 
2042  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2043  if (ret >= 0) {
2044  uint8_t field1 = !!(p[4] & 0x80);
2045  uint8_t *cap = s1->a53_buf_ref->data;
2046  p += 5;
2047  for (i = 0; i < cc_count; i++) {
2048  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2049  cap[1] = p[1];
2050  cap[2] = p[2];
2051  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2052  cap[4] = p[4];
2053  cap[5] = p[5];
2054  cap += 6;
2055  p += 6;
2056  }
2057  }
2058 
2059  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2060  mpeg_set_cc_format(avctx, CC_FORMAT_DVD, "DVD");
2061  }
2062  return 1;
2063  }
2064  return 0;
2065 }
2066 
2067 static void mpeg_decode_user_data(AVCodecContext *avctx,
2068  const uint8_t *p, int buf_size)
2069 {
2070  Mpeg1Context *s = avctx->priv_data;
2071  const uint8_t *buf_end = p + buf_size;
2072  Mpeg1Context *s1 = avctx->priv_data;
2073 
2074 #if 0
2075  int i;
2076  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2077  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2078  }
2079  av_log(avctx, AV_LOG_ERROR, "\n");
2080 #endif
2081 
2082  if (buf_size > 29){
2083  int i;
2084  for(i=0; i<20; i++)
2085  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2086  s->tmpgexs= 1;
2087  }
2088  }
2089  /* we parse the DTG active format information */
2090  if (buf_end - p >= 5 &&
2091  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2092  int flags = p[4];
2093  p += 5;
2094  if (flags & 0x80) {
2095  /* skip event id */
2096  p += 2;
2097  }
2098  if (flags & 0x40) {
2099  if (buf_end - p < 1)
2100  return;
2101  s1->has_afd = 1;
2102  s1->afd = p[0] & 0x0f;
2103  }
2104  } else if (buf_end - p >= 6 &&
2105  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2106  p[4] == 0x03) { // S3D_video_format_length
2107  // the 0x7F mask ignores the reserved_bit value
2108  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2109 
2110  if (S3D_video_format_type == 0x03 ||
2111  S3D_video_format_type == 0x04 ||
2112  S3D_video_format_type == 0x08 ||
2113  S3D_video_format_type == 0x23) {
2114 
2115  s1->has_stereo3d = 1;
2116 
2117  switch (S3D_video_format_type) {
2118  case 0x03:
2119  s1->stereo3d_type = AV_STEREO3D_SIDEBYSIDE;
2120  break;
2121  case 0x04:
2122  s1->stereo3d_type = AV_STEREO3D_TOPBOTTOM;
2123  break;
2124  case 0x08:
2125  s1->stereo3d_type = AV_STEREO3D_2D;
2126  break;
2127  case 0x23:
2128  s1->stereo3d_type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2129  break;
2130  }
2131  }
2132  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2133  return;
2134  }
2135 }
2136 
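/* GOP header layout as consumed below, after the start code: a 25-bit
 * time_code (drop_frame_flag 1, hours 5, minutes 6, marker_bit 1, seconds 6,
 * pictures 6), then closed_gop (1 bit) and broken_link (1 bit). The raw 25-bit
 * value is kept in timecode_frame_start and exported later as
 * AV_FRAME_DATA_GOP_TIMECODE side data. */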
2137 static int mpeg_decode_gop(AVCodecContext *avctx,
2138  const uint8_t *buf, int buf_size)
2139 {
2140  Mpeg1Context *s1 = avctx->priv_data;
2141  MpegEncContext *s = &s1->mpeg_enc_ctx;
2142  int broken_link;
2143  int64_t tc;
2144 
2145  int ret = init_get_bits8(&s->gb, buf, buf_size);
2146  if (ret < 0)
2147  return ret;
2148 
2149  tc = s1->timecode_frame_start = get_bits(&s->gb, 25);
2150 
2151  s1->closed_gop = get_bits1(&s->gb);
2152  /* broken_link indicates that, after editing, the
2153  * reference frames of the first B-frames following the GOP I-frame
2154  * are missing (open GOP) */
2155  broken_link = get_bits1(&s->gb);
2156 
2157  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2158  char tcbuf[AV_TIMECODE_STR_SIZE];
2159  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2160  av_log(s->avctx, AV_LOG_DEBUG,
2161  "GOP (%s) closed_gop=%d broken_link=%d\n",
2162  tcbuf, s1->closed_gop, broken_link);
2163  }
2164 
2165  return 0;
2166 }
2167 
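/* decode_chunks() walks the buffer start code by start code. The values
 * dispatched below are the standard MPEG-1/2 ones: 0x100 picture,
 * 0x101-0x1AF slices, 0x1B2 user data, 0x1B3 sequence header, 0x1B5 extension,
 * 0x1B8 GOP; a value above 0x1FF means no further start code was found, so the
 * pending picture (if any) is finalized and the function returns. */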
2168 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2169  int *got_output, const uint8_t *buf, int buf_size)
2170 {
2171  Mpeg1Context *s = avctx->priv_data;
2172  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2173  const uint8_t *buf_ptr = buf;
2174  const uint8_t *buf_end = buf + buf_size;
2175  int ret, input_size;
2176  int last_code = 0, skip_frame = 0;
2177  int picture_start_code_seen = 0;
2178 
2179  for (;;) {
2180  /* find next start code */
2181  uint32_t start_code = -1;
2182  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2183  if (start_code > 0x1ff) {
2184  if (!skip_frame) {
2185  if (HAVE_THREADS &&
2186  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2187  !avctx->hwaccel) {
2188  int i;
2189  av_assert0(avctx->thread_count > 1);
2190 
2191  avctx->execute(avctx, slice_decode_thread,
2192  &s2->thread_context[0], NULL,
2193  s->slice_count, sizeof(void *));
2194  for (i = 0; i < s->slice_count; i++)
2195  s2->er.error_count += s2->thread_context[i]->er.error_count;
2196  }
2197 
2198  ret = slice_end(avctx, picture, got_output);
2199  if (ret < 0)
2200  return ret;
2201  }
2202  s2->pict_type = 0;
2203 
2204  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2205  return AVERROR_INVALIDDATA;
2206 
2207  return FFMAX(0, buf_ptr - buf);
2208  }
2209 
2210  input_size = buf_end - buf_ptr;
2211 
2212  if (avctx->debug & FF_DEBUG_STARTCODE)
2213  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2214  start_code, buf_ptr - buf, input_size);
2215 
2216  /* prepare data for next start code */
2217  switch (start_code) {
2218  case SEQ_START_CODE:
2219  if (last_code == 0) {
2220  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2221  if (buf != avctx->extradata)
2222  s->sync = 1;
2223  } else {
2224  av_log(avctx, AV_LOG_ERROR,
2225  "ignoring SEQ_START_CODE after %X\n", last_code);
2226  if (avctx->err_recognition & AV_EF_EXPLODE)
2227  return AVERROR_INVALIDDATA;
2228  }
2229  break;
2230 
2231  case PICTURE_START_CODE:
2232  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2233  /* If it's a frame picture, there can't be more than one picture header.
2234  Yet, it does happen and we need to handle it. */
2235  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2236  break;
2237  }
2238  picture_start_code_seen = 1;
2239 
2240  if (buf == avctx->extradata && avctx->codec_tag == AV_RL32("AVmp")) {
2241  av_log(avctx, AV_LOG_WARNING, "ignoring picture start code in AVmp extradata\n");
2242  break;
2243  }
2244 
2245  if (s2->width <= 0 || s2->height <= 0) {
2246  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2247  s2->width, s2->height);
2248  return AVERROR_INVALIDDATA;
2249  }
2250 
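/* Workaround for streams carrying the "\0TMPGEXS\0" marker detected in
 * mpeg_decode_user_data(): force maximum intra DC precision and an intra
 * matrix DC step of 1 for such files. */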
2251  if (s->tmpgexs){
2252  s2->intra_dc_precision= 3;
2253  s2->intra_matrix[0]= 1;
2254  }
2255  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2256  !avctx->hwaccel && s->slice_count) {
2257  int i;
2258 
2259  avctx->execute(avctx, slice_decode_thread,
2260  s2->thread_context, NULL,
2261  s->slice_count, sizeof(void *));
2262  for (i = 0; i < s->slice_count; i++)
2263  s2->er.error_count += s2->thread_context[i]->er.error_count;
2264  s->slice_count = 0;
2265  }
2266  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2267  ret = mpeg_decode_postinit(avctx);
2268  if (ret < 0) {
2269  av_log(avctx, AV_LOG_ERROR,
2270  "mpeg_decode_postinit() failure\n");
2271  return ret;
2272  }
2273 
2274  /* We have a complete image: we try to decompress it. */
2275  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2276  s2->pict_type = 0;
2277  s->first_slice = 1;
2278  last_code = PICTURE_START_CODE;
2279  } else {
2280  av_log(avctx, AV_LOG_ERROR,
2281  "ignoring pic after %X\n", last_code);
2282  if (avctx->err_recognition & AV_EF_EXPLODE)
2283  return AVERROR_INVALIDDATA;
2284  }
2285  break;
2286  case EXT_START_CODE:
2287  ret = init_get_bits8(&s2->gb, buf_ptr, input_size);
2288  if (ret < 0)
2289  return ret;
2290 
2291  switch (get_bits(&s2->gb, 4)) {
2292  case 0x1:
2293  if (last_code == 0) {
2294  mpeg_decode_sequence_extension(s);
2295  } else {
2296  av_log(avctx, AV_LOG_ERROR,
2297  "ignoring seq ext after %X\n", last_code);
2298  if (avctx->err_recognition & AV_EF_EXPLODE)
2299  return AVERROR_INVALIDDATA;
2300  }
2301  break;
2302  case 0x2:
2303  mpeg_decode_sequence_display_extension(s);
2304  break;
2305  case 0x3:
2306  mpeg_decode_quant_matrix_extension(s2);
2307  break;
2308  case 0x7:
2309  mpeg_decode_picture_display_extension(s);
2310  break;
2311  case 0x8:
2312  if (last_code == PICTURE_START_CODE) {
2313  ret = mpeg_decode_picture_coding_extension(s);
2314  if (ret < 0)
2315  return ret;
2316  } else {
2317  av_log(avctx, AV_LOG_ERROR,
2318  "ignoring pic cod ext after %X\n", last_code);
2319  if (avctx->err_recognition & AV_EF_EXPLODE)
2320  return AVERROR_INVALIDDATA;
2321  }
2322  break;
2323  }
2324  break;
2325  case USER_START_CODE:
2326  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2327  break;
2328  case GOP_START_CODE:
2329  if (last_code == 0) {
2330  s2->first_field = 0;
2331  ret = mpeg_decode_gop(avctx, buf_ptr, input_size);
2332  if (ret < 0)
2333  return ret;
2334  s->sync = 1;
2335  } else {
2336  av_log(avctx, AV_LOG_ERROR,
2337  "ignoring GOP_START_CODE after %X\n", last_code);
2338  if (avctx->err_recognition & AV_EF_EXPLODE)
2339  return AVERROR_INVALIDDATA;
2340  }
2341  break;
2342  default:
2343  if (start_code >= SLICE_MIN_START_CODE &&
2344  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2345  if (s2->progressive_sequence && !s2->progressive_frame) {
2346  s2->progressive_frame = 1;
2347  av_log(s2->avctx, AV_LOG_ERROR,
2348  "interlaced frame in progressive sequence, ignoring\n");
2349  }
2350 
2351  if (s2->picture_structure == 0 ||
2352  (s2->progressive_frame && !s2->frame_pred_frame_dct)) {
2353  av_log(s2->avctx, AV_LOG_ERROR,
2354  "picture_structure %d invalid, ignoring\n",
2355  s2->picture_structure);
2356  s2->picture_structure = PICT_FRAME;
2357  }
2358 
2359  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2360  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2361 
2362  if (s2->picture_structure == PICT_FRAME) {
2363  s2->first_field = 0;
2364  s2->v_edge_pos = 16 * s2->mb_height;
2365  } else {
2366  s2->first_field ^= 1;
2367  s2->v_edge_pos = 8 * s2->mb_height;
2368  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2369  }
2370  }
2371  if (start_code >= SLICE_MIN_START_CODE &&
2372  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2373  const int field_pic = s2->picture_structure != PICT_FRAME;
2374  int mb_y = start_code - SLICE_MIN_START_CODE;
2375  last_code = SLICE_MIN_START_CODE;
2376  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2377  mb_y += (*buf_ptr&0xE0)<<2;
2378 
2379  mb_y <<= field_pic;
2380  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2381  mb_y++;
2382 
2383  if (buf_end - buf_ptr < 2) {
2384  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2385  return AVERROR_INVALIDDATA;
2386  }
2387 
2388  if (mb_y >= s2->mb_height) {
2389  av_log(s2->avctx, AV_LOG_ERROR,
2390  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2391  return AVERROR_INVALIDDATA;
2392  }
2393 
2394  if (!s2->last_pic.ptr) {
2395  /* Skip B-frames if we do not have reference frames and
2396  * GOP is not closed. */
2397  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2398  if (!s->closed_gop) {
2399  skip_frame = 1;
2400  av_log(s2->avctx, AV_LOG_DEBUG,
2401  "Skipping B slice due to open GOP\n");
2402  break;
2403  }
2404  }
2405  }
2406  if (s2->pict_type == AV_PICTURE_TYPE_I || (avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2407  s->sync = 1;
2408  if (!s2->next_pic.ptr) {
2409  /* Skip P-frames if we do not have a reference frame or
2410  * we have an invalid header. */
2411  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2412  skip_frame = 1;
2413  av_log(s2->avctx, AV_LOG_DEBUG,
2414  "Skipping P slice due to !sync\n");
2415  break;
2416  }
2417  }
2418  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2419  s2->pict_type == AV_PICTURE_TYPE_B) ||
2420  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2421  s2->pict_type != AV_PICTURE_TYPE_I) ||
2422  avctx->skip_frame >= AVDISCARD_ALL) {
2423  skip_frame = 1;
2424  break;
2425  }
2426 
2427  if (!s2->context_initialized)
2428  break;
2429 
2430  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2431  if (mb_y < avctx->skip_top ||
2432  mb_y >= s2->mb_height - avctx->skip_bottom)
2433  break;
2434  }
2435 
2436  if (!s2->pict_type) {
2437  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2438  if (avctx->err_recognition & AV_EF_EXPLODE)
2439  return AVERROR_INVALIDDATA;
2440  break;
2441  }
2442 
2443  if (s->first_slice) {
2444  skip_frame = 0;
2445  s->first_slice = 0;
2446  if ((ret = mpeg_field_start(s, buf, buf_size)) < 0)
2447  return ret;
2448  }
2449  if (!s2->cur_pic.ptr) {
2450  av_log(avctx, AV_LOG_ERROR,
2451  "current_picture not initialized\n");
2452  return AVERROR_INVALIDDATA;
2453  }
2454 
2455  if (HAVE_THREADS &&
2456  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2457  !avctx->hwaccel) {
2458  int threshold = (s2->mb_height * s->slice_count +
2459  s2->slice_context_count / 2) /
2460  s2->slice_context_count;
2461  av_assert0(avctx->thread_count > 1);
2462  if (threshold <= mb_y) {
2463  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2464 
2465  thread_context->start_mb_y = mb_y;
2466  thread_context->end_mb_y = s2->mb_height;
2467  if (s->slice_count) {
2468  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2469  ret = ff_update_duplicate_context(thread_context, s2);
2470  if (ret < 0)
2471  return ret;
2472  }
2473  ret = init_get_bits8(&thread_context->gb, buf_ptr, input_size);
2474  if (ret < 0)
2475  return ret;
2476  s->slice_count++;
2477  }
2478  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2479  } else {
2480  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2481  emms_c();
2482 
2483  if (ret < 0) {
2484  if (avctx->err_recognition & AV_EF_EXPLODE)
2485  return ret;
2486  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2487  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2488  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2489  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2490  } else {
2491  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2492  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2493  ER_AC_END | ER_DC_END | ER_MV_END);
2494  }
2495  }
2496  }
2497  break;
2498  }
2499  }
2500 }
2501 
2502 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2503  int *got_output, AVPacket *avpkt)
2504 {
2505  const uint8_t *buf = avpkt->data;
2506  int ret;
2507  int buf_size = avpkt->size;
2508  Mpeg1Context *s = avctx->priv_data;
2509  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2510 
2511  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2512  /* special case for last picture */
2513  if (s2->low_delay == 0 && s2->next_pic.ptr) {
2514  int ret = av_frame_ref(picture, s2->next_pic.ptr->f);
2515  if (ret < 0)
2516  return ret;
2517 
2519 
2520  *got_output = 1;
2521  }
2522  return buf_size;
2523  }
2524 
2525  if (!s2->context_initialized &&
2526  (s2->codec_tag == AV_RL32("VCR2") || s2->codec_tag == AV_RL32("BW10")))
2527  vcr2_init_sequence(avctx);
2528 
2529  s->slice_count = 0;
2530 
2531  if (avctx->extradata && !s->extradata_decoded) {
2532  ret = decode_chunks(avctx, picture, got_output,
2533  avctx->extradata, avctx->extradata_size);
2534  if (*got_output) {
2535  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2536  av_frame_unref(picture);
2537  *got_output = 0;
2538  }
2539  s->extradata_decoded = 1;
2540  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2542  return ret;
2543  }
2544  }
2545 
2546  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2547  if (ret<0 || *got_output) {
2549 
2550  if (s->timecode_frame_start != -1 && *got_output) {
2551  char tcbuf[AV_TIMECODE_STR_SIZE];
2552  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2553  AV_FRAME_DATA_GOP_TIMECODE,
2554  sizeof(int64_t));
2555  if (!tcside)
2556  return AVERROR(ENOMEM);
2557  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2558 
2559  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2560  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2561 
2562  s->timecode_frame_start = -1;
2563  }
2564  }
2565 
2566  return ret;
2567 }
2568 
2569 static void flush(AVCodecContext *avctx)
2570 {
2571  Mpeg1Context *s = avctx->priv_data;
2572 
2573  s->sync = 0;
2574  s->closed_gop = 0;
2575 
2576  av_buffer_unref(&s->a53_buf_ref);
2577  ff_mpeg_flush(avctx);
2578 }
2579 
2580 static av_cold int mpeg_decode_end(AVCodecContext *avctx)
2581 {
2582  Mpeg1Context *s = avctx->priv_data;
2583 
2584  av_buffer_unref(&s->a53_buf_ref);
2585  return ff_mpv_decode_close(avctx);
2586 }
2587 
2588 const FFCodec ff_mpeg1video_decoder = {
2589  .p.name = "mpeg1video",
2590  CODEC_LONG_NAME("MPEG-1 video"),
2591  .p.type = AVMEDIA_TYPE_VIDEO,
2592  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2593  .priv_data_size = sizeof(Mpeg1Context),
2594  .init = mpeg_decode_init,
2595  .close = mpeg_decode_end,
2596  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2597  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2598  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2599  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2600  .flush = flush,
2601  .p.max_lowres = 3,
2602  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2603  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2604 #if CONFIG_MPEG1_NVDEC_HWACCEL
2605  HWACCEL_NVDEC(mpeg1),
2606 #endif
2607 #if CONFIG_MPEG1_VDPAU_HWACCEL
2608  HWACCEL_VDPAU(mpeg1),
2609 #endif
2610 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2611  HWACCEL_VIDEOTOOLBOX(mpeg1),
2612 #endif
2613  NULL
2614  },
2615 };
2616 
2617 #define M2V_OFFSET(x) offsetof(Mpeg1Context, x)
2618 #define M2V_PARAM AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2619 
2620 static const AVOption mpeg2video_options[] = {
2621  { "cc_format", "extract a specific Closed Captions format",
2622  M2V_OFFSET(cc_format), AV_OPT_TYPE_INT, { .i64 = CC_FORMAT_AUTO },
2623  CC_FORMAT_AUTO, CC_FORMAT_DVD, M2V_PARAM, .unit = "cc_format" },
2624 
2625  { "auto", "pick first seen CC substream", 0, AV_OPT_TYPE_CONST,
2626  { .i64 = CC_FORMAT_AUTO }, .flags = M2V_PARAM, .unit = "cc_format" },
2627  { "a53", "pick A/53 Part 4 CC substream", 0, AV_OPT_TYPE_CONST,
2628  { .i64 = CC_FORMAT_A53_PART4 }, .flags = M2V_PARAM, .unit = "cc_format" },
2629  { "scte20", "pick SCTE-20 CC substream", 0, AV_OPT_TYPE_CONST,
2630  { .i64 = CC_FORMAT_SCTE20 }, .flags = M2V_PARAM, .unit = "cc_format" },
2631  { "dvd", "pick DVD CC substream", 0, AV_OPT_TYPE_CONST,
2632  { .i64 = CC_FORMAT_DVD }, .flags = M2V_PARAM, .unit = "cc_format" },
2633  { NULL }
2634 };
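/* A minimal usage sketch for the private "cc_format" option above; dec_ctx and
 * opts are illustrative names. The same option can be given on the ffmpeg
 * command line before the input, e.g. "-cc_format a53":
 *
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "cc_format", "a53", 0);   // or "scte20", "dvd", "auto"
 *     int ret = avcodec_open2(dec_ctx, avcodec_find_decoder(AV_CODEC_ID_MPEG2VIDEO), &opts);
 *     av_dict_free(&opts);
 */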
2635 
2636 static const AVClass mpeg2video_class = {
2637  .class_name = "MPEG-2 video",
2638  .item_name = av_default_item_name,
2639  .option = mpeg2video_options,
2640  .version = LIBAVUTIL_VERSION_INT,
2641  .category = AV_CLASS_CATEGORY_DECODER,
2642 };
2643 
2644 const FFCodec ff_mpeg2video_decoder = {
2645  .p.name = "mpeg2video",
2646  CODEC_LONG_NAME("MPEG-2 video"),
2647  .p.type = AVMEDIA_TYPE_VIDEO,
2648  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2649  .p.priv_class = &mpeg2video_class,
2650  .priv_data_size = sizeof(Mpeg1Context),
2651  .init = mpeg_decode_init,
2652  .close = mpeg_decode_end,
2653  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2654  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2655  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2656  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2657  .flush = flush,
2658  .p.max_lowres = 3,
2659  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2660  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2661 #if CONFIG_MPEG2_DXVA2_HWACCEL
2662  HWACCEL_DXVA2(mpeg2),
2663 #endif
2664 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2665  HWACCEL_D3D11VA(mpeg2),
2666 #endif
2667 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2668  HWACCEL_D3D11VA2(mpeg2),
2669 #endif
2670 #if CONFIG_MPEG2_D3D12VA_HWACCEL
2671  HWACCEL_D3D12VA(mpeg2),
2672 #endif
2673 #if CONFIG_MPEG2_NVDEC_HWACCEL
2674  HWACCEL_NVDEC(mpeg2),
2675 #endif
2676 #if CONFIG_MPEG2_VAAPI_HWACCEL
2677  HWACCEL_VAAPI(mpeg2),
2678 #endif
2679 #if CONFIG_MPEG2_VDPAU_HWACCEL
2680  HWACCEL_VDPAU(mpeg2),
2681 #endif
2682 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2683  HWACCEL_VIDEOTOOLBOX(mpeg2),
2684 #endif
2685  NULL
2686  },
2687 };
2688 
2689 //legacy decoder
2690 const FFCodec ff_mpegvideo_decoder = {
2691  .p.name = "mpegvideo",
2692  CODEC_LONG_NAME("MPEG-1 video"),
2693  .p.type = AVMEDIA_TYPE_VIDEO,
2694  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2695  .priv_data_size = sizeof(Mpeg1Context),
2696  .init = mpeg_decode_init,
2697  .close = mpeg_decode_end,
2698  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2699  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2700  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2701  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2702  .flush = flush,
2703  .p.max_lowres = 3,
2704 };
2705 
2706 typedef struct IPUContext {
2707  MpegEncContext m;
2708 
2709  int flags;
2710  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2711 } IPUContext;
2712 
2713 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2714  int *got_frame, AVPacket *avpkt)
2715 {
2716  IPUContext *s = avctx->priv_data;
2717  MpegEncContext *m = &s->m;
2718  GetBitContext *gb = &m->gb;
2719  int ret;
2720 
2721  // Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC)
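// Per 16x16 macroblock that is roughly 2 bits of per-MB flags, at least a
// 3-bit DC (size VLC plus differential) for each of the 4 luma blocks, at
// least a 2-bit DC size VLC for the 2 chroma blocks, and a 2-bit end-of-block
// code for all 6 blocks: 2 + 3*4 + 2*2 + 2*6 = 30 bits.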
2722  if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2LL + 3*4 + 2*2 + 2*6))
2723  return AVERROR_INVALIDDATA;
2724 
2725  ret = ff_get_buffer(avctx, frame, 0);
2726  if (ret < 0)
2727  return ret;
2728 
2729  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2730  if (ret < 0)
2731  return ret;
2732 
2733  s->flags = get_bits(gb, 8);
2734  m->intra_dc_precision = s->flags & 3;
2735  m->q_scale_type = !!(s->flags & 0x40);
2736  m->intra_vlc_format = !!(s->flags & 0x20);
2737  m->alternate_scan = !!(s->flags & 0x10);
2738 
2740  s->flags & 0x10 ? ff_alternate_vertical_scan : ff_zigzag_direct);
2741 
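// The DC predictors are reset to half of the representable DC range:
// 128, 256, 512 or 1024 for intra_dc_precision 0..3.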
2742  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
2743  m->qscale = 1;
2744 
2745  for (int y = 0; y < avctx->height; y += 16) {
2746  int intraquant;
2747 
2748  for (int x = 0; x < avctx->width; x += 16) {
2749  if (x || y) {
2750  if (!get_bits1(gb))
2751  return AVERROR_INVALIDDATA;
2752  }
2753  if (get_bits1(gb)) {
2754  intraquant = 0;
2755  } else {
2756  if (!get_bits1(gb))
2757  return AVERROR_INVALIDDATA;
2758  intraquant = 1;
2759  }
2760 
2761  if (s->flags & 4)
2762  skip_bits1(gb);
2763 
2764  if (intraquant)
2765  m->qscale = mpeg_get_qscale(m);
2766 
2767  memset(s->block, 0, sizeof(s->block));
2768 
2769  for (int n = 0; n < 6; n++) {
2770  if (s->flags & 0x80) {
2771  ret = ff_mpeg1_decode_block_intra(&m->gb,
2772  m->intra_matrix,
2773  m->intra_scantable.permutated,
2774  m->last_dc, s->block[n],
2775  n, m->qscale);
2776  } else {
2777  ret = mpeg2_decode_block_intra(m, s->block[n], n);
2778  }
2779 
2780  if (ret < 0)
2781  return ret;
2782  }
2783 
2784  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
2785  frame->linesize[0], s->block[0]);
2786  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
2787  frame->linesize[0], s->block[1]);
2788  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
2789  frame->linesize[0], s->block[2]);
2790  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
2791  frame->linesize[0], s->block[3]);
2792  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
2793  frame->linesize[1], s->block[4]);
2794  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
2795  frame->linesize[2], s->block[5]);
2796  }
2797  }
2798 
2799  align_get_bits(gb);
2800  if (get_bits_left(gb) != 32)
2801  return AVERROR_INVALIDDATA;
2802 
2803  *got_frame = 1;
2804 
2805  return avpkt->size;
2806 }
2807 
2808 static av_cold int ipu_decode_init(AVCodecContext *avctx)
2809 {
2810  IPUContext *s = avctx->priv_data;
2811  MpegEncContext *m = &s->m;
2812 
2813  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2814  m->avctx = avctx;
2815 
2816  ff_idctdsp_init(&m->idsp, avctx);
2817  ff_mpeg12_init_vlcs();
2818 
2819  for (int i = 0; i < 64; i++) {
2820  int j = m->idsp.idct_permutation[i];
2821  int v = ff_mpeg1_default_intra_matrix[i];
2822  m->intra_matrix[j] = v;
2823  m->chroma_intra_matrix[j] = v;
2824  }
2825 
2826  return 0;
2827 }
2828 
2829 const FFCodec ff_ipu_decoder = {
2830  .p.name = "ipu",
2831  CODEC_LONG_NAME("IPU Video"),
2832  .p.type = AVMEDIA_TYPE_VIDEO,
2833  .p.id = AV_CODEC_ID_IPU,
2834  .priv_data_size = sizeof(IPUContext),
2835  .init = ipu_decode_init,
2836  FF_CODEC_DECODE_CB(ipu_decode_frame),
2837  .p.capabilities = AV_CODEC_CAP_DR1,
2838 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:1858
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:687
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1437
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:265
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:81
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
MpegEncContext::progressive_sequence
int progressive_sequence
Definition: mpegvideo.h:442
M2V_OFFSET
#define M2V_OFFSET(x)
Definition: mpeg12dec.c:2617
ff_mb_pat_vlc
VLCElem ff_mb_pat_vlc[512]
Definition: mpeg12.c:148
level
uint8_t level
Definition: svq3.c:205
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:78
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:380
AVPanScan::position
int16_t position[3][2]
position of the top left corner in 1/16 pel for up to 3 fields/frames
Definition: defs.h:263
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:495
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:1922
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:267
AV_CLASS_CATEGORY_DECODER
@ AV_CLASS_CATEGORY_DECODER
Definition: log.h:35
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
mem_internal.h
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1277
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2502
MpegEncContext::gb
GetBitContext gb
Definition: mpegvideo.h:435
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
MpegEncContext::top_field_first
int top_field_first
Definition: mpegvideo.h:450
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:799
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:124
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:396
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:39
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:267
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
AVPanScan
Pan Scan area.
Definition: defs.h:242
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1430
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:38
int64_t
long long int64_t
Definition: coverity.c:34
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:43
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:2808
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:459
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:221
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:463
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2690
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:539
mpeg_decode_mb
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:401
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:89
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:302
AVOption
AVOption.
Definition: opt.h:429
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:180
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:42
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:127
ff_mpv_framesize_disable
static void ff_mpv_framesize_disable(ScratchpadContext *sc)
Disable allocating the ScratchpadContext's buffers in future calls to ff_mpv_framesize_alloc().
Definition: mpegpicture.h:143
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:32
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:826
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:820
reverse.h
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:91
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:225
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:91
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:34
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:880
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:96
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:32
thread.h
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1407
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:269
MpegEncContext::out_format
enum OutputFormat out_format
output format
Definition: mpegvideo.h:100
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:653
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:263
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
ff_mpeg2_rl_vlc
RL_VLC_ELEM ff_mpeg2_rl_vlc[674]
Definition: mpeg12.c:151
Mpeg1Context::save_aspect
AVRational save_aspect
Definition: mpeg12dec.c:84
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:87
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:566
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:399
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:910
MpegEncContext::mb_height
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:124
ff_mbincr_vlc
VLCElem ff_mbincr_vlc[538]
Definition: mpeg12.c:145
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:206
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
Definition: mpeg12dec.c:1719
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2168
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1830
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1180
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1593
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
MpegEncContext::picture_structure
int picture_structure
Definition: mpegvideo.h:446
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
GetBitContext
Definition: get_bits.h:108
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
AVPanScan::width
int width
width and height in 1/16 pel
Definition: defs.h:255
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1666
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:508
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:62
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:48
val
static double val(void *priv, double ch)
Definition: aeval.c:77
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:90
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:852
MpegEncContext::width
int width
Definition: mpegvideo.h:96
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:639
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1769
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:88
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:449
ff_frame_new_side_data_from_buf
int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef **buf)
Similar to ff_frame_new_side_data, but using an existing buffer ref.
Definition: decode.c:2137
IPUContext
Definition: mpeg12dec.c:2706
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:809
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:774
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2644
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:87
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2713
HAS_MV
#define HAS_MV(a, dir)
Definition: mpegutils.h:92
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:31
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:820
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1009
flush
static void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2569
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:85
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:188
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:530
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:729
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:63
Mpeg1Context::stereo3d_type
enum AVStereo3DType stereo3d_type
Definition: mpeg12dec.c:76
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:896
Mpeg1Context::repeat_field
int repeat_field
Definition: mpeg12dec.c:74
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:311
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_mv_vlc
VLCElem ff_mv_vlc[266]
Definition: mpeg12.c:140
MPVWorkPicture::ptr
MPVPicture * ptr
RefStruct reference.
Definition: mpegpicture.h:99
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:359
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.h:28
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:260
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:49
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:83
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1099
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:75
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:320
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
ctx
AVFormatContext * ctx
Definition: movenc.c:49
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:31
decode.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:847
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:241
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1302
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:177
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:296
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
arg
const char * arg
Definition: jacosubdec.c:67
rl_vlc
static const VLCElem * rl_vlc[2]
Definition: mobiclip.c:278
MpegEncContext::mb_stride
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
Definition: mpegvideo.h:125
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
MpegEncContext::low_delay
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:390
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:221
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Mpeg1Context::save_width
int save_width
Definition: mpeg12dec.c:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:410
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:204
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:701
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:30
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:287
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
hwaccel_internal.h
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:88
MpegEncContext::next_pic
MPVWorkPicture next_pic
copy of the next picture structure.
Definition: mpegvideo.h:165
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:738
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:740
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:501
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1123
M2V_PARAM
#define M2V_PARAM
Definition: mpeg12dec.c:2618
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
profiles.h
CC_FORMAT_A53_PART4
@ CC_FORMAT_A53_PART4
Definition: mpeg12dec.c:67
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:247
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:49
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
MB_TYPE_BIDIR_MV
#define MB_TYPE_BIDIR_MV
Definition: mpegutils.h:52
lowres
static int lowres
Definition: ffplay.c:330
ff_mpeg1_rl_vlc
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
Definition: mpeg12.c:150
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:305
CC_FORMAT_AUTO
@ CC_FORMAT_AUTO
Definition: mpeg12dec.c:66
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
mpeg12codecs.h
MpegEncContext::slice_context_count
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:153
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1794
Mpeg1Context::save_height
int save_height
Definition: mpeg12dec.c:85
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:222
ff_mpv_alloc_dummy_frames
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
Definition: mpegvideo_dec.c:301
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
startcode.h
CC_FORMAT_DVD
@ CC_FORMAT_DVD
Definition: mpeg12dec.c:69
IS_INTRA
#define IS_INTRA(x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:220
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:81
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:65
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:515
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1697
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
mpeg2video_options
static const AVOption mpeg2video_options[]
Definition: mpeg12dec.c:2620
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:540
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:199
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:310
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
CC_FORMAT_SCTE20
@ CC_FORMAT_SCTE20
Definition: mpeg12dec.c:68
height
#define height
Definition: dsp.h:85
RL_VLC_ELEM
Definition: vlc.h:56
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:388
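To make the reference semantics concrete, a minimal sketch (not taken from mpeg12dec.c; the helper name is invented) pairing av_frame_ref() with av_frame_unref():

    #include <libavutil/frame.h>

    /* Re-point dst at the data described by src; dst's previous contents are dropped first. */
    static int reref_frame(AVFrame *dst, const AVFrame *src)
    {
        av_frame_unref(dst);            /* release whatever dst referenced before */
        return av_frame_ref(dst, src);  /* 0 on success, a negative AVERROR on failure */
    }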
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:397
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:109
shift
static int shift(int a, int b)
Definition: bonk.c:261
IPUContext::flags
int flags
Definition: mpeg12dec.c:2709
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:299
mpeg_field_start
static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1252
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:128
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:127
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2588
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2099
AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:55
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:62
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1605
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:441
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:40
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:1858
AVCodecHWConfigInternal
Definition: hwconfig.h:25
MpegEncContext::mbskip_table
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for B-frame encoding & decoding
Definition: mpegvideo.h:191
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
MpegEncContext::context_initialized
int context_initialized
Definition: mpegvideo.h:119
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:344
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:46
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:449
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:77
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:764
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:196
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:268
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:413
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
mpeg2video_class
static const AVClass mpeg2video_class
Definition: mpeg12dec.c:2636
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
AVBufferRef::size
size_t size
Size of data in bytes.
Definition: buffer.h:94
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1604
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:116
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
emms.h
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: mpegvideo.c:293
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:403
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:465
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:380
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1806
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:63
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:529
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
internal.h
mpeg_set_cc_format
static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format, const char *label)
Definition: mpeg12dec.c:1908
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:86
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:287
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:188
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1414
MpegEncContext::thread_context
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:152
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
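As a hedged illustration of av_d2q() (the denominator bound of 100000 is an arbitrary choice, not something this decoder uses):

    #include <libavutil/rational.h>

    static AVRational fps_to_rational(double fps)
    {
        /* Approximate fps as a fraction whose denominator stays below the given bound,
         * e.g. 23.976 becomes roughly 24000/1001. */
        return av_d2q(fps, 100000);
    }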
MB_TYPE_MV_2_MV_DIR
#define MB_TYPE_MV_2_MV_DIR(a)
Definition: mpegutils.h:94
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:133
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:610
IPUContext::m
MpegEncContext m
Definition: mpeg12dec.c:2707
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:159
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:454
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:708
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:123
MpegEncContext::er
ERContext er
Definition: mpegvideo.h:535
AVCodecContext::height
int height
Definition: avcodec.h:624
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:663
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:700
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
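A small, hedged example of av_cmp_q(), which returns -1, 0 or 1 (and INT_MIN when either operand is 0/0):

    #include <libavutil/rational.h>

    static int is_wider_than_4_3(AVRational dar)
    {
        const AVRational four_thirds = { 4, 3 };
        return av_cmp_q(dar, four_thirds) > 0;  /* 1 when dar is strictly wider than 4:3 */
    }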
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:606
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
mpeg_decode_gop
static int mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2137
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:80
frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
ff_mpv_decode_init
int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:46
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:147
ff_thread_finish_setup
void ff_thread_finish_setup(AVCodecContext *avctx)
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
left
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:857
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:371
AV_RL32
Definition: bytestream.h:92
mpeg12data.h
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:77
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:700
AVCodecContext
main external API structure.
Definition: avcodec.h:451
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1612
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:168
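A hedged sketch of formatting a 25-bit GOP timecode; AV_TIMECODE_STR_SIZE comes from libavutil/timecode.h, and logging the result is just one possible use:

    #include <stdint.h>
    #include <libavutil/log.h>
    #include <libavutil/timecode.h>

    static void log_gop_timecode(void *log_ctx, uint32_t tc25bit)
    {
        char tcbuf[AV_TIMECODE_STR_SIZE];
        av_timecode_make_mpeg_tc_string(tcbuf, tc25bit);  /* fills tcbuf with a readable HH:MM:SS:FF string */
        av_log(log_ctx, AV_LOG_DEBUG, "GOP timecode %s\n", tcbuf);
    }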
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:448
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1623
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:259
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:739
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:171
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1650
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:707
Mpeg1Context::cc_format
enum Mpeg2ClosedCaptionsFormat cc_format
Definition: mpeg12dec.c:79
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:131
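To pin down the semantics, an equivalent self-contained version of this kind of sign-extension helper (illustrative only, not the mathops.h code verbatim):

    #include <limits.h>

    static int sign_extend_demo(int val, unsigned bits)
    {
        const unsigned shift = sizeof(int) * CHAR_BIT - bits;
        union { unsigned u; int s; } v = { (unsigned)val << shift };
        return v.s >> shift;  /* arithmetic right shift replicates the new sign bit */
    }
    /* sign_extend_demo(0x1F, 5) == -1, sign_extend_demo(0x0F, 5) == 15 */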
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:395
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:82
AVCodecContext::ticks_per_frame
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:582
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
MpegEncContext::resync_mb_x
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:350
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1808
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
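A hedged sketch of combining a nominal frame rate with an MPEG-2 style extension factor through av_mul_q(); the concrete values are invented for illustration:

    #include <libavutil/rational.h>

    static AVRational scaled_frame_rate(void)
    {
        AVRational nominal = { 30000, 1001 };  /* e.g. 29.97 fps from a frame-rate table */
        AVRational ext     = { 2, 1 };         /* hypothetical (n+1)/(d+1) modifier */
        return av_mul_q(nominal, ext);         /* 60000/1001, returned in reduced form */
    }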
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
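A minimal, hedged usage sketch of av_buffer_realloc(); passing a NULL reference allocates a new buffer, and on failure the reference is left untouched:

    #include <stdint.h>
    #include <string.h>
    #include <libavutil/buffer.h>

    static int store_payload(AVBufferRef **buf, const uint8_t *payload, size_t size)
    {
        int ret = av_buffer_realloc(buf, size);  /* allocates when *buf is NULL, resizes otherwise */
        if (ret < 0)
            return ret;
        memcpy((*buf)->data, payload, size);
        return 0;
    }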
ff_mb_ptype_vlc
VLCElem ff_mb_ptype_vlc[64]
Definition: mpeg12.c:146
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1406
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:639
get_dmv
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:387
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2580
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:86
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:2829
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
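A short, hedged example of attaching stereo 3D side data to a decoded frame, using the AV_STEREO3D_TOPBOTTOM type listed earlier in this index:

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/stereo3d.h>

    static int tag_top_bottom(AVFrame *frame)
    {
        AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
        if (!stereo)
            return AVERROR(ENOMEM);
        stereo->type = AV_STEREO3D_TOPBOTTOM;  /* the two views are stacked vertically */
        return 0;
    }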
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:35
MpegEncContext::first_field
int first_field
is 1 for the first field of a field picture, 0 otherwise
Definition: mpegvideo.h:468
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:452
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:476
Mpeg1Context::mpeg_enc_ctx
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:73
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:141
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:261
AVPacket
This structure stores compressed data.
Definition: packet.h:516
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
ScanTable::permutated
uint8_t permutated[64]
Definition: mpegvideo.h:58
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
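A self-contained, hedged sketch of the basic AVDictionary round trip; the "timecode" key and value here are only example data:

    #include <libavutil/dict.h>

    static int has_timecode(void)
    {
        AVDictionary *meta = NULL;
        int found;

        av_dict_set(&meta, "timecode", "00:00:01:00", 0);  /* key and value are both duplicated */
        found = av_dict_get(meta, "timecode", NULL, 0) != NULL;
        av_dict_free(&meta);
        return found;
    }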
mpeg_get_qscale
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideodec.h:72
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1056
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:624
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
AVStereo3DType
AVStereo3DType
List of possible 3D Types.
Definition: stereo3d.h:48
block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:86
mpeg_decode_motion
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:97
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
IPUContext::block
int16_t block[6][64]
Definition: mpeg12dec.c:2710
AVPanScan::height
int height
Definition: defs.h:256
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
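Tying this to the codec_tag description earlier in this list, a small illustration of the LSB-first fourcc packing (the tag itself is arbitrary):

    #include <libavutil/macros.h>

    /* 'A' lands in the lowest byte and 'D' in the highest,
     * matching "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'. */
    static const unsigned demo_tag = MKTAG('A', 'B', 'C', 'D');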
ff_mb_btype_vlc
VLCElem ff_mb_btype_vlc[64]
Definition: mpeg12.c:147
MpegEncContext::resync_mb_y
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:351
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2067
h
h
Definition: vp9dsp_template.c:2070
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:151
Mpeg2ClosedCaptionsFormat
Mpeg2ClosedCaptionsFormat
Definition: mpeg12dec.c:65
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:33
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface, with additional information as needed.
Definition: stereo3d.h:203
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
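A hedged sketch of the common validate-or-reset pattern around av_image_check_sar(); falling back to "unknown" is just one possible recovery policy:

    #include <libavutil/imgutils.h>
    #include <libavutil/log.h>
    #include <libavutil/rational.h>

    static void sanitize_sar(void *log_ctx, int w, int h, AVRational *sar)
    {
        if (av_image_check_sar(w, h, *sar) < 0) {  /* negative return means the ratio is not sane */
            av_log(log_ctx, AV_LOG_WARNING, "ignoring invalid sample aspect ratio\n");
            *sar = (AVRational){ 0, 1 };           /* 0/1 signals "unknown" */
        }
    }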
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:93
width
#define width
Definition: dsp.h:85
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:150
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:217
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:455
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1382
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1381
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
load_matrix
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1157
MpegEncContext::codec_id
enum AVCodecID codec_id
Definition: mpegvideo.h:108
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:648
MB_TYPE_FORWARD_MV
#define MB_TYPE_FORWARD_MV
Definition: mpegutils.h:50
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:30
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:80
Mpeg1Context
Definition: mpeg12dec.c:72
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:300
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1194
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:92
ff_mpv_decode_close
int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:163
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:217
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:65
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
mpeg_decode_slice
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1390
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:458
MpegEncContext::codec_tag
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
Definition: mpegvideo.h:115