mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
30 #define UNCHECKED_BITSTREAM_READER 1
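/* Editorial note: UNCHECKED_BITSTREAM_READER selects the faster bitstream
 * reader whose get_bits() calls skip per-read end-of-buffer tests; overreads
 * are detected afterwards via get_bits_left() (see the end-of-slice handling
 * in mpeg_decode_slice() below). */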
31 #include <inttypes.h>
32 #include <stdatomic.h>
33 
34 #include "libavutil/attributes.h"
35 #include "libavutil/emms.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/mem_internal.h"
39 #include "libavutil/reverse.h"
40 #include "libavutil/stereo3d.h"
41 #include "libavutil/timecode.h"
42 
43 #include "avcodec.h"
44 #include "codec_internal.h"
45 #include "decode.h"
46 #include "error_resilience.h"
47 #include "get_bits.h"
48 #include "hwaccel_internal.h"
49 #include "hwconfig.h"
50 #include "idctdsp.h"
51 #include "mpeg_er.h"
52 #include "mpeg12.h"
53 #include "mpeg12codecs.h"
54 #include "mpeg12data.h"
55 #include "mpeg12dec.h"
56 #include "mpegutils.h"
57 #include "mpegvideo.h"
58 #include "mpegvideodata.h"
59 #include "mpegvideodec.h"
60 #include "profiles.h"
61 #include "startcode.h"
62 
63 #define A53_MAX_CC_COUNT 2000
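 /* Editorial note: this caps the number of buffered closed-caption triplets
  * (3 bytes each) that may accumulate in a53_buf_ref between output frames. */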
64 
65 
66 enum Mpeg2ClosedCaptionsFormat {
67  CC_FORMAT_AUTO,
68  CC_FORMAT_A53_PART4,
69  CC_FORMAT_SCTE20,
70  CC_FORMAT_DVD,
71 };
72 
73 typedef struct Mpeg12SliceContext {
74  MPVContext c;
75  GetBitContext gb;
76 
77  DECLARE_ALIGNED_32(int16_t, block)[12][64];
78 } Mpeg12SliceContext;
79 
80 typedef struct Mpeg1Context {
82  AVPanScan pan_scan; /* some temporary storage for the panscan */
87  uint8_t afd;
88  int has_afd;
92  AVRational frame_rate_ext; /* MPEG-2 specific framerate modifier */
93  unsigned frame_rate_index;
94  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
96  int tmpgexs;
99  int vbv_delay;
101  int64_t timecode_frame_start; /**< GOP timecode frame start number, in non-drop-frame format */
102 } Mpeg1Context;
103 
104 /* as H.263, but only 17 codes */
105 static int mpeg_decode_motion(Mpeg12SliceContext *const s, int fcode, int pred)
106 {
107  int code, sign, val, shift;
108 
109  code = get_vlc2(&s->gb, ff_mv_vlc, MV_VLC_BITS, 2);
110  if (code == 0)
111  return pred;
112  if (code < 0)
113  return 0xffff;
114 
115  sign = get_bits1(&s->gb);
116  shift = fcode - 1;
117  val = code;
118  if (shift) {
119  val = (val - 1) << shift;
120  val |= get_bits(&s->gb, shift);
121  val++;
122  }
123  if (sign)
124  val = -val;
125  val += pred;
126 
127  /* modulo decoding */
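 /* sign_extend() wraps the reconstructed value into the legal
  * [-16 << shift, (16 << shift) - 1] range, i.e. the differential is
  * interpreted modulo 32 << shift for this f_code. */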
128  return sign_extend(val, 5 + shift);
129 }
130 
131 #define MAX_INDEX (64 - 1)
132 #define check_scantable_index(ctx, x) \
133  do { \
134  if ((x) > MAX_INDEX) { \
135  av_log(ctx->c.avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
136  ctx->c.mb_x, ctx->c.mb_y); \
137  return AVERROR_INVALIDDATA; \
138  } \
139  } while (0)
140 
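/* MPEG-1 non-intra dequantization: |level| is reconstructed as
 * ((2 * |QF| + 1) * qscale * quant_matrix[j]) >> 5 and then forced odd
 * ("oddification"), which is MPEG-1's mismatch control. */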
141 static inline int mpeg1_decode_block_inter(Mpeg12SliceContext *const s,
142  int16_t *block, int n)
143 {
144  int level, i, j, run;
145  const uint8_t *const scantable = s->c.intra_scantable.permutated;
146  const uint16_t *quant_matrix = s->c.inter_matrix;
147  const int qscale = s->c.qscale;
148 
149  {
150  OPEN_READER(re, &s->gb);
151  i = -1;
152  // special case for first coefficient, no need to add second VLC table
153  UPDATE_CACHE(re, &s->gb);
154  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
155  level = (3 * qscale * quant_matrix[0]) >> 5;
156  level = (level - 1) | 1;
157  if (GET_CACHE(re, &s->gb) & 0x40000000)
158  level = -level;
159  block[0] = level;
160  i++;
161  SKIP_BITS(re, &s->gb, 2);
162  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
163  goto end;
164  }
165  /* now dequantize & decode AC coefficients */
166  for (;;) {
167  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
168  TEX_VLC_BITS, 2, 0);
169 
170  if (level != 0) {
171  i += run;
172  if (i > MAX_INDEX)
173  break;
174  j = scantable[i];
175  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
176  level = (level - 1) | 1;
177  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
178  SHOW_SBITS(re, &s->gb, 1);
179  SKIP_BITS(re, &s->gb, 1);
180  } else {
181  /* escape */
182  run = SHOW_UBITS(re, &s->gb, 6) + 1;
183  LAST_SKIP_BITS(re, &s->gb, 6);
184  UPDATE_CACHE(re, &s->gb);
185  level = SHOW_SBITS(re, &s->gb, 8);
186  SKIP_BITS(re, &s->gb, 8);
187  if (level == -128) {
188  level = SHOW_UBITS(re, &s->gb, 8) - 256;
189  SKIP_BITS(re, &s->gb, 8);
190  } else if (level == 0) {
191  level = SHOW_UBITS(re, &s->gb, 8);
192  SKIP_BITS(re, &s->gb, 8);
193  }
194  i += run;
195  if (i > MAX_INDEX)
196  break;
197  j = scantable[i];
198  if (level < 0) {
199  level = -level;
200  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
201  level = (level - 1) | 1;
202  level = -level;
203  } else {
204  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
205  level = (level - 1) | 1;
206  }
207  }
208 
209  block[j] = level;
210  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
211  break;
212  UPDATE_CACHE(re, &s->gb);
213  }
214 end:
215  LAST_SKIP_BITS(re, &s->gb, 2);
216  CLOSE_READER(re, &s->gb);
217  }
218 
219  check_scantable_index(s, i);
220 
221  s->c.block_last_index[n] = i;
222  return 0;
223 }
224 
225 static inline int mpeg2_decode_block_non_intra(Mpeg12SliceContext *const s,
226  int16_t *block, int n)
227 {
228  int level, i, j, run;
229  const uint8_t *const scantable = s->c.intra_scantable.permutated;
230  const uint16_t *quant_matrix;
231  const int qscale = s->c.qscale;
232  int mismatch;
233 
234  mismatch = 1;
235 
236  {
237  OPEN_READER(re, &s->gb);
238  i = -1;
239  if (n < 4)
240  quant_matrix = s->c.inter_matrix;
241  else
242  quant_matrix = s->c.chroma_inter_matrix;
243 
244  // Special case for first coefficient, no need to add second VLC table.
245  UPDATE_CACHE(re, &s->gb);
246  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
247  level = (3 * qscale * quant_matrix[0]) >> 5;
248  if (GET_CACHE(re, &s->gb) & 0x40000000)
249  level = -level;
250  block[0] = level;
251  mismatch ^= level;
252  i++;
253  SKIP_BITS(re, &s->gb, 2);
254  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
255  goto end;
256  }
257 
258  /* now dequantize & decode AC coefficients */
259  for (;;) {
260  GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
261  TEX_VLC_BITS, 2, 0);
262 
263  if (level != 0) {
264  i += run;
265  if (i > MAX_INDEX)
266  break;
267  j = scantable[i];
268  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
269  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
270  SHOW_SBITS(re, &s->gb, 1);
271  SKIP_BITS(re, &s->gb, 1);
272  } else {
273  /* escape */
274  run = SHOW_UBITS(re, &s->gb, 6) + 1;
275  LAST_SKIP_BITS(re, &s->gb, 6);
276  UPDATE_CACHE(re, &s->gb);
277  level = SHOW_SBITS(re, &s->gb, 12);
278  SKIP_BITS(re, &s->gb, 12);
279 
280  i += run;
281  if (i > MAX_INDEX)
282  break;
283  j = scantable[i];
284  if (level < 0) {
285  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
286  level = -level;
287  } else {
288  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
289  }
290  }
291 
292  mismatch ^= level;
293  block[j] = level;
294  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
295  break;
296  UPDATE_CACHE(re, &s->gb);
297  }
298 end:
299  LAST_SKIP_BITS(re, &s->gb, 2);
300  CLOSE_READER(re, &s->gb);
301  }
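 /* MPEG-2 mismatch control: the parity of the sum of all dequantized
  * coefficients must be odd; toggle the LSB of the last coefficient
  * to enforce it. */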
302  block[63] ^= (mismatch & 1);
303 
304  check_scantable_index(s, i);
305 
306  s->c.block_last_index[n] = i;
307  return 0;
308 }
309 
310 static inline int mpeg2_decode_block_intra(Mpeg12SliceContext *const s,
311  int16_t *block, int n)
312 {
313  int level, dc, diff, i, j, run;
314  int component;
315  const RL_VLC_ELEM *rl_vlc;
316  const uint8_t *const scantable = s->c.intra_scantable.permutated;
317  const uint16_t *quant_matrix;
318  const int qscale = s->c.qscale;
319  int mismatch;
320 
321  /* DC coefficient */
322  if (n < 4) {
323  quant_matrix = s->c.intra_matrix;
324  component = 0;
325  } else {
326  quant_matrix = s->c.chroma_intra_matrix;
327  component = (n & 1) + 1;
328  }
329  diff = decode_dc(&s->gb, component);
330  dc = s->c.last_dc[component];
331  dc += diff;
332  s->c.last_dc[component] = dc;
333  block[0] = dc * (1 << (3 - s->c.intra_dc_precision));
334  ff_tlog(s->c.avctx, "dc=%d\n", block[0]);
335  mismatch = block[0] ^ 1;
336  i = 0;
337  if (s->c.intra_vlc_format)
338  rl_vlc = ff_mpeg2_rl_vlc;
339  else
340  rl_vlc = ff_mpeg1_rl_vlc;
341 
342  {
343  OPEN_READER(re, &s->gb);
344  /* now dequantize & decode AC coefficients */
345  for (;;) {
346  UPDATE_CACHE(re, &s->gb);
347  GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
348  TEX_VLC_BITS, 2, 0);
349 
350  if (level == 127) {
351  break;
352  } else if (level != 0) {
353  i += run;
354  if (i > MAX_INDEX)
355  break;
356  j = scantable[i];
357  level = (level * qscale * quant_matrix[j]) >> 4;
358  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
359  SHOW_SBITS(re, &s->gb, 1);
360  LAST_SKIP_BITS(re, &s->gb, 1);
361  } else {
362  /* escape */
363  run = SHOW_UBITS(re, &s->gb, 6) + 1;
364  SKIP_BITS(re, &s->gb, 6);
365  level = SHOW_SBITS(re, &s->gb, 12);
366  LAST_SKIP_BITS(re, &s->gb, 12);
367  i += run;
368  if (i > MAX_INDEX)
369  break;
370  j = scantable[i];
371  if (level < 0) {
372  level = (-level * qscale * quant_matrix[j]) >> 4;
373  level = -level;
374  } else {
375  level = (level * qscale * quant_matrix[j]) >> 4;
376  }
377  }
378 
379  mismatch ^= level;
380  block[j] = level;
381  }
382  CLOSE_READER(re, &s->gb);
383  }
384  block[63] ^= mismatch & 1;
385 
386  check_scantable_index(s, i);
387 
388  return 0;
389 }
390 
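/* Read a dual-prime differential motion value: one bit selects zero,
 * otherwise a second bit selects +1 or -1. */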
391 static inline int get_dmv(Mpeg12SliceContext *const s)
392 {
393  if (get_bits1(&s->gb))
394  return 1 - (get_bits1(&s->gb) << 1);
395  else
396  return 0;
397 }
398 
399 /* motion type (for MPEG-2) */
400 #define MT_FIELD 1
401 #define MT_FRAME 2
402 #define MT_16X8 2
403 #define MT_DMV 3
404 
405 static int mpeg_decode_mb(Mpeg12SliceContext *const s, int *mb_skip_run)
406 {
407  int i, j, k, cbp, val, mb_type, motion_type;
408  const int mb_block_count = 4 + (1 << s->c.chroma_format);
409  int ret;
410 
411  ff_tlog(s->c.avctx, "decode_mb: x=%d y=%d\n", s->c.mb_x, s->c.mb_y);
412 
413  av_assert2(s->c.mb_skipped == 0);
414 
415  if ((*mb_skip_run)-- != 0) {
416  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
417  s->c.mb_skipped = 1;
418  s->c.cur_pic.mb_type[s->c.mb_x + s->c.mb_y * s->c.mb_stride] =
419  MB_TYPE_SKIP | MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
420  } else {
421  int mb_type;
422 
423  if (s->c.mb_x)
424  mb_type = s->c.cur_pic.mb_type[s->c.mb_x + s->c.mb_y * s->c.mb_stride - 1];
425  else
426  // FIXME not sure if this is allowed in MPEG at all
427  mb_type = s->c.cur_pic.mb_type[s->c.mb_width + (s->c.mb_y - 1) * s->c.mb_stride - 1];
428  if (IS_INTRA(mb_type)) {
429  av_log(s->c.avctx, AV_LOG_ERROR, "skip with previntra\n");
430  return AVERROR_INVALIDDATA;
431  }
432  s->c.cur_pic.mb_type[s->c.mb_x + s->c.mb_y * s->c.mb_stride] =
433  mb_type | MB_TYPE_SKIP;
434 
435  if ((s->c.mv[0][0][0] | s->c.mv[0][0][1] | s->c.mv[1][0][0] | s->c.mv[1][0][1]) == 0)
436  s->c.mb_skipped = 1;
437  }
438 
439  return 0;
440  }
441 
442  switch (s->c.pict_type) {
443  default:
444  case AV_PICTURE_TYPE_I:
445  if (get_bits1(&s->gb) == 0) {
446  if (get_bits1(&s->gb) == 0) {
447  av_log(s->c.avctx, AV_LOG_ERROR,
448  "Invalid mb type in I-frame at %d %d\n",
449  s->c.mb_x, s->c.mb_y);
450  return AVERROR_INVALIDDATA;
451  }
452  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
453  } else {
454  mb_type = MB_TYPE_INTRA;
455  }
456  break;
457  case AV_PICTURE_TYPE_P:
458  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc, MB_PTYPE_VLC_BITS, 1);
459  if (mb_type < 0) {
460  av_log(s->c.avctx, AV_LOG_ERROR,
461  "Invalid mb type in P-frame at %d %d\n", s->c.mb_x, s->c.mb_y);
462  return AVERROR_INVALIDDATA;
463  }
464  break;
465  case AV_PICTURE_TYPE_B:
466  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc, MB_BTYPE_VLC_BITS, 1);
467  if (mb_type < 0) {
468  av_log(s->c.avctx, AV_LOG_ERROR,
469  "Invalid mb type in B-frame at %d %d\n", s->c.mb_x, s->c.mb_y);
470  return AVERROR_INVALIDDATA;
471  }
472  break;
473  }
474  ff_tlog(s->c.avctx, "mb_type=%x\n", mb_type);
475 // motion_type = 0; /* avoid warning */
476  if (IS_INTRA(mb_type)) {
477  s->c.bdsp.clear_blocks(s->block[0]);
478 
479  if (!s->c.chroma_y_shift)
480  s->c.bdsp.clear_blocks(s->block[6]);
481 
482  /* compute DCT type */
483  // FIXME: add an interlaced_dct coded var?
484  if (s->c.picture_structure == PICT_FRAME &&
485  !s->c.frame_pred_frame_dct)
486  s->c.interlaced_dct = get_bits1(&s->gb);
487 
488  if (IS_QUANT(mb_type))
489  s->c.qscale = mpeg_get_qscale(&s->gb, s->c.q_scale_type);
490 
491  if (s->c.concealment_motion_vectors) {
492  /* just parse them */
493  if (s->c.picture_structure != PICT_FRAME)
494  skip_bits1(&s->gb); /* field select */
495 
496  s->c.mv[0][0][0] =
497  s->c.last_mv[0][0][0] =
498  s->c.last_mv[0][1][0] = mpeg_decode_motion(s, s->c.mpeg_f_code[0][0],
499  s->c.last_mv[0][0][0]);
500  s->c.mv[0][0][1] =
501  s->c.last_mv[0][0][1] =
502  s->c.last_mv[0][1][1] = mpeg_decode_motion(s, s->c.mpeg_f_code[0][1],
503  s->c.last_mv[0][0][1]);
504 
505  check_marker(s->c.avctx, &s->gb, "after concealment_motion_vectors");
506  } else {
507  /* reset mv prediction */
508  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
509  }
510  s->c.mb_intra = 1;
511 
512  if (s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO) {
513  for (i = 0; i < mb_block_count; i++)
514  if ((ret = mpeg2_decode_block_intra(s, s->block[i], i)) < 0)
515  return ret;
516  } else {
517  for (i = 0; i < 6; i++) {
518  ret = ff_mpeg1_decode_block_intra(&s->gb,
519  s->c.intra_matrix,
520  s->c.intra_scantable.permutated,
521  s->c.last_dc, s->block[i],
522  i, s->c.qscale);
523  if (ret < 0) {
524  av_log(s->c.avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
525  s->c.mb_x, s->c.mb_y);
526  return ret;
527  }
528  }
529  }
530  } else {
531  if (mb_type & MB_TYPE_ZERO_MV) {
532  av_assert2(mb_type & MB_TYPE_CBP);
533 
534  s->c.mv_dir = MV_DIR_FORWARD;
535  if (s->c.picture_structure == PICT_FRAME) {
536  if (s->c.picture_structure == PICT_FRAME
537  && !s->c.frame_pred_frame_dct)
538  s->c.interlaced_dct = get_bits1(&s->gb);
539  s->c.mv_type = MV_TYPE_16X16;
540  } else {
541  s->c.mv_type = MV_TYPE_FIELD;
542  mb_type |= MB_TYPE_INTERLACED;
543  s->c.field_select[0][0] = s->c.picture_structure - 1;
544  }
545 
546  if (IS_QUANT(mb_type))
547  s->c.qscale = mpeg_get_qscale(&s->gb, s->c.q_scale_type);
548 
549  s->c.last_mv[0][0][0] = 0;
550  s->c.last_mv[0][0][1] = 0;
551  s->c.last_mv[0][1][0] = 0;
552  s->c.last_mv[0][1][1] = 0;
553  s->c.mv[0][0][0] = 0;
554  s->c.mv[0][0][1] = 0;
555  } else {
556  av_assert2(mb_type & MB_TYPE_BIDIR_MV);
557  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
558  /* get additional motion vector type */
559  if (s->c.picture_structure == PICT_FRAME && s->c.frame_pred_frame_dct) {
560  motion_type = MT_FRAME;
561  } else {
562  motion_type = get_bits(&s->gb, 2);
563  if (s->c.picture_structure == PICT_FRAME && HAS_CBP(mb_type))
564  s->c.interlaced_dct = get_bits1(&s->gb);
565  }
566 
567  if (IS_QUANT(mb_type))
568  s->c.qscale = mpeg_get_qscale(&s->gb, s->c.q_scale_type);
569 
570  /* motion vectors */
571  s->c.mv_dir = MB_TYPE_MV_2_MV_DIR(mb_type);
572  ff_tlog(s->c.avctx, "motion_type=%d\n", motion_type);
573  switch (motion_type) {
574  case MT_FRAME: /* or MT_16X8 */
575  if (s->c.picture_structure == PICT_FRAME) {
576  mb_type |= MB_TYPE_16x16;
577  s->c.mv_type = MV_TYPE_16X16;
578  for (i = 0; i < 2; i++) {
579  if (HAS_MV(mb_type, i)) {
580  /* MT_FRAME */
581  s->c.mv[i][0][0] =
582  s->c.last_mv[i][0][0] =
583  s->c.last_mv[i][1][0] =
584  mpeg_decode_motion(s, s->c.mpeg_f_code[i][0],
585  s->c.last_mv[i][0][0]);
586  s->c.mv[i][0][1] =
587  s->c.last_mv[i][0][1] =
588  s->c.last_mv[i][1][1] =
589  mpeg_decode_motion(s, s->c.mpeg_f_code[i][1],
590  s->c.last_mv[i][0][1]);
591  /* full_pel: only for MPEG-1 */
592  if (s->c.full_pel[i]) {
593  s->c.mv[i][0][0] *= 2;
594  s->c.mv[i][0][1] *= 2;
595  }
596  }
597  }
598  } else {
599  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
600  s->c.mv_type = MV_TYPE_16X8;
601  for (i = 0; i < 2; i++) {
602  if (HAS_MV(mb_type, i)) {
603  /* MT_16X8 */
604  for (j = 0; j < 2; j++) {
605  s->c.field_select[i][j] = get_bits1(&s->gb);
606  for (k = 0; k < 2; k++) {
607  val = mpeg_decode_motion(s, s->c.mpeg_f_code[i][k],
608  s->c.last_mv[i][j][k]);
609  s->c.last_mv[i][j][k] = val;
610  s->c.mv[i][j][k] = val;
611  }
612  }
613  }
614  }
615  }
616  break;
617  case MT_FIELD:
618  s->c.mv_type = MV_TYPE_FIELD;
619  if (s->c.picture_structure == PICT_FRAME) {
620  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
621  for (i = 0; i < 2; i++) {
622  if (HAS_MV(mb_type, i)) {
623  for (j = 0; j < 2; j++) {
624  s->c.field_select[i][j] = get_bits1(&s->gb);
625  val = mpeg_decode_motion(s, s->c.mpeg_f_code[i][0],
626  s->c.last_mv[i][j][0]);
627  s->c.last_mv[i][j][0] = val;
628  s->c.mv[i][j][0] = val;
629  ff_tlog(s->c.avctx, "fmx=%d\n", val);
630  val = mpeg_decode_motion(s, s->c.mpeg_f_code[i][1],
631  s->c.last_mv[i][j][1] >> 1);
632  s->c.last_mv[i][j][1] = 2 * val;
633  s->c.mv[i][j][1] = val;
634  ff_tlog(s->c.avctx, "fmy=%d\n", val);
635  }
636  }
637  }
638  } else {
639  av_assert0(!s->c.progressive_sequence);
640  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
641  for (i = 0; i < 2; i++) {
642  if (HAS_MV(mb_type, i)) {
643  s->c.field_select[i][0] = get_bits1(&s->gb);
644  for (k = 0; k < 2; k++) {
645  val = mpeg_decode_motion(s, s->c.mpeg_f_code[i][k],
646  s->c.last_mv[i][0][k]);
647  s->c.last_mv[i][0][k] = val;
648  s->c.last_mv[i][1][k] = val;
649  s->c.mv[i][0][k] = val;
650  }
651  }
652  }
653  }
654  break;
655  case MT_DMV:
656  if (s->c.progressive_sequence){
657  av_log(s->c.avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
658  return AVERROR_INVALIDDATA;
659  }
660  s->c.mv_type = MV_TYPE_DMV;
661  for (i = 0; i < 2; i++) {
662  if (HAS_MV(mb_type, i)) {
663  int dmx, dmy, mx, my, m;
664  const int my_shift = s->c.picture_structure == PICT_FRAME;
665 
666  mx = mpeg_decode_motion(s, s->c.mpeg_f_code[i][0],
667  s->c.last_mv[i][0][0]);
668  s->c.last_mv[i][0][0] = mx;
669  s->c.last_mv[i][1][0] = mx;
670  dmx = get_dmv(s);
671  my = mpeg_decode_motion(s, s->c.mpeg_f_code[i][1],
672  s->c.last_mv[i][0][1] >> my_shift);
673  dmy = get_dmv(s);
674 
675 
676  s->c.last_mv[i][0][1] = my * (1 << my_shift);
677  s->c.last_mv[i][1][1] = my * (1 << my_shift);
678 
679  s->c.mv[i][0][0] = mx;
680  s->c.mv[i][0][1] = my;
681  s->c.mv[i][1][0] = mx; // not used
682  s->c.mv[i][1][1] = my; // not used
683 
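 /* Dual prime: the explicit vector (mx,my) predicts the same-parity
  * field; vectors for the opposite-parity field are derived by scaling
  * it with the field distance and adding the small differential
  * (dmx,dmy) plus a +/-1 vertical correction for the parity change. */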
684  if (s->c.picture_structure == PICT_FRAME) {
685  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
686 
687  // m = 1 + 2 * s->c.top_field_first;
688  m = s->c.top_field_first ? 1 : 3;
689 
690  /* top -> top pred */
691  s->c.mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
692  s->c.mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
693  m = 4 - m;
694  s->c.mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
695  s->c.mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
696  } else {
697  mb_type |= MB_TYPE_16x16;
698 
699  s->c.mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
700  s->c.mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
701  if (s->c.picture_structure == PICT_TOP_FIELD)
702  s->c.mv[i][2][1]--;
703  else
704  s->c.mv[i][2][1]++;
705  }
706  }
707  }
708  break;
709  default:
710  av_log(s->c.avctx, AV_LOG_ERROR,
711  "00 motion_type at %d %d\n", s->c.mb_x, s->c.mb_y);
712  return AVERROR_INVALIDDATA;
713  }
714  }
715 
716  s->c.mb_intra = 0;
717  s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 128 << s->c.intra_dc_precision;
718  if (HAS_CBP(mb_type)) {
719  s->c.bdsp.clear_blocks(s->block[0]);
720 
721  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc, MB_PAT_VLC_BITS, 1);
722  if (mb_block_count > 6) {
723  cbp *= 1 << mb_block_count - 6;
724  cbp |= get_bits(&s->gb, mb_block_count - 6);
725  s->c.bdsp.clear_blocks(s->block[6]);
726  }
727  if (cbp <= 0) {
728  av_log(s->c.avctx, AV_LOG_ERROR,
729  "invalid cbp %d at %d %d\n", cbp, s->c.mb_x, s->c.mb_y);
730  return AVERROR_INVALIDDATA;
731  }
732 
733  if (s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO) {
734  cbp <<= 12 - mb_block_count;
735 
736  for (i = 0; i < mb_block_count; i++) {
737  if (cbp & (1 << 11)) {
738  if ((ret = mpeg2_decode_block_non_intra(s, s->block[i], i)) < 0)
739  return ret;
740  } else {
741  s->c.block_last_index[i] = -1;
742  }
743  cbp += cbp;
744  }
745  } else {
746  for (i = 0; i < 6; i++) {
747  if (cbp & 32) {
748  if ((ret = mpeg1_decode_block_inter(s, s->block[i], i)) < 0)
749  return ret;
750  } else {
751  s->c.block_last_index[i] = -1;
752  }
753  cbp += cbp;
754  }
755  }
756  } else {
757  for (i = 0; i < 12; i++)
758  s->c.block_last_index[i] = -1;
759  }
760  }
761 
762  s->c.cur_pic.mb_type[s->c.mb_x + s->c.mb_y * s->c.mb_stride] = mb_type;
763 
764  return 0;
765 }
766 
767 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
768 {
769  Mpeg1Context *s = avctx->priv_data;
770  MPVContext *const s2 = &s->slice.c;
771  int ret;
772 
773  s2->slice_ctx_size = sizeof(s->slice);
774  s2->out_format = FMT_MPEG1;
775 
776  if ( avctx->codec_tag != AV_RL32("VCR2")
777  && avctx->codec_tag != AV_RL32("BW10"))
778  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
779  ret = ff_mpv_decode_init(s2, avctx);
780  if (ret < 0)
781  return ret;
782 
783  ff_mpeg12_init_vlcs();
784 
786  avctx->color_range = AVCOL_RANGE_MPEG;
787  return 0;
788 }
789 
790 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
791 #if CONFIG_MPEG1_NVDEC_HWACCEL
792  AV_PIX_FMT_CUDA,
793 #endif
794 #if CONFIG_MPEG1_VDPAU_HWACCEL
795  AV_PIX_FMT_VDPAU,
796 #endif
797  AV_PIX_FMT_YUV420P,
798  AV_PIX_FMT_NONE
799 };
800 
801 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
802 #if CONFIG_MPEG2_NVDEC_HWACCEL
803  AV_PIX_FMT_CUDA,
804 #endif
805 #if CONFIG_MPEG2_VDPAU_HWACCEL
806  AV_PIX_FMT_VDPAU,
807 #endif
808 #if CONFIG_MPEG2_DXVA2_HWACCEL
809  AV_PIX_FMT_DXVA2_VLD,
810 #endif
811 #if CONFIG_MPEG2_D3D11VA_HWACCEL
812  AV_PIX_FMT_D3D11VA_VLD,
813  AV_PIX_FMT_D3D11,
814 #endif
815 #if CONFIG_MPEG2_D3D12VA_HWACCEL
816  AV_PIX_FMT_D3D12,
817 #endif
818 #if CONFIG_MPEG2_VAAPI_HWACCEL
819  AV_PIX_FMT_VAAPI,
820 #endif
821 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
822  AV_PIX_FMT_VIDEOTOOLBOX,
823 #endif
824  AV_PIX_FMT_YUV420P,
825  AV_PIX_FMT_NONE
826 };
827 
828 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
829  AV_PIX_FMT_YUV422P,
830  AV_PIX_FMT_NONE
831 };
832 
833 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
834  AV_PIX_FMT_YUV444P,
835  AV_PIX_FMT_NONE
836 };
837 
838 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
839 {
840  Mpeg1Context *s1 = avctx->priv_data;
841  MPVContext *const s = &s1->slice.c;
842  const enum AVPixelFormat *pix_fmts;
843 
844  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
845  return AV_PIX_FMT_GRAY8;
846 
847  if (s->chroma_format < CHROMA_422)
848  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
849  mpeg1_hwaccel_pixfmt_list_420 :
850  mpeg2_hwaccel_pixfmt_list_420;
851  else if (s->chroma_format == CHROMA_422)
852  pix_fmts = mpeg12_pixfmt_list_422;
853  else
854  pix_fmts = mpeg12_pixfmt_list_444;
855 
856  return ff_get_format(avctx, pix_fmts);
857 }
858 
859 /* Call this function when we know all parameters.
860  * It may be called in different places for MPEG-1 and MPEG-2. */
861 static int mpeg_decode_postinit(AVCodecContext *avctx)
862 {
863  Mpeg1Context *s1 = avctx->priv_data;
864  MPVContext *const s = &s1->slice.c;
865  int ret;
866 
867  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
868  // MPEG-1 aspect
869  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
870  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
871  } else { // MPEG-2
872  // MPEG-2 aspect
873  if (s1->aspect_ratio_info > 1) {
874  AVRational dar =
875  av_mul_q(av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
876  (AVRational) { s1->pan_scan.width,
877  s1->pan_scan.height }),
878  (AVRational) { s->width, s->height });
879 
880  /* We ignore the spec here and guess a bit as reality does not
881  * match the spec, see for example res_change_ffmpeg_aspect.ts
882  * and sequence-display-aspect.mpg.
883  * issue1613, 621, 562 */
884  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
885  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
886  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
887  s->avctx->sample_aspect_ratio =
888  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
889  (AVRational) { s->width, s->height });
890  } else {
891  s->avctx->sample_aspect_ratio =
892  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
893  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
894 // issue1613 4/3 16/9 -> 16/9
895 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
896 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
897 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
898  ff_dlog(avctx, "aspect A %d/%d\n",
899  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
900  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
901  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
902  s->avctx->sample_aspect_ratio.den);
903  }
904  } else {
905  s->avctx->sample_aspect_ratio =
906  ff_mpeg2_aspect[s1->aspect_ratio_info];
907  }
908  } // MPEG-2
909 
910  if (av_image_check_sar(s->width, s->height,
911  avctx->sample_aspect_ratio) < 0) {
912  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
913  avctx->sample_aspect_ratio.num,
914  avctx->sample_aspect_ratio.den);
915  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
916  }
917 
918  if (!s->context_initialized ||
919  avctx->coded_width != s->width ||
920  avctx->coded_height != s->height ||
921  s1->save_chroma_format != s->chroma_format ||
922  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
923  0) {
924  if (s->context_initialized)
925  ff_mpv_common_end(s);
926 
927  ret = ff_set_dimensions(avctx, s->width, s->height);
928  if (ret < 0)
929  return ret;
930 
931  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s1->bit_rate &&
932  (s1->bit_rate != 0x3FFFF*400)) {
933  avctx->rc_max_rate = s1->bit_rate;
934  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s1->bit_rate &&
935  (s1->bit_rate != 0x3FFFF*400 || s1->vbv_delay != 0xFFFF)) {
936  avctx->bit_rate = s1->bit_rate;
937  }
938  s1->save_progressive_seq = s->progressive_sequence;
939  s1->save_chroma_format = s->chroma_format;
940 
941  /* low_delay may be forced, in this case we will have B-frames
942  * that behave like P-frames. */
943  avctx->has_b_frames = !s->low_delay;
944 
945  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
946  // MPEG-1 fps
949  } else { // MPEG-2
950  // MPEG-2 fps
951  av_reduce(&s->avctx->framerate.num,
952  &s->avctx->framerate.den,
953  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
954  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
955  1 << 30);
956 
957  switch (s->chroma_format) {
958  case CHROMA_420: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
959  case CHROMA_422:
960  case CHROMA_444: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
961  default: av_assert0(0);
962  }
963  } // MPEG-2
964 
965  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
966 
967  if ((ret = ff_mpv_common_init(s)) < 0)
968  return ret;
969  if (!s->avctx->lowres)
970  for (int i = 0; i < s->slice_context_count; i++)
971  ff_mpv_framesize_disable(&s->thread_context[i]->sc);
972  }
973  return 0;
974 }
975 
976 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
977  int buf_size)
978 {
979  Mpeg1Context *s1 = avctx->priv_data;
980  MPVContext *const s = &s1->slice.c;
981  GetBitContext gb0, *const gb = &gb0;
982  int ref, f_code, vbv_delay, ret;
983 
984  ret = init_get_bits8(gb, buf, buf_size);
985  if (ret < 0)
986  return ret;
987 
988  ref = get_bits(gb, 10); /* temporal ref */
989  s->pict_type = get_bits(gb, 3);
990  if (s->pict_type == 0 || s->pict_type > 3)
991  return AVERROR_INVALIDDATA;
992 
993  vbv_delay = get_bits(gb, 16);
994  s1->vbv_delay = vbv_delay;
995  if (s->pict_type == AV_PICTURE_TYPE_P ||
996  s->pict_type == AV_PICTURE_TYPE_B) {
997  s->full_pel[0] = get_bits1(gb);
998  f_code = get_bits(gb, 3);
999  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1000  return AVERROR_INVALIDDATA;
1001  f_code += !f_code;
1002  s->mpeg_f_code[0][0] = f_code;
1003  s->mpeg_f_code[0][1] = f_code;
1004  }
1005  if (s->pict_type == AV_PICTURE_TYPE_B) {
1006  s->full_pel[1] = get_bits1(gb);
1007  f_code = get_bits(gb, 3);
1008  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1009  return AVERROR_INVALIDDATA;
1010  f_code += !f_code;
1011  s->mpeg_f_code[1][0] = f_code;
1012  s->mpeg_f_code[1][1] = f_code;
1013  }
1014 
1015  if (avctx->debug & FF_DEBUG_PICT_INFO)
1016  av_log(avctx, AV_LOG_DEBUG,
1017  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1018 
1019  return 0;
1020 }
1021 
1022 static void mpeg_decode_sequence_extension(Mpeg1Context *s1,
1023  GetBitContext *const gb)
1024 {
1025  MPVContext *const s = &s1->slice.c;
1026  int horiz_size_ext, vert_size_ext;
1027  int bit_rate_ext;
1028 
1029  skip_bits(gb, 1); /* profile and level esc*/
1030  s->avctx->profile = get_bits(gb, 3);
1031  s->avctx->level = get_bits(gb, 4);
1032  s->progressive_sequence = get_bits1(gb); /* progressive_sequence */
1033  s->chroma_format = get_bits(gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1034 
1035  if (!s->chroma_format) {
1036  s->chroma_format = CHROMA_420;
1037  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1038  }
1039 
1040  horiz_size_ext = get_bits(gb, 2);
1041  vert_size_ext = get_bits(gb, 2);
1042  s->width |= (horiz_size_ext << 12);
1043  s->height |= (vert_size_ext << 12);
1044  bit_rate_ext = get_bits(gb, 12); /* XXX: handle it */
1045  s1->bit_rate += (bit_rate_ext << 18) * 400LL;
1046  check_marker(s->avctx, gb, "after bit rate extension");
1047  s->avctx->rc_buffer_size += get_bits(gb, 8) * 1024 * 16 << 10;
1048 
1049  s->low_delay = get_bits1(gb);
1050  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1051  s->low_delay = 1;
1052 
1053  s1->frame_rate_ext.num = get_bits(gb, 2) + 1;
1054  s1->frame_rate_ext.den = get_bits(gb, 5) + 1;
1055 
1056  ff_dlog(s->avctx, "sequence extension\n");
1057  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1058 
1059  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1060  av_log(s->avctx, AV_LOG_DEBUG,
1061  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1062  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1063  s->avctx->rc_buffer_size, s1->bit_rate);
1064 }
1065 
1066 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1,
1067  GetBitContext *const gb)
1068 {
1069  MPVContext *const s = &s1->slice.c;
1070  int color_description, w, h;
1071 
1072  skip_bits(gb, 3); /* video format */
1073  color_description = get_bits1(gb);
1074  if (color_description) {
1075  s->avctx->color_primaries = get_bits(gb, 8);
1076  s->avctx->color_trc = get_bits(gb, 8);
1077  s->avctx->colorspace = get_bits(gb, 8);
1078  }
1079  w = get_bits(gb, 14);
1080  skip_bits(gb, 1); // marker
1081  h = get_bits(gb, 14);
1082  // remaining 3 bits are zero padding
1083 
1084  s1->pan_scan.width = 16 * w;
1085  s1->pan_scan.height = 16 * h;
1086 
1087  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1088  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1089 }
1090 
1091 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1,
1092  GetBitContext *const gb)
1093 {
1094  MPVContext *const s = &s1->slice.c;
1095  int i, nofco;
1096 
1097  nofco = 1;
1098  if (s->progressive_sequence) {
1099  if (s->repeat_first_field) {
1100  nofco++;
1101  if (s->top_field_first)
1102  nofco++;
1103  }
1104  } else {
1105  if (s->picture_structure == PICT_FRAME) {
1106  nofco++;
1107  if (s->repeat_first_field)
1108  nofco++;
1109  }
1110  }
1111  for (i = 0; i < nofco; i++) {
1112  s1->pan_scan.position[i][0] = get_sbits(gb, 16);
1113  skip_bits(gb, 1); // marker
1114  s1->pan_scan.position[i][1] = get_sbits(gb, 16);
1115  skip_bits(gb, 1); // marker
1116  }
1117 
1118  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1119  av_log(s->avctx, AV_LOG_DEBUG,
1120  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1121  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1122  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1123  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1124 }
1125 
1126 static int load_matrix(MPVContext *const s, GetBitContext *const gb,
1127  uint16_t matrix0[64], uint16_t matrix1[64], int intra)
1128 {
1129  int i;
1130 
1131  for (i = 0; i < 64; i++) {
1132  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1133  int v = get_bits(gb, 8);
1134  if (v == 0) {
1135  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1136  return AVERROR_INVALIDDATA;
1137  }
1138  if (intra && i == 0 && v != 8) {
1139  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1140  v = 8; // needed by pink.mpg / issue1046
1141  }
1142  matrix0[j] = v;
1143  if (matrix1)
1144  matrix1[j] = v;
1145  }
1146  return 0;
1147 }
1148 
1149 static void mpeg_decode_quant_matrix_extension(MPVContext *const s,
1150  GetBitContext *const gb)
1151 {
1152  ff_dlog(s->avctx, "matrix extension\n");
1153 
1154  if (get_bits1(gb))
1155  load_matrix(s, gb, s->chroma_intra_matrix, s->intra_matrix, 1);
1156  if (get_bits1(gb))
1157  load_matrix(s, gb, s->chroma_inter_matrix, s->inter_matrix, 0);
1158  if (get_bits1(gb))
1159  load_matrix(s, gb, s->chroma_intra_matrix, NULL, 1);
1160  if (get_bits1(gb))
1161  load_matrix(s, gb, s->chroma_inter_matrix, NULL, 0);
1162 }
1163 
1164 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1,
1165  GetBitContext *const gb)
1166 {
1167  MPVContext *const s = &s1->slice.c;
1168 
1169  s->full_pel[0] = s->full_pel[1] = 0;
1170  s->mpeg_f_code[0][0] = get_bits(gb, 4);
1171  s->mpeg_f_code[0][1] = get_bits(gb, 4);
1172  s->mpeg_f_code[1][0] = get_bits(gb, 4);
1173  s->mpeg_f_code[1][1] = get_bits(gb, 4);
1174  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1175  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1176  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1177  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1178  if (!s->pict_type && s->context_initialized) {
1179  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1180  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1181  return AVERROR_INVALIDDATA;
1182  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1183  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1184  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1185  s->pict_type = AV_PICTURE_TYPE_I;
1186  else
1187  s->pict_type = AV_PICTURE_TYPE_P;
1188  } else
1189  s->pict_type = AV_PICTURE_TYPE_B;
1190  }
1191 
1192  s->intra_dc_precision = get_bits(gb, 2);
1193  s->picture_structure = get_bits(gb, 2);
1194  s->top_field_first = get_bits1(gb);
1195  s->frame_pred_frame_dct = get_bits1(gb);
1196  s->concealment_motion_vectors = get_bits1(gb);
1197  s->q_scale_type = get_bits1(gb);
1198  s->intra_vlc_format = get_bits1(gb);
1199  s->alternate_scan = get_bits1(gb);
1200  s->repeat_first_field = get_bits1(gb);
1201  s->chroma_420_type = get_bits1(gb);
1202  s->progressive_frame = get_bits1(gb);
1203 
1204  // We only initialize intra_scantable.permutated, as this is all we use.
1205  ff_permute_scantable(s->intra_scantable.permutated,
1206  s->alternate_scan ? ff_alternate_vertical_scan : ff_zigzag_direct,
1207  s->idsp.idct_permutation);
1208 
1209  /* composite display not parsed */
1210  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1211  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1212  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1213  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1214  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1215  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1216  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1217  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1218  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1219 
1220  return 0;
1221 }
1222 
1223 static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
1224 {
1225  MPVContext *const s = &s1->slice.c;
1226  AVCodecContext *avctx = s->avctx;
1227  int second_field = 0;
1228  int ret;
1229 
1230  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1231  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1232  return AVERROR_INVALIDDATA;
1233  }
1234 
1235  /* start frame decoding */
1236  if (s->first_field || s->picture_structure == PICT_FRAME) {
1237  AVFrameSideData *pan_scan;
1238 
1239  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1240  return ret;
1241 
1242  if (s->picture_structure != PICT_FRAME) {
1243  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
1244  (s->picture_structure == PICT_TOP_FIELD);
1245 
1246  for (int i = 0; i < 3; i++) {
1247  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1248  s->cur_pic.data[i] = FF_PTR_ADD(s->cur_pic.data[i],
1249  s->cur_pic.linesize[i]);
1250  }
1251  s->cur_pic.linesize[i] *= 2;
1252  }
1253  }
1254 
1256 
1257  /* first check if we must repeat the frame */
1258  s->cur_pic.ptr->f->repeat_pict = 0;
1259  if (s->repeat_first_field) {
1260  if (s->progressive_sequence) {
1261  if (s->top_field_first)
1262  s->cur_pic.ptr->f->repeat_pict = 4;
1263  else
1264  s->cur_pic.ptr->f->repeat_pict = 2;
1265  } else if (s->progressive_frame) {
1266  s->cur_pic.ptr->f->repeat_pict = 1;
1267  }
1268  }
1269 
1270  ret = ff_frame_new_side_data(s->avctx, s->cur_pic.ptr->f,
1271  AV_FRAME_DATA_PANSCAN, sizeof(s1->pan_scan),
1272  &pan_scan);
1273  if (ret < 0)
1274  return ret;
1275  if (pan_scan)
1276  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1277 
1278  if (s1->a53_buf_ref) {
1279  ret = ff_frame_new_side_data_from_buf(
1280  s->avctx, s->cur_pic.ptr->f, AV_FRAME_DATA_A53_CC,
1281  &s1->a53_buf_ref);
1282  if (ret < 0)
1283  return ret;
1284  }
1285 
1286  if (s1->has_stereo3d) {
1287  AVStereo3D *stereo = av_stereo3d_create_side_data(s->cur_pic.ptr->f);
1288  if (!stereo)
1289  return AVERROR(ENOMEM);
1290 
1291  stereo->type = s1->stereo3d_type;
1292  s1->has_stereo3d = 0;
1293  }
1294 
1295  if (s1->has_afd) {
1296  AVFrameSideData *sd;
1297  ret = ff_frame_new_side_data(s->avctx, s->cur_pic.ptr->f,
1298  AV_FRAME_DATA_AFD, 1, &sd);
1299  if (ret < 0)
1300  return ret;
1301  if (sd)
1302  *sd->data = s1->afd;
1303  s1->has_afd = 0;
1304  }
1305  } else { // second field
1306  second_field = 1;
1307  if (!s->cur_pic.ptr) {
1308  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1309  return AVERROR_INVALIDDATA;
1310  }
1311 
1312  if (s->avctx->hwaccel) {
1313  if ((ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame)) < 0) {
1314  av_log(avctx, AV_LOG_ERROR,
1315  "hardware accelerator failed to decode first field\n");
1316  return ret;
1317  }
1318  }
1319  ret = ff_mpv_alloc_dummy_frames(s);
1320  if (ret < 0)
1321  return ret;
1322 
1323  for (int i = 0; i < 3; i++) {
1324  s->cur_pic.data[i] = s->cur_pic.ptr->f->data[i];
1325  if (s->picture_structure == PICT_BOTTOM_FIELD)
1326  s->cur_pic.data[i] +=
1327  s->cur_pic.ptr->f->linesize[i];
1328  }
1329  }
1330 
1331  if (avctx->hwaccel) {
1332  if ((ret = FF_HW_CALL(avctx, start_frame, NULL, buf, buf_size)) < 0)
1333  return ret;
1334  } else if (s->codec_tag == MKTAG('V', 'C', 'R', '2')) {
1335  // Exchange UV
1336  FFSWAP(uint8_t*, s->cur_pic.data[1], s->cur_pic.data[2]);
1337  FFSWAP(ptrdiff_t, s->cur_pic.linesize[1], s->cur_pic.linesize[2]);
1338  if (!second_field) {
1339  FFSWAP(uint8_t*, s->next_pic.data[1], s->next_pic.data[2]);
1340  FFSWAP(ptrdiff_t, s->next_pic.linesize[1], s->next_pic.linesize[2]);
1341  FFSWAP(uint8_t*, s->last_pic.data[1], s->last_pic.data[2]);
1342  FFSWAP(ptrdiff_t, s->last_pic.linesize[1], s->last_pic.linesize[2]);
1343  }
1344  }
1345 
1346  return 0;
1347 }
1348 
1349 #define DECODE_SLICE_ERROR -1
1350 #define DECODE_SLICE_OK 0
1351 
1352 /**
1353  * Decode a slice.
1354  * Mpeg12SliceContext.c.mb_y must be set to the MB row from the startcode.
1355  * @return DECODE_SLICE_ERROR if the slice is damaged,
1356  * DECODE_SLICE_OK if this slice is OK
1357  */
1358 static int mpeg_decode_slice(Mpeg12SliceContext *const s, int mb_y,
1359  const uint8_t **buf, int buf_size)
1360 {
1361  AVCodecContext *avctx = s->c.avctx;
1362  const int lowres = s->c.avctx->lowres;
1363  const int field_pic = s->c.picture_structure != PICT_FRAME;
1364  int ret;
1365 
1366  s->c.resync_mb_x =
1367  s->c.resync_mb_y = -1;
1368 
1369  av_assert0(mb_y < s->c.mb_height);
1370 
1371  ret = init_get_bits8(&s->gb, *buf, buf_size);
1372  if (ret < 0)
1373  return ret;
1374 
1375  if (s->c.codec_id != AV_CODEC_ID_MPEG1VIDEO && s->c.mb_height > 2800/16)
1376  skip_bits(&s->gb, 3);
1377 
1378  ff_mpeg1_clean_buffers(&s->c);
1379  s->c.interlaced_dct = 0;
1380 
1381  s->c.qscale = mpeg_get_qscale(&s->gb, s->c.q_scale_type);
1382 
1383  if (s->c.qscale == 0) {
1384  av_log(s->c.avctx, AV_LOG_ERROR, "qscale == 0\n");
1385  return AVERROR_INVALIDDATA;
1386  }
1387 
1388  /* extra slice info */
1389  if (skip_1stop_8data_bits(&s->gb) < 0)
1390  return AVERROR_INVALIDDATA;
1391 
1392  s->c.mb_x = 0;
1393 
1394  if (mb_y == 0 && s->c.codec_tag == AV_RL32("SLIF")) {
1395  skip_bits1(&s->gb);
1396  } else {
1397  while (get_bits_left(&s->gb) > 0) {
1398  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1399  MBINCR_VLC_BITS, 2);
1400  if (code < 0) {
1401  av_log(s->c.avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1402  return AVERROR_INVALIDDATA;
1403  }
1404  if (code >= 33) {
1405  if (code == 33)
1406  s->c.mb_x += 33;
1407  /* otherwise, stuffing, nothing to do */
1408  } else {
1409  s->c.mb_x += code;
1410  break;
1411  }
1412  }
1413  }
1414 
1415  if (s->c.mb_x >= (unsigned) s->c.mb_width) {
1416  av_log(s->c.avctx, AV_LOG_ERROR, "initial skip overflow\n");
1417  return AVERROR_INVALIDDATA;
1418  }
1419 
1420  if (avctx->hwaccel) {
1421  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1422  int start_code = -1;
1423  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1424  if (buf_end < *buf + buf_size)
1425  buf_end -= 4;
1426  s->c.mb_y = mb_y;
1427  if (FF_HW_CALL(avctx, decode_slice, buf_start, buf_end - buf_start) < 0)
1428  return DECODE_SLICE_ERROR;
1429  *buf = buf_end;
1430  return DECODE_SLICE_OK;
1431  }
1432 
1433  s->c.resync_mb_x = s->c.mb_x;
1434  s->c.resync_mb_y = s->c.mb_y = mb_y;
1435  ff_init_block_index(&s->c);
1436 
1437  if (s->c.mb_y == 0 && s->c.mb_x == 0 && (s->c.first_field || s->c.picture_structure == PICT_FRAME)) {
1438  if (s->c.avctx->debug & FF_DEBUG_PICT_INFO) {
1439  av_log(s->c.avctx, AV_LOG_DEBUG,
1440  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1441  s->c.qscale,
1442  s->c.mpeg_f_code[0][0], s->c.mpeg_f_code[0][1],
1443  s->c.mpeg_f_code[1][0], s->c.mpeg_f_code[1][1],
1444  s->c.pict_type == AV_PICTURE_TYPE_I ? 'I' :
1445  (s->c.pict_type == AV_PICTURE_TYPE_P ? 'P' :
1446  (s->c.pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1447  s->c.progressive_sequence ? "ps" : "",
1448  s->c.progressive_frame ? "pf" : "",
1449  s->c.alternate_scan ? "alt" : "",
1450  s->c.top_field_first ? "top" : "",
1451  s->c.intra_dc_precision, s->c.picture_structure,
1452  s->c.frame_pred_frame_dct, s->c.concealment_motion_vectors,
1453  s->c.q_scale_type, s->c.intra_vlc_format,
1454  s->c.repeat_first_field, s->c.chroma_420_type ? "420" : "");
1455  }
1456  }
1457 
1458  for (int mb_skip_run = 0;;) {
1459  ret = mpeg_decode_mb(s, &mb_skip_run);
1460  if (ret < 0)
1461  return ret;
1462 
1463  // Note motion_val is normally NULL unless we want to extract the MVs.
1464  if (s->c.cur_pic.motion_val[0]) {
1465  const int wrap = s->c.b8_stride;
1466  int xy = s->c.mb_x * 2 + s->c.mb_y * 2 * wrap;
1467  int b8_xy = 4 * (s->c.mb_x + s->c.mb_y * s->c.mb_stride);
1468  int motion_x, motion_y, dir, i;
1469 
1470  for (i = 0; i < 2; i++) {
1471  for (dir = 0; dir < 2; dir++) {
1472  if (s->c.mb_intra ||
1473  (dir == 1 && s->c.pict_type != AV_PICTURE_TYPE_B)) {
1474  motion_x = motion_y = 0;
1475  } else if (s->c.mv_type == MV_TYPE_16X16 ||
1476  (s->c.mv_type == MV_TYPE_FIELD && field_pic)) {
1477  motion_x = s->c.mv[dir][0][0];
1478  motion_y = s->c.mv[dir][0][1];
1479  } else { /* if ((s->c.mv_type == MV_TYPE_FIELD) || (s->c.mv_type == MV_TYPE_16X8)) */
1480  motion_x = s->c.mv[dir][i][0];
1481  motion_y = s->c.mv[dir][i][1];
1482  }
1483 
1484  s->c.cur_pic.motion_val[dir][xy][0] = motion_x;
1485  s->c.cur_pic.motion_val[dir][xy][1] = motion_y;
1486  s->c.cur_pic.motion_val[dir][xy + 1][0] = motion_x;
1487  s->c.cur_pic.motion_val[dir][xy + 1][1] = motion_y;
1488  s->c.cur_pic.ref_index [dir][b8_xy] =
1489  s->c.cur_pic.ref_index [dir][b8_xy + 1] = s->c.field_select[dir][i];
1490  av_assert2(s->c.field_select[dir][i] == 0 ||
1491  s->c.field_select[dir][i] == 1);
1492  }
1493  xy += wrap;
1494  b8_xy += 2;
1495  }
1496  }
1497 
1498  s->c.dest[0] += 16 >> lowres;
1499  s->c.dest[1] +=(16 >> lowres) >> s->c.chroma_x_shift;
1500  s->c.dest[2] +=(16 >> lowres) >> s->c.chroma_x_shift;
1501 
1502  ff_mpv_reconstruct_mb(&s->c, s->block);
1503 
1504  if (++s->c.mb_x >= s->c.mb_width) {
1505  const int mb_size = 16 >> s->c.avctx->lowres;
1506  int left;
1507 
1508  ff_mpeg_draw_horiz_band(&s->c, mb_size * (s->c.mb_y >> field_pic), mb_size);
1509 
1510  s->c.mb_x = 0;
1511  s->c.mb_y += 1 << field_pic;
1512 
1513  if (s->c.mb_y >= s->c.mb_height) {
1514  int left = get_bits_left(&s->gb);
1515  int is_d10 = s->c.chroma_format == CHROMA_422 &&
1516  s->c.pict_type == AV_PICTURE_TYPE_I &&
1517  avctx->profile == 0 && avctx->level == 5 &&
1518  s->c.intra_dc_precision == 2 &&
1519  s->c.q_scale_type == 1 && s->c.alternate_scan == 0 &&
1520  s->c.progressive_frame == 0
1521  /* vbv_delay == 0xBBB || 0xE10 */;
1522 
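 /* D-10 (Sony IMX) streams pad every coded picture to a fixed size, so
  * trailing bits after the last slice are expected there and must not
  * trigger the "end mismatch" error below. */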
1523  if (left >= 32 && !is_d10) {
1524  GetBitContext gb = s->gb;
1525  align_get_bits(&gb);
1526  if (show_bits(&gb, 24) == 0x060E2B) {
1527  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1528  is_d10 = 1;
1529  }
1530  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1531  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1532  goto eos;
1533  }
1534  }
1535 
1536  if (left < 0 ||
1537  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1538  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1539  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1540  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->c.mb_x, s->c.mb_y);
1541  return AVERROR_INVALIDDATA;
1542  } else
1543  goto eos;
1544  }
1545  // There are some files out there which are missing the last slice
1546  // when that slice lies completely outside the visible area; we
1547  // detect this here instead of running into the end and expecting
1548  // more data.
1549  left = get_bits_left(&s->gb);
1550  if (s->c.mb_y >= ((s->c.height + 15) >> 4) &&
1551  !s->c.progressive_sequence &&
1552  left <= 25 &&
1553  left >= 0 &&
1554  mb_skip_run == -1 &&
1555  (!left || show_bits(&s->gb, left) == 0))
1556  goto eos;
1557 
1558  ff_init_block_index(&s->c);
1559  }
1560 
1561  /* skip mb handling */
1562  if (mb_skip_run == -1) {
1563  /* read increment again */
1564  mb_skip_run = 0;
1565  for (;;) {
1566  int code = get_vlc2(&s->gb, ff_mbincr_vlc,
1567  MBINCR_VLC_BITS, 2);
1568  if (code < 0) {
1569  av_log(s->c.avctx, AV_LOG_ERROR, "mb incr damaged\n");
1570  return AVERROR_INVALIDDATA;
1571  }
1572  if (code >= 33) {
1573  if (code == 33) {
1574  mb_skip_run += 33;
1575  } else if (code == 35) {
1576  if (mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1577  av_log(s->c.avctx, AV_LOG_ERROR, "slice mismatch\n");
1578  return AVERROR_INVALIDDATA;
1579  }
1580  goto eos; /* end of slice */
1581  }
1582  /* otherwise, stuffing, nothing to do */
1583  } else {
1584  mb_skip_run += code;
1585  break;
1586  }
1587  }
1588  if (mb_skip_run) {
1589  int i;
1590  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
1591  av_log(s->c.avctx, AV_LOG_ERROR,
1592  "skipped MB in I-frame at %d %d\n", s->c.mb_x, s->c.mb_y);
1593  return AVERROR_INVALIDDATA;
1594  }
1595 
1596  /* skip mb */
1597  s->c.mb_intra = 0;
1598  for (i = 0; i < 12; i++)
1599  s->c.block_last_index[i] = -1;
1600  s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 128 << s->c.intra_dc_precision;
1601  if (s->c.picture_structure == PICT_FRAME)
1602  s->c.mv_type = MV_TYPE_16X16;
1603  else
1604  s->c.mv_type = MV_TYPE_FIELD;
1605  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
1606  /* if P type, zero motion vector is implied */
1607  s->c.mv_dir = MV_DIR_FORWARD;
1608  s->c.mv[0][0][0] = s->c.mv[0][0][1] = 0;
1609  s->c.last_mv[0][0][0] = s->c.last_mv[0][0][1] = 0;
1610  s->c.last_mv[0][1][0] = s->c.last_mv[0][1][1] = 0;
1611  s->c.field_select[0][0] = (s->c.picture_structure - 1) & 1;
1612  } else {
1613  /* if B type, reuse previous vectors and directions */
1614  s->c.mv[0][0][0] = s->c.last_mv[0][0][0];
1615  s->c.mv[0][0][1] = s->c.last_mv[0][0][1];
1616  s->c.mv[1][0][0] = s->c.last_mv[1][0][0];
1617  s->c.mv[1][0][1] = s->c.last_mv[1][0][1];
1618  s->c.field_select[0][0] = (s->c.picture_structure - 1) & 1;
1619  s->c.field_select[1][0] = (s->c.picture_structure - 1) & 1;
1620  }
1621  }
1622  }
1623  }
1624 eos: // end of slice
1625  if (get_bits_left(&s->gb) < 0) {
1626  av_log(s->c.avctx, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1627  return AVERROR_INVALIDDATA;
1628  }
1629  *buf += (get_bits_count(&s->gb) - 1) / 8;
1630  ff_dlog(s->c.avctx, "Slice start:%d %d end:%d %d\n", s->c.resync_mb_x, s->c.resync_mb_y, s->c.mb_x, s->c.mb_y);
1631  return 0;
1632 }
1633 
1634 static int slice_decode_thread(AVCodecContext *c, void *arg)
1635 {
1636  Mpeg12SliceContext *const s = *(void **) arg;
1637  const uint8_t *buf = s->gb.buffer;
1638  const uint8_t *end = buf + get_bits_bytesize(&s->gb, 0);
1639  int mb_y = s->c.start_mb_y;
1640  const int field_pic = s->c.picture_structure != PICT_FRAME;
1641 
1642  s->c.er.error_count = (3 * (s->c.end_mb_y - s->c.start_mb_y) * s->c.mb_width) >> field_pic;
1643 
1644  for (;;) {
1645  uint32_t start_code;
1646  int ret;
1647 
1648  ret = mpeg_decode_slice(s, mb_y, &buf, end - buf);
1649  emms_c();
1650  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1651  ret, s->c.resync_mb_x, s->c.resync_mb_y, s->c.mb_x, s->c.mb_y,
1652  s->c.start_mb_y, s->c.end_mb_y, s->c.er.error_count);
1653  if (ret < 0) {
1654  if (c->err_recognition & AV_EF_EXPLODE)
1655  return ret;
1656  if (s->c.resync_mb_x >= 0 && s->c.resync_mb_y >= 0)
1657  ff_er_add_slice(&s->c.er, s->c.resync_mb_x, s->c.resync_mb_y,
1658  s->c.mb_x, s->c.mb_y,
1659  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1660  } else {
1661  ff_er_add_slice(&s->c.er, s->c.resync_mb_x, s->c.resync_mb_y,
1662  s->c.mb_x - 1, s->c.mb_y,
1663  ER_AC_END | ER_DC_END | ER_MV_END);
1664  }
1665 
1666  if (s->c.mb_y == s->c.end_mb_y)
1667  return 0;
1668 
1669  start_code = -1;
1670  buf = avpriv_find_start_code(buf, end, &start_code);
1671  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
1672  return AVERROR_INVALIDDATA;
1673  mb_y = start_code - SLICE_MIN_START_CODE;
1674  if (s->c.codec_id != AV_CODEC_ID_MPEG1VIDEO && s->c.mb_height > 2800/16)
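 // slice_vertical_position_extension: 3 extra bits extend the slice row
 // for pictures taller than 2800 lines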
1675  mb_y += (*buf&0xE0)<<2;
1676  mb_y <<= field_pic;
1677  if (s->c.picture_structure == PICT_BOTTOM_FIELD)
1678  mb_y++;
1679  if (mb_y >= s->c.end_mb_y)
1680  return AVERROR_INVALIDDATA;
1681  }
1682 }
1683 
1684 /**
1685  * Handle slice ends.
1686  * @return 1 if it seems to be the last slice
1687  */
1688 static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
1689 {
1690  Mpeg1Context *s1 = avctx->priv_data;
1691  MPVContext *const s = &s1->slice.c;
1692 
1693  if (!s->context_initialized || !s->cur_pic.ptr)
1694  return 0;
1695 
1696  if (s->avctx->hwaccel) {
1697  int ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame);
1698  if (ret < 0) {
1699  av_log(avctx, AV_LOG_ERROR,
1700  "hardware accelerator failed to decode picture\n");
1701  return ret;
1702  }
1703  }
1704 
1705  /* end of slice reached */
1706  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
1707  /* end of image */
1708 
1709  ff_er_frame_end(&s->er, NULL);
1710 
1711  ff_mpv_frame_end(s);
1712 
1713  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
1714  int ret = av_frame_ref(pict, s->cur_pic.ptr->f);
1715  if (ret < 0)
1716  return ret;
1717  ff_print_debug_info(s, s->cur_pic.ptr, pict);
1718  ff_mpv_export_qp_table(s, pict, s->cur_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1719  *got_output = 1;
1720  } else {
1721  /* latency of 1 frame for I- and P-frames */
1722  if (s->last_pic.ptr && !s->last_pic.ptr->dummy) {
1723  int ret = av_frame_ref(pict, s->last_pic.ptr->f);
1724  if (ret < 0)
1725  return ret;
1726  ff_print_debug_info(s, s->last_pic.ptr, pict);
1727  ff_mpv_export_qp_table(s, pict, s->last_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG2);
1728  *got_output = 1;
1729  }
1730  }
1731 
1732  return 1;
1733  } else {
1734  return 0;
1735  }
1736 }
1737 
1738 static int mpeg1_decode_sequence(AVCodecContext *avctx,
1739  const uint8_t *buf, int buf_size)
1740 {
1741  Mpeg1Context *s1 = avctx->priv_data;
1742  MPVContext *const s = &s1->slice.c;
1743  GetBitContext gb0, *const gb = &gb0;
1744  int width, height;
1745  int i, v, j;
1746 
1747  int ret = init_get_bits8(gb, buf, buf_size);
1748  if (ret < 0)
1749  return ret;
1750 
1751  width = get_bits(gb, 12);
1752  height = get_bits(gb, 12);
1753  if (width == 0 || height == 0) {
1754  av_log(avctx, AV_LOG_WARNING,
1755  "Invalid horizontal or vertical size value.\n");
1756  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1757  return AVERROR_INVALIDDATA;
1758  }
1759  s1->aspect_ratio_info = get_bits(gb, 4);
1760  if (s1->aspect_ratio_info == 0) {
1761  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
1762  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
1763  return AVERROR_INVALIDDATA;
1764  }
1765  s1->frame_rate_index = get_bits(gb, 4);
1766  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
1767  av_log(avctx, AV_LOG_WARNING,
1768  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
1769  s1->frame_rate_index = 1;
1770  }
1771  s1->bit_rate = get_bits(gb, 18) * 400;
1772  if (check_marker(s->avctx, gb, "in sequence header") == 0) {
1773  return AVERROR_INVALIDDATA;
1774  }
1775 
1776  s->avctx->rc_buffer_size = get_bits(gb, 10) * 1024 * 16;
1777  skip_bits(gb, 1);
1778 
1779  /* get matrix */
1780  if (get_bits1(gb)) {
1781  load_matrix(s, gb, s->chroma_intra_matrix, s->intra_matrix, 1);
1782  } else {
1783  for (i = 0; i < 64; i++) {
1784  j = s->idsp.idct_permutation[i];
1785  v = ff_mpeg1_default_intra_matrix[i];
1786  s->intra_matrix[j] = v;
1787  s->chroma_intra_matrix[j] = v;
1788  }
1789  }
1790  if (get_bits1(gb)) {
1791  load_matrix(s, gb, s->chroma_inter_matrix, s->inter_matrix, 0);
1792  } else {
1793  for (i = 0; i < 64; i++) {
1794  int j = s->idsp.idct_permutation[i];
1795  v = ff_mpeg1_default_non_intra_matrix[i];
1796  s->inter_matrix[j] = v;
1797  s->chroma_inter_matrix[j] = v;
1798  }
1799  }
1800 
1801  if (show_bits(gb, 23) != 0) {
1802  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
1803  return AVERROR_INVALIDDATA;
1804  }
1805 
1806  s->width = width;
1807  s->height = height;
1808 
1809  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
1810  s->progressive_sequence = 1;
1811  s->progressive_frame = 1;
1812  s->picture_structure = PICT_FRAME;
1813  s->first_field = 0;
1814  s->frame_pred_frame_dct = 1;
1815  s->chroma_format = CHROMA_420;
1816  s->codec_id =
1817  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1818  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1819  s->low_delay = 1;
1820 
1821  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1822  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
1823  s->avctx->rc_buffer_size, s1->bit_rate, s1->aspect_ratio_info);
1824 
1825  return 0;
1826 }
1827 
1828 static int vcr2_init_sequence(AVCodecContext *avctx)
1829 {
1830  Mpeg1Context *s1 = avctx->priv_data;
1831  MPVContext *const s = &s1->slice.c;
1832  int i, v, ret;
1833 
1834  /* start new MPEG-1 context decoding */
1835  if (s->context_initialized)
1836  ff_mpv_common_end(s);
1837 
1838  s->width = avctx->coded_width;
1839  s->height = avctx->coded_height;
1840  avctx->has_b_frames = 0; // true?
1841  s->low_delay = 1;
1842 
1843  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1844 
1845  if ((ret = ff_mpv_common_init(s)) < 0)
1846  return ret;
1847  if (!s->avctx->lowres)
1848  for (int i = 0; i < s->slice_context_count; i++)
1849  ff_mpv_framesize_disable(&s->thread_context[i]->sc);
1850 
1851  for (i = 0; i < 64; i++) {
1852  int j = s->idsp.idct_permutation[i];
1853  v = ff_mpeg1_default_intra_matrix[i];
1854  s->intra_matrix[j] = v;
1855  s->chroma_intra_matrix[j] = v;
1856 
1857  v = ff_mpeg1_default_non_intra_matrix[i];
1858  s->inter_matrix[j] = v;
1859  s->chroma_inter_matrix[j] = v;
1860  }
1861 
1862  s->progressive_sequence = 1;
1863  s->progressive_frame = 1;
1864  s->picture_structure = PICT_FRAME;
1865  s->first_field = 0;
1866  s->frame_pred_frame_dct = 1;
1867  s->chroma_format = CHROMA_420;
1868  if (s->codec_tag == AV_RL32("BW10")) {
1869  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
1870  } else {
1871  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1872  }
1873  s1->save_progressive_seq = s->progressive_sequence;
1874  s1->save_chroma_format = s->chroma_format;
1875  return 0;
1876 }
1877 
1878 static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format,
1879  const char *label)
1880 {
1881  Mpeg1Context *s1 = avctx->priv_data;
1882 
1884 
1885  if (!s1->cc_format) {
1886  s1->cc_format = format;
1887 
1888  av_log(avctx, AV_LOG_DEBUG, "CC: first seen substream is %s format\n", label);
1889  }
1890 
1891 #if FF_API_CODEC_PROPS
1892 FF_DISABLE_DEPRECATION_WARNINGS
1893  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
1894 FF_ENABLE_DEPRECATION_WARNINGS
1895 #endif
1896 }
1897 
1898 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
1899  const uint8_t *p, int buf_size)
1900 {
1901  Mpeg1Context *s1 = avctx->priv_data;
1902 
1903  if ((!s1->cc_format || s1->cc_format == CC_FORMAT_A53_PART4) &&
1904  buf_size >= 6 &&
1905  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
1906  p[4] == 3 && (p[5] & 0x40)) {
1907  /* extract A53 Part 4 CC data */
1908  int cc_count = p[5] & 0x1f;
1909  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
1910  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1911  const uint64_t new_size = (old_size + cc_count
1912  * UINT64_C(3));
1913  int ret;
1914 
1915  if (new_size > 3*A53_MAX_CC_COUNT)
1916  return AVERROR(EINVAL);
1917 
1918  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1919  if (ret >= 0)
1920  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
1921 
1922  mpeg_set_cc_format(avctx, CC_FORMAT_A53_PART4, "A/53 Part 4");
1923  }
1924  return 1;
1925  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_SCTE20) &&
1926  buf_size >= 2 &&
1927  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
1928  /* extract SCTE-20 CC data */
1929  GetBitContext gb;
1930  int cc_count = 0;
1931  int i, ret;
1932 
1933  ret = init_get_bits8(&gb, p + 2, buf_size - 2);
1934  if (ret < 0)
1935  return ret;
1936  cc_count = get_bits(&gb, 5);
1937  if (cc_count > 0) {
1938  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
1939  const uint64_t new_size = (old_size + cc_count
1940  * UINT64_C(3));
1941  if (new_size > 3*A53_MAX_CC_COUNT)
1942  return AVERROR(EINVAL);
1943 
1944  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
1945  if (ret >= 0) {
1946  uint8_t field, cc1, cc2;
1947  uint8_t *cap = s1->a53_buf_ref->data + old_size;
1948 
1949  memset(cap, 0, cc_count * 3);
1950  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
1951  skip_bits(&gb, 2); // priority
1952  field = get_bits(&gb, 2);
1953  skip_bits(&gb, 5); // line_offset
1954  cc1 = get_bits(&gb, 8);
1955  cc2 = get_bits(&gb, 8);
1956  skip_bits(&gb, 1); // marker
1957 
1958  if (!field) { // forbidden
1959  cap[0] = cap[1] = cap[2] = 0x00;
1960  } else {
1961  field = (field == 2 ? 1 : 0);
1962  if (!s1->slice.c.top_field_first) field = !field;
1963  cap[0] = 0x04 | field;
1964  cap[1] = ff_reverse[cc1];
1965  cap[2] = ff_reverse[cc2];
1966  }
1967  cap += 3;
1968  }
1969  }
1970 
1971  mpeg_set_cc_format(avctx, CC_FORMAT_SCTE20, "SCTE-20");
1972  }
1973  return 1;
1974  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_DVD) &&
1975  buf_size >= 11 &&
1976  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
1977  /* extract DVD CC data
1978  *
1979  * uint32_t user_data_start_code 0x000001B2 (big endian)
1980  * uint16_t user_identifier 0x4343 "CC"
1981  * uint8_t user_data_type_code 0x01
1982  * uint8_t caption_block_size 0xF8
1983  * uint8_t
1984  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
1985  * bit 6 caption_filler 0
1986  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
1987  * bit 0 caption_extra_field_added 1=one additional caption word
1988  *
1989  * struct caption_field_block {
1990  * uint8_t
1991  * bit 7:1 caption_filler 0x7F (all 1s)
1992  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
1993  * uint8_t caption_first_byte
1994  * uint8_t caption_second_byte
1995  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
1996  *
1997  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
1998  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
1999  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2000  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2001  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
2002  int cc_count = 0;
2003  int i, ret;
2004  // There is a caption count field in the data, but it is often
2005  // incorrect. So count the number of captions present.
2006  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2007  cc_count++;
2008  // Transform the DVD format into A53 Part 4 format
2009  if (cc_count > 0) {
2010  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2011  const uint64_t new_size = (old_size + cc_count
2012  * UINT64_C(6));
2013  if (new_size > 3*A53_MAX_CC_COUNT)
2014  return AVERROR(EINVAL);
2015 
2016  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2017  if (ret >= 0) {
2018  uint8_t field1 = !!(p[4] & 0x80);
2019  uint8_t *cap = s1->a53_buf_ref->data + old_size;
2020  p += 5;
2021  for (i = 0; i < cc_count; i++) {
2022  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2023  cap[1] = p[1];
2024  cap[2] = p[2];
2025  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2026  cap[4] = p[4];
2027  cap[5] = p[5];
2028  cap += 6;
2029  p += 6;
2030  }
2031  }
2032 
2033  mpeg_set_cc_format(avctx, CC_FORMAT_DVD, "DVD");
2034  }
2035  return 1;
2036  } else if ((!s1->cc_format || s1->cc_format == CC_FORMAT_DISH) &&
2037  buf_size >= 12 &&
2038  p[0] == 0x05 && p[1] == 0x02) {
2039  /* extract Dish Network CC data */
2040  const uint8_t cc_header = 0xf8 | 0x04 /* valid */ | 0x00 /* line 21 field 1 */;
2041  uint8_t cc_data[4] = {0};
2042  int cc_count = 0;
2043  uint8_t cc_type = p[7];
2044  p += 8;
2045  buf_size -= 8;
2046 
2047  if (cc_type == 0x05 && buf_size >= 7) {
2048  cc_type = p[6];
2049  p += 7;
2050  buf_size -= 7;
2051  }
2052 
2053  if (cc_type == 0x02 && buf_size >= 4) { /* 2-byte caption, can be repeated */
2054  cc_count = 1;
2055  cc_data[0] = p[1];
2056  cc_data[1] = p[2];
2057  cc_type = p[3];
2058 
2059  /* Only repeat characters when the next type flag
2060  * is 0x04 and the characters are repeatable (i.e., less than
2061  * 32 with the parity stripped).
2062  */
2063  if (cc_type == 0x04 && (cc_data[0] & 0x7f) < 32) {
2064  cc_count = 2;
2065  cc_data[2] = cc_data[0];
2066  cc_data[3] = cc_data[1];
2067  }
2068  } else if (cc_type == 0x04 && buf_size >= 5) { /* 4-byte caption, not repeated */
2069  cc_count = 2;
2070  cc_data[0] = p[1];
2071  cc_data[1] = p[2];
2072  cc_data[2] = p[3];
2073  cc_data[3] = p[4];
2074  }
2075 
2076  if (cc_count > 0) {
2077  int ret;
2078  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2079  const uint64_t new_size = (old_size + cc_count * UINT64_C(3));
2080  if (new_size > 3 * A53_MAX_CC_COUNT)
2081  return AVERROR(EINVAL);
2082 
2083  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2084  if (ret >= 0) {
2085  uint8_t *cap = s1->a53_buf_ref->data + old_size;
2086  cap[0] = cc_header;
2087  cap[1] = cc_data[0];
2088  cap[2] = cc_data[1];
2089  if (cc_count == 2) {
2090  cap[3] = cc_header;
2091  cap[4] = cc_data[2];
2092  cap[5] = cc_data[3];
2093  }
2094  }
2095 
2096  mpeg_set_cc_format(avctx, CC_FORMAT_DISH, "Dish Network");
2097  }
2098  return 1;
2099  }
2100  return 0;
2101 }
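
The caption triplets collected above are attached to decoded frames as AV_FRAME_DATA_A53_CC side data elsewhere in this file. A minimal consumer sketch, assuming only the public AVFrame side-data API (the helper name is illustrative):

    #include <stdio.h>
    #include <libavutil/frame.h>

    /* Print the CEA-608/708 caption triplets carried on a decoded frame. */
    static void dump_a53_captions(const AVFrame *frame)
    {
        const AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
        if (!sd)
            return; /* no captions on this frame */
        /* sd->data holds cc_count * 3 bytes: one cc_valid/cc_type header byte
         * followed by the two caption payload bytes of each triplet. */
        for (size_t i = 0; i + 3 <= sd->size; i += 3)
            printf("cc header 0x%02x data 0x%02x 0x%02x\n",
                   sd->data[i], sd->data[i + 1], sd->data[i + 2]);
    }
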
2102 
2103 static void mpeg_decode_user_data(AVCodecContext *avctx,
2104  const uint8_t *p, int buf_size)
2105 {
2106  const uint8_t *buf_end = p + buf_size;
2107  Mpeg1Context *s1 = avctx->priv_data;
2108 
2109 #if 0
2110  int i;
2111  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2112  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2113  }
2114  av_log(avctx, AV_LOG_ERROR, "\n");
2115 #endif
2116 
2117  if (buf_size > 29){
2118  int i;
2119  for(i=0; i<20; i++)
2120  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2121  s1->tmpgexs = 1;
2122  }
2123  }
2124  /* we parse the DTG active format information */
2125  if (buf_end - p >= 5 &&
2126  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2127  int flags = p[4];
2128  p += 5;
2129  if (flags & 0x80) {
2130  /* skip event id */
2131  p += 2;
2132  }
2133  if (flags & 0x40) {
2134  if (buf_end - p < 1)
2135  return;
2136  s1->has_afd = 1;
2137  s1->afd = p[0] & 0x0f;
2138  }
2139  } else if (buf_end - p >= 6 &&
2140  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2141  p[4] == 0x03) { // S3D_video_format_length
2142  // the 0x7F mask ignores the reserved_bit value
2143  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2144 
2145  if (S3D_video_format_type == 0x03 ||
2146  S3D_video_format_type == 0x04 ||
2147  S3D_video_format_type == 0x08 ||
2148  S3D_video_format_type == 0x23) {
2149 
2150  s1->has_stereo3d = 1;
2151 
2152  switch (S3D_video_format_type) {
2153  case 0x03:
2154  s1->stereo3d_type = AV_STEREO3D_SIDEBYSIDE;
2155  break;
2156  case 0x04:
2157  s1->stereo3d_type = AV_STEREO3D_TOPBOTTOM;
2158  break;
2159  case 0x08:
2160  s1->stereo3d_type = AV_STEREO3D_2D;
2161  break;
2162  case 0x23:
2163  s1->stereo3d_type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2164  break;
2165  }
2166  }
2167  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2168  return;
2169  }
2170 }
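
The DTG1 active-format byte parsed here is exported on decoded frames as AV_FRAME_DATA_AFD side data elsewhere in this file (the JP3D stereo type feeds the frame's Stereo3D side data the same way). A minimal AFD reader sketch using the public API, with an illustrative helper name:

    #include <libavutil/frame.h>

    /* Return the 4-bit Active Format Description code of a frame, or -1 if absent. */
    static int frame_afd_code(const AVFrame *frame)
    {
        const AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AFD);
        return sd && sd->size >= 1 ? sd->data[0] & 0x0f : -1;
    }
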
2171 
2172 static int mpeg_decode_gop(AVCodecContext *avctx,
2173  const uint8_t *buf, int buf_size)
2174 {
2175  Mpeg1Context *s1 = avctx->priv_data;
2176  MPVContext *const s = &s1->slice.c;
2177  GetBitContext gb0, *const gb = &gb0;
2178  int broken_link;
2179  int64_t tc;
2180 
2181  int ret = init_get_bits8(gb, buf, buf_size);
2182  if (ret < 0)
2183  return ret;
2184 
2185  tc = s1->timecode_frame_start = get_bits(gb, 25);
2186 
2187  s1->closed_gop = get_bits1(gb);
2188  /* broken_link indicates that, after editing, the reference
2189  * frames of the first B-frames following the GOP I-frame
2190  * are missing (open GOP) */
2191  broken_link = get_bits1(gb);
2192 
2193  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2194  char tcbuf[AV_TIMECODE_STR_SIZE];
2195  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2196  av_log(s->avctx, AV_LOG_DEBUG,
2197  "GOP (%s) closed_gop=%d broken_link=%d\n",
2198  tcbuf, s1->closed_gop, broken_link);
2199  }
2200 
2201  return 0;
2202 }
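
The 25 bits read into tc above follow the MPEG GOP header time_code layout: drop_frame_flag (1), hours (5), minutes (6), a marker bit (1), seconds (6) and pictures (6), most significant bit first. A hedged sketch of unpacking the fields by hand; av_timecode_make_mpeg_tc_string(), used above, formats the same fields:

    #include <stdint.h>
    #include <stdio.h>

    static void print_gop_timecode(uint32_t tc25bit)
    {
        int drop    = tc25bit >> 24 & 1;    /* drop_frame_flag */
        int hours   = tc25bit >> 19 & 0x1f;
        int minutes = tc25bit >> 13 & 0x3f;
        /* bit 12 is a marker bit and carries no timecode information */
        int seconds = tc25bit >>  6 & 0x3f;
        int frames  = tc25bit       & 0x3f;
        printf("%02d:%02d:%02d%c%02d\n",
               hours, minutes, seconds, drop ? ';' : ':', frames);
    }
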
2203 
2204 static void mpeg12_execute_slice_threads(AVCodecContext *avctx,
2205  Mpeg1Context *const s)
2206 {
2207  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2208  !avctx->hwaccel) {
2209  MPVContext *const s2 = &s->slice.c;
2210  int error_count = 0;
2211 
2212  avctx->execute(avctx, slice_decode_thread,
2213  s2->mpeg12_contexts, NULL,
2214  s->slice_count, sizeof(s2->mpeg12_contexts[0]));
2215 
2216  for (int i = 0; i < s->slice_count; i++) {
2217  MpegEncContext *const slice = s2->thread_context[i];
2218  int slice_err = atomic_load_explicit(&slice->er.error_count,
2219  memory_order_relaxed);
2220  // error_count can get set to INT_MAX on serious errors.
2221  // So use saturated addition.
2222  if ((unsigned)slice_err > INT_MAX - error_count) {
2223  error_count = INT_MAX;
2224  break;
2225  }
2226  error_count += slice_err;
2227  }
2228  atomic_store_explicit(&s2->er.error_count, error_count,
2229  memory_order_relaxed);
2230  }
2231 }
2232 
2233 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2234  int *got_output, const uint8_t *buf, int buf_size)
2235 {
2236  Mpeg1Context *s = avctx->priv_data;
2237  MPVContext *const s2 = &s->slice.c;
2238  const uint8_t *buf_ptr = buf;
2239  const uint8_t *buf_end = buf + buf_size;
2240  int ret, input_size;
2241  int last_code = 0, skip_frame = 0;
2242  int picture_start_code_seen = 0;
2243 
2244  for (;;) {
2245  /* find next start code */
2246  uint32_t start_code = -1;
2247  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2248  if (start_code > 0x1ff) {
2249  if (!skip_frame) {
2250  mpeg12_execute_slice_threads(avctx, s);
2251 
2252  ret = slice_end(avctx, picture, got_output);
2253  if (ret < 0)
2254  return ret;
2255  }
2256  s2->pict_type = 0;
2257 
2258  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2259  return AVERROR_INVALIDDATA;
2260 
2261  return FFMAX(0, buf_ptr - buf);
2262  }
2263 
2264  input_size = buf_end - buf_ptr;
2265 
2266  if (avctx->debug & FF_DEBUG_STARTCODE)
2267  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2268  start_code, buf_ptr - buf, input_size);
2269 
2270  /* prepare data for next start code */
2271  switch (start_code) {
2272  case SEQ_START_CODE:
2273  if (last_code == 0) {
2274  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2275  if (buf != avctx->extradata)
2276  s->sync = 1;
2277  } else {
2278  av_log(avctx, AV_LOG_ERROR,
2279  "ignoring SEQ_START_CODE after %X\n", last_code);
2280  if (avctx->err_recognition & AV_EF_EXPLODE)
2281  return AVERROR_INVALIDDATA;
2282  }
2283  break;
2284 
2285  case PICTURE_START_CODE:
2286  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2287  /* If it's a frame picture, there can't be more than one picture header.
2288  Yet, it does happen and we need to handle it. */
2289  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2290  break;
2291  }
2292  picture_start_code_seen = 1;
2293 
2294  if (buf == avctx->extradata && avctx->codec_tag == AV_RL32("AVmp")) {
2295  av_log(avctx, AV_LOG_WARNING, "ignoring picture start code in AVmp extradata\n");
2296  break;
2297  }
2298 
2299  if (s2->width <= 0 || s2->height <= 0) {
2300  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2301  s2->width, s2->height);
2302  return AVERROR_INVALIDDATA;
2303  }
2304 
2305  if (s->tmpgexs){
2306  s2->intra_dc_precision= 3;
2307  s2->intra_matrix[0]= 1;
2308  }
2309  if (s->slice_count) {
2310  mpeg12_execute_slice_threads(avctx, s);
2311  s->slice_count = 0;
2312  }
2313  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2314  ret = mpeg_decode_postinit(avctx);
2315  if (ret < 0) {
2316  av_log(avctx, AV_LOG_ERROR,
2317  "mpeg_decode_postinit() failure\n");
2318  return ret;
2319  }
2320 
2321  /* We have a complete image: we try to decompress it. */
2322  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2323  s2->pict_type = 0;
2324  s->first_slice = 1;
2325  last_code = PICTURE_START_CODE;
2326  } else {
2327  av_log(avctx, AV_LOG_ERROR,
2328  "ignoring pic after %X\n", last_code);
2329  if (avctx->err_recognition & AV_EF_EXPLODE)
2330  return AVERROR_INVALIDDATA;
2331  }
2332  break;
2333  case EXT_START_CODE: {
2334  GetBitContext gb0, *const gb = &gb0;
2335 
2336  ret = init_get_bits8(gb, buf_ptr, input_size);
2337  if (ret < 0)
2338  return ret;
2339 
2340  switch (get_bits(gb, 4)) {
2341  case 0x1:
2342  if (last_code == 0) {
2344  } else {
2345  av_log(avctx, AV_LOG_ERROR,
2346  "ignoring seq ext after %X\n", last_code);
2347  if (avctx->err_recognition & AV_EF_EXPLODE)
2348  return AVERROR_INVALIDDATA;
2349  }
2350  break;
2351  case 0x2:
2352  mpeg_decode_sequence_display_extension(s, gb);
2353  break;
2354  case 0x3:
2355  mpeg_decode_quant_matrix_extension(s2, gb);
2356  break;
2357  case 0x7:
2359  break;
2360  case 0x8:
2361  if (last_code == PICTURE_START_CODE) {
2363  if (ret < 0)
2364  return ret;
2365  } else {
2366  av_log(avctx, AV_LOG_ERROR,
2367  "ignoring pic cod ext after %X\n", last_code);
2368  if (avctx->err_recognition & AV_EF_EXPLODE)
2369  return AVERROR_INVALIDDATA;
2370  }
2371  break;
2372  }
2373  break;
2374  }
2375  case USER_START_CODE:
2376  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2377  break;
2378  case GOP_START_CODE:
2379  if (last_code == 0) {
2380  s2->first_field = 0;
2381  ret = mpeg_decode_gop(avctx, buf_ptr, input_size);
2382  if (ret < 0)
2383  return ret;
2384  s->sync = 1;
2385  } else {
2386  av_log(avctx, AV_LOG_ERROR,
2387  "ignoring GOP_START_CODE after %X\n", last_code);
2388  if (avctx->err_recognition & AV_EF_EXPLODE)
2389  return AVERROR_INVALIDDATA;
2390  }
2391  break;
2392  default:
2393  if (start_code >= SLICE_MIN_START_CODE &&
2394  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2395  if (s2->progressive_sequence && !s2->progressive_frame) {
2396  s2->progressive_frame = 1;
2397  av_log(s2->avctx, AV_LOG_ERROR,
2398  "interlaced frame in progressive sequence, ignoring\n");
2399  }
2400 
2401  if (s2->picture_structure == 0 ||
2402  (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2403  av_log(s2->avctx, AV_LOG_ERROR,
2404  "picture_structure %d invalid, ignoring\n",
2405  s2->picture_structure);
2406  s2->picture_structure = PICT_FRAME;
2407  }
2408 
2409  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2410  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2411 
2412  if (s2->picture_structure == PICT_FRAME) {
2413  s2->first_field = 0;
2414  s2->v_edge_pos = 16 * s2->mb_height;
2415  } else {
2416  s2->first_field ^= 1;
2417  s2->v_edge_pos = 8 * s2->mb_height;
2418  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2419  }
2420  }
2421  if (start_code >= SLICE_MIN_START_CODE &&
2422  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2423  const int field_pic = s2->picture_structure != PICT_FRAME;
2424  int mb_y = start_code - SLICE_MIN_START_CODE;
2425  last_code = SLICE_MIN_START_CODE;
2426  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2427  mb_y += (*buf_ptr&0xE0)<<2;
2428 
2429  mb_y <<= field_pic;
2430  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2431  mb_y++;
2432 
2433  if (buf_end - buf_ptr < 2) {
2434  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2435  return AVERROR_INVALIDDATA;
2436  }
2437 
2438  if (mb_y >= s2->mb_height) {
2439  av_log(s2->avctx, AV_LOG_ERROR,
2440  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2441  return AVERROR_INVALIDDATA;
2442  }
2443 
2444  if (!s2->last_pic.ptr) {
2445  /* Skip B-frames if we do not have reference frames and
2446  * GOP is not closed. */
2447  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2448  if (!s->closed_gop) {
2449  skip_frame = 1;
2450  av_log(s2->avctx, AV_LOG_DEBUG,
2451  "Skipping B slice due to open GOP\n");
2452  break;
2453  }
2454  }
2455  }
2456  if (s2->pict_type == AV_PICTURE_TYPE_I || (avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2457  s->sync = 1;
2458  if (!s2->next_pic.ptr) {
2459  /* Skip P-frames if we do not have a reference frame or
2460  * we have an invalid header. */
2461  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2462  skip_frame = 1;
2463  av_log(s2->avctx, AV_LOG_DEBUG,
2464  "Skipping P slice due to !sync\n");
2465  break;
2466  }
2467  }
2468  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2469  s2->pict_type == AV_PICTURE_TYPE_B) ||
2470  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2471  s2->pict_type != AV_PICTURE_TYPE_I) ||
2472  avctx->skip_frame >= AVDISCARD_ALL) {
2473  skip_frame = 1;
2474  break;
2475  }
2476 
2477  if (!s2->context_initialized)
2478  break;
2479 
2480  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2481  if (mb_y < avctx->skip_top ||
2482  mb_y >= s2->mb_height - avctx->skip_bottom)
2483  break;
2484  }
2485 
2486  if (!s2->pict_type) {
2487  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2488  if (avctx->err_recognition & AV_EF_EXPLODE)
2489  return AVERROR_INVALIDDATA;
2490  break;
2491  }
2492 
2493  if (s->first_slice) {
2494  skip_frame = 0;
2495  s->first_slice = 0;
2496  if ((ret = mpeg_field_start(s, buf, buf_size)) < 0)
2497  return ret;
2498  }
2499  if (!s2->cur_pic.ptr) {
2500  av_log(avctx, AV_LOG_ERROR,
2501  "current_picture not initialized\n");
2502  return AVERROR_INVALIDDATA;
2503  }
2504 
2505  if (HAVE_THREADS &&
2506  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2507  !avctx->hwaccel) {
2508  int threshold = (s2->mb_height * s->slice_count +
2509  s2->slice_context_count / 2) /
2510  s2->slice_context_count;
2511  if (threshold <= mb_y) {
2512  Mpeg12SliceContext *const thread_context = s2->mpeg12_contexts[s->slice_count];
2513 
2514  thread_context->c.start_mb_y = mb_y;
2515  thread_context->c.end_mb_y = s2->mb_height;
2516  if (s->slice_count) {
2517  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2518  ret = ff_update_duplicate_context(&thread_context->c, s2);
2519  if (ret < 0)
2520  return ret;
2521  }
2522  ret = init_get_bits8(&thread_context->gb, buf_ptr, input_size);
2523  if (ret < 0)
2524  return ret;
2525  s->slice_count++;
2526  }
2527  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2528  } else {
2529  ret = mpeg_decode_slice(&s->slice, mb_y, &buf_ptr, input_size);
2530  emms_c();
2531 
2532  if (ret < 0) {
2533  if (avctx->err_recognition & AV_EF_EXPLODE)
2534  return ret;
2535  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2536  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2537  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2538  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2539  } else {
2540  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2541  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2542  ER_AC_END | ER_DC_END | ER_MV_END);
2543  }
2544  }
2545  }
2546  break;
2547  }
2548  }
2549 }
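
For reference, the start codes dispatched above are the standard MPEG-1/2 ones: 0x100 is the picture start code, 0x101-0x1AF are slice start codes, 0x1B2 user data, 0x1B3 sequence header, 0x1B5 extension, 0x1B7 sequence end and 0x1B8 GOP; a value above 0x1FF returned by the scanner means no further start code was found in the remaining buffer.
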
2550 
2551 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2552  int *got_output, AVPacket *avpkt)
2553 {
2554  const uint8_t *buf = avpkt->data;
2555  int ret;
2556  int buf_size = avpkt->size;
2557  Mpeg1Context *s = avctx->priv_data;
2558  MPVContext *const s2 = &s->slice.c;
2559 
2560  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2561  /* special case for last picture */
2562  if (s2->low_delay == 0 && s2->next_pic.ptr) {
2563  int ret = av_frame_ref(picture, s2->next_pic.ptr->f);
2564  if (ret < 0)
2565  return ret;
2566 
2567  ff_mpv_unref_picture(&s2->next_pic);
2568 
2569  *got_output = 1;
2570  }
2571  return buf_size;
2572  }
2573 
2574  if (!s2->context_initialized &&
2575  (s2->codec_tag == AV_RL32("VCR2") || s2->codec_tag == AV_RL32("BW10")))
2576  vcr2_init_sequence(avctx);
2577 
2578  s->slice_count = 0;
2579 
2580  if (avctx->extradata && !s->extradata_decoded) {
2581  ret = decode_chunks(avctx, picture, got_output,
2582  avctx->extradata, avctx->extradata_size);
2583  if (*got_output) {
2584  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2585  av_frame_unref(picture);
2586  *got_output = 0;
2587  }
2588  s->extradata_decoded = 1;
2589  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2590  ff_mpv_unref_picture(&s2->cur_pic);
2591  return ret;
2592  }
2593  }
2594 
2595  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2596  if (ret<0 || *got_output) {
2597  ff_mpv_unref_picture(&s2->cur_pic);
2598 
2599  if (s->timecode_frame_start != -1 && *got_output) {
2600  char tcbuf[AV_TIMECODE_STR_SIZE];
2601  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2602  AV_FRAME_DATA_GOP_TIMECODE,
2603  sizeof(int64_t));
2604  if (!tcside)
2605  return AVERROR(ENOMEM);
2606  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2607 
2608  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2609  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2610 
2611  s->timecode_frame_start = -1;
2612  }
2613  }
2614 
2615  return ret;
2616 }
2617 
2618 static av_cold void flush(AVCodecContext *avctx)
2619 {
2620  Mpeg1Context *s = avctx->priv_data;
2621 
2622  s->sync = 0;
2623  s->closed_gop = 0;
2624 
2625  av_buffer_unref(&s->a53_buf_ref);
2626  ff_mpeg_flush(avctx);
2627 }
2628 
2630 {
2631  Mpeg1Context *s = avctx->priv_data;
2632 
2633  av_buffer_unref(&s->a53_buf_ref);
2634  return ff_mpv_decode_close(avctx);
2635 }
2636 
2637 const FFCodec ff_mpeg1video_decoder = {
2638  .p.name = "mpeg1video",
2639  CODEC_LONG_NAME("MPEG-1 video"),
2640  .p.type = AVMEDIA_TYPE_VIDEO,
2641  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2642  .priv_data_size = sizeof(Mpeg1Context),
2646  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2648  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2649  .flush = flush,
2650  .p.max_lowres = 3,
2651  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2652 #if CONFIG_MPEG1_NVDEC_HWACCEL
2653  HWACCEL_NVDEC(mpeg1),
2654 #endif
2655 #if CONFIG_MPEG1_VDPAU_HWACCEL
2656  HWACCEL_VDPAU(mpeg1),
2657 #endif
2658 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2659  HWACCEL_VIDEOTOOLBOX(mpeg1),
2660 #endif
2661  NULL
2662  },
2663 };
2664 
2665 #define M2V_OFFSET(x) offsetof(Mpeg1Context, x)
2666 #define M2V_PARAM AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2667 
2668 static const AVOption mpeg2video_options[] = {
2669  { "cc_format", "extract a specific Closed Captions format",
2670  M2V_OFFSET(cc_format), AV_OPT_TYPE_INT, { .i64 = CC_FORMAT_AUTO },
2671  CC_FORMAT_AUTO, CC_FORMAT_DISH, M2V_PARAM, .unit = "cc_format" },
2672 
2673  { "auto", "pick first seen CC substream", 0, AV_OPT_TYPE_CONST,
2674  { .i64 = CC_FORMAT_AUTO }, .flags = M2V_PARAM, .unit = "cc_format" },
2675  { "a53", "pick A/53 Part 4 CC substream", 0, AV_OPT_TYPE_CONST,
2676  { .i64 = CC_FORMAT_A53_PART4 }, .flags = M2V_PARAM, .unit = "cc_format" },
2677  { "scte20", "pick SCTE-20 CC substream", 0, AV_OPT_TYPE_CONST,
2678  { .i64 = CC_FORMAT_SCTE20 }, .flags = M2V_PARAM, .unit = "cc_format" },
2679  { "dvd", "pick DVD CC substream", 0, AV_OPT_TYPE_CONST,
2680  { .i64 = CC_FORMAT_DVD }, .flags = M2V_PARAM, .unit = "cc_format" },
2681  { "dish", "pick Dish Network CC substream", 0, AV_OPT_TYPE_CONST,
2682  { .i64 = CC_FORMAT_DISH }, .flags = M2V_PARAM, .unit = "cc_format" },
2683  { NULL }
2684 };
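
Because cc_format is a decoder private option, an API user selects a Closed Captions substream when opening the MPEG-2 decoder. A minimal sketch using the standard options dictionary (allocation checks omitted):

    #include <libavcodec/avcodec.h>
    #include <libavutil/dict.h>

    static int open_mpeg2_with_cc(AVCodecContext **out)
    {
        const AVCodec *dec      = avcodec_find_decoder(AV_CODEC_ID_MPEG2VIDEO);
        AVCodecContext *dec_ctx = avcodec_alloc_context3(dec);
        AVDictionary *opts      = NULL;
        int ret;

        av_dict_set(&opts, "cc_format", "a53", 0);   /* or "scte20", "dvd", "dish" */
        ret = avcodec_open2(dec_ctx, dec, &opts);
        av_dict_free(&opts);
        *out = dec_ctx;
        return ret;
    }
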
2685 
2686 static const AVClass mpeg2video_class = {
2687  .class_name = "MPEG-2 video",
2688  .item_name = av_default_item_name,
2689  .option = mpeg2video_options,
2690  .version = LIBAVUTIL_VERSION_INT,
2691  .category = AV_CLASS_CATEGORY_DECODER,
2692 };
2693 
2694 const FFCodec ff_mpeg2video_decoder = {
2695  .p.name = "mpeg2video",
2696  CODEC_LONG_NAME("MPEG-2 video"),
2697  .p.type = AVMEDIA_TYPE_VIDEO,
2698  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2699  .p.priv_class = &mpeg2video_class,
2700  .priv_data_size = sizeof(Mpeg1Context),
2704  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2706  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2707  .flush = flush,
2708  .p.max_lowres = 3,
2710  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2711 #if CONFIG_MPEG2_DXVA2_HWACCEL
2712  HWACCEL_DXVA2(mpeg2),
2713 #endif
2714 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2715  HWACCEL_D3D11VA(mpeg2),
2716 #endif
2717 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2718  HWACCEL_D3D11VA2(mpeg2),
2719 #endif
2720 #if CONFIG_MPEG2_D3D12VA_HWACCEL
2721  HWACCEL_D3D12VA(mpeg2),
2722 #endif
2723 #if CONFIG_MPEG2_NVDEC_HWACCEL
2724  HWACCEL_NVDEC(mpeg2),
2725 #endif
2726 #if CONFIG_MPEG2_VAAPI_HWACCEL
2727  HWACCEL_VAAPI(mpeg2),
2728 #endif
2729 #if CONFIG_MPEG2_VDPAU_HWACCEL
2730  HWACCEL_VDPAU(mpeg2),
2731 #endif
2732 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2733  HWACCEL_VIDEOTOOLBOX(mpeg2),
2734 #endif
2735  NULL
2736  },
2737 };
2738 
2739 //legacy decoder
2740 const FFCodec ff_mpegvideo_decoder = {
2741  .p.name = "mpegvideo",
2742  CODEC_LONG_NAME("MPEG-1 video"),
2743  .p.type = AVMEDIA_TYPE_VIDEO,
2744  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2745  .priv_data_size = sizeof(Mpeg1Context),
2749  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2751  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2752  .flush = flush,
2753  .p.max_lowres = 3,
2754 };
2755 
2756 typedef struct IPUContext {
2757  Mpeg12SliceContext m;
2758 
2759  int flags;
2760 } IPUContext;
2761 
2762 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2763  int *got_frame, AVPacket *avpkt)
2764 {
2765  IPUContext *s = avctx->priv_data;
2766  MPVContext *const m = &s->m.c;
2767  GetBitContext *const gb = &s->m.gb;
2768  int16_t (*const block)[64] = s->m.block;
2769  int ret;
2770 
2771  // Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC)
2772  if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2LL + 3*4 + 2*2 + 2*6))
2773  return AVERROR_INVALIDDATA;
2774 
2775  ret = ff_get_buffer(avctx, frame, 0);
2776  if (ret < 0)
2777  return ret;
2778 
2779  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2780  if (ret < 0)
2781  return ret;
2782 
2783  s->flags = get_bits(gb, 8);
2784  m->intra_dc_precision = s->flags & 3;
2785  m->q_scale_type = !!(s->flags & 0x40);
2786  m->intra_vlc_format = !!(s->flags & 0x20);
2787  m->alternate_scan = !!(s->flags & 0x10);
2788 
2789  ff_permute_scantable(m->intra_scantable.permutated,
2790  s->flags & 0x10 ? ff_alternate_vertical_scan : ff_zigzag_direct,
2791  m->idsp.idct_permutation);
2792 
2793  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
2794  m->qscale = 1;
2795 
2796  for (int y = 0; y < avctx->height; y += 16) {
2797  int intraquant;
2798 
2799  for (int x = 0; x < avctx->width; x += 16) {
2800  if (x || y) {
2801  if (!get_bits1(gb))
2802  return AVERROR_INVALIDDATA;
2803  }
2804  if (get_bits1(gb)) {
2805  intraquant = 0;
2806  } else {
2807  if (!get_bits1(gb))
2808  return AVERROR_INVALIDDATA;
2809  intraquant = 1;
2810  }
2811 
2812  if (s->flags & 4)
2813  skip_bits1(gb);
2814 
2815  if (intraquant)
2816  m->qscale = mpeg_get_qscale(gb, m->q_scale_type);
2817 
2818  memset(block, 0, 6 * sizeof(*block));
2819 
2820  for (int n = 0; n < 6; n++) {
2821  if (s->flags & 0x80) {
2822  ret = ff_mpeg1_decode_block_intra(gb,
2823  m->intra_matrix,
2824  m->intra_scantable.permutated,
2825  m->last_dc, block[n],
2826  n, m->qscale);
2827  } else {
2828  ret = mpeg2_decode_block_intra(&s->m, block[n], n);
2829  }
2830 
2831  if (ret < 0)
2832  return ret;
2833  }
2834 
2835  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
2836  frame->linesize[0], block[0]);
2837  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
2838  frame->linesize[0], block[1]);
2839  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
2840  frame->linesize[0], block[2]);
2841  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
2842  frame->linesize[0], block[3]);
2843  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
2844  frame->linesize[1], block[4]);
2845  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
2846  frame->linesize[2], block[5]);
2847  }
2848  }
2849 
2850  align_get_bits(gb);
2851  if (get_bits_left(gb) != 32)
2852  return AVERROR_INVALIDDATA;
2853 
2854  *got_frame = 1;
2855 
2856  return avpkt->size;
2857 }
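
As a worked example of the size check at the top of this function: a 720x576 IPU frame has (720/16) * (576/16) = 45 * 36 = 1620 macroblocks, and 2 + 3*4 + 2*2 + 2*6 = 30 bits are assumed per intra macroblock, so any packet shorter than 1620 * 30 / 8 = 6075 bytes is rejected before parsing starts.
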
2858 
2859 static av_cold int ipu_decode_init(AVCodecContext *avctx)
2860 {
2861  IPUContext *s = avctx->priv_data;
2862  MPVContext *const m = &s->m.c;
2863 
2864  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2865  m->avctx = avctx;
2866 
2867  ff_idctdsp_init(&m->idsp, avctx);
2868  ff_mpeg12_init_vlcs();
2869 
2870  for (int i = 0; i < 64; i++) {
2871  int j = m->idsp.idct_permutation[i];
2872  int v = ff_mpeg1_default_intra_matrix[i];
2873  m->intra_matrix[j] = v;
2874  m->chroma_intra_matrix[j] = v;
2875  }
2876 
2877  return 0;
2878 }
2879 
2880 const FFCodec ff_ipu_decoder = {
2881  .p.name = "ipu",
2882  CODEC_LONG_NAME("IPU Video"),
2883  .p.type = AVMEDIA_TYPE_VIDEO,
2884  .p.id = AV_CODEC_ID_IPU,
2885  .priv_data_size = sizeof(IPUContext),
2886  .init = ipu_decode_init,
2887  FF_CODEC_DECODE_CB(ipu_decode_frame),
2888  .p.capabilities = AV_CODEC_CAP_DR1,
2889 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:1828
flags
const SwsFlags flags[]
Definition: swscale.c:61
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:378
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:175
Mpeg1Context::slice
Mpeg12SliceContext slice
Definition: mpeg12dec.c:81
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:88
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
MpegEncContext::progressive_sequence
int progressive_sequence
Definition: mpegvideo.h:248
M2V_OFFSET
#define M2V_OFFSET(x)
Definition: mpeg12dec.c:2665
ff_mb_pat_vlc
VLCElem ff_mb_pat_vlc[512]
Definition: mpeg12.c:145
level
uint8_t level
Definition: svq3.c:208
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:85
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:380
AVPanScan::position
int16_t position[3][2]
position of the top left corner in 1/16 pel for up to 3 fields/frames
Definition: defs.h:271
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:493
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:1898
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:689
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:360
ff_mpv_decode_init
av_cold int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:47
AV_CLASS_CATEGORY_DECODER
@ AV_CLASS_CATEGORY_DECODER
Definition: log.h:35
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:114
IPUContext::m
Mpeg12SliceContext m
Definition: mpeg12dec.c:2757
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
mem_internal.h
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1200
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2551
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
MpegEncContext::top_field_first
int top_field_first
Definition: mpegvideo.h:256
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:645
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:132
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:400
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:39
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:177
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
AVPanScan
Pan Scan area.
Definition: defs.h:250
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1398
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:38
int64_t
long long int64_t
Definition: coverity.c:34
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:42
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:250
Mpeg1Context::vbv_delay
int vbv_delay
Definition: mpeg12dec.c:99
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:2859
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:158
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:421
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2740
AVPacket::data
uint8_t * data
Definition: packet.h:552
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:95
AVOption
AVOption.
Definition: opt.h:429
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:136
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:41
mpeg_decode_mb
static int mpeg_decode_mb(Mpeg12SliceContext *const s, int *mb_skip_run)
Definition: mpeg12dec.c:405
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:127
ff_mpv_framesize_disable
static void ff_mpv_framesize_disable(ScratchpadContext *sc)
Disable allocating the ScratchpadContext's buffers in future calls to ff_mpv_framesize_alloc().
Definition: mpegpicture.h:143
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:32
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:828
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:491
reverse.h
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:81
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:209
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:97
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:33
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:861
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:86
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:31
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MPVContext *const s, GetBitContext *const gb)
Definition: mpeg12dec.c:1149
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1375
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:179
CC_FORMAT_DISH
@ CC_FORMAT_DISH
Definition: mpeg12dec.c:70
MpegEncContext::out_format
enum OutputFormat out_format
output format
Definition: mpegvideo.h:87
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:649
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:247
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:379
ff_mpeg2_rl_vlc
RL_VLC_ELEM ff_mpeg2_rl_vlc[674]
Definition: mpeg12.c:148
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:77
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:551
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:135
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:403
MpegEncContext::mb_height
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:98
ff_mbincr_vlc
VLCElem ff_mbincr_vlc[538]
Definition: mpeg12.c:142
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:157
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
Definition: mpeg12dec.c:1688
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:53
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2233
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1662
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
MpegEncContext::picture_structure
int picture_structure
Definition: mpegvideo.h:252
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
GetBitContext
Definition: get_bits.h:109
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
AVPanScan::width
int width
width and height in 1/16 pel
Definition: defs.h:263
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1634
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:62
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:47
val
static double val(void *priv, double ch)
Definition: aeval.c:77
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:96
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:833
mpeg_decode_slice
static int mpeg_decode_slice(Mpeg12SliceContext *const s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1358
MpegEncContext::width
int width
Definition: mpegvideo.h:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1738
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:87
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:255
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *const s1, GetBitContext *const gb)
Definition: mpeg12dec.c:1066
ff_frame_new_side_data_from_buf
int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef **buf)
Similar to ff_frame_new_side_data, but using an existing buffer ref.
Definition: decode.c:2067
IPUContext
Definition: mpeg12dec.c:2756
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:790
MpegEncContext::slice_ctx_size
unsigned slice_ctx_size
If set, ff_mpv_common_init() will allocate slice contexts of this size.
Definition: mpegvideo.h:285
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2694
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:93
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2762
HAS_MV
#define HAS_MV(a, dir)
Definition: mpegutils.h:91
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:30
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:801
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:976
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:91
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:185
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:697
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:63
Mpeg1Context::stereo3d_type
enum AVStereo3DType stereo3d_type
Definition: mpeg12dec.c:83
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:898
ff_mpeg_flush
av_cold void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:411
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:341
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_mv_vlc
VLCElem ff_mv_vlc[266]
Definition: mpeg12.c:137
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:265
MPVWorkPicture::ptr
MPVPicture * ptr
RefStruct reference.
Definition: mpegpicture.h:99
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:359
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.h:28
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:244
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:46
flush
static av_cold void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2618
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:90
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:82
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:318
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:31
decode.h
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(Mpeg12SliceContext *const s, int16_t *block, int n)
Definition: mpeg12dec.c:225
get_bits.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:828
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:225
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1270
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:134
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
arg
const char * arg
Definition: jacosubdec.c:67
rl_vlc
static const VLCElem * rl_vlc[2]
Definition: mobiclip.c:278
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:447
MpegEncContext::mb_stride
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
Definition: mpegvideo.h:99
Mpeg12SliceContext
Definition: mpeg12dec.c:73
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
MpegEncContext::low_delay
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:231
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:229
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
MpegEncContext::mpeg12_contexts
struct Mpeg12SliceContext * mpeg12_contexts[MAX_THREADS]
Definition: mpegvideo.h:113
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:372
NULL
#define NULL
Definition: coverity.c:32
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
run
uint8_t run
Definition: svq3.c:207
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:669
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:29
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:194
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
mpeg12_execute_slice_threads
static void mpeg12_execute_slice_threads(AVCodecContext *avctx, Mpeg1Context *const s)
Definition: mpeg12dec.c:2204
hwaccel_internal.h
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:94
MpegEncContext::next_pic
MPVWorkPicture next_pic
copy of the next picture structure.
Definition: mpegvideo.h:128
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:788
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mpv_decode_close
av_cold int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:125
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:790
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
M2V_PARAM
#define M2V_PARAM
Definition: mpeg12dec.c:2666
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:240
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:386
profiles.h
mpeg_get_qscale
static int mpeg_get_qscale(GetBitContext *const gb, int q_scale_type)
Definition: mpegvideodec.h:80
CC_FORMAT_A53_PART4
@ CC_FORMAT_A53_PART4
Definition: mpeg12dec.c:67
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:231
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:48
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
MB_TYPE_BIDIR_MV
#define MB_TYPE_BIDIR_MV
Definition: mpegutils.h:51
lowres
static int lowres
Definition: ffplay.c:330
ff_mpeg1_rl_vlc
RL_VLC_ELEM ff_mpeg1_rl_vlc[680]
Definition: mpeg12.c:147
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
CC_FORMAT_AUTO
@ CC_FORMAT_AUTO
Definition: mpeg12dec.c:66
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
mpeg12codecs.h
MpegEncContext::slice_context_count
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:116
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:646
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1628
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
get_bits_bytesize
static int get_bits_bytesize(const GetBitContext *s, int round_up)
Get the size of the GetBitContext's buffer in bytes.
Definition: get_bits.h:264
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:163
ff_mpv_alloc_dummy_frames
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
Definition: mpegvideo_dec.c:263
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
Mpeg1Context::save_chroma_format
int save_chroma_format
Definition: mpeg12dec.c:91
startcode.h
CC_FORMAT_DVD
@ CC_FORMAT_DVD
Definition: mpeg12dec.c:69
IS_INTRA
#define IS_INTRA(x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:228
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:89
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:67
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:495
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1635
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
mpeg2video_options
static const AVOption mpeg2video_options[]
Definition: mpeg12dec.c:2668
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:553
dc
Intra DC prediction value; the full documentation text (Snow codec description) is inherited from snow.txt and not reproduced here.
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:155
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:310
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
CC_FORMAT_SCTE20
@ CC_FORMAT_SCTE20
Definition: mpeg12dec.c:68
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
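A minimal sketch (not code from this file) of the usual av_frame_ref() lifecycle; the helper name copy_frame_ref is hypothetical and error handling is abbreviated:
#include <libavutil/error.h>
#include <libavutil/frame.h>

static int copy_frame_ref(AVFrame **dst, const AVFrame *src)
{
    *dst = av_frame_alloc();
    if (!*dst)
        return AVERROR(ENOMEM);
    int ret = av_frame_ref(*dst, src); /* shares src's buffers by reference */
    if (ret < 0)
        av_frame_free(dst);            /* unreferences and frees on failure */
    return ret;
}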
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:401
codec_internal.h
shift
static int shift(int a, int b)
Definition: bonk.c:261
IPUContext::flags
int flags
Definition: mpeg12dec.c:2759
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:204
mpeg_field_start
static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1223
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:125
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:101
Mpeg1Context::bit_rate
int64_t bit_rate
Definition: mpeg12dec.c:100
VLCElem
Definition: vlc.h:32
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2637
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2029
AV_RB32
Read a 32-bit big-endian value (macro generated from the bytestream templates).
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
AVFrameSideData::data
uint8_t * data
Definition: frame.h:278
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:61
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1573
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:403
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:40
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:1690
AVCodecHWConfigInternal
Definition: hwconfig.h:25
MpegEncContext::mbskip_table
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for B-frame encodin...
Definition: mpegvideo.h:147
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
MpegEncContext::context_initialized
int context_initialized
Definition: mpegvideo.h:97
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:306
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:45
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:174
Mpeg12SliceContext::gb
GetBitContext gb
Definition: mpeg12dec.c:75
Mpeg12SliceContext::c
MPVContext c
Definition: mpeg12dec.c:74
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:84
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:767
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:193
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:178
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:411
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
mpeg2video_class
static const AVClass mpeg2video_class
Definition: mpeg12dec.c:2686
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *const s1, GetBitContext *const gb)
Definition: mpeg12dec.c:1022
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
AVBufferRef::size
size_t size
Size of data in bytes.
Definition: buffer.h:94
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:116
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:266
emms.h
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:365
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:270
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:264
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Example code; the full text is inherited from the filter design document (filter_design.txt).
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:360
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:63
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:369
internal.h
mpeg_set_cc_format
static void mpeg_set_cc_format(AVCodecContext *avctx, enum Mpeg2ClosedCaptionsFormat format, const char *label)
Definition: mpeg12dec.c:1878
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:85
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:194
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:185
atomic_store_explicit
#define atomic_store_explicit(object, desired, order)
Definition: stdatomic.h:90
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1382
MpegEncContext::thread_context
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:112
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
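A hedged illustration of av_d2q() (not code from this decoder): a value that is exactly representable in binary reduces cleanly within the given bound.
#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational r = av_d2q(0.5, 100); /* 0.5 is exact in binary -> {1, 2} */
    printf("%d/%d\n", r.num, r.den); /* prints "1/2" */
    return 0;
}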
MB_TYPE_MV_2_MV_DIR
#define MB_TYPE_MV_2_MV_DIR(a)
Definition: mpegutils.h:93
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:494
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:122
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:259
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:676
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:131
mpeg_decode_motion
static int mpeg_decode_motion(Mpeg12SliceContext *const s, int fcode, int pred)
Definition: mpeg12dec.c:105
MpegEncContext::er
ERContext er
Definition: mpegvideo.h:287
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:750
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:600
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
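A hedged sketch of how the MPEG-2 sequence extension scales the base rate taken from ff_mpeg12_frame_rate_tab (per ISO/IEC 13818-2, frame_rate = frame_rate_value * (frame_rate_extension_n + 1) / (frame_rate_extension_d + 1)); the function name and parameters are illustrative:
#include <libavutil/rational.h>

extern const AVRational ff_mpeg12_frame_rate_tab[];

static AVRational effective_frame_rate(unsigned frame_rate_index,
                                       int ext_n, int ext_d)
{
    AVRational base = ff_mpeg12_frame_rate_tab[frame_rate_index];
    /* scale the table entry by the extension fields from the bitstream */
    return av_mul_q(base, (AVRational){ ext_n + 1, ext_d + 1 });
}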
mpeg_decode_gop
static int mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2172
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
Frame handling; the full text is inherited from the filter design document (filter_design.txt).
Definition: filter_design.txt:265
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:555
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:147
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
left
Left neighboring block used in Snow motion-vector prediction; the full documentation text is inherited from snow.txt and not reproduced here.
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:838
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:351
AV_RL32
Read a 32-bit little-endian value (macro generated from the bytestream templates).
Definition: bytestream.h:92
mpeg12data.h
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(Mpeg12SliceContext *const s, int16_t *block, int n)
Definition: mpeg12dec.c:141
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:694
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1580
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:147
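A hedged sketch of formatting the 25-bit GOP timecode for display; the function name is illustrative, and the buffer size comes from AV_TIMECODE_STR_SIZE in libavutil/timecode.h:
#include <stdio.h>
#include <libavutil/timecode.h>

static void print_gop_timecode(uint32_t tc25bit)
{
    char buf[AV_TIMECODE_STR_SIZE];          /* scratch buffer sized per timecode.h */
    av_timecode_make_mpeg_tc_string(buf, tc25bit);
    printf("GOP timecode: %s\n", buf);
}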
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:254
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1591
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:243
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:789
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1618
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:699
Mpeg1Context::cc_format
enum Mpeg2ClosedCaptionsFormat cc_format
Definition: mpeg12dec.c:86
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:132
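A hedged sketch of what sign_extend() computes: it reinterprets the low `bits` bits of `val` as a two's-complement value (names here are illustrative, not the mathops.h implementation):
static int sign_extend_demo(int val, unsigned bits)
{
    unsigned shift = 8 * sizeof(int) - bits;
    union { unsigned u; int s; } v = { (unsigned)val << shift };
    return v.s >> shift;            /* arithmetic shift restores the sign */
}
/* sign_extend_demo(0x1F, 5) == -1, sign_extend_demo(0x0F, 5) == 15 */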
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:357
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:89
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
MpegEncContext::resync_mb_x
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:210
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1639
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
ff_mb_ptype_vlc
VLCElem ff_mb_ptype_vlc[64]
Definition: mpeg12.c:143
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1374
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:607
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2629
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MPVContext *s, int16_t block[][64])
Definition: mpegvideo_dec.c:1083
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:86
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:2880
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
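A hedged sketch of attaching stereo 3D side data to an output frame, as a decoder might after parsing a packing signal; the function name is illustrative:
#include <libavutil/error.h>
#include <libavutil/stereo3d.h>

static int tag_top_bottom(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type = AV_STEREO3D_TOPBOTTOM; /* views stacked vertically */
    return 0;
}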
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:276
load_matrix
static int load_matrix(MPVContext *const s, GetBitContext *const gb, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1126
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:34
MpegEncContext::first_field
int first_field
is 1 for the first field of a field picture, 0 otherwise
Definition: mpegvideo.h:273
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:258
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:171
ff_tlog
#define ff_tlog(a,...)
Definition: tableprint_vlc.h:29
Mpeg12SliceContext::DECLARE_ALIGNED_32
DECLARE_ALIGNED_32(int16_t, block)[12][64]
AVPacket
This structure stores compressed data.
Definition: packet.h:529
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
ScanTable::permutated
uint8_t permutated[64]
Definition: mpegvideo.h:48
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
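A hedged sketch of storing a formatted timecode string in a frame's metadata dictionary via av_dict_set(); the "timecode" key and helper name are illustrative:
#include <libavutil/dict.h>
#include <libavutil/frame.h>

static int set_timecode_metadata(AVFrame *frame, const char *tc_string)
{
    /* copies the key and value; returns 0 on success, <0 on error */
    return av_dict_set(&frame->metadata, "timecode", tc_string, 0);
}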
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *const s1, GetBitContext *const gb)
Definition: mpeg12dec.c:1164
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *const s1, GetBitContext *const gb)
Definition: mpeg12dec.c:1091
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1637
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
AVStereo3DType
AVStereo3DType
List of possible 3D Types.
Definition: stereo3d.h:48
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(Mpeg12SliceContext *const s, int16_t *block, int n)
Definition: mpeg12dec.c:310
block
Documentation snippet inherited from the filter design document (filter_design.txt).
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:92
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
AVPanScan::height
int height
Definition: defs.h:264
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
ff_mb_btype_vlc
VLCElem ff_mb_btype_vlc[64]
Definition: mpeg12.c:144
MpegEncContext::resync_mb_y
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:211
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2103
h
h
Definition: vp9dsp_template.c:2070
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:110
Mpeg2ClosedCaptionsFormat
Mpeg2ClosedCaptionsFormat
Definition: mpeg12dec.c:65
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:32
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:101
width
#define width
Definition: dsp.h:89
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:109
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:225
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:260
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1350
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1349
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:63
MpegEncContext::codec_id
enum AVCodecID codec_id
Definition: mpegvideo.h:90
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
MB_TYPE_FORWARD_MV
#define MB_TYPE_FORWARD_MV
Definition: mpegutils.h:49
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:30
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:87
Mpeg1Context
Definition: mpeg12dec.c:80
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:205
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:98
get_dmv
static int get_dmv(Mpeg12SliceContext *const s)
Definition: mpeg12dec.c:391
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:64
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:263
MpegEncContext::codec_tag
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
Definition: mpegvideo.h:93