FFmpeg
vp3.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2003-2004 The FFmpeg project
3  * Copyright (C) 2019 Peter Ross
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * On2 VP3/VP4 Video Decoder
25  *
26  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
27  * For more information about the VP3 coding process, visit:
28  * http://wiki.multimedia.cx/index.php?title=On2_VP3
29  *
30  * Theora decoder by Alex Beregszaszi
31  */
32 
33 #include "config_components.h"
34 
35 #include <stddef.h>
36 #include <string.h>
37 
38 #include "libavutil/emms.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/mem.h"
41 #include "libavutil/mem_internal.h"
42 #include "libavutil/thread.h"
43 
44 #include "avcodec.h"
45 #include "codec_internal.h"
46 #include "decode.h"
47 #include "get_bits.h"
48 #include "hpeldsp.h"
49 #include "internal.h"
50 #include "jpegquanttables.h"
51 #include "mathops.h"
52 #include "progressframe.h"
53 #include "libavutil/refstruct.h"
54 #include "thread.h"
55 #include "videodsp.h"
56 #include "vp3data.h"
57 #include "vp4data.h"
58 #include "vp3dsp.h"
59 #include "xiph.h"
60 
61 #define VP3_MV_VLC_BITS 6
62 #define VP4_MV_VLC_BITS 6
63 #define SUPERBLOCK_VLC_BITS 6
64 
65 #define FRAGMENT_PIXELS 8
66 
67 // FIXME split things out into their own arrays
68 typedef struct Vp3Fragment {
69  int16_t dc;
70  uint8_t coding_method;
71  uint8_t qpi;
72 } Vp3Fragment;
73 
74 #define SB_NOT_CODED 0
75 #define SB_PARTIALLY_CODED 1
76 #define SB_FULLY_CODED 2
77 
78 // This is the maximum length of a single long bit run that can be encoded
79 // for superblock coding or block qps. Theora special-cases this to read a
80 // bit instead of flipping the current bit to allow for runs longer than 4129.
81 #define MAXIMUM_LONG_BIT_RUN 4129
82 
83 #define MODE_INTER_NO_MV 0
84 #define MODE_INTRA 1
85 #define MODE_INTER_PLUS_MV 2
86 #define MODE_INTER_LAST_MV 3
87 #define MODE_INTER_PRIOR_LAST 4
88 #define MODE_USING_GOLDEN 5
89 #define MODE_GOLDEN_MV 6
90 #define MODE_INTER_FOURMV 7
91 #define CODING_MODE_COUNT 8
92 
93 /* special internal mode */
94 #define MODE_COPY 8
95 
96 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb);
97 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb);
98 
99 
100 /* There are 6 preset schemes, plus a free-form scheme */
101 static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
102  /* scheme 1: Last motion vector dominates */
107 
108  /* scheme 2 */
113 
114  /* scheme 3 */
119 
120  /* scheme 4 */
125 
126  /* scheme 5: No motion vector dominates */
131 
132  /* scheme 6 */
137 };
138 
139 static const uint8_t hilbert_offset[16][2] = {
140  { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
141  { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
142  { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
143  { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
144 };
145 
146 enum {
152 };
153 
154 static const uint8_t vp4_pred_block_type_map[8] = {
163 };
164 
165 static VLCElem superblock_run_length_vlc[88]; /* version < 2 */
166 static VLCElem fragment_run_length_vlc[56]; /* version < 2 */
167 static VLCElem motion_vector_vlc[112]; /* version < 2 */
168 
169 // The VP4 tables reuse this vlc.
170 static VLCElem mode_code_vlc[24 + 2108 * CONFIG_VP4_DECODER];
171 
172 #if CONFIG_VP4_DECODER
173 static const VLCElem *vp4_mv_vlc_table[2][7]; /* version >= 2 */
174 static const VLCElem *block_pattern_vlc[2]; /* version >= 2 */
175 #endif
176 
177 typedef struct {
178  int dc;
179  int type;
180 } VP4Predictor;
181 
182 #define MIN_DEQUANT_VAL 2
183 
184 typedef struct HuffEntry {
185  uint8_t len, sym;
186 } HuffEntry;
187 
188 typedef struct HuffTable {
190  uint8_t nb_entries;
191 } HuffTable;
192 
193 typedef struct CoeffVLCs {
194  const VLCElem *vlc_tabs[80];
195  VLC vlcs[80];
196 } CoeffVLCs;
197 
198 typedef struct Vp3DecodeContext {
201  int version;
202  int width, height;
207  int keyframe;
208  uint8_t idct_permutation[64];
209  uint8_t idct_scantable[64];
213  DECLARE_ALIGNED(16, int16_t, block)[64];
217 
218  int qps[3];
219  int nqps;
220  int last_qps[3];
221 
231  unsigned char *superblock_coding;
232 
233  int macroblock_count; /* y macroblock count */
239  int yuv_macroblock_count; /* y+u+v macroblock count */
240 
244 
247  int data_offset[3];
248  uint8_t offset_x;
249  uint8_t offset_y;
251 
252  int8_t (*motion_val[2])[2];
253 
254  /* tables */
255  uint16_t coded_dc_scale_factor[2][64];
256  uint32_t coded_ac_scale_factor[64];
257  uint8_t base_matrix[384][64];
258  uint8_t qr_count[2][3];
259  uint8_t qr_size[2][3][64];
260  uint16_t qr_base[2][3][64];
261 
262  /**
263  * This is a list of all tokens in bitstream order. Reordering takes place
264  * by pulling from each level during IDCT. As a consequence, IDCT must be
265  * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
266  * otherwise. The 32 different tokens with up to 12 bits of extradata are
267  * collapsed into 3 types, packed as follows:
268  * (from the low to high bits)
269  *
270  * 2 bits: type (0,1,2)
271  * 0: EOB run, 14 bits for run length (12 needed)
272  * 1: zero run, 7 bits for run length
273  * 7 bits for the next coefficient (3 needed)
274  * 2: coefficient, 14 bits (11 needed)
275  *
276  * Coefficients are signed, so are packed in the highest bits for automatic
277  * sign extension.
278  */
279  int16_t *dct_tokens[3][64];
280  int16_t *dct_tokens_base;
281 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
282 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
283 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
284 
285  /**
286  * number of blocks that contain DCT coefficients at
287  * the given level or higher
288  */
289  int num_coded_frags[3][64];
291 
292  /* this is a list of indexes into the all_fragments array indicating
293  * which of the fragments are coded */
295 
299 
300  /**
301  * The first 16 of the following VLCs are for the dc coefficients;
302  * the others are four groups of 16 VLCs each for ac coefficients.
303  * This is a RefStruct reference to share these VLCs between threads.
304  */
306 
307  /* these arrays need to be on 16-byte boundaries since SSE2 operations
308  * index into them */
309  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
310 
311  /* This table contains superblock_count * 16 entries. Each set of 16
312  * numbers corresponds to the fragment indexes 0..15 of the superblock.
313  * An entry will be -1 to indicate that no entry corresponds to that
314  * index. */
316 
317  /* This is an array that indicates how a particular macroblock
318  * is coded. */
319  unsigned char *macroblock_coding;
320 
321  uint8_t *edge_emu_buffer;
322 
323  /* Huffman decode */
325 
326  uint8_t filter_limit_values[64];
328 
329  VP4Predictor * dc_pred_row; /* dc_pred_row[y_superblock_width * 4] */
331 
332 /************************************************************************
333  * VP3 specific functions
334  ************************************************************************/
335 
336 static av_cold void free_tables(AVCodecContext *avctx)
337 {
338  Vp3DecodeContext *s = avctx->priv_data;
339 
340  av_freep(&s->superblock_coding);
341  av_freep(&s->all_fragments);
342  av_freep(&s->nkf_coded_fragment_list);
343  av_freep(&s->kf_coded_fragment_list);
344  av_freep(&s->dct_tokens_base);
345  av_freep(&s->superblock_fragments);
346  av_freep(&s->macroblock_coding);
347  av_freep(&s->dc_pred_row);
348  av_freep(&s->motion_val[0]);
349  av_freep(&s->motion_val[1]);
350 }
351 
352 static void vp3_decode_flush(AVCodecContext *avctx)
353 {
354  Vp3DecodeContext *s = avctx->priv_data;
355 
356  ff_progress_frame_unref(&s->golden_frame);
357  ff_progress_frame_unref(&s->last_frame);
358  ff_progress_frame_unref(&s->current_frame);
359 }
360 
362 {
363  Vp3DecodeContext *s = avctx->priv_data;
364 
365  free_tables(avctx);
366  av_freep(&s->edge_emu_buffer);
367 
368  s->theora_tables = 0;
369 
370  /* release all frames */
371  vp3_decode_flush(avctx);
372 
373  av_refstruct_unref(&s->coeff_vlc);
374 
375  return 0;
376 }
377 
378 /**
379  * This function sets up all of the various blocks mappings:
380  * superblocks <-> fragments, macroblocks <-> fragments,
381  * superblocks <-> macroblocks
382  *
383  * @return 0 is successful; returns 1 if *anything* went wrong.
384  */
386 {
387  int j = 0;
388 
389  for (int plane = 0; plane < 3; plane++) {
390  int sb_width = plane ? s->c_superblock_width
391  : s->y_superblock_width;
392  int sb_height = plane ? s->c_superblock_height
393  : s->y_superblock_height;
394  int frag_width = s->fragment_width[!!plane];
395  int frag_height = s->fragment_height[!!plane];
396 
397  for (int sb_y = 0; sb_y < sb_height; sb_y++)
398  for (int sb_x = 0; sb_x < sb_width; sb_x++)
399  for (int i = 0; i < 16; i++) {
400  int x = 4 * sb_x + hilbert_offset[i][0];
401  int y = 4 * sb_y + hilbert_offset[i][1];
402 
403  if (x < frag_width && y < frag_height)
404  s->superblock_fragments[j++] = s->fragment_start[plane] +
405  y * frag_width + x;
406  else
407  s->superblock_fragments[j++] = -1;
408  }
409  }
410 
411  return 0; /* successful path out */
412 }
413 
414 /*
415  * This function sets up the dequantization tables used for a particular
416  * frame.
417  */
/* Build the dequantization matrices for quality-index slot qpi.
 * For each (inter, plane) pair the 8x8 quant matrix is linearly
 * interpolated between two base matrices chosen by the frame's
 * quality index (s->qps[qpi]), then scaled by the coded DC/AC
 * scale factors and stored in IDCT permutation order. */
418 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
419 {
420  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
421 
422  for (int inter = 0; inter < 2; inter++) {
423  for (int plane = 0; plane < 3; plane++) {
424  int dc_scale_factor = s->coded_dc_scale_factor[!!plane][s->qps[qpi]];
425  int sum = 0, bmi, bmj, qistart, qri;
 /* locate the quant range (qri) that contains this quality index */
426  for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
427  sum += s->qr_size[inter][plane][qri];
428  if (s->qps[qpi] <= sum)
429  break;
430  }
 /* qistart is the low end of the selected range; bmi/bmj index the
  * base matrices at the two ends of the interpolation interval */
431  qistart = sum - s->qr_size[inter][plane][qri];
432  bmi = s->qr_base[inter][plane][qri];
433  bmj = s->qr_base[inter][plane][qri + 1];
434  for (int i = 0; i < 64; i++) {
 /* rounded linear interpolation between the two base matrices */
435  int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
436  2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
437  s->qr_size[inter][plane][qri]) /
438  (2 * s->qr_size[inter][plane][qri]);
439 
440  int qmin = 8 << (inter + !i);
441  int qscale = i ? ac_scale_factor : dc_scale_factor;
442  int qbias = (1 + inter) * 3;
 /* version < 2 (VP3/Theora) clamps; version >= 2 (VP4) applies a
  * bias to AC coefficients and does not clamp */
443  s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
444  (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
445  : (qscale * (coeff - qbias) / 100 + qbias) * 4;
446  }
447  /* all DC coefficients use the same quant so as not to interfere
448  * with DC prediction */
449  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
450  }
451  }
452 }
453 
454 /*
455  * This function initializes the loop filter boundary limits if the frame's
456  * quality index is different from the previous frame's.
457  *
458  * The filter_limit_values may not be larger than 127.
459  */
461 {
462  ff_vp3dsp_set_bounding_values(s->bounding_values_array, s->filter_limit_values[s->qps[0]]);
463 }
464 
465 /*
466  * This function unpacks all of the superblock/macroblock/fragment coding
467  * information from the bitstream.
468  */
470 {
471  const int superblock_starts[3] = {
472  0, s->u_superblock_start, s->v_superblock_start
473  };
474  int bit = 0;
475  int current_superblock = 0;
476  int current_run = 0;
477  int num_partial_superblocks = 0;
478 
479  int current_fragment;
480  int plane0_num_coded_frags = 0;
481 
482  if (s->keyframe) {
483  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
484  } else {
485  /* unpack the list of partially-coded superblocks */
486  bit = get_bits1(gb) ^ 1;
487  current_run = 0;
488 
489  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
490  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
491  bit = get_bits1(gb);
492  else
493  bit ^= 1;
494 
495  current_run = get_vlc2(gb, superblock_run_length_vlc,
497  if (current_run == 34)
498  current_run += get_bits(gb, 12);
499 
500  if (current_run > s->superblock_count - current_superblock) {
501  av_log(s->avctx, AV_LOG_ERROR,
502  "Invalid partially coded superblock run length\n");
503  return -1;
504  }
505 
506  memset(s->superblock_coding + current_superblock, bit, current_run);
507 
508  current_superblock += current_run;
509  if (bit)
510  num_partial_superblocks += current_run;
511  }
512 
513  /* unpack the list of fully coded superblocks if any of the blocks were
514  * not marked as partially coded in the previous step */
515  if (num_partial_superblocks < s->superblock_count) {
516  int superblocks_decoded = 0;
517 
518  current_superblock = 0;
519  bit = get_bits1(gb) ^ 1;
520  current_run = 0;
521 
522  while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
523  get_bits_left(gb) > 0) {
524  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
525  bit = get_bits1(gb);
526  else
527  bit ^= 1;
528 
529  current_run = get_vlc2(gb, superblock_run_length_vlc,
531  if (current_run == 34)
532  current_run += get_bits(gb, 12);
533 
534  for (int j = 0; j < current_run; current_superblock++) {
535  if (current_superblock >= s->superblock_count) {
536  av_log(s->avctx, AV_LOG_ERROR,
537  "Invalid fully coded superblock run length\n");
538  return -1;
539  }
540 
541  /* skip any superblocks already marked as partially coded */
542  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
543  s->superblock_coding[current_superblock] = 2 * bit;
544  j++;
545  }
546  }
547  superblocks_decoded += current_run;
548  }
549  }
550 
551  /* if there were partial blocks, initialize bitstream for
552  * unpacking fragment codings */
553  if (num_partial_superblocks) {
554  current_run = 0;
555  bit = get_bits1(gb);
556  /* toggle the bit because as soon as the first run length is
557  * fetched the bit will be toggled again */
558  bit ^= 1;
559  }
560  }
561 
562  /* figure out which fragments are coded; iterate through each
563  * superblock (all planes) */
564  s->total_num_coded_frags = 0;
565  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
566 
567  s->coded_fragment_list[0] = s->keyframe ? s->kf_coded_fragment_list
568  : s->nkf_coded_fragment_list;
569 
570  for (int plane = 0; plane < 3; plane++) {
571  int sb_start = superblock_starts[plane];
572  int sb_end = sb_start + (plane ? s->c_superblock_count
573  : s->y_superblock_count);
574  int num_coded_frags = 0;
575 
576  if (s->keyframe) {
577  if (s->num_kf_coded_fragment[plane] == -1) {
578  for (int i = sb_start; i < sb_end; i++) {
579  /* iterate through all 16 fragments in a superblock */
580  for (int j = 0; j < 16; j++) {
581  /* if the fragment is in bounds, check its coding status */
582  current_fragment = s->superblock_fragments[i * 16 + j];
583  if (current_fragment != -1) {
584  s->coded_fragment_list[plane][num_coded_frags++] =
585  current_fragment;
586  }
587  }
588  }
589  s->num_kf_coded_fragment[plane] = num_coded_frags;
590  } else
591  num_coded_frags = s->num_kf_coded_fragment[plane];
592  } else {
593  for (int i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
594  if (get_bits_left(gb) < plane0_num_coded_frags >> 2) {
595  return AVERROR_INVALIDDATA;
596  }
597  /* iterate through all 16 fragments in a superblock */
598  for (int j = 0; j < 16; j++) {
599  /* if the fragment is in bounds, check its coding status */
600  current_fragment = s->superblock_fragments[i * 16 + j];
601  if (current_fragment != -1) {
602  int coded = s->superblock_coding[i];
603 
604  if (coded == SB_PARTIALLY_CODED) {
605  /* fragment may or may not be coded; this is the case
606  * that cares about the fragment coding runs */
607  if (current_run-- == 0) {
608  bit ^= 1;
609  current_run = get_vlc2(gb, fragment_run_length_vlc, 5, 2);
610  }
611  coded = bit;
612  }
613 
614  if (coded) {
615  /* default mode; actual mode will be decoded in
616  * the next phase */
617  s->all_fragments[current_fragment].coding_method =
619  s->coded_fragment_list[plane][num_coded_frags++] =
620  current_fragment;
621  } else {
622  /* not coded; copy this fragment from the prior frame */
623  s->all_fragments[current_fragment].coding_method =
624  MODE_COPY;
625  }
626  }
627  }
628  }
629  }
630  if (!plane)
631  plane0_num_coded_frags = num_coded_frags;
632  s->total_num_coded_frags += num_coded_frags;
633  for (int i = 0; i < 64; i++)
634  s->num_coded_frags[plane][i] = num_coded_frags;
635  if (plane < 2)
636  s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
637  num_coded_frags;
638  }
639  return 0;
640 }
641 
642 #define BLOCK_X (2 * mb_x + (k & 1))
643 #define BLOCK_Y (2 * mb_y + (k >> 1))
644 
645 #if CONFIG_VP4_DECODER
646 /**
647  * @return number of blocks, or > yuv_macroblock_count on error.
648  * return value is always >= 1.
649  */
650 static int vp4_get_mb_count(Vp3DecodeContext *s, GetBitContext *gb)
651 {
652  int v = 1;
653  int bits;
 /* each saturated 9-bit prefix (0x1ff) adds 256 to the run length;
  * abort early once the count exceeds the total macroblock count */
654  while ((bits = show_bits(gb, 9)) == 0x1ff) {
655  skip_bits(gb, 9);
656  v += 256;
657  if (v > s->yuv_macroblock_count) {
658  av_log(s->avctx, AV_LOG_ERROR, "Invalid run length\n");
659  return v;
660  }
661  }
 /* decode the tail of the run length: a unary-style prefix selects
  * how many extra bits (n) to read; body(n) consumes the prefix
  * plus n value bits and adds (1 << n) + value to the count */
662 #define body(n) { \
663  skip_bits(gb, 2 + n); \
664  v += (1 << n) + get_bits(gb, n); }
665 #define thresh(n) (0x200 - (0x80 >> n))
666 #define else_if(n) else if (bits < thresh(n)) body(n)
667  if (bits < 0x100) {
668  skip_bits(gb, 1);
669  } else if (bits < thresh(0)) {
670  skip_bits(gb, 2);
671  v += 1;
672  }
673  else_if(1)
674  else_if(2)
675  else_if(3)
676  else_if(4)
677  else_if(5)
678  else_if(6)
679  else body(7)
680 #undef body
681 #undef thresh
682 #undef else_if
683  return v;
684 }
685 
686 static int vp4_get_block_pattern(GetBitContext *gb, int *next_block_pattern_table)
687 {
688  int v = get_vlc2(gb, block_pattern_vlc[*next_block_pattern_table], 5, 1);
689  *next_block_pattern_table = vp4_block_pattern_table_selector[v];
690  return v + 1;
691 }
692 
/* Unpack the VP4 macroblock coding information: first a run-length
 * coded list of fully/not coded macroblocks, then (if any remain
 * undecided) a second pass marking partially coded ones, and finally
 * a per-macroblock block pattern that sets each fragment's default
 * coding method. Returns 0 on success, negative on invalid data. */
693 static int vp4_unpack_macroblocks(Vp3DecodeContext *s, GetBitContext *gb)
694 {
695  int fragment;
696  int next_block_pattern_table;
697  int bit, current_run, has_partial;
698 
699  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
700 
 /* keyframes code every fragment; nothing to unpack here */
701  if (s->keyframe)
702  return 0;
703 
 /* pass 1: alternate runs of fully coded (SB_FULLY_CODED) and
  * not-coded macroblocks; superblock_coding is reused as per-MB
  * storage here */
704  has_partial = 0;
705  bit = get_bits1(gb);
706  for (int i = 0; i < s->yuv_macroblock_count; i += current_run) {
707  if (get_bits_left(gb) <= 0)
708  return AVERROR_INVALIDDATA;
709  current_run = vp4_get_mb_count(s, gb);
710  if (current_run > s->yuv_macroblock_count - i)
711  return -1;
712  memset(s->superblock_coding + i, 2 * bit, current_run);
713  bit ^= 1;
714  has_partial |= bit;
715  }
716 
 /* pass 2: fill the remaining (zero) entries with alternating runs
  * of partially coded / not coded flags */
717  if (has_partial) {
718  if (get_bits_left(gb) <= 0)
719  return AVERROR_INVALIDDATA;
720  bit = get_bits1(gb);
721  current_run = vp4_get_mb_count(s, gb);
722  for (int i = 0; i < s->yuv_macroblock_count; i++) {
723  if (!s->superblock_coding[i]) {
724  if (!current_run) {
725  bit ^= 1;
726  current_run = vp4_get_mb_count(s, gb);
727  }
728  s->superblock_coding[i] = bit;
729  current_run--;
730  }
731  }
732  if (current_run) /* handle situation when vp4_get_mb_count() fails */
733  return -1;
734  }
735 
 /* pass 3: walk macroblocks in superblock order (i indexes the flags
  * written above across all three planes) and translate each MB's
  * coded status into a 4-bit block pattern for its fragments */
736  next_block_pattern_table = 0;
737  for (int plane = 0, i = 0; plane < 3; plane++) {
738  int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
739  int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
740  int mb_width = plane ? s->c_macroblock_width : s->macroblock_width;
741  int mb_height = plane ? s->c_macroblock_height : s->macroblock_height;
742  int fragment_width = s->fragment_width[!!plane];
743  int fragment_height = s->fragment_height[!!plane];
744 
745  for (int sb_y = 0; sb_y < sb_height; sb_y++) {
746  for (int sb_x = 0; sb_x < sb_width; sb_x++) {
747  for (int j = 0; j < 4; j++) {
748  int mb_x = 2 * sb_x + (j >> 1);
749  int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
750  int mb_coded, pattern, coded;
751 
752  if (mb_x >= mb_width || mb_y >= mb_height)
753  continue;
754 
755  mb_coded = s->superblock_coding[i++];
756 
757  if (mb_coded == SB_FULLY_CODED)
758  pattern = 0xF;
759  else if (mb_coded == SB_PARTIALLY_CODED)
760  pattern = vp4_get_block_pattern(gb, &next_block_pattern_table);
761  else
762  pattern = 0;
763 
764  for (int k = 0; k < 4; k++) {
765  if (BLOCK_X >= fragment_width || BLOCK_Y >= fragment_height)
766  continue;
767  fragment = s->fragment_start[plane] + BLOCK_Y * fragment_width + BLOCK_X;
768  coded = pattern & (8 >> k);
769  /* MODE_INTER_NO_MV is the default for coded fragments.
770  the actual method is decoded in the next phase. */
771  s->all_fragments[fragment].coding_method = coded ? MODE_INTER_NO_MV : MODE_COPY;
772  }
773  }
774  }
775  }
776  }
777  return 0;
778 }
779 #endif
780 
781 /*
782  * This function unpacks all the coding mode data for individual macroblocks
783  * from the bitstream.
784  */
786 {
787  int scheme;
788  int current_macroblock;
789  int current_fragment;
790  int coding_mode;
791  int custom_mode_alphabet[CODING_MODE_COUNT];
792  const int *alphabet;
793  Vp3Fragment *frag;
794 
795  if (s->keyframe) {
796  for (int i = 0; i < s->fragment_count; i++)
797  s->all_fragments[i].coding_method = MODE_INTRA;
798  } else {
799  /* fetch the mode coding scheme for this frame */
800  scheme = get_bits(gb, 3);
801 
802  /* is it a custom coding scheme? */
803  if (scheme == 0) {
804  for (int i = 0; i < 8; i++)
805  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
806  for (int i = 0; i < 8; i++)
807  custom_mode_alphabet[get_bits(gb, 3)] = i;
808  alphabet = custom_mode_alphabet;
809  } else
810  alphabet = ModeAlphabet[scheme - 1];
811 
812  /* iterate through all of the macroblocks that contain 1 or more
813  * coded fragments */
814  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
815  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
816  if (get_bits_left(gb) <= 0)
817  return -1;
818 
819  for (int j = 0; j < 4; j++) {
820  int k;
821  int mb_x = 2 * sb_x + (j >> 1);
822  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
823  current_macroblock = mb_y * s->macroblock_width + mb_x;
824 
825  if (mb_x >= s->macroblock_width ||
826  mb_y >= s->macroblock_height)
827  continue;
828 
829  /* coding modes are only stored if the macroblock has
830  * at least one luma block coded, otherwise it must be
831  * INTER_NO_MV */
832  for (k = 0; k < 4; k++) {
833  current_fragment = BLOCK_Y *
834  s->fragment_width[0] + BLOCK_X;
835  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
836  break;
837  }
838  if (k == 4) {
839  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
840  continue;
841  }
842 
843  /* mode 7 means get 3 bits for each coding mode */
844  if (scheme == 7)
845  coding_mode = get_bits(gb, 3);
846  else
847  coding_mode = alphabet[get_vlc2(gb, mode_code_vlc, 4, 2)];
848 
849  s->macroblock_coding[current_macroblock] = coding_mode;
850  for (k = 0; k < 4; k++) {
851  frag = s->all_fragments + BLOCK_Y * s->fragment_width[0] + BLOCK_X;
852  if (frag->coding_method != MODE_COPY)
853  frag->coding_method = coding_mode;
854  }
855 
856 #define SET_CHROMA_MODES \
857  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
858  frag[s->fragment_start[1]].coding_method = coding_mode; \
859  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
860  frag[s->fragment_start[2]].coding_method = coding_mode;
861 
862  if (s->chroma_y_shift) {
863  frag = s->all_fragments + mb_y *
864  s->fragment_width[1] + mb_x;
866  } else if (s->chroma_x_shift) {
867  frag = s->all_fragments +
868  2 * mb_y * s->fragment_width[1] + mb_x;
869  for (k = 0; k < 2; k++) {
871  frag += s->fragment_width[1];
872  }
873  } else {
874  for (k = 0; k < 4; k++) {
875  frag = s->all_fragments +
876  BLOCK_Y * s->fragment_width[1] + BLOCK_X;
878  }
879  }
880  }
881  }
882  }
883  }
884 
885  return 0;
886 }
887 
888 static int vp4_get_mv(GetBitContext *gb, int axis, int last_motion)
889 {
890 #if CONFIG_VP4_DECODER
891  int v = get_vlc2(gb, vp4_mv_vlc_table[axis][vp4_mv_table_selector[FFABS(last_motion)]],
892  VP4_MV_VLC_BITS, 2);
893  return last_motion < 0 ? -v : v;
894 #else
895  return 0;
896 #endif
897 }
898 
899 /*
900  * This function unpacks all the motion vectors for the individual
901  * macroblocks from the bitstream.
902  */
904 {
905  int coding_mode;
906  int motion_x[4];
907  int motion_y[4];
908  int last_motion_x = 0;
909  int last_motion_y = 0;
910  int prior_last_motion_x = 0;
911  int prior_last_motion_y = 0;
912  int last_gold_motion_x = 0;
913  int last_gold_motion_y = 0;
914  int current_macroblock;
915  int current_fragment;
916  int frag;
917 
918  if (s->keyframe)
919  return 0;
920 
921  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme; 2 is VP4 code scheme */
922  coding_mode = s->version < 2 ? get_bits1(gb) : 2;
923 
924  /* iterate through all of the macroblocks that contain 1 or more
925  * coded fragments */
926  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
927  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
928  if (get_bits_left(gb) <= 0)
929  return -1;
930 
931  for (int j = 0; j < 4; j++) {
932  int mb_x = 2 * sb_x + (j >> 1);
933  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
934  current_macroblock = mb_y * s->macroblock_width + mb_x;
935 
936  if (mb_x >= s->macroblock_width ||
937  mb_y >= s->macroblock_height ||
938  s->macroblock_coding[current_macroblock] == MODE_COPY)
939  continue;
940 
941  switch (s->macroblock_coding[current_macroblock]) {
942  case MODE_GOLDEN_MV:
943  if (coding_mode == 2) { /* VP4 */
944  last_gold_motion_x = motion_x[0] = vp4_get_mv(gb, 0, last_gold_motion_x);
945  last_gold_motion_y = motion_y[0] = vp4_get_mv(gb, 1, last_gold_motion_y);
946  break;
947  } /* otherwise fall through */
948  case MODE_INTER_PLUS_MV:
949  /* all 6 fragments use the same motion vector */
950  if (coding_mode == 0) {
951  motion_x[0] = get_vlc2(gb, motion_vector_vlc,
952  VP3_MV_VLC_BITS, 2);
953  motion_y[0] = get_vlc2(gb, motion_vector_vlc,
954  VP3_MV_VLC_BITS, 2);
955  } else if (coding_mode == 1) {
956  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
957  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
958  } else { /* VP4 */
959  motion_x[0] = vp4_get_mv(gb, 0, last_motion_x);
960  motion_y[0] = vp4_get_mv(gb, 1, last_motion_y);
961  }
962 
963  /* vector maintenance, only on MODE_INTER_PLUS_MV */
964  if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) {
965  prior_last_motion_x = last_motion_x;
966  prior_last_motion_y = last_motion_y;
967  last_motion_x = motion_x[0];
968  last_motion_y = motion_y[0];
969  }
970  break;
971 
972  case MODE_INTER_FOURMV:
973  /* vector maintenance */
974  prior_last_motion_x = last_motion_x;
975  prior_last_motion_y = last_motion_y;
976 
977  /* fetch 4 vectors from the bitstream, one for each
978  * Y fragment, then average for the C fragment vectors */
979  for (int k = 0; k < 4; k++) {
980  current_fragment = BLOCK_Y * s->fragment_width[0] + BLOCK_X;
981  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
982  if (coding_mode == 0) {
983  motion_x[k] = get_vlc2(gb, motion_vector_vlc,
984  VP3_MV_VLC_BITS, 2);
985  motion_y[k] = get_vlc2(gb, motion_vector_vlc,
986  VP3_MV_VLC_BITS, 2);
987  } else if (coding_mode == 1) {
988  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
989  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
990  } else { /* VP4 */
991  motion_x[k] = vp4_get_mv(gb, 0, prior_last_motion_x);
992  motion_y[k] = vp4_get_mv(gb, 1, prior_last_motion_y);
993  }
994  last_motion_x = motion_x[k];
995  last_motion_y = motion_y[k];
996  } else {
997  motion_x[k] = 0;
998  motion_y[k] = 0;
999  }
1000  }
1001  break;
1002 
1003  case MODE_INTER_LAST_MV:
1004  /* all 6 fragments use the last motion vector */
1005  motion_x[0] = last_motion_x;
1006  motion_y[0] = last_motion_y;
1007 
1008  /* no vector maintenance (last vector remains the
1009  * last vector) */
1010  break;
1011 
1012  case MODE_INTER_PRIOR_LAST:
1013  /* all 6 fragments use the motion vector prior to the
1014  * last motion vector */
1015  motion_x[0] = prior_last_motion_x;
1016  motion_y[0] = prior_last_motion_y;
1017 
1018  /* vector maintenance */
1019  prior_last_motion_x = last_motion_x;
1020  prior_last_motion_y = last_motion_y;
1021  last_motion_x = motion_x[0];
1022  last_motion_y = motion_y[0];
1023  break;
1024 
1025  default:
1026  /* covers intra, inter without MV, golden without MV */
1027  motion_x[0] = 0;
1028  motion_y[0] = 0;
1029 
1030  /* no vector maintenance */
1031  break;
1032  }
1033 
1034  /* assign the motion vectors to the correct fragments */
1035  for (int k = 0; k < 4; k++) {
1036  current_fragment =
1037  BLOCK_Y * s->fragment_width[0] + BLOCK_X;
1038  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1039  s->motion_val[0][current_fragment][0] = motion_x[k];
1040  s->motion_val[0][current_fragment][1] = motion_y[k];
1041  } else {
1042  s->motion_val[0][current_fragment][0] = motion_x[0];
1043  s->motion_val[0][current_fragment][1] = motion_y[0];
1044  }
1045  }
1046 
1047  if (s->chroma_y_shift) {
1048  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1049  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1050  motion_x[2] + motion_x[3], 2);
1051  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1052  motion_y[2] + motion_y[3], 2);
1053  }
1054  if (s->version <= 2) {
1055  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1056  motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
1057  }
1058  frag = mb_y * s->fragment_width[1] + mb_x;
1059  s->motion_val[1][frag][0] = motion_x[0];
1060  s->motion_val[1][frag][1] = motion_y[0];
1061  } else if (s->chroma_x_shift) {
1062  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1063  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1064  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1065  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1066  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1067  } else {
1068  motion_x[1] = motion_x[0];
1069  motion_y[1] = motion_y[0];
1070  }
1071  if (s->version <= 2) {
1072  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1073  motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1074  }
1075  frag = 2 * mb_y * s->fragment_width[1] + mb_x;
1076  for (int k = 0; k < 2; k++) {
1077  s->motion_val[1][frag][0] = motion_x[k];
1078  s->motion_val[1][frag][1] = motion_y[k];
1079  frag += s->fragment_width[1];
1080  }
1081  } else {
1082  for (int k = 0; k < 4; k++) {
1083  frag = BLOCK_Y * s->fragment_width[1] + BLOCK_X;
1084  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1085  s->motion_val[1][frag][0] = motion_x[k];
1086  s->motion_val[1][frag][1] = motion_y[k];
1087  } else {
1088  s->motion_val[1][frag][0] = motion_x[0];
1089  s->motion_val[1][frag][1] = motion_y[0];
1090  }
1091  }
1092  }
1093  }
1094  }
1095  }
1096 
1097  return 0;
1098 }
1099 
{
    /* number of coded fragments that may still be promoted to a higher qpi */
    int num_blocks = s->total_num_coded_frags;

    /* For each additional quantizer index, a run-length coded bit sequence
     * selects which of the remaining coded fragments move up to it. */
    for (int qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
        int i = 0, blocks_decoded = 0, num_blocks_at_qpi = 0;
        int bit, run_length;

        /* the first bit is stored inverted so the unconditional toggle
         * below yields the coded value on the first iteration */
        bit = get_bits1(gb) ^ 1;
        run_length = 0;

        do {
            /* after a maximum-length run the next bit is coded explicitly;
             * otherwise runs alternate, so the bit simply toggles */
            if (run_length == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;

            run_length = get_vlc2(gb, superblock_run_length_vlc,
                                  SUPERBLOCK_VLC_BITS, 2);
            /* run value 34 is an escape: 12 more bits extend the run */
            if (run_length == 34)
                run_length += get_bits(gb, 12);
            blocks_decoded += run_length;

            /* fragments left at the current qpi (bit == 0) are done */
            if (!bit)
                num_blocks_at_qpi += run_length;

            /* walk the coded-fragment list; only fragments still sitting
             * at this qpi consume entries of the current run */
            for (int j = 0; j < run_length; i++) {
                if (i >= s->total_num_coded_frags)
                    return -1;

                if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
                    s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
                    j++;
                }
            }
        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);

        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}
1142 
1143 static inline int get_eob_run(GetBitContext *gb, int token)
1144 {
1145  int v = eob_run_table[token].base;
1146  if (eob_run_table[token].bits)
1147  v += get_bits(gb, eob_run_table[token].bits);
1148  return v;
1149 }
1150 
1151 static inline int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
1152 {
1153  int bits_to_get, zero_run;
1154 
1155  bits_to_get = coeff_get_bits[token];
1156  if (bits_to_get)
1157  bits_to_get = get_bits(gb, bits_to_get);
1158  *coeff = coeff_tables[token][bits_to_get];
1159 
1160  zero_run = zero_run_base[token];
1161  if (zero_run_get_bits[token])
1162  zero_run += get_bits(gb, zero_run_get_bits[token]);
1163 
1164  return zero_run;
1165 }
1166 
1167 /*
1168  * This function is called by unpack_dct_coeffs() to extract the VLCs from
1169  * the bitstream. The VLCs encode tokens which are used to unpack DCT
1170  * data. This function unpacks all the VLCs for either the Y plane or both
1171  * C planes, and is called for DC coefficients or different AC coefficient
 1172  * levels (since different coefficient types require different VLC tables).
1173  *
1174  * This function returns a residual eob run. E.g, if a particular token gave
1175  * instructions to EOB the next 5 fragments and there were only 2 fragments
1176  * left in the current fragment range, 3 would be returned so that it could
1177  * be passed into the next call to this same function.
1178  */
1180  const VLCElem *vlc_table, int coeff_index,
1181  int plane,
1182  int eob_run)
1183 {
1184  int j = 0;
1185  int token;
1186  int zero_run = 0;
1187  int16_t coeff = 0;
1188  int blocks_ended;
1189  int coeff_i = 0;
1190  int num_coeffs = s->num_coded_frags[plane][coeff_index];
1191  int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
1192 
1193  /* local references to structure members to avoid repeated dereferences */
1194  const int *coded_fragment_list = s->coded_fragment_list[plane];
1195  Vp3Fragment *all_fragments = s->all_fragments;
1196 
1197  if (num_coeffs < 0) {
1198  av_log(s->avctx, AV_LOG_ERROR,
1199  "Invalid number of coefficients at level %d\n", coeff_index);
1200  return AVERROR_INVALIDDATA;
1201  }
1202 
1203  if (eob_run > num_coeffs) {
1204  coeff_i =
1205  blocks_ended = num_coeffs;
1206  eob_run -= num_coeffs;
1207  } else {
1208  coeff_i =
1209  blocks_ended = eob_run;
1210  eob_run = 0;
1211  }
1212 
1213  // insert fake EOB token to cover the split between planes or zzi
1214  if (blocks_ended)
1215  dct_tokens[j++] = blocks_ended << 2;
1216 
1217  while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
1218  /* decode a VLC into a token */
1219  token = get_vlc2(gb, vlc_table, 11, 3);
1220  /* use the token to get a zero run, a coefficient, and an eob run */
1221  if ((unsigned) token <= 6U) {
1222  eob_run = get_eob_run(gb, token);
1223  if (!eob_run)
1224  eob_run = INT_MAX;
1225 
1226  // record only the number of blocks ended in this plane,
1227  // any spill will be recorded in the next plane.
1228  if (eob_run > num_coeffs - coeff_i) {
1229  dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
1230  blocks_ended += num_coeffs - coeff_i;
1231  eob_run -= num_coeffs - coeff_i;
1232  coeff_i = num_coeffs;
1233  } else {
1234  dct_tokens[j++] = TOKEN_EOB(eob_run);
1235  blocks_ended += eob_run;
1236  coeff_i += eob_run;
1237  eob_run = 0;
1238  }
1239  } else if (token >= 0) {
1240  zero_run = get_coeff(gb, token, &coeff);
1241 
1242  if (zero_run) {
1243  dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
1244  } else {
1245  // Save DC into the fragment structure. DC prediction is
1246  // done in raster order, so the actual DC can't be in with
1247  // other tokens. We still need the token in dct_tokens[]
1248  // however, or else the structure collapses on itself.
1249  if (!coeff_index)
1250  all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
1251 
1252  dct_tokens[j++] = TOKEN_COEFF(coeff);
1253  }
1254 
1255  if (coeff_index + zero_run > 64) {
1256  av_log(s->avctx, AV_LOG_DEBUG,
1257  "Invalid zero run of %d with %d coeffs left\n",
1258  zero_run, 64 - coeff_index);
1259  zero_run = 64 - coeff_index;
1260  }
1261 
1262  // zero runs code multiple coefficients,
1263  // so don't try to decode coeffs for those higher levels
1264  for (int i = coeff_index + 1; i <= coeff_index + zero_run; i++)
1265  s->num_coded_frags[plane][i]--;
1266  coeff_i++;
1267  } else {
1268  av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
1269  return -1;
1270  }
1271  }
1272 
1273  if (blocks_ended > s->num_coded_frags[plane][coeff_index])
1274  av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");
1275 
1276  // decrement the number of blocks that have higher coefficients for each
1277  // EOB run at this level
1278  if (blocks_ended)
1279  for (int i = coeff_index + 1; i < 64; i++)
1280  s->num_coded_frags[plane][i] -= blocks_ended;
1281 
1282  // setup the next buffer
1283  if (plane < 2)
1284  s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
1285  else if (coeff_index < 63)
1286  s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;
1287 
1288  return eob_run;
1289 }
1290 
1292  int first_fragment,
1293  int fragment_width,
1294  int fragment_height);
1295 /*
1296  * This function unpacks all of the DCT coefficient data from the
1297  * bitstream.
1298  */
1300 {
1301  const VLCElem *const *coeff_vlc = s->coeff_vlc->vlc_tabs;
1302  int dc_y_table;
1303  int dc_c_table;
1304  int ac_y_table;
1305  int ac_c_table;
1306  int residual_eob_run = 0;
1307  const VLCElem *y_tables[64], *c_tables[64];
1308 
1309  s->dct_tokens[0][0] = s->dct_tokens_base;
1310 
1311  if (get_bits_left(gb) < 16)
1312  return AVERROR_INVALIDDATA;
1313 
1314  /* fetch the DC table indexes */
1315  dc_y_table = get_bits(gb, 4);
1316  dc_c_table = get_bits(gb, 4);
1317 
1318  /* unpack the Y plane DC coefficients */
1319  residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_y_table], 0,
1320  0, residual_eob_run);
1321  if (residual_eob_run < 0)
1322  return residual_eob_run;
1323  if (get_bits_left(gb) < 8)
1324  return AVERROR_INVALIDDATA;
1325 
1326  /* reverse prediction of the Y-plane DC coefficients */
1327  reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
1328 
1329  /* unpack the C plane DC coefficients */
1330  residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_c_table], 0,
1331  1, residual_eob_run);
1332  if (residual_eob_run < 0)
1333  return residual_eob_run;
1334  residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_c_table], 0,
1335  2, residual_eob_run);
1336  if (residual_eob_run < 0)
1337  return residual_eob_run;
1338 
1339  /* reverse prediction of the C-plane DC coefficients */
1340  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1341  reverse_dc_prediction(s, s->fragment_start[1],
1342  s->fragment_width[1], s->fragment_height[1]);
1343  reverse_dc_prediction(s, s->fragment_start[2],
1344  s->fragment_width[1], s->fragment_height[1]);
1345  }
1346 
1347  if (get_bits_left(gb) < 8)
1348  return AVERROR_INVALIDDATA;
1349  /* fetch the AC table indexes */
1350  ac_y_table = get_bits(gb, 4);
1351  ac_c_table = get_bits(gb, 4);
1352 
1353  /* build tables of AC VLC tables */
1354  for (int i = 1; i <= 5; i++) {
1355  /* AC VLC table group 1 */
1356  y_tables[i] = coeff_vlc[ac_y_table + 16];
1357  c_tables[i] = coeff_vlc[ac_c_table + 16];
1358  }
1359  for (int i = 6; i <= 14; i++) {
1360  /* AC VLC table group 2 */
1361  y_tables[i] = coeff_vlc[ac_y_table + 32];
1362  c_tables[i] = coeff_vlc[ac_c_table + 32];
1363  }
1364  for (int i = 15; i <= 27; i++) {
1365  /* AC VLC table group 3 */
1366  y_tables[i] = coeff_vlc[ac_y_table + 48];
1367  c_tables[i] = coeff_vlc[ac_c_table + 48];
1368  }
1369  for (int i = 28; i <= 63; i++) {
1370  /* AC VLC table group 4 */
1371  y_tables[i] = coeff_vlc[ac_y_table + 64];
1372  c_tables[i] = coeff_vlc[ac_c_table + 64];
1373  }
1374 
1375  /* decode all AC coefficients */
1376  for (int i = 1; i <= 63; i++) {
1377  residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
1378  0, residual_eob_run);
1379  if (residual_eob_run < 0)
1380  return residual_eob_run;
1381 
1382  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1383  1, residual_eob_run);
1384  if (residual_eob_run < 0)
1385  return residual_eob_run;
1386  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1387  2, residual_eob_run);
1388  if (residual_eob_run < 0)
1389  return residual_eob_run;
1390  }
1391 
1392  return 0;
1393 }
1394 
1395 #if CONFIG_VP4_DECODER
1396 /**
 1397  * eob_tracker[] is used instead of TOKEN_EOB(value);
1398  * a dummy TOKEN_EOB(0) value is used to make vp3_dequant work
1399  *
1400  * @return < 0 on error
1401  */
static int vp4_unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                           const VLCElem *const vlc_tables[64],
                           int plane, int eob_tracker[64], int fragment)
{
    int token;
    int zero_run = 0;
    int16_t coeff = 0;
    int coeff_i = 0;       /* current coefficient level (0 = DC) */
    int eob_run;

    /* decode tokens for this fragment until an EOB (pending or explicit)
     * terminates it, or all 64 coefficient levels are consumed */
    while (!eob_tracker[coeff_i]) {
        if (get_bits_left(gb) < 1)
            return AVERROR_INVALIDDATA;

        /* each coefficient level has its own VLC table */
        token = get_vlc2(gb, vlc_tables[coeff_i], 11, 3);

        /* use the token to get a zero run, a coefficient, and an eob run */
        if ((unsigned) token <= 6U) {
            /* EOB token: store a dummy TOKEN_EOB(0) so vp3_dequant works,
             * and remember the remaining run in eob_tracker[] */
            eob_run = get_eob_run(gb, token);
            *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
            eob_tracker[coeff_i] = eob_run - 1;
            return 0;
        } else if (token >= 0) {
            zero_run = get_coeff(gb, token, &coeff);

            if (zero_run) {
                /* clamp runs that would walk past the last coefficient */
                if (coeff_i + zero_run > 64) {
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "Invalid zero run of %d with %d coeffs left\n",
                           zero_run, 64 - coeff_i);
                    zero_run = 64 - coeff_i;
                }
                *s->dct_tokens[plane][coeff_i]++ = TOKEN_ZERO_RUN(coeff, zero_run);
                coeff_i += zero_run;
            } else {
                /* DC is also saved in the fragment struct for prediction */
                if (!coeff_i)
                    s->all_fragments[fragment].dc = coeff;

                *s->dct_tokens[plane][coeff_i]++ = TOKEN_COEFF(coeff);
            }
            coeff_i++;
            if (coeff_i >= 64) /* > 64 occurs when there is a zero_run overflow */
                return 0; /* stop */
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
            return -1;
        }
    }
    /* a pending EOB run from a previous fragment covers this one */
    *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
    eob_tracker[coeff_i]--;
    return 0;
}
1454 
1455 static void vp4_dc_predictor_reset(VP4Predictor *p)
1456 {
1457  p->dc = 0;
1458  p->type = VP4_DC_UNDEFINED;
1459 }
1460 
1461 static void vp4_dc_pred_before(const Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1462 {
1463  for (int i = 0; i < 4; i++)
1464  dc_pred[0][i + 1] = s->dc_pred_row[sb_x * 4 + i];
1465 
1466  for (int j = 1; j < 5; j++)
1467  for (int i = 0; i < 4; i++)
1468  vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1469 }
1470 
1471 static void vp4_dc_pred_after(Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1472 {
1473  for (int i = 0; i < 4; i++)
1474  s->dc_pred_row[sb_x * 4 + i] = dc_pred[4][i + 1];
1475 
1476  for (int i = 1; i < 5; i++)
1477  dc_pred[i][0] = dc_pred[i][4];
1478 }
1479 
1480 /* note: dc_pred points to the current block */
1481 static int vp4_dc_pred(const Vp3DecodeContext *s, const VP4Predictor * dc_pred, const int * last_dc, int type, int plane)
1482 {
1483  int count = 0;
1484  int dc = 0;
1485 
1486  if (dc_pred[-6].type == type) {
1487  dc += dc_pred[-6].dc;
1488  count++;
1489  }
1490 
1491  if (dc_pred[6].type == type) {
1492  dc += dc_pred[6].dc;
1493  count++;
1494  }
1495 
1496  if (count != 2 && dc_pred[-1].type == type) {
1497  dc += dc_pred[-1].dc;
1498  count++;
1499  }
1500 
1501  if (count != 2 && dc_pred[1].type == type) {
1502  dc += dc_pred[1].dc;
1503  count++;
1504  }
1505 
1506  /* using division instead of shift to correctly handle negative values */
1507  return count == 2 ? dc / 2 : last_dc[type];
1508 }
1509 
1510 static void vp4_set_tokens_base(Vp3DecodeContext *s)
1511 {
1512  int16_t *base = s->dct_tokens_base;
1513  for (int plane = 0; plane < 3; plane++) {
1514  for (int i = 0; i < 64; i++) {
1515  s->dct_tokens[plane][i] = base;
1516  base += s->fragment_width[!!plane] * s->fragment_height[!!plane];
1517  }
1518  }
1519 }
1520 
/* Unpack all DCT coefficient data for a VP4 frame: fragments are visited
 * per plane in superblock raster order, blocks within a superblock in
 * Hilbert order, with DC prediction applied as tokens are decoded. */
static int vp4_unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
{
    const VLCElem *const *coeff_vlc = s->coeff_vlc->vlc_tabs;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    const VLCElem *tables[2][64];   /* [luma/chroma][coefficient level] */
    int eob_tracker[64];            /* pending EOB run per coefficient level */
    VP4Predictor dc_pred[6][6];     /* 4x4 superblock plus a 1-slot border */
    int last_dc[NB_VP4_DC_TYPES];

    if (get_bits_left(gb) < 16)
        return AVERROR_INVALIDDATA;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of DC/AC VLC tables */

    /* DC table group */
    tables[0][0] = coeff_vlc[dc_y_table];
    tables[1][0] = coeff_vlc[dc_c_table];
    for (int i = 1; i <= 5; i++) {
        /* AC VLC table group 1 */
        tables[0][i] = coeff_vlc[ac_y_table + 16];
        tables[1][i] = coeff_vlc[ac_c_table + 16];
    }
    for (int i = 6; i <= 14; i++) {
        /* AC VLC table group 2 */
        tables[0][i] = coeff_vlc[ac_y_table + 32];
        tables[1][i] = coeff_vlc[ac_c_table + 32];
    }
    for (int i = 15; i <= 27; i++) {
        /* AC VLC table group 3 */
        tables[0][i] = coeff_vlc[ac_y_table + 48];
        tables[1][i] = coeff_vlc[ac_c_table + 48];
    }
    for (int i = 28; i <= 63; i++) {
        /* AC VLC table group 4 */
        tables[0][i] = coeff_vlc[ac_y_table + 64];
        tables[1][i] = coeff_vlc[ac_c_table + 64];
    }

    /* reset the per-level token write pointers */
    vp4_set_tokens_base(s);

    memset(last_dc, 0, sizeof(last_dc));

    for (int plane = 0; plane < ((s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 1 : 3); plane++) {
        memset(eob_tracker, 0, sizeof(eob_tracker));

        /* initialise dc prediction */
        for (int i = 0; i < s->fragment_width[!!plane]; i++)
            vp4_dc_predictor_reset(&s->dc_pred_row[i]);

        for (int j = 0; j < 6; j++)
            for (int i = 0; i < 6; i++)
                vp4_dc_predictor_reset(&dc_pred[j][i]);

        /* iterate over 4x4-fragment superblocks */
        for (int sb_y = 0; sb_y * 4 < s->fragment_height[!!plane]; sb_y++) {
            for (int sb_x = 0; sb_x *4 < s->fragment_width[!!plane]; sb_x++) {
                vp4_dc_pred_before(s, dc_pred, sb_x);
                /* blocks within a superblock are coded in Hilbert order */
                for (int j = 0; j < 16; j++) {
                    int hx = hilbert_offset[j][0];
                    int hy = hilbert_offset[j][1];
                    int x = 4 * sb_x + hx;
                    int y = 4 * sb_y + hy;
                    VP4Predictor *this_dc_pred = &dc_pred[hy + 1][hx + 1];
                    int fragment, dc_block_type;

                    /* skip fragments beyond the plane edge */
                    if (x >= s->fragment_width[!!plane] || y >= s->fragment_height[!!plane])
                        continue;

                    fragment = s->fragment_start[plane] + y * s->fragment_width[!!plane] + x;

                    if (s->all_fragments[fragment].coding_method == MODE_COPY)
                        continue;

                    if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
                        return -1;

                    dc_block_type = vp4_pred_block_type_map[s->all_fragments[fragment].coding_method];

                    /* apply DC prediction to the freshly decoded DC value */
                    s->all_fragments[fragment].dc +=
                        vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);

                    /* record this block as a predictor for its neighbours */
                    this_dc_pred->type = dc_block_type,
                    this_dc_pred->dc = last_dc[dc_block_type] = s->all_fragments[fragment].dc;
                }
                vp4_dc_pred_after(s, dc_pred, sb_x);
            }
        }
    }

    /* rewind the token pointers for the consumer (vp3_dequant) */
    vp4_set_tokens_base(s);

    return 0;
}
1623 #endif
1624 
1625 /*
1626  * This function reverses the DC prediction for each coded fragment in
1627  * the frame. Much of this function is adapted directly from the original
1628  * VP3 source code.
1629  */
1630 #define COMPATIBLE_FRAME(x) \
1631  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1632 #define DC_COEFF(u) s->all_fragments[u].dc
1633 
1635  int first_fragment,
1636  int fragment_width,
1637  int fragment_height)
1638 {
1639 #define PUL 8
1640 #define PU 4
1641 #define PUR 2
1642 #define PL 1
1643 
1644  int i = first_fragment;
1645 
1646  int predicted_dc;
1647 
1648  /* DC values for the left, up-left, up, and up-right fragments */
1649  int vl, vul, vu, vur;
1650 
1651  /* indexes for the left, up-left, up, and up-right fragments */
1652  int l, ul, u, ur;
1653 
1654  /*
1655  * The 6 fields mean:
1656  * 0: up-left multiplier
1657  * 1: up multiplier
1658  * 2: up-right multiplier
1659  * 3: left multiplier
1660  */
1661  static const int predictor_transform[16][4] = {
1662  { 0, 0, 0, 0 },
1663  { 0, 0, 0, 128 }, // PL
1664  { 0, 0, 128, 0 }, // PUR
1665  { 0, 0, 53, 75 }, // PUR|PL
1666  { 0, 128, 0, 0 }, // PU
1667  { 0, 64, 0, 64 }, // PU |PL
1668  { 0, 128, 0, 0 }, // PU |PUR
1669  { 0, 0, 53, 75 }, // PU |PUR|PL
1670  { 128, 0, 0, 0 }, // PUL
1671  { 0, 0, 0, 128 }, // PUL|PL
1672  { 64, 0, 64, 0 }, // PUL|PUR
1673  { 0, 0, 53, 75 }, // PUL|PUR|PL
1674  { 0, 128, 0, 0 }, // PUL|PU
1675  { -104, 116, 0, 116 }, // PUL|PU |PL
1676  { 24, 80, 24, 0 }, // PUL|PU |PUR
1677  { -104, 116, 0, 116 } // PUL|PU |PUR|PL
1678  };
1679 
1680  /* This table shows which types of blocks can use other blocks for
1681  * prediction. For example, INTRA is the only mode in this table to
1682  * have a frame number of 0. That means INTRA blocks can only predict
1683  * from other INTRA blocks. There are 2 golden frame coding types;
 1684  * blocks encoded in these modes can only predict from other blocks
 1685  * that were encoded with one of these 2 modes. */
1686  static const unsigned char compatible_frame[9] = {
1687  1, /* MODE_INTER_NO_MV */
1688  0, /* MODE_INTRA */
1689  1, /* MODE_INTER_PLUS_MV */
1690  1, /* MODE_INTER_LAST_MV */
1691  1, /* MODE_INTER_PRIOR_MV */
1692  2, /* MODE_USING_GOLDEN */
1693  2, /* MODE_GOLDEN_MV */
1694  1, /* MODE_INTER_FOUR_MV */
1695  3 /* MODE_COPY */
1696  };
1697  int current_frame_type;
1698 
1699  /* there is a last DC predictor for each of the 3 frame types */
1700  short last_dc[3];
1701 
1702  int transform = 0;
1703 
1704  vul =
1705  vu =
1706  vur =
1707  vl = 0;
1708  last_dc[0] =
1709  last_dc[1] =
1710  last_dc[2] = 0;
1711 
1712  /* for each fragment row... */
1713  for (int y = 0; y < fragment_height; y++) {
1714  /* for each fragment in a row... */
1715  for (int x = 0; x < fragment_width; x++, i++) {
1716 
1717  /* reverse prediction if this block was coded */
1718  if (s->all_fragments[i].coding_method != MODE_COPY) {
1719  current_frame_type =
1720  compatible_frame[s->all_fragments[i].coding_method];
1721 
1722  transform = 0;
1723  if (x) {
1724  l = i - 1;
1725  vl = DC_COEFF(l);
1726  if (COMPATIBLE_FRAME(l))
1727  transform |= PL;
1728  }
1729  if (y) {
1730  u = i - fragment_width;
1731  vu = DC_COEFF(u);
1732  if (COMPATIBLE_FRAME(u))
1733  transform |= PU;
1734  if (x) {
1735  ul = i - fragment_width - 1;
1736  vul = DC_COEFF(ul);
1737  if (COMPATIBLE_FRAME(ul))
1738  transform |= PUL;
1739  }
1740  if (x + 1 < fragment_width) {
1741  ur = i - fragment_width + 1;
1742  vur = DC_COEFF(ur);
1743  if (COMPATIBLE_FRAME(ur))
1744  transform |= PUR;
1745  }
1746  }
1747 
1748  if (transform == 0) {
1749  /* if there were no fragments to predict from, use last
1750  * DC saved */
1751  predicted_dc = last_dc[current_frame_type];
1752  } else {
1753  /* apply the appropriate predictor transform */
1754  predicted_dc =
1755  (predictor_transform[transform][0] * vul) +
1756  (predictor_transform[transform][1] * vu) +
1757  (predictor_transform[transform][2] * vur) +
1758  (predictor_transform[transform][3] * vl);
1759 
1760  predicted_dc /= 128;
1761 
1762  /* check for outranging on the [ul u l] and
1763  * [ul u ur l] predictors */
1764  if ((transform == 15) || (transform == 13)) {
1765  if (FFABS(predicted_dc - vu) > 128)
1766  predicted_dc = vu;
1767  else if (FFABS(predicted_dc - vl) > 128)
1768  predicted_dc = vl;
1769  else if (FFABS(predicted_dc - vul) > 128)
1770  predicted_dc = vul;
1771  }
1772  }
1773 
1774  /* at long last, apply the predictor */
1775  DC_COEFF(i) += predicted_dc;
1776  /* save the DC */
1777  last_dc[current_frame_type] = DC_COEFF(i);
1778  }
1779  }
1780  }
1781 }
1782 
/**
 * Apply the in-loop deblocking filter to fragment rows [ystart, yend)
 * of one plane. Only edges of coded (non-MODE_COPY) blocks are filtered.
 */
static void apply_loop_filter(Vp3DecodeContext *s, int plane,
                              int ystart, int yend)
{
    int *bounding_values = s->bounding_values_array + 127;

    int width = s->fragment_width[!!plane];
    int height = s->fragment_height[!!plane];
    int fragment = s->fragment_start[plane] + ystart * width;
    ptrdiff_t stride = s->current_frame.f->linesize[plane];
    uint8_t *plane_data = s->current_frame.f->data[plane];
    /* non-flipped output is rendered bottom-up via a negative stride */
    if (!s->flipped_image)
        stride = -stride;
    plane_data += s->data_offset[plane] + 8 * ystart * stride;

    for (int y = ystart; y < yend; y++) {
        for (int x = 0; x < width; x++) {
            /* This code basically just deblocks on the edges of coded blocks.
             * However, it has to be much more complicated because of the
             * brain damaged deblock ordering used in VP3/Theora. Order matters
             * because some pixels get filtered twice. */
            if (s->all_fragments[fragment].coding_method != MODE_COPY) {
                /* do not perform left edge filter for left columns frags */
                if (x > 0) {
                    s->vp3dsp.h_loop_filter(
                        plane_data + 8 * x,
                        stride, bounding_values);
                }

                /* do not perform top edge filter for top row fragments */
                if (y > 0) {
                    s->vp3dsp.v_loop_filter(
                        plane_data + 8 * x,
                        stride, bounding_values);
                }

                /* do not perform right edge filter for right column
                 * fragments or if right fragment neighbor is also coded
                 * in this frame (it will be filtered in next iteration) */
                if ((x < width - 1) &&
                    (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
                    s->vp3dsp.h_loop_filter(
                        plane_data + 8 * x + 8,
                        stride, bounding_values);
                }

                /* do not perform bottom edge filter for bottom row
                 * fragments or if bottom fragment neighbor is also coded
                 * in this frame (it will be filtered in the next row) */
                if ((y < height - 1) &&
                    (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
                    s->vp3dsp.v_loop_filter(
                        plane_data + 8 * x + 8 * stride,
                        stride, bounding_values);
                }
            }

            fragment++;
        }
        plane_data += 8 * stride;
    }
}
1844 
1845 /**
1846  * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1847  * for the next block in coding order
1848  */
static inline int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag,
                              int plane, int inter, int16_t block[64])
{
    const int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
    const uint8_t *perm = s->idct_scantable;
    int i = 0;

    do {
        /* peek at the next token of the stream for coefficient level i;
         * the low 2 bits encode the token type (0 EOB, 1 zero run, 2 coeff) */
        int token = *s->dct_tokens[plane][i];
        switch (token & 3) {
        case 0: // EOB
            if (--token < 4) // 0-3 are token types so the EOB run must now be 0
                s->dct_tokens[plane][i]++;
            else
                /* decrement the run count stored in the upper bits in place */
                *s->dct_tokens[plane][i] = token & ~3;
            goto end;
        case 1: // zero run
            s->dct_tokens[plane][i]++;
            /* bits 2-8 hold the run length, bits 9+ the coefficient value */
            i += (token >> 2) & 0x7f;
            if (i > 63) {
                av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
                return i;
            }
            block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
            i++;
            break;
        case 2: // coeff
            block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
            s->dct_tokens[plane][i++]++;
            break;
        default: // shouldn't happen
            return i;
        }
    } while (i < 64);
    // return value is expected to be a valid level
    i--;
end:
    // the actual DC+prediction is in the fragment structure
    block[0] = frag->dc * s->qmat[0][inter][plane][0];
    return i;
}
1890 
1891 /**
1892  * called when all pixels up to row y are complete
1893  */
1895 {
1896  int h, cy;
1898 
1899  if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
1900  int y_flipped = s->flipped_image ? s->height - y : y;
1901 
1902  /* At the end of the frame, report INT_MAX instead of the height of
1903  * the frame. This makes the other threads' ff_thread_await_progress()
1904  * calls cheaper, because they don't have to clip their values. */
1905  ff_progress_frame_report(&s->current_frame,
1906  y_flipped == s->height ? INT_MAX
1907  : y_flipped - 1);
1908  }
1909 
1910  if (!s->avctx->draw_horiz_band)
1911  return;
1912 
1913  h = y - s->last_slice_end;
1914  s->last_slice_end = y;
1915  y -= h;
1916 
1917  if (!s->flipped_image)
1918  y = s->height - y - h;
1919 
1920  cy = y >> s->chroma_y_shift;
1921  offset[0] = s->current_frame.f->linesize[0] * y;
1922  offset[1] = s->current_frame.f->linesize[1] * cy;
1923  offset[2] = s->current_frame.f->linesize[2] * cy;
1924  for (int i = 3; i < AV_NUM_DATA_POINTERS; i++)
1925  offset[i] = 0;
1926 
1927  emms_c();
1928  s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1929 }
1930 
1931 /**
1932  * Wait for the reference frame of the current fragment.
1933  * The progress value is in luma pixel rows.
1934  */
1936  int motion_y, int y)
1937 {
1938  const ProgressFrame *ref_frame;
1939  int ref_row;
1940  int border = motion_y & 1;
1941 
1942  if (fragment->coding_method == MODE_USING_GOLDEN ||
1943  fragment->coding_method == MODE_GOLDEN_MV)
1944  ref_frame = &s->golden_frame;
1945  else
1946  ref_frame = &s->last_frame;
1947 
1948  ref_row = y + (motion_y >> 1);
1949  ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1950 
1952 }
1953 
1954 #if CONFIG_VP4_DECODER
1955 /**
1956  * @return non-zero if temp (edge_emu_buffer) was populated
1957  */
static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
                              const uint8_t *motion_source, ptrdiff_t stride,
                              int src_x, int src_y, uint8_t *temp)
{
    /* chroma motion vectors carry 2 fractional bits, luma carries 1 */
    int motion_shift = plane ? 4 : 2;
    int subpel_mask = plane ? 3 : 1;
    int *bounding_values = s->bounding_values_array + 127;

    int x, y;
    int x2, y2;
    int x_subpel, y_subpel;
    int x_offset, y_offset;

    int block_width = plane ? 8 : 16;
    int plane_width = s->width >> (plane && s->chroma_x_shift);
    int plane_height = s->height >> (plane && s->chroma_y_shift);

/* 12x12 scratch area: 8x8 block plus a border for the filter taps */
#define loop_stride 12
    uint8_t loop[12 * loop_stride];

    /* using division instead of shift to correctly handle negative values */
    x = 8 * bx + motion_x / motion_shift;
    y = 8 * by + motion_y / motion_shift;

    x_subpel = motion_x & subpel_mask;
    y_subpel = motion_y & subpel_mask;

    if (x_subpel || y_subpel) {
        /* half-pel prediction reads one extra row/column */
        x--;
        y--;

        if (x_subpel)
            x = FFMIN(x, x + FFSIGN(motion_x));

        if (y_subpel)
            y = FFMIN(y, y + FFSIGN(motion_y));

        x2 = x + block_width;
        y2 = y + block_width;

        /* nothing to filter if the block lies fully inside the plane */
        if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
            return 0;

        x_offset = (-(x + 2) & 7) + 2;
        y_offset = (-(y + 2) & 7) + 2;

        av_assert1(!(x_offset > 8 + x_subpel && y_offset > 8 + y_subpel));

        /* copy source with edge replication into the scratch buffer */
        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
                                 loop_stride, stride,
                                 12, 12, src_x - 1, src_y - 1,
                                 plane_width,
                                 plane_height);

        if (x_offset <= 8 + x_subpel)
            ff_vp3dsp_h_loop_filter_12(loop + x_offset, loop_stride, bounding_values);

        if (y_offset <= 8 + y_subpel)
            ff_vp3dsp_v_loop_filter_12(loop + y_offset*loop_stride, loop_stride, bounding_values);

    } else {

        x_offset = -x & 7;
        y_offset = -y & 7;

        /* fully aligned full-pel source needs no filtering */
        if (!x_offset && !y_offset)
            return 0;

        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
                                 loop_stride, stride,
                                 12, 12, src_x - 1, src_y - 1,
                                 plane_width,
                                 plane_height);

/* pick the unaligned filter variant when the pointer is not 8-aligned */
#define safe_loop_filter(name, ptr, stride, bounding_values) \
    if ((uintptr_t)(ptr) & 7) \
        s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
    else \
        s->vp3dsp.name(ptr, stride, bounding_values);

        if (x_offset)
            safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);

        if (y_offset)
            safe_loop_filter(v_loop_filter, loop + (y_offset + 1)*loop_stride + 1, loop_stride, bounding_values);
    }

    /* copy the filtered 9x9 region (8x8 block + half-pel border) to temp */
    for (int i = 0; i < 9; i++)
        memcpy(temp + i*stride, loop + (i + 1) * loop_stride + 1, 9);

    return 1;
}
2050 #endif
2051 
2052 /*
2053  * Perform the final rendering for a particular slice of data.
2054  * The slice number ranges from 0..(c_superblock_height - 1).
2055  */
2056 static void render_slice(Vp3DecodeContext *s, int slice)
2057 {
2058  int16_t *block = s->block;
2059  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2060  /* When decoding keyframes, the earlier frames may not be available,
2061  * so we just use the current frame in this case instead;
2062  * it also avoid using undefined pointer arithmetic. Nothing is
2063  * ever read from these frames in case of a keyframe. */
2064  const AVFrame *last_frame = s->last_frame.f ?
2065  s->last_frame.f : s->current_frame.f;
2066  const AVFrame *golden_frame = s->golden_frame.f ?
2067  s->golden_frame.f : s->current_frame.f;
2068  int motion_halfpel_index;
2069  int first_pixel;
2070 
2071  if (slice >= s->c_superblock_height)
2072  return;
2073 
2074  for (int plane = 0; plane < 3; plane++) {
2075  uint8_t *output_plane = s->current_frame.f->data[plane] +
2076  s->data_offset[plane];
2077  const uint8_t *last_plane = last_frame->data[plane] +
2078  s->data_offset[plane];
2079  const uint8_t *golden_plane = golden_frame->data[plane] +
2080  s->data_offset[plane];
2081  ptrdiff_t stride = s->current_frame.f->linesize[plane];
2082  int plane_width = s->width >> (plane && s->chroma_x_shift);
2083  int plane_height = s->height >> (plane && s->chroma_y_shift);
2084  const int8_t (*motion_val)[2] = s->motion_val[!!plane];
2085 
2086  int sb_y = slice << (!plane && s->chroma_y_shift);
2087  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
2088  int slice_width = plane ? s->c_superblock_width
2089  : s->y_superblock_width;
2090 
2091  int fragment_width = s->fragment_width[!!plane];
2092  int fragment_height = s->fragment_height[!!plane];
2093  int fragment_start = s->fragment_start[plane];
2094 
2095  int do_await = !plane && HAVE_THREADS &&
2096  (s->avctx->active_thread_type & FF_THREAD_FRAME);
2097 
2098  if (!s->flipped_image)
2099  stride = -stride;
2100  if (CONFIG_GRAY && plane && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
2101  continue;
2102 
2103  /* for each superblock row in the slice (both of them)... */
2104  for (; sb_y < slice_height; sb_y++) {
2105  /* for each superblock in a row... */
2106  for (int sb_x = 0; sb_x < slice_width; sb_x++) {
2107  /* for each block in a superblock... */
2108  for (int j = 0; j < 16; j++) {
2109  int x = 4 * sb_x + hilbert_offset[j][0];
2110  int y = 4 * sb_y + hilbert_offset[j][1];
2111  int fragment = y * fragment_width + x;
2112 
2113  int i = fragment_start + fragment;
2114 
2115  // bounds check
2116  if (x >= fragment_width || y >= fragment_height)
2117  continue;
2118 
2119  first_pixel = 8 * y * stride + 8 * x;
2120 
2121  if (do_await &&
2122  s->all_fragments[i].coding_method != MODE_INTRA)
2123  await_reference_row(s, &s->all_fragments[i],
2124  motion_val[fragment][1],
2125  (16 * y) >> s->chroma_y_shift);
2126 
2127  /* transform if this block was coded */
2128  if (s->all_fragments[i].coding_method != MODE_COPY) {
2129  const uint8_t *motion_source;
2130  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
2131  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
2132  motion_source = golden_plane;
2133  else
2134  motion_source = last_plane;
2135 
2136  motion_source += first_pixel;
2137  motion_halfpel_index = 0;
2138 
2139  /* sort out the motion vector if this fragment is coded
2140  * using a motion vector method */
2141  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2142  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
2143  int src_x, src_y;
2144  int standard_mc = 1;
2145  motion_x = motion_val[fragment][0];
2146  motion_y = motion_val[fragment][1];
2147 #if CONFIG_VP4_DECODER
2148  if (plane && s->version >= 2) {
2149  motion_x = (motion_x >> 1) | (motion_x & 1);
2150  motion_y = (motion_y >> 1) | (motion_y & 1);
2151  }
2152 #endif
2153 
2154  src_x = (motion_x >> 1) + 8 * x;
2155  src_y = (motion_y >> 1) + 8 * y;
2156 
2157  motion_halfpel_index = motion_x & 0x01;
2158  motion_source += (motion_x >> 1);
2159 
2160  motion_halfpel_index |= (motion_y & 0x01) << 1;
2161  motion_source += ((motion_y >> 1) * stride);
2162 
2163 #if CONFIG_VP4_DECODER
2164  if (s->version >= 2) {
2165  uint8_t *temp = s->edge_emu_buffer;
2166  if (stride < 0)
2167  temp -= 8 * stride;
2168  if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2169  motion_source = temp;
2170  standard_mc = 0;
2171  }
2172  }
2173 #endif
2174 
2175  if (standard_mc && (
2176  src_x < 0 || src_y < 0 ||
2177  src_x + 9 >= plane_width ||
2178  src_y + 9 >= plane_height)) {
2179  uint8_t *temp = s->edge_emu_buffer;
2180  if (stride < 0)
2181  temp -= 8 * stride;
2182 
2183  s->vdsp.emulated_edge_mc(temp, motion_source,
2184  stride, stride,
2185  9, 9, src_x, src_y,
2186  plane_width,
2187  plane_height);
2188  motion_source = temp;
2189  }
2190  }
2191 
2192  /* first, take care of copying a block from either the
2193  * previous or the golden frame */
2194  if (s->all_fragments[i].coding_method != MODE_INTRA) {
2195  /* Note, it is possible to implement all MC cases
2196  * with put_no_rnd_pixels_l2 which would look more
2197  * like the VP3 source but this would be slower as
2198  * put_no_rnd_pixels_tab is better optimized */
2199  if (motion_halfpel_index != 3) {
2200  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2201  output_plane + first_pixel,
2202  motion_source, stride, 8);
2203  } else {
2204  /* d is 0 if motion_x and _y have the same sign,
2205  * else -1 */
2206  int d = (motion_x ^ motion_y) >> 31;
2207  s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel,
2208  motion_source - d,
2209  motion_source + stride + 1 + d,
2210  stride, 8);
2211  }
2212  }
2213 
2214  /* invert DCT and place (or add) in final output */
2215 
2216  if (s->all_fragments[i].coding_method == MODE_INTRA) {
2217  vp3_dequant(s, s->all_fragments + i,
2218  plane, 0, block);
2219  s->vp3dsp.idct_put(output_plane + first_pixel,
2220  stride,
2221  block);
2222  } else {
2223  if (vp3_dequant(s, s->all_fragments + i,
2224  plane, 1, block)) {
2225  s->vp3dsp.idct_add(output_plane + first_pixel,
2226  stride,
2227  block);
2228  } else {
2229  s->vp3dsp.idct_dc_add(output_plane + first_pixel,
2230  stride, block);
2231  }
2232  }
2233  } else {
2234  /* copy directly from the previous frame */
2235  s->hdsp.put_pixels_tab[1][0](
2236  output_plane + first_pixel,
2237  last_plane + first_pixel,
2238  stride, 8);
2239  }
2240  }
2241  }
2242 
2243  // Filter up to the last row in the superblock row
2244  if (s->version < 2 && !s->skip_loop_filter)
2245  apply_loop_filter(s, plane, 4 * sb_y - !!sb_y,
2246  FFMIN(4 * sb_y + 3, fragment_height - 1));
2247  }
2248  }
2249 
2250  /* this looks like a good place for slice dispatch... */
2251  /* algorithm:
2252  * if (slice == s->macroblock_height - 1)
2253  * dispatch (both last slice & 2nd-to-last slice);
2254  * else if (slice > 0)
2255  * dispatch (slice - 1);
2256  */
2257 
2258  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
2259  s->height - 16));
2260 }
2261 
/* One-time (ff_thread_once) construction of the static VLC tables shared by
 * all decoder instances: superblock run-length, fragment run-length, motion
 * vector, coding-mode, and (VP4 only) per-component MV and block-pattern
 * tables.
 * NOTE(review): several call-head lines (orig. 2264, 2266, 2268, 2271, 2272,
 * 2275, 2280, 2288 — the VLCInitState declaration and the
 * ff_vlc_init_tables_from_lengths(...) openers) were elided by the
 * documentation extraction; only their continuation arguments remain below.
 * Restore from the upstream vp3.c before compiling. */
2262 static av_cold void init_tables_once(void)
2263 {
2265
/* superblock run-length VLC: 34 symbols */
2267  SUPERBLOCK_VLC_BITS, 34,
2269  NULL, 0, 0, 1, 0);
2270
/* fragment run-length VLC (orig. call head elided) */
2273  NULL, 0, 0, 0, 0);
2274
/* VP3 motion-vector VLC, symbols offset by -31 to cover [-31, 31] */
2276  &motion_vector_vlc_table[0][1], 2,
2277  &motion_vector_vlc_table[0][0], 2, 1,
2278  -31, 0);
2279
/* coding-mode alphabet VLC */
2281  mode_code_vlc_len, 1,
2282  NULL, 0, 0, 0, 0);
2283
2284 #if CONFIG_VP4_DECODER
/* VP4: one MV VLC per (component, context) pair, same -31 offset */
2285  for (int j = 0; j < 2; j++)
2286  for (int i = 0; i < 7; i++) {
2287  vp4_mv_vlc_table[j][i] =
2289  &vp4_mv_vlc[j][i][0][1], 2,
2290  &vp4_mv_vlc[j][i][0][0], 2, 1,
2291  -31, 0);
2292  }
2293
2294  /* version >= 2 */
/* VP4 block-pattern VLCs: 14 symbols, 5-bit tables */
2295  for (int i = 0; i < 2; i++) {
2296  block_pattern_vlc[i] =
2297  ff_vlc_init_tables(&state, 5, 14,
2298  &vp4_block_pattern_vlc[i][0][1], 2, 1,
2299  &vp4_block_pattern_vlc[i][0][0], 2, 1, 0);
2300  }
2301 #endif
2302 }
2303 
2304 /// Allocate tables for per-frame data in Vp3DecodeContext
/* NOTE(review): the signature line (orig. 2305,
 * `static av_cold int allocate_tables(AVCodecContext *avctx)`) and the
 * init_block_mapping() call (orig. 2343) were elided by the documentation
 * extraction. Returns 0 on success, -1 if any allocation failed (all
 * partial allocations are released later via free_tables/close). */
2306 {
2307  Vp3DecodeContext *s = avctx->priv_data;
2308  int y_fragment_count, c_fragment_count;
2309
/* drop any tables from a previous geometry before re-allocating */
2310  free_tables(avctx);
2311
2312  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2313  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2314
2315  /* superblock_coding is used by unpack_superblocks (VP3/Theora) and vp4_unpack_macroblocks (VP4) */
2316  s->superblock_coding = av_mallocz(FFMAX(s->superblock_count, s->yuv_macroblock_count));
2317  s->all_fragments = av_calloc(s->fragment_count, sizeof(*s->all_fragments));
2318
/* separate coded-fragment lists for keyframes and inter frames */
2319  s-> kf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2320  s->nkf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2321  memset(s-> num_kf_coded_fragment, -1, sizeof(s-> num_kf_coded_fragment));
2322
/* 64 token levels per fragment */
2323  s->dct_tokens_base = av_calloc(s->fragment_count,
2324  64 * sizeof(*s->dct_tokens_base));
2325  s->motion_val[0] = av_calloc(y_fragment_count, sizeof(*s->motion_val[0]));
2326  s->motion_val[1] = av_calloc(c_fragment_count, sizeof(*s->motion_val[1]));
2327
2328  /* work out the block mapping tables */
2329  s->superblock_fragments = av_calloc(s->superblock_count, 16 * sizeof(int));
2330  s->macroblock_coding = av_mallocz(s->macroblock_count + 1);
2331
2332  s->dc_pred_row = av_malloc_array(s->y_superblock_width * 4, sizeof(*s->dc_pred_row));
2333
/* single combined NULL check; cleanup is deferred to the caller's close path */
2334  if (!s->superblock_coding || !s->all_fragments ||
2335  !s->dct_tokens_base || !s->kf_coded_fragment_list ||
2336  !s->nkf_coded_fragment_list ||
2337  !s->superblock_fragments || !s->macroblock_coding ||
2338  !s->dc_pred_row ||
2339  !s->motion_val[0] || !s->motion_val[1]) {
2340  return -1;
2341  }
2342
2344
2345  return 0;
2346 }
2347 
2348 
2349 static av_cold void free_vlc_tables(AVRefStructOpaque unused, void *obj)
2350 {
2351  CoeffVLCs *vlcs = obj;
2352 
2353  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++)
2354  ff_vlc_free(&vlcs->vlcs[i]);
2355 }
2356 
2358 {
2359  static AVOnce init_static_once = AV_ONCE_INIT;
2360  Vp3DecodeContext *s = avctx->priv_data;
2361  int ret;
2362  int c_width;
2363  int c_height;
2364  int y_fragment_count, c_fragment_count;
2365 
2366  if (avctx->codec_tag == MKTAG('V', 'P', '4', '0')) {
2367  s->version = 3;
2368 #if !CONFIG_VP4_DECODER
2369  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2371 #endif
2372  } else if (avctx->codec_tag == MKTAG('V', 'P', '3', '0'))
2373  s->version = 0;
2374  else
2375  s->version = 1;
2376 
2377  s->avctx = avctx;
2378  s->width = FFALIGN(avctx->coded_width, 16);
2379  s->height = FFALIGN(avctx->coded_height, 16);
2380  if (s->width < 18)
2381  return AVERROR_PATCHWELCOME;
2382  if (avctx->codec_id != AV_CODEC_ID_THEORA)
2383  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2385  ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT);
2386  ff_videodsp_init(&s->vdsp, 8);
2387  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
2388 
2389  for (int i = 0; i < 64; i++) {
2390 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
2391  s->idct_permutation[i] = TRANSPOSE(i);
2392  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
2393 #undef TRANSPOSE
2394  }
2395 
2396  /* initialize to an impossible value which will force a recalculation
2397  * in the first frame decode */
2398  for (int i = 0; i < 3; i++)
2399  s->qps[i] = -1;
2400 
2401  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
2402  if (ret)
2403  return ret;
2404 
2405  s->y_superblock_width = (s->width + 31) / 32;
2406  s->y_superblock_height = (s->height + 31) / 32;
2407  s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;
2408 
2409  /* work out the dimensions for the C planes */
2410  c_width = s->width >> s->chroma_x_shift;
2411  c_height = s->height >> s->chroma_y_shift;
2412  s->c_superblock_width = (c_width + 31) / 32;
2413  s->c_superblock_height = (c_height + 31) / 32;
2414  s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;
2415 
2416  s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
2417  s->u_superblock_start = s->y_superblock_count;
2418  s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
2419 
2420  s->macroblock_width = (s->width + 15) / 16;
2421  s->macroblock_height = (s->height + 15) / 16;
2422  s->macroblock_count = s->macroblock_width * s->macroblock_height;
2423  s->c_macroblock_width = (c_width + 15) / 16;
2424  s->c_macroblock_height = (c_height + 15) / 16;
2425  s->c_macroblock_count = s->c_macroblock_width * s->c_macroblock_height;
2426  s->yuv_macroblock_count = s->macroblock_count + 2 * s->c_macroblock_count;
2427 
2428  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
2429  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
2430  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
2431  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
2432 
2433  /* fragment count covers all 8x8 blocks for all 3 planes */
2434  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2435  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2436  s->fragment_count = y_fragment_count + 2 * c_fragment_count;
2437  s->fragment_start[1] = y_fragment_count;
2438  s->fragment_start[2] = y_fragment_count + c_fragment_count;
2439 
2440  if (!s->theora_tables) {
2441  for (int i = 0; i < 64; i++) {
2442  s->coded_dc_scale_factor[0][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_y_dc_scale_factor[i];
2443  s->coded_dc_scale_factor[1][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_uv_dc_scale_factor[i];
2444  s->coded_ac_scale_factor[i] = s->version < 2 ? vp31_ac_scale_factor[i] : vp4_ac_scale_factor[i];
2445  s->base_matrix[0][i] = s->version < 2 ? vp31_intra_y_dequant[i] : vp4_generic_dequant[i];
2446  s->base_matrix[1][i] = s->version < 2 ? ff_mjpeg_std_chrominance_quant_tbl[i] : vp4_generic_dequant[i];
2447  s->base_matrix[2][i] = s->version < 2 ? vp31_inter_dequant[i] : vp4_generic_dequant[i];
2448  s->filter_limit_values[i] = s->version < 2 ? vp31_filter_limit_values[i] : vp4_filter_limit_values[i];
2449  }
2450 
2451  for (int inter = 0; inter < 2; inter++) {
2452  for (int plane = 0; plane < 3; plane++) {
2453  s->qr_count[inter][plane] = 1;
2454  s->qr_size[inter][plane][0] = 63;
2455  s->qr_base[inter][plane][0] =
2456  s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
2457  }
2458  }
2459  }
2460 
2461  if (!avctx->internal->is_copy) {
2462  CoeffVLCs *vlcs = av_refstruct_alloc_ext(sizeof(*s->coeff_vlc), 0,
2464  if (!vlcs)
2465  return AVERROR(ENOMEM);
2466 
2467  s->coeff_vlc = vlcs;
2468 
2469  if (!s->theora_tables) {
2470  const uint8_t (*bias_tabs)[32][2];
2471 
2472  /* init VLC tables */
2473  bias_tabs = CONFIG_VP4_DECODER && s->version >= 2 ? vp4_bias : vp3_bias;
2474  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++) {
2475  ret = ff_vlc_init_from_lengths(&vlcs->vlcs[i], 11, 32,
2476  &bias_tabs[i][0][1], 2,
2477  &bias_tabs[i][0][0], 2, 1,
2478  0, 0, avctx);
2479  if (ret < 0)
2480  return ret;
2481  vlcs->vlc_tabs[i] = vlcs->vlcs[i].table;
2482  }
2483  } else {
2484  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++) {
2485  const HuffTable *tab = &s->huffman_table[i];
2486 
2487  ret = ff_vlc_init_from_lengths(&vlcs->vlcs[i], 11, tab->nb_entries,
2488  &tab->entries[0].len, sizeof(*tab->entries),
2489  &tab->entries[0].sym, sizeof(*tab->entries), 1,
2490  0, 0, avctx);
2491  if (ret < 0)
2492  return ret;
2493  vlcs->vlc_tabs[i] = vlcs->vlcs[i].table;
2494  }
2495  }
2496  }
2497 
2498  ff_thread_once(&init_static_once, init_tables_once);
2499 
2500  return allocate_tables(avctx);
2501 }
2502 
2503 /// Release and shuffle frames after decode finishes
2504 static void update_frames(AVCodecContext *avctx)
2505 {
2506  Vp3DecodeContext *s = avctx->priv_data;
2507 
2508  if (s->keyframe)
2509  ff_progress_frame_replace(&s->golden_frame, &s->current_frame);
2510 
2511  /* shuffle frames */
2512  ff_progress_frame_unref(&s->last_frame);
2513  FFSWAP(ProgressFrame, s->last_frame, s->current_frame);
2514 }
2515 
2516 #if HAVE_THREADS
2517 static void ref_frames(Vp3DecodeContext *dst, const Vp3DecodeContext *src)
2518 {
2519  ff_progress_frame_replace(&dst->current_frame, &src->current_frame);
2520  ff_progress_frame_replace(&dst->golden_frame, &src->golden_frame);
2521  ff_progress_frame_replace(&dst->last_frame, &src->last_frame);
2522 }
2523 
2524 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
2525 {
2526  Vp3DecodeContext *s = dst->priv_data;
2527  const Vp3DecodeContext *s1 = src->priv_data;
2528  int qps_changed = 0;
2529 
2530  av_refstruct_replace(&s->coeff_vlc, s1->coeff_vlc);
2531 
2532  // copy previous frame data
2533  ref_frames(s, s1);
2534  if (!s1->current_frame.f ||
2535  s->width != s1->width || s->height != s1->height) {
2536  return -1;
2537  }
2538 
2539  if (s != s1) {
2540  s->keyframe = s1->keyframe;
2541 
2542  // copy qscale data if necessary
2543  for (int i = 0; i < 3; i++) {
2544  if (s->qps[i] != s1->qps[1]) {
2545  qps_changed = 1;
2546  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
2547  }
2548  }
2549 
2550  if (s->qps[0] != s1->qps[0])
2551  memcpy(&s->bounding_values_array, &s1->bounding_values_array,
2552  sizeof(s->bounding_values_array));
2553 
2554  if (qps_changed) {
2555  memcpy(s->qps, s1->qps, sizeof(s->qps));
2556  memcpy(s->last_qps, s1->last_qps, sizeof(s->last_qps));
2557  s->nqps = s1->nqps;
2558  }
2559  }
2560 
2561  update_frames(dst);
2562  return 0;
2563 }
2564 #endif
2565 
2567  int *got_frame, AVPacket *avpkt)
2568 {
2569  const uint8_t *buf = avpkt->data;
2570  int buf_size = avpkt->size;
2571  Vp3DecodeContext *s = avctx->priv_data;
2572  GetBitContext gb;
2573  int ret;
2574 
2575  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
2576  return ret;
2577 
2578 #if CONFIG_THEORA_DECODER
2579  if (s->theora && get_bits1(&gb)) {
2580  int type = get_bits(&gb, 7);
2581  skip_bits_long(&gb, 6*8); /* "theora" */
2582 
2583  if (s->avctx->active_thread_type&FF_THREAD_FRAME) {
2584  av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
2585  return AVERROR_PATCHWELCOME;
2586  }
2587  if (type == 0) {
2588  vp3_decode_end(avctx);
2589  ret = theora_decode_header(avctx, &gb);
2590 
2591  if (ret >= 0)
2592  ret = vp3_decode_init(avctx);
2593  if (ret < 0) {
2594  vp3_decode_end(avctx);
2595  return ret;
2596  }
2597  return buf_size;
2598  } else if (type == 2) {
2599  vp3_decode_end(avctx);
2600  ret = theora_decode_tables(avctx, &gb);
2601  if (ret >= 0)
2602  ret = vp3_decode_init(avctx);
2603  if (ret < 0) {
2604  vp3_decode_end(avctx);
2605  return ret;
2606  }
2607  return buf_size;
2608  }
2609 
2610  av_log(avctx, AV_LOG_ERROR,
2611  "Header packet passed to frame decoder, skipping\n");
2612  return -1;
2613  }
2614 #endif
2615 
2616  s->keyframe = !get_bits1(&gb);
2617  if (!s->all_fragments) {
2618  av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n");
2619  return -1;
2620  }
2621  if (!s->theora)
2622  skip_bits(&gb, 1);
2623  for (int i = 0; i < 3; i++)
2624  s->last_qps[i] = s->qps[i];
2625 
2626  s->nqps = 0;
2627  do {
2628  s->qps[s->nqps++] = get_bits(&gb, 6);
2629  } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
2630  for (int i = s->nqps; i < 3; i++)
2631  s->qps[i] = -1;
2632 
2633  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2634  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%"PRId64": Q index = %d\n",
2635  s->keyframe ? "key" : "", avctx->frame_num + 1, s->qps[0]);
2636 
2637  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
2638  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
2639  : AVDISCARD_NONKEY);
2640 
2641  if (s->qps[0] != s->last_qps[0])
2643 
2644  for (int i = 0; i < s->nqps; i++)
2645  // reinit all dequantizers if the first one changed, because
2646  // the DC of the first quantizer must be used for all matrices
2647  if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
2648  init_dequantizer(s, i);
2649 
2650  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
2651  return buf_size;
2652 
2653  ff_progress_frame_unref(&s->current_frame);
2654  ret = ff_progress_frame_get_buffer(avctx, &s->current_frame,
2656  if (ret < 0) {
2657  // Don't goto error here, as one can't report progress on or
2658  // unref a non-existent frame.
2659  return ret;
2660  }
2661  s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2663  if (s->keyframe)
2664  s->current_frame.f->flags |= AV_FRAME_FLAG_KEY;
2665  else
2666  s->current_frame.f->flags &= ~AV_FRAME_FLAG_KEY;
2667 
2668  if (!s->edge_emu_buffer) {
2669  s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
2670  if (!s->edge_emu_buffer) {
2671  ret = AVERROR(ENOMEM);
2672  goto error;
2673  }
2674  }
2675 
2676  if (s->keyframe) {
2677  if (!s->theora) {
2678  skip_bits(&gb, 4); /* width code */
2679  skip_bits(&gb, 4); /* height code */
2680  if (s->version) {
2681  int version = get_bits(&gb, 5);
2682 #if !CONFIG_VP4_DECODER
2683  if (version >= 2) {
2684  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2686  }
2687 #endif
2688  s->version = version;
2689  if (avctx->frame_num == 0)
2690  av_log(s->avctx, AV_LOG_DEBUG,
2691  "VP version: %d\n", s->version);
2692  }
2693  }
2694  if (s->version || s->theora) {
2695  if (get_bits1(&gb))
2696  av_log(s->avctx, AV_LOG_ERROR,
2697  "Warning, unsupported keyframe coding type?!\n");
2698  skip_bits(&gb, 2); /* reserved? */
2699 
2700 #if CONFIG_VP4_DECODER
2701  if (s->version >= 2) {
2702  int mb_height, mb_width;
2703  int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;
2704 
2705  mb_height = get_bits(&gb, 8);
2706  mb_width = get_bits(&gb, 8);
2707  if (mb_height != s->macroblock_height ||
2708  mb_width != s->macroblock_width)
2709  avpriv_request_sample(s->avctx, "macroblock dimension mismatch");
2710 
2711  mb_width_mul = get_bits(&gb, 5);
2712  mb_width_div = get_bits(&gb, 3);
2713  mb_height_mul = get_bits(&gb, 5);
2714  mb_height_div = get_bits(&gb, 3);
2715  if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
2716  avpriv_request_sample(s->avctx, "unexpected macroblock dimension multipler/divider");
2717 
2718  if (get_bits(&gb, 2))
2719  avpriv_request_sample(s->avctx, "unknown bits");
2720  }
2721 #endif
2722  }
2723  } else {
2724  if (!s->golden_frame.f) {
2725  av_log(s->avctx, AV_LOG_WARNING,
2726  "vp3: first frame not a keyframe\n");
2727 
2728  if ((ret = ff_progress_frame_get_buffer(avctx, &s->golden_frame,
2729  AV_GET_BUFFER_FLAG_REF)) < 0)
2730  goto error;
2731  s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
2732  ff_progress_frame_replace(&s->last_frame, &s->golden_frame);
2733  ff_progress_frame_report(&s->golden_frame, INT_MAX);
2734  }
2735  }
2736  ff_thread_finish_setup(avctx);
2737 
2738  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2739 
2740  if (s->version < 2) {
2741  if ((ret = unpack_superblocks(s, &gb)) < 0) {
2742  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2743  goto error;
2744  }
2745 #if CONFIG_VP4_DECODER
2746  } else {
2747  if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
2748  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_macroblocks\n");
2749  goto error;
2750  }
2751 #endif
2752  }
2753  if ((ret = unpack_modes(s, &gb)) < 0) {
2754  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2755  goto error;
2756  }
2757  if (ret = unpack_vectors(s, &gb)) {
2758  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2759  goto error;
2760  }
2761  if ((ret = unpack_block_qpis(s, &gb)) < 0) {
2762  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2763  goto error;
2764  }
2765 
2766  if (s->version < 2) {
2767  if ((ret = unpack_dct_coeffs(s, &gb)) < 0) {
2768  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2769  goto error;
2770  }
2771 #if CONFIG_VP4_DECODER
2772  } else {
2773  if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
2774  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_dct_coeffs\n");
2775  goto error;
2776  }
2777 #endif
2778  }
2779 
2780  for (int i = 0; i < 3; i++) {
2781  int height = s->height >> (i && s->chroma_y_shift);
2782  if (s->flipped_image)
2783  s->data_offset[i] = 0;
2784  else
2785  s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
2786  }
2787 
2788  s->last_slice_end = 0;
2789  for (int i = 0; i < s->c_superblock_height; i++)
2790  render_slice(s, i);
2791 
2792  // filter the last row
2793  if (s->version < 2)
2794  for (int i = 0; i < 3; i++) {
2795  int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
2796  apply_loop_filter(s, i, row, row + 1);
2797  }
2798  vp3_draw_horiz_band(s, s->height);
2799 
2800  /* output frame, offset as needed */
2801  if ((ret = av_frame_ref(frame, s->current_frame.f)) < 0)
2802  return ret;
2803 
2804  frame->crop_left = s->offset_x;
2805  frame->crop_right = avctx->coded_width - avctx->width - s->offset_x;
2806  frame->crop_top = s->offset_y;
2807  frame->crop_bottom = avctx->coded_height - avctx->height - s->offset_y;
2808 
2809  *got_frame = 1;
2810 
2811  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
2812  update_frames(avctx);
2813 
2814  return buf_size;
2815 
2816 error:
2817  ff_progress_frame_report(&s->current_frame, INT_MAX);
2818 
2819  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
2820  av_frame_unref(s->current_frame.f);
2821 
2822  return ret;
2823 }
2824 
2825 static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length,
2826  AVCodecContext *avctx)
2827 {
2828  if (get_bits1(gb)) {
2829  int token;
2830  if (huff->nb_entries >= 32) { /* overflow */
2831  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2832  return -1;
2833  }
2834  token = get_bits(gb, 5);
2835  ff_dlog(avctx, "code length %d, curr entry %d, token %d\n",
2836  length, huff->nb_entries, token);
2837  huff->entries[huff->nb_entries++] = (HuffEntry){ length, token };
2838  } else {
2839  /* The following bound follows from the fact that nb_entries <= 32. */
2840  if (length >= 31) { /* overflow */
2841  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2842  return -1;
2843  }
2844  length++;
2845  if (read_huffman_tree(huff, gb, length, avctx))
2846  return -1;
2847  if (read_huffman_tree(huff, gb, length, avctx))
2848  return -1;
2849  }
2850  return 0;
2851 }
2852 
2853 #if CONFIG_THEORA_DECODER
/* Pixel format selected by the 2-bit field in the Theora info header.
 * NOTE(review): the four initializer entries (orig. line 2855) were elided
 * by the documentation extraction — restore from upstream vp3.c; an
 * unsupported index maps to AV_PIX_FMT_NONE per the check in
 * theora_decode_header(). */
2854 static const enum AVPixelFormat theora_pix_fmts[4] = {
2856 };
2857 
/* Parse the Theora "info" (identification) header: version, coded and
 * visible dimensions, crop offsets, frame rate, aspect ratio, pixel format
 * and colorspace hints. Returns 0 on success or a negative AVERROR.
 * NOTE(review): orig. lines 2925 (the `av_reduce(` call head), 2964 and
 * 2966 (the color_primaries assignments) were elided by the documentation
 * extraction; only their continuations remain below. */
2858 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2859 {
2860  Vp3DecodeContext *s = avctx->priv_data;
2861  int visible_width, visible_height, colorspace;
2862  uint8_t offset_x = 0, offset_y = 0;
2863  int ret;
2864  AVRational fps, aspect;
2865
/* the fixed-layout part of the header needs at least 206 bits */
2866  if (get_bits_left(gb) < 206)
2867  return AVERROR_INVALIDDATA;
2868
2869  s->theora_header = 0;
2870  s->theora = get_bits(gb, 24);
2871  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2872  if (!s->theora) {
2873  s->theora = 1;
2874  avpriv_request_sample(s->avctx, "theora 0");
2875  }
2876
2877  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
2878  * but previous versions have the image flipped relative to vp3 */
2879  if (s->theora < 0x030200) {
2880  s->flipped_image = 1;
2881  av_log(avctx, AV_LOG_DEBUG,
2882  "Old (<alpha3) Theora bitstream, flipped image\n");
2883  }
2884
/* coded dimensions are stored in units of 16-pixel macroblocks */
2885  visible_width =
2886  s->width = get_bits(gb, 16) << 4;
2887  visible_height =
2888  s->height = get_bits(gb, 16) << 4;
2889
2890  if (s->theora >= 0x030200) {
2891  visible_width = get_bits(gb, 24);
2892  visible_height = get_bits(gb, 24);
2893
2894  offset_x = get_bits(gb, 8); /* offset x */
2895  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2896  }
2897
2898  /* sanity check */
2899  if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
2900  visible_width + offset_x > s->width ||
2901  visible_height + offset_y > s->height ||
2902  visible_width < 18
2903  ) {
2904  av_log(avctx, AV_LOG_ERROR,
2905  "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
2906  visible_width, visible_height, offset_x, offset_y,
2907  s->width, s->height);
2908  return AVERROR_INVALIDDATA;
2909  }
2910
2911  fps.num = get_bits_long(gb, 32);
2912  fps.den = get_bits_long(gb, 32);
2913  if (fps.num && fps.den) {
2914  if (fps.num < 0 || fps.den < 0) {
2915  av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
2916  return AVERROR_INVALIDDATA;
2917  }
/* header stores fps; framerate wants the inverse ordering, hence den/num */
2918  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
2919  fps.den, fps.num, 1 << 30);
2920  }
2921
2922  aspect.num = get_bits(gb, 24);
2923  aspect.den = get_bits(gb, 24);
2924  if (aspect.num && aspect.den) {
/* NOTE(review): the `av_reduce(&avctx->sample_aspect_ratio.num,` call head
 * (orig. 2925) was elided by the extraction. */
2926  &avctx->sample_aspect_ratio.den,
2927  aspect.num, aspect.den, 1 << 30);
2928  ff_set_sar(avctx, avctx->sample_aspect_ratio);
2929  }
2930
2931  if (s->theora < 0x030200)
2932  skip_bits(gb, 5); /* keyframe frequency force */
2933  colorspace = get_bits(gb, 8);
2934  skip_bits(gb, 24); /* bitrate */
2935
2936  skip_bits(gb, 6); /* quality hint */
2937
2938  if (s->theora >= 0x030200) {
2939  skip_bits(gb, 5); /* keyframe frequency force */
2940  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
2941  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
2942  av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n");
2943  return AVERROR_INVALIDDATA;
2944  }
2945  skip_bits(gb, 3); /* reserved */
2946  } else
2947  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2948
2949  if (s->width < 18)
2950  return AVERROR_PATCHWELCOME;
2951  ret = ff_set_dimensions(avctx, s->width, s->height);
2952  if (ret < 0)
2953  return ret;
2954  if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) {
2955  avctx->width = visible_width;
2956  avctx->height = visible_height;
2957  // translate offsets from theora axis ([0,0] lower left)
2958  // to normal axis ([0,0] upper left)
2959  s->offset_x = offset_x;
2960  s->offset_y = s->height - visible_height - offset_y;
2961  }
2962
/* NOTE(review): the color_primaries assignments for colorspace 1 and 2
 * (orig. 2964 and 2966) were elided by the extraction. */
2963  if (colorspace == 1)
2965  else if (colorspace == 2)
2967
2968  if (colorspace == 1 || colorspace == 2) {
2969  avctx->colorspace = AVCOL_SPC_BT470BG;
2970  avctx->color_trc = AVCOL_TRC_BT709;
2971  }
2972
2973  s->theora_header = 1;
2974  return 0;
2975 }
2976 
/* Parse the Theora "setup" header: loop-filter limits, AC/DC scale factor
 * tables, base quantization matrices, the qi-range (qr) mapping for each
 * (inter, plane) pair, and the DCT-token Huffman trees. The parsing is
 * strictly sequential on the bitstream, so statement order matters.
 * Returns 0 on success or a negative value / -1 on malformed input. */
2977 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2978 {
2979  Vp3DecodeContext *s = avctx->priv_data;
2980  int n, matrices, ret;
2981
/* the info header must have been parsed first */
2982  if (!s->theora_header)
2983  return AVERROR_INVALIDDATA;
2984
2985  if (s->theora >= 0x030200) {
2986  n = get_bits(gb, 3);
2987  /* loop filter limit values table */
2988  if (n)
2989  for (int i = 0; i < 64; i++)
2990  s->filter_limit_values[i] = get_bits(gb, n);
2991  }
2992
/* pre-3.2.0 streams use fixed 16-bit entries */
2993  if (s->theora >= 0x030200)
2994  n = get_bits(gb, 4) + 1;
2995  else
2996  n = 16;
2997  /* quality threshold table */
2998  for (int i = 0; i < 64; i++)
2999  s->coded_ac_scale_factor[i] = get_bits(gb, n);
3000
3001  if (s->theora >= 0x030200)
3002  n = get_bits(gb, 4) + 1;
3003  else
3004  n = 16;
3005  /* dc scale factor table */
3006  for (int i = 0; i < 64; i++)
3007  s->coded_dc_scale_factor[0][i] =
3008  s->coded_dc_scale_factor[1][i] = get_bits(gb, n);
3009
3010  if (s->theora >= 0x030200)
3011  matrices = get_bits(gb, 9) + 1;
3012  else
3013  matrices = 3;
3014
/* s->base_matrix holds at most 384 matrices */
3015  if (matrices > 384) {
3016  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
3017  return -1;
3018  }
3019
3020  for (int j = 0; j < matrices; j++)
3021  for (int i = 0; i < 64; i++)
3022  s->base_matrix[j][i] = get_bits(gb, 8);
3023
/* qi-range mapping: each (inter, plane) either copies an earlier pair's
 * mapping or codes a fresh list of (base matrix, range size) pairs */
3024  for (int inter = 0; inter <= 1; inter++) {
3025  for (int plane = 0; plane <= 2; plane++) {
3026  int newqr = 1;
/* the very first pair (intra luma) always codes a fresh mapping */
3027  if (inter || plane > 0)
3028  newqr = get_bits1(gb);
3029  if (!newqr) {
/* copy mode: (qtj, plj) index the previously-parsed pair to clone */
3030  int qtj, plj;
3031  if (inter && get_bits1(gb)) {
3032  qtj = 0;
3033  plj = plane;
3034  } else {
3035  qtj = (3 * inter + plane - 1) / 3;
3036  plj = (plane + 2) % 3;
3037  }
3038  s->qr_count[inter][plane] = s->qr_count[qtj][plj];
3039  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
3040  sizeof(s->qr_size[0][0]));
3041  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
3042  sizeof(s->qr_base[0][0]));
3043  } else {
/* fresh mapping: ranges must partition qi values 0..63 exactly */
3044  int qri = 0;
3045  int qi = 0;
3046
3047  for (;;) {
3048  int i = get_bits(gb, av_log2(matrices - 1) + 1);
3049  if (i >= matrices) {
3050  av_log(avctx, AV_LOG_ERROR,
3051  "invalid base matrix index\n");
3052  return -1;
3053  }
3054  s->qr_base[inter][plane][qri] = i;
3055  if (qi >= 63)
3056  break;
3057  i = get_bits(gb, av_log2(63 - qi) + 1) + 1;
3058  s->qr_size[inter][plane][qri++] = i;
3059  qi += i;
3060  }
3061
3062  if (qi > 63) {
3063  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
3064  return -1;
3065  }
3066  s->qr_count[inter][plane] = qri;
3067  }
3068  }
3069  }
3070
3071  /* Huffman tables */
3072  for (int i = 0; i < FF_ARRAY_ELEMS(s->huffman_table); i++) {
3073  s->huffman_table[i].nb_entries = 0;
3074  if ((ret = read_huffman_tree(&s->huffman_table[i], gb, 0, avctx)) < 0)
3075  return ret;
3076  }
3077
3078  s->theora_tables = 1;
3079
3080  return 0;
3081 }
3082 
/* Theora decoder init: split the Xiph-lacing extradata into the three
 * header packets (info 0x80, comment 0x81, setup 0x82), parse each, then
 * fall through to the common vp3_decode_init().
 * NOTE(review): the `ff_split_xiph_headers(avctx->extradata,
 * avctx->extradata_size,` call head (orig. 3101) was elided by the
 * documentation extraction; its continuation remains below. */
3083 static av_cold int theora_decode_init(AVCodecContext *avctx)
3084 {
3085  Vp3DecodeContext *s = avctx->priv_data;
3086  GetBitContext gb;
3087  int ptype;
3088  const uint8_t *header_start[3];
3089  int header_len[3];
3090  int ret;
3091
3092  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3093
3094  s->theora = 1;
3095
3096  if (!avctx->extradata_size) {
3097  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
3098  return -1;
3099  }
3100
3102  42, header_start, header_len) < 0) {
3103  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
3104  return -1;
3105  }
3106
3107  for (int i = 0; i < 3; i++) {
3108  if (header_len[i] <= 0)
3109  continue;
3110  ret = init_get_bits8(&gb, header_start[i], header_len[i]);
3111  if (ret < 0)
3112  return ret;
3113
3114  ptype = get_bits(&gb, 8);
3115
/* header packets have the high bit set; only warn on violations */
3116  if (!(ptype & 0x80)) {
3117  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
3118 // return -1;
3119  }
3120
3121  // FIXME: Check for this as well.
3122  skip_bits_long(&gb, 6 * 8); /* "theora" */
3123
3124  switch (ptype) {
3125  case 0x80:
3126  if (theora_decode_header(avctx, &gb) < 0)
3127  return -1;
3128  break;
3129  case 0x81:
3130 // FIXME: is this needed? it breaks sometimes
3131 // theora_decode_comments(avctx, gb);
3132  break;
3133  case 0x82:
3134  if (theora_decode_tables(avctx, &gb))
3135  return -1;
3136  break;
3137  default:
3138  av_log(avctx, AV_LOG_ERROR,
3139  "Unknown Theora config packet: %d\n", ptype & ~0x80);
3140  break;
3141  }
/* comment packets legitimately carry trailing data; others should not */
3142  if (ptype != 0x81 && get_bits_left(&gb) >= 8U)
3143  av_log(avctx, AV_LOG_WARNING,
3144  "%d bits left in packet %X\n",
3145  get_bits_left(&gb), ptype);
/* old bitstreams only carry the info header */
3146  if (s->theora < 0x030200)
3147  break;
3148  }
3149
3150  return vp3_decode_init(avctx);
3151 }
3152 
/* Public codec descriptor for the Theora decoder.
 * NOTE(review): several field lines (orig. 3161, 3163, 3167-3168 — the
 * FF_CODEC_DECODE_CB(...) entry and the continuations of .p.capabilities
 * and .caps_internal) were elided by the documentation extraction. */
3153 const FFCodec ff_theora_decoder = {
3154  .p.name = "theora",
3155  CODEC_LONG_NAME("Theora"),
3156  .p.type = AVMEDIA_TYPE_VIDEO,
3157  .p.id = AV_CODEC_ID_THEORA,
3158  .priv_data_size = sizeof(Vp3DecodeContext),
3159  .init = theora_decode_init,
3160  .close = vp3_decode_end,
3162  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3164  .flush = vp3_decode_flush,
3165  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3166  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3169 };
3170 #endif
3171 
3173  .p.name = "vp3",
3174  CODEC_LONG_NAME("On2 VP3"),
3175  .p.type = AVMEDIA_TYPE_VIDEO,
3176  .p.id = AV_CODEC_ID_VP3,
3177  .priv_data_size = sizeof(Vp3DecodeContext),
3178  .init = vp3_decode_init,
3179  .close = vp3_decode_end,
3181  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3183  .flush = vp3_decode_flush,
3184  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3185  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3187 };
3188 
3189 #if CONFIG_VP4_DECODER
/* Public codec descriptor for the VP4 decoder (shares init/close/decode
 * machinery with VP3).
 * NOTE(review): several field lines (orig. 3198, 3200, 3204 — the
 * FF_CODEC_DECODE_CB(...) entry and the continuations of .p.capabilities
 * and .caps_internal) were elided by the documentation extraction. */
3190 const FFCodec ff_vp4_decoder = {
3191  .p.name = "vp4",
3192  CODEC_LONG_NAME("On2 VP4"),
3193  .p.type = AVMEDIA_TYPE_VIDEO,
3194  .p.id = AV_CODEC_ID_VP4,
3195  .priv_data_size = sizeof(Vp3DecodeContext),
3196  .init = vp3_decode_init,
3197  .close = vp3_decode_end,
3199  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3201  .flush = vp3_decode_flush,
3202  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3203  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3205 };
3206 #endif
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
vp4_ac_scale_factor
static const uint16_t vp4_ac_scale_factor[64]
Definition: vp4data.h:64
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1893
vp4data.h
PUL
#define PUL
allocate_tables
static av_cold int allocate_tables(AVCodecContext *avctx)
Allocate tables for per-frame data in Vp3DecodeContext.
Definition: vp3.c:2305
vp3_dequant
static int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag, int plane, int inter, int16_t block[64])
Pull DCT tokens from the 64 levels to decode and dequant the coefficients for the next block in codin...
Definition: vp3.c:1849
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:278
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
Vp3Fragment::dc
int16_t dc
Definition: vp3.c:69
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ff_vlc_init_from_lengths
int ff_vlc_init_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:306
av_clip
#define av_clip
Definition: common.h:100
Vp3DecodeContext::offset_x
uint8_t offset_x
Definition: vp3.c:248
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
VP3DSPContext
Definition: vp3dsp.h:25
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
vp4_get_mv
static int vp4_get_mv(GetBitContext *gb, int axis, int last_motion)
Definition: vp3.c:888
vp3_decode_flush
static void vp3_decode_flush(AVCodecContext *avctx)
Definition: vp3.c:352
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:699
mem_internal.h
Vp3DecodeContext::c_macroblock_height
int c_macroblock_height
Definition: vp3.c:238
zero_run_base
static const uint8_t zero_run_base[32]
Definition: vp3data.h:133
MODE_INTER_PRIOR_LAST
#define MODE_INTER_PRIOR_LAST
Definition: vp3.c:87
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:251
VP4Predictor
Definition: vp3.c:177
Vp3DecodeContext::idct_scantable
uint8_t idct_scantable[64]
Definition: vp3.c:209
thread.h
HuffEntry::len
uint8_t len
Definition: exr.c:96
AVRefStructOpaque
RefStruct is an API for creating reference-counted objects with minimal overhead.
Definition: refstruct.h:58
VP4Predictor::dc
int dc
Definition: vp3.c:178
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:421
mode_code_vlc_len
static const uint8_t mode_code_vlc_len[8]
Definition: vp3data.h:97
superblock_run_length_vlc
static VLCElem superblock_run_length_vlc[88]
Definition: vp3.c:165
read_huffman_tree
static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length, AVCodecContext *avctx)
Definition: vp3.c:2825
PUR
#define PUR
vp3dsp.h
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:403
VP4_DC_GOLDEN
@ VP4_DC_GOLDEN
Definition: vp3.c:149
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:692
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:539
ff_vp3dsp_set_bounding_values
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
Definition: vp3dsp.c:477
ff_vp3_decoder
const FFCodec ff_vp3_decoder
Definition: vp3.c:3172
VP4_DC_INTER
@ VP4_DC_INTER
Definition: vp3.c:148
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
This function sets up the ProgressFrame, i.e.
Definition: decode.c:1848
Vp3DecodeContext::all_fragments
Vp3Fragment * all_fragments
Definition: vp3.c:245
mode_code_vlc
static VLCElem mode_code_vlc[24+2108 *CONFIG_VP4_DECODER]
Definition: vp3.c:170
Vp3DecodeContext::filter_limit_values
uint8_t filter_limit_values[64]
Definition: vp3.c:326
FFCodec
Definition: codec_internal.h:127
fragment_run_length_vlc
static VLCElem fragment_run_length_vlc[56]
Definition: vp3.c:166
motion_vector_vlc
static VLCElem motion_vector_vlc[112]
Definition: vp3.c:167
base
uint8_t base
Definition: vp3data.h:128
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
Vp3Fragment::coding_method
uint8_t coding_method
Definition: vp3.c:70
thread.h
unpack_superblocks
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:469
render_slice
static void render_slice(Vp3DecodeContext *s, int slice)
Definition: vp3.c:2056
CoeffVLCs::vlc_tabs
const VLCElem * vlc_tabs[80]
Definition: vp3.c:194
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1415
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:424
Vp3DecodeContext::height
int height
Definition: vp3.c:202
vlc_tables
static VLCElem vlc_tables[VLC_TABLES_SIZE]
Definition: imc.c:115
AV_CODEC_FLAG2_IGNORE_CROP
#define AV_CODEC_FLAG2_IGNORE_CROP
Discard cropping information from SPS.
Definition: avcodec.h:375
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
fragment
Definition: dashdec.c:37
Vp3DecodeContext::y_superblock_count
int y_superblock_count
Definition: vp3.c:225
xiph.h
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:56
Vp3DecodeContext::bounding_values_array
int bounding_values_array[256+2]
Definition: vp3.c:327
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:574
AVCodecInternal::is_copy
int is_copy
When using frame-threaded decoding, this field is set for the first worker thread (e....
Definition: internal.h:54
Vp3DecodeContext::superblock_fragments
int * superblock_fragments
Definition: vp3.c:315
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:646
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
Vp3DecodeContext::golden_frame
ProgressFrame golden_frame
Definition: vp3.c:204
get_coeff
static int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
Definition: vp3.c:1151
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
Vp3DecodeContext::qr_count
uint8_t qr_count[2][3]
Definition: vp3.c:258
Vp3DecodeContext::hdsp
HpelDSPContext hdsp
Definition: vp3.c:210
vp4_mv_vlc
static const uint8_t vp4_mv_vlc[2][7][63][2]
Definition: vp4data.h:112
BLOCK_Y
#define BLOCK_Y
Definition: vp3.c:643
Vp3DecodeContext::y_superblock_width
int y_superblock_width
Definition: vp3.c:223
CODING_MODE_COUNT
#define CODING_MODE_COUNT
Definition: vp3.c:91
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
CoeffVLCs
Definition: rv60dec.c:87
GetBitContext
Definition: get_bits.h:108
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
SET_CHROMA_MODES
#define SET_CHROMA_MODES
tables
Writing a table generator This documentation is preliminary Parts of the API are not good and should be changed Basic concepts A table generator consists of two *_tablegen c and *_tablegen h The h file will provide the variable declarations and initialization code for the tables
Definition: tablegen.txt:10
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:508
perm
perm
Definition: f_perms.c:75
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3198
MODE_INTER_LAST_MV
#define MODE_INTER_LAST_MV
Definition: vp3.c:86
Vp3DecodeContext::y_superblock_height
int y_superblock_height
Definition: vp3.c:224
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
Vp3DecodeContext::offset_y
uint8_t offset_y
Definition: vp3.c:249
Vp3DecodeContext::theora
int theora
Definition: vp3.c:200
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:647
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
loop
static int loop
Definition: ffplay.c:335
TRANSPOSE
#define TRANSPOSE(x)
AVRational::num
int num
Numerator.
Definition: rational.h:59
progressframe.h
refstruct.h
Vp3DecodeContext::num_kf_coded_fragment
int num_kf_coded_fragment[3]
Definition: vp3.c:298
TOKEN_ZERO_RUN
#define TOKEN_ZERO_RUN(coeff, zero_run)
Definition: vp3.c:282
vp4_pred_block_type_map
static const uint8_t vp4_pred_block_type_map[8]
Definition: vp3.c:154
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:69
await_reference_row
static void await_reference_row(Vp3DecodeContext *s, const Vp3Fragment *fragment, int motion_y, int y)
Wait for the reference frame of the current fragment.
Definition: vp3.c:1935
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:685
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
motion_vector_vlc_table
static const uint8_t motion_vector_vlc_table[63][2]
Definition: vp3data.h:101
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
theora_decode_tables
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:654
hilbert_offset
static const uint8_t hilbert_offset[16][2]
Definition: vp3.c:139
VLCInitState
For static VLCs, the number of bits can often be hardcoded at each get_vlc2() callsite.
Definition: vlc.h:212
emms_c
#define emms_c()
Definition: emms.h:63
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:538
Vp3DecodeContext::fragment_height
int fragment_height[2]
Definition: vp3.c:243
CoeffVLCs::vlcs
VLC vlcs[80]
Definition: vp3.c:195
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:311
s
#define s(width, name)
Definition: cbs_vp9.c:198
init_loop_filter
static void init_loop_filter(Vp3DecodeContext *s)
Definition: vp3.c:460
vp4_mv_table_selector
static const uint8_t vp4_mv_table_selector[32]
Definition: vp4data.h:105
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:431
transform
static const int8_t transform[32][32]
Definition: dsp.c:27
HuffTable::nb_entries
uint8_t nb_entries
Definition: vp3.c:190
init_block_mapping
static int init_block_mapping(Vp3DecodeContext *s)
This function sets up all of the various blocks mappings: superblocks <-> fragments,...
Definition: vp3.c:385
SB_PARTIALLY_CODED
#define SB_PARTIALLY_CODED
Definition: vp3.c:75
bits
uint8_t bits
Definition: vp3data.h:128
SB_NOT_CODED
#define SB_NOT_CODED
Definition: vp3.c:74
av_refstruct_alloc_ext
static void * av_refstruct_alloc_ext(size_t size, unsigned flags, void *opaque, void(*free_cb)(AVRefStructOpaque opaque, void *obj))
A wrapper around av_refstruct_alloc_ext_c() for the common case of a non-const qualified opaque.
Definition: refstruct.h:94
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
Vp3Fragment::qpi
uint8_t qpi
Definition: vp3.c:71
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1876
ff_progress_frame_await
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
decode.h
get_bits.h
reverse_dc_prediction
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height)
Definition: vp3.c:1634
unpack_dct_coeffs
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1299
ModeAlphabet
static const int ModeAlphabet[6][CODING_MODE_COUNT]
Definition: vp3.c:101
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
RSHIFT
#define RSHIFT(a, b)
Definition: common.h:56
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:296
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:593
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
MODE_USING_GOLDEN
#define MODE_USING_GOLDEN
Definition: vp3.c:88
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
Vp3DecodeContext::macroblock_width
int macroblock_width
Definition: vp3.c:234
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
Vp3DecodeContext::idct_permutation
uint8_t idct_permutation[64]
Definition: vp3.c:208
if
if(ret)
Definition: filter_design.txt:179
init_dequantizer
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
Definition: vp3.c:418
MODE_INTER_FOURMV
#define MODE_INTER_FOURMV
Definition: vp3.c:90
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:221
Vp3DecodeContext::c_superblock_width
int c_superblock_width
Definition: vp3.c:226
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
coeff_tables
static const int16_t *const coeff_tables[32]
Definition: vp3data.h:332
Vp3DecodeContext::offset_x_warned
int offset_x_warned
Definition: vp3.c:250
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
free_vlc_tables
static av_cold void free_vlc_tables(AVRefStructOpaque unused, void *obj)
Definition: vp3.c:2349
HuffTable
Used to store optimal huffman encoding results.
Definition: mjpegenc_huffman.h:69
PU
#define PU
unpack_modes
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:785
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:486
Vp3DecodeContext::superblock_count
int superblock_count
Definition: vp3.c:222
ff_vp3dsp_h_loop_filter_12
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
theora_decode_header
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
fragment_run_length_vlc_len
static const uint8_t fragment_run_length_vlc_len[30]
Definition: vp3data.h:92
vp4_bias
static const uint8_t vp4_bias[5 *16][32][2]
Definition: vp4data.h:329
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:109
mathops.h
Vp3DecodeContext::theora_header
int theora_header
Definition: vp3.c:200
TOKEN_COEFF
#define TOKEN_COEFF(coeff)
Definition: vp3.c:283
vp4_y_dc_scale_factor
static const uint8_t vp4_y_dc_scale_factor[64]
Definition: vp4data.h:42
Vp3DecodeContext::skip_loop_filter
int skip_loop_filter
Definition: vp3.c:216
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:305
Vp3DecodeContext::last_qps
int last_qps[3]
Definition: vp3.c:220
Vp3DecodeContext::coeff_vlc
CoeffVLCs * coeff_vlc
The first 16 of the following VLCs are for the dc coefficients; the others are four groups of 16 VLCs...
Definition: vp3.c:305
AV_CODEC_ID_VP4
@ AV_CODEC_ID_VP4
Definition: codec_id.h:300
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
jpegquanttables.h
vp31_ac_scale_factor
static const uint16_t vp31_ac_scale_factor[64]
Definition: vp3data.h:63
Vp3DecodeContext::qr_size
uint8_t qr_size[2][3][64]
Definition: vp3.c:259
AVOnce
#define AVOnce
Definition: thread.h:202
DC_COEFF
#define DC_COEFF(u)
Definition: vp3.c:1632
Vp3DecodeContext::vp3dsp
VP3DSPContext vp3dsp
Definition: vp3.c:212
Vp3DecodeContext::flipped_image
int flipped_image
Definition: vp3.c:214
vp31_intra_y_dequant
static const uint8_t vp31_intra_y_dequant[64]
Definition: vp3data.h:29
ff_vp3dsp_v_loop_filter_12
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:45
Vp3DecodeContext::fragment_width
int fragment_width[2]
Definition: vp3.c:242
Vp3DecodeContext::total_num_coded_frags
int total_num_coded_frags
Definition: vp3.c:290
SB_FULLY_CODED
#define SB_FULLY_CODED
Definition: vp3.c:76
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:220
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:515
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:540
fixed_motion_vector_table
static const int8_t fixed_motion_vector_table[64]
Definition: vp3data.h:115
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:401
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
unpack_vectors
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:903
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_vp4_decoder
const FFCodec ff_vp4_decoder
update_frames
static void update_frames(AVCodecContext *avctx)
Release and shuffle frames after decode finishes.
Definition: vp3.c:2504
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:61
VP4_DC_INTRA
@ VP4_DC_INTRA
Definition: vp3.c:147
VLCElem
Definition: vlc.h:32
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:404
ref_frame
static int ref_frame(VVCFrame *dst, const VVCFrame *src)
Definition: dec.c:593
Vp3DecodeContext::dct_tokens
int16_t * dct_tokens[3][64]
This is a list of all tokens in bitstream order.
Definition: vp3.c:279
Vp3DecodeContext::coded_dc_scale_factor
uint16_t coded_dc_scale_factor[2][64]
Definition: vp3.c:255
Vp3DecodeContext::qps
int qps[3]
Definition: vp3.c:218
Vp3DecodeContext::block
int16_t block[64]
Definition: vp3.c:213
Vp3DecodeContext::chroma_y_shift
int chroma_y_shift
Definition: vp3.c:203
Vp3DecodeContext::data_offset
int data_offset[3]
Definition: vp3.c:247
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
Vp3DecodeContext::macroblock_coding
unsigned char * macroblock_coding
Definition: vp3.c:319
version
version
Definition: libkvazaar.c:321
vp3data.h
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:220
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:613
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1612
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
Vp3DecodeContext::avctx
AVCodecContext * avctx
Definition: vp3.c:199
AV_CODEC_ID_VP3
@ AV_CODEC_ID_VP3
Definition: codec_id.h:81
emms.h
Vp3DecodeContext::nkf_coded_fragment_list
int * nkf_coded_fragment_list
Definition: vp3.c:297
Vp3DecodeContext::keyframe
int keyframe
Definition: vp3.c:207
MODE_INTRA
#define MODE_INTRA
Definition: vp3.c:84
apply_loop_filter
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1783
Vp3DecodeContext::macroblock_height
int macroblock_height
Definition: vp3.c:235
ff_vp3dsp_init
av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags)
Definition: vp3dsp.c:448
Vp3DecodeContext::yuv_macroblock_count
int yuv_macroblock_count
Definition: vp3.c:239
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
Vp3DecodeContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: vp3.c:321
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:537
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
Vp3DecodeContext::c_macroblock_count
int c_macroblock_count
Definition: vp3.c:236
AV_CODEC_ID_THEORA
@ AV_CODEC_ID_THEORA
Definition: codec_id.h:82
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
vp3_decode_frame
static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: vp3.c:2566
superblock_run_length_vlc_lens
static const uint8_t superblock_run_length_vlc_lens[34]
Definition: vp3data.h:85
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
ff_mjpeg_std_chrominance_quant_tbl
const uint8_t ff_mjpeg_std_chrominance_quant_tbl[64]
Definition: jpegquanttables.c:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:623
Vp3DecodeContext::macroblock_count
int macroblock_count
Definition: vp3.c:233
SUPERBLOCK_VLC_BITS
#define SUPERBLOCK_VLC_BITS
Definition: vp3.c:63
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:716
Vp3DecodeContext::current_frame
ProgressFrame current_frame
Definition: vp3.c:206
Vp3DecodeContext::v_superblock_start
int v_superblock_start
Definition: vp3.c:230
Vp3DecodeContext::c_superblock_height
int c_superblock_height
Definition: vp3.c:227
AVCodecContext::height
int height
Definition: avcodec.h:632
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:671
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
VP4_MV_VLC_BITS
#define VP4_MV_VLC_BITS
Definition: vp3.c:62
Vp3DecodeContext::coded_fragment_list
int * coded_fragment_list[3]
Definition: vp3.c:294
avcodec.h
Vp3DecodeContext::c_superblock_count
int c_superblock_count
Definition: vp3.c:228
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
PL
#define PL
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:591
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
Vp3DecodeContext::theora_tables
int theora_tables
Definition: vp3.c:200
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
free_tables
static av_cold void free_tables(AVCodecContext *avctx)
Definition: vp3.c:336
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
unpack_vlcs
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, const VLCElem *vlc_table, int coeff_index, int plane, int eob_run)
Definition: vp3.c:1179
ProgressFrame::f
struct AVFrame * f
Definition: progressframe.h:74
MODE_INTER_PLUS_MV
#define MODE_INTER_PLUS_MV
Definition: vp3.c:85
Vp3DecodeContext::num_coded_frags
int num_coded_frags[3][64]
number of blocks that contain DCT coefficients at the given level or higher
Definition: vp3.c:289
vp4_block_pattern_table_selector
static const uint8_t vp4_block_pattern_table_selector[14]
Definition: vp4data.h:86
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
Vp3DecodeContext::chroma_x_shift
int chroma_x_shift
Definition: vp3.c:203
BLOCK_X
#define BLOCK_X
Definition: vp3.c:642
U
#define U(x)
Definition: vpx_arith.h:37
MODE_COPY
#define MODE_COPY
Definition: vp3.c:94
Vp3DecodeContext
Definition: vp3.c:198
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
Definition: decode.c:1883
ff_theora_decoder
const FFCodec ff_theora_decoder
vp4_filter_limit_values
static const uint8_t vp4_filter_limit_values[64]
Definition: vp4data.h:75
MODE_GOLDEN_MV
#define MODE_GOLDEN_MV
Definition: vp3.c:89
coeff_vlc
static const VLCElem * coeff_vlc[2][8][4]
Definition: atrac9dec.c:110
FRAGMENT_PIXELS
#define FRAGMENT_PIXELS
Definition: vp3.c:65
AVCodecContext
main external API structure.
Definition: avcodec.h:451
vp3_draw_horiz_band
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
called when all pixels up to row y are complete
Definition: vp3.c:1894
av_refstruct_replace
void av_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:739
vp4_generic_dequant
static const uint8_t vp4_generic_dequant[64]
Definition: vp4data.h:31
zero_run_get_bits
static const uint8_t zero_run_get_bits[32]
Definition: vp3data.h:140
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
VLC
Definition: vlc.h:36
Vp3DecodeContext::coded_ac_scale_factor
uint32_t coded_ac_scale_factor[64]
Definition: vp3.c:256
output_plane
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, ptrdiff_t dst_pitch, int dst_height)
Convert and output the current plane.
Definition: indeo3.c:1031
HuffEntry
Definition: exr.c:95
vp31_inter_dequant
static const uint8_t vp31_inter_dequant[64]
Definition: vp3data.h:41
temp
else temp
Definition: vf_mcdeint.c:263
body
static void body(uint32_t ABCD[4], const uint8_t *src, size_t nblocks)
Definition: md5.c:103
VLC::table
VLCElem * table
Definition: vlc.h:38
vp4_block_pattern_vlc
static const uint8_t vp4_block_pattern_vlc[2][14][2]
Definition: vp4data.h:90
avpriv_split_xiph_headers
int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size, int first_header_size, const uint8_t *header_start[3], int header_len[3])
Split a single extradata buffer into the three headers that most Xiph codecs use.
Definition: xiph.c:26
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MODE_INTER_NO_MV
#define MODE_INTER_NO_MV
Definition: vp3.c:83
VideoDSPContext
Definition: videodsp.h:40
ff_vlc_init_tables_from_lengths
const av_cold VLCElem * ff_vlc_init_tables_from_lengths(VLCInitState *state, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags)
Definition: vlc.c:366
HuffEntry::sym
uint8_t sym
Definition: vp3.c:185
Vp3DecodeContext::superblock_coding
unsigned char * superblock_coding
Definition: vp3.c:231
COMPATIBLE_FRAME
#define COMPATIBLE_FRAME(x)
Definition: vp3.c:1630
AVERROR_DECODER_NOT_FOUND
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:54
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:647
Vp3DecodeContext::fragment_start
int fragment_start[3]
Definition: vp3.c:246
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_vlc_init_tables
static const VLCElem * ff_vlc_init_tables(VLCInitState *state, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, int flags)
Definition: vlc.h:246
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:342
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
vp3_bias
static const uint8_t vp3_bias[5 *16][32][2]
Definition: vp3data.h:370
get_eob_run
static int get_eob_run(GetBitContext *gb, int token)
Definition: vp3.c:1143
HuffTable::entries
HuffEntry entries[32]
Definition: vp3.c:189
VLC_INIT_STATIC_TABLE_FROM_LENGTHS
#define VLC_INIT_STATIC_TABLE_FROM_LENGTHS(vlc_table, nb_bits, nb_codes, lens, lens_wrap, syms, syms_wrap, syms_size, offset, flags)
Definition: vlc.h:280
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:476
Vp3DecodeContext::huffman_table
HuffTable huffman_table[5 *16]
Definition: vp3.c:324
ProgressFrame
The ProgressFrame structure.
Definition: progressframe.h:73
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
VLC_INIT_STATE
#define VLC_INIT_STATE(_table)
Definition: vlc.h:217
vp31_filter_limit_values
static const uint8_t vp31_filter_limit_values[64]
Definition: vp3data.h:74
AVPacket
This structure stores compressed data.
Definition: packet.h:516
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
VP4Predictor::type
int type
Definition: vp3.c:179
vp3_decode_init
static av_cold int vp3_decode_init(AVCodecContext *avctx)
Definition: vp3.c:2357
Vp3DecodeContext::base_matrix
uint8_t base_matrix[384][64]
Definition: vp3.c:257
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
videodsp.h
VP3_MV_VLC_BITS
#define VP3_MV_VLC_BITS
Definition: vp3.c:61
Vp3DecodeContext::fragment_count
int fragment_count
Definition: vp3.c:241
vp31_dc_scale_factor
static const uint8_t vp31_dc_scale_factor[64]
Definition: vp3data.h:52
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:632
imgutils.h
hpeldsp.h
Vp3DecodeContext::width
int width
Definition: vp3.c:202
Vp3DecodeContext::kf_coded_fragment_list
int * kf_coded_fragment_list
Definition: vp3.c:296
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
unpack_block_qpis
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1100
Vp3DecodeContext::qr_base
uint16_t qr_base[2][3][64]
Definition: vp3.c:260
vp3_decode_end
static av_cold int vp3_decode_end(AVCodecContext *avctx)
Definition: vp3.c:361
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
vp4_uv_dc_scale_factor
static const uint8_t vp4_uv_dc_scale_factor[64]
Definition: vp4data.h:53
MAXIMUM_LONG_BIT_RUN
#define MAXIMUM_LONG_BIT_RUN
Definition: vp3.c:81
init_tables_once
static av_cold void init_tables_once(void)
Definition: vp3.c:2262
Vp3DecodeContext::version
int version
Definition: vp3.c:201
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:85
Vp3DecodeContext::motion_val
int8_t(*[2] motion_val)[2]
Definition: vp3.c:252
Vp3DecodeContext::last_slice_end
int last_slice_end
Definition: vp3.c:215
Vp3DecodeContext::dc_pred_row
VP4Predictor * dc_pred_row
Definition: vp3.c:329
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
Vp3DecodeContext::u_superblock_start
int u_superblock_start
Definition: vp3.c:229
coeff_get_bits
static const uint8_t coeff_get_bits[32]
Definition: vp3data.h:148
state
static struct @468 state
Vp3DecodeContext::dct_tokens_base
int16_t * dct_tokens_base
Definition: vp3.c:280
Vp3Fragment
Definition: vp3.c:68
NB_VP4_DC_TYPES
@ NB_VP4_DC_TYPES
Definition: vp3.c:150
VP4_DC_UNDEFINED
@ VP4_DC_UNDEFINED
Definition: vp3.c:151
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:656
src
#define src
Definition: vp8dsp.c:248
Vp3DecodeContext::nqps
int nqps
Definition: vp3.c:219
Vp3DecodeContext::qmat
int16_t qmat[3][2][3][64]
qmat[qpi][is_inter][plane]
Definition: vp3.c:309
Vp3DecodeContext::vdsp
VideoDSPContext vdsp
Definition: vp3.c:211
eob_run_table
static const struct @261 eob_run_table[7]
TOKEN_EOB
#define TOKEN_EOB(eob_run)
Definition: vp3.c:281
Vp3DecodeContext::c_macroblock_width
int c_macroblock_width
Definition: vp3.c:237
Vp3DecodeContext::last_frame
ProgressFrame last_frame
Definition: vp3.c:205