FFmpeg
vp9.c
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "decode.h"
29 #include "get_bits.h"
30 #include "hwaccel_internal.h"
31 #include "hwconfig.h"
32 #include "profiles.h"
33 #include "progressframe.h"
34 #include "libavutil/refstruct.h"
35 #include "thread.h"
36 #include "pthread_internal.h"
37 
38 #include "videodsp.h"
39 #include "vp89_rac.h"
40 #include "vp9.h"
41 #include "vp9data.h"
42 #include "vp9dec.h"
43 #include "vpx_rac.h"
44 #include "libavutil/avassert.h"
45 #include "libavutil/mem.h"
46 #include "libavutil/pixdesc.h"
47 #include "libavutil/video_enc_params.h"
48 
49 #define VP9_SYNCCODE 0x498342
50 
51 #if HAVE_THREADS
52 DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
53  (offsetof(VP9Context, progress_mutex)),
54  (offsetof(VP9Context, progress_cond)));
55 
56 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
57  VP9Context *s = avctx->priv_data;
58 
59  if (avctx->active_thread_type & FF_THREAD_SLICE) {
60  if (s->entries)
61  av_freep(&s->entries);
62 
63  s->entries = av_malloc_array(n, sizeof(atomic_int));
64  if (!s->entries)
65  return AVERROR(ENOMEM);
66  }
67  return 0;
68 }
69 
70 static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
71  pthread_mutex_lock(&s->progress_mutex);
72  atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
73  pthread_cond_signal(&s->progress_cond);
74  pthread_mutex_unlock(&s->progress_mutex);
75 }
76 
77 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
78  if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
79  return;
80 
81  pthread_mutex_lock(&s->progress_mutex);
82  while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
83  pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
84  pthread_mutex_unlock(&s->progress_mutex);
85 }
86 #else
87 static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
88 #endif
89 
90 static void vp9_tile_data_free(VP9TileData *td)
91 {
92  av_freep(&td->b_base);
93  av_freep(&td->block_base);
94  av_freep(&td->block_structure);
95 }
96 
97 static void vp9_frame_unref(VP9Frame *f)
98 {
99  ff_progress_frame_unref(&f->tf);
100  av_refstruct_unref(&f->extradata);
101  av_refstruct_unref(&f->hwaccel_picture_private);
102  f->segmentation_map = NULL;
103 }
104 
105 static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
106 {
107  VP9Context *s = avctx->priv_data;
108  int ret, sz;
109 
110  ret = ff_progress_frame_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
111  if (ret < 0)
112  return ret;
113 
114  sz = 64 * s->sb_cols * s->sb_rows;
115  if (sz != s->frame_extradata_pool_size) {
116  av_refstruct_pool_uninit(&s->frame_extradata_pool);
117  s->frame_extradata_pool = av_refstruct_pool_alloc(sz * (1 + sizeof(VP9mvrefPair)),
118  AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME);
119  if (!s->frame_extradata_pool) {
120  s->frame_extradata_pool_size = 0;
121  ret = AVERROR(ENOMEM);
122  goto fail;
123  }
124  s->frame_extradata_pool_size = sz;
125  }
126  f->extradata = av_refstruct_pool_get(s->frame_extradata_pool);
127  if (!f->extradata) {
128  ret = AVERROR(ENOMEM);
129  goto fail;
130  }
131 
132  f->segmentation_map = f->extradata;
133  f->mv = (VP9mvrefPair *) ((char*)f->extradata + sz);
134 
135  ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private);
136  if (ret < 0)
137  goto fail;
138 
139  return 0;
140 
141 fail:
142  vp9_frame_unref(f);
143  return ret;
144 }
145 
146 static void vp9_frame_replace(VP9Frame *dst, const VP9Frame *src)
147 {
148  ff_progress_frame_replace(&dst->tf, &src->tf);
149 
150  av_refstruct_replace(&dst->extradata, src->extradata);
151 
152  dst->segmentation_map = src->segmentation_map;
153  dst->mv = src->mv;
154  dst->uses_2pass = src->uses_2pass;
155 
156  av_refstruct_replace(&dst->hwaccel_picture_private,
157  src->hwaccel_picture_private);
158 }
159 
160 static int update_size(AVCodecContext *avctx, int w, int h)
161 {
162 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
163  CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
164  CONFIG_VP9_D3D12VA_HWACCEL + \
165  CONFIG_VP9_NVDEC_HWACCEL + \
166  CONFIG_VP9_VAAPI_HWACCEL + \
167  CONFIG_VP9_VDPAU_HWACCEL + \
168  CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
169  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
170  VP9Context *s = avctx->priv_data;
171  uint8_t *p;
172  int bytesperpixel = s->bytesperpixel, ret, cols, rows;
173  int lflvl_len, i;
174 
175  av_assert0(w > 0 && h > 0);
176 
177  if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
178  if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
179  return ret;
180 
181  switch (s->pix_fmt) {
182  case AV_PIX_FMT_YUV420P:
183  case AV_PIX_FMT_YUV420P10:
184 #if CONFIG_VP9_DXVA2_HWACCEL
185  *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
186 #endif
187 #if CONFIG_VP9_D3D11VA_HWACCEL
188  *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
189  *fmtp++ = AV_PIX_FMT_D3D11;
190 #endif
191 #if CONFIG_VP9_D3D12VA_HWACCEL
192  *fmtp++ = AV_PIX_FMT_D3D12;
193 #endif
194 #if CONFIG_VP9_NVDEC_HWACCEL
195  *fmtp++ = AV_PIX_FMT_CUDA;
196 #endif
197 #if CONFIG_VP9_VAAPI_HWACCEL
198  *fmtp++ = AV_PIX_FMT_VAAPI;
199 #endif
200 #if CONFIG_VP9_VDPAU_HWACCEL
201  *fmtp++ = AV_PIX_FMT_VDPAU;
202 #endif
203 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
204  *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
205 #endif
206  break;
207  case AV_PIX_FMT_YUV420P12:
208 #if CONFIG_VP9_NVDEC_HWACCEL
209  *fmtp++ = AV_PIX_FMT_CUDA;
210 #endif
211 #if CONFIG_VP9_VAAPI_HWACCEL
212  *fmtp++ = AV_PIX_FMT_VAAPI;
213 #endif
214 #if CONFIG_VP9_VDPAU_HWACCEL
215  *fmtp++ = AV_PIX_FMT_VDPAU;
216 #endif
217  break;
218  case AV_PIX_FMT_YUV444P:
219  case AV_PIX_FMT_YUV444P10:
220  case AV_PIX_FMT_YUV444P12:
221 #if CONFIG_VP9_VAAPI_HWACCEL
222  *fmtp++ = AV_PIX_FMT_VAAPI;
223 #endif
224  break;
225  case AV_PIX_FMT_GBRP:
226  case AV_PIX_FMT_GBRP10:
227  case AV_PIX_FMT_GBRP12:
228 #if CONFIG_VP9_VAAPI_HWACCEL
229  *fmtp++ = AV_PIX_FMT_VAAPI;
230 #endif
231  break;
232  }
233 
234  *fmtp++ = s->pix_fmt;
235  *fmtp = AV_PIX_FMT_NONE;
236 
237  ret = ff_get_format(avctx, pix_fmts);
238  if (ret < 0)
239  return ret;
240 
241  avctx->pix_fmt = ret;
242  s->gf_fmt = s->pix_fmt;
243  s->w = w;
244  s->h = h;
245  }
246 
247  cols = (w + 7) >> 3;
248  rows = (h + 7) >> 3;
249 
250  if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
251  return 0;
252 
253  s->last_fmt = s->pix_fmt;
254  s->sb_cols = (w + 63) >> 6;
255  s->sb_rows = (h + 63) >> 6;
256  s->cols = (w + 7) >> 3;
257  s->rows = (h + 7) >> 3;
258  lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;
259 
260 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
261  av_freep(&s->intra_pred_data[0]);
262  // FIXME we slightly over-allocate here for subsampled chroma, but a little
263  // bit of padding shouldn't affect performance...
264  p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
265  lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
266  if (!p)
267  return AVERROR(ENOMEM);
268  assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
269  assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
270  assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
271  assign(s->above_y_nnz_ctx, uint8_t *, 16);
272  assign(s->above_mode_ctx, uint8_t *, 16);
273  assign(s->above_mv_ctx, VP9mv(*)[2], 16);
274  assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
275  assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
276  assign(s->above_partition_ctx, uint8_t *, 8);
277  assign(s->above_skip_ctx, uint8_t *, 8);
278  assign(s->above_txfm_ctx, uint8_t *, 8);
279  assign(s->above_segpred_ctx, uint8_t *, 8);
280  assign(s->above_intra_ctx, uint8_t *, 8);
281  assign(s->above_comp_ctx, uint8_t *, 8);
282  assign(s->above_ref_ctx, uint8_t *, 8);
283  assign(s->above_filter_ctx, uint8_t *, 8);
284  assign(s->lflvl, VP9Filter *, lflvl_len);
285 #undef assign
286 
287  if (s->td) {
288  for (i = 0; i < s->active_tile_cols; i++)
289  vp9_tile_data_free(&s->td[i]);
290  }
291 
292  if (s->s.h.bpp != s->last_bpp) {
293  ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
294  ff_videodsp_init(&s->vdsp, s->s.h.bpp);
295  s->last_bpp = s->s.h.bpp;
296  }
297 
298  return 0;
299 }
300 
301 static int update_block_buffers(AVCodecContext *avctx)
302 {
303  int i;
304  VP9Context *s = avctx->priv_data;
305  int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
306  VP9TileData *td = &s->td[0];
307 
308  if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
309  return 0;
310 
311  vp9_tile_data_free(td);
312  chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
313  chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
314  if (s->s.frames[CUR_FRAME].uses_2pass) {
315  int sbs = s->sb_cols * s->sb_rows;
316 
317  td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
318  td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
319  16 * 16 + 2 * chroma_eobs) * sbs);
320  if (!td->b_base || !td->block_base)
321  return AVERROR(ENOMEM);
322  td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
323  td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
324  td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
325  td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
326  td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
327 
328  if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
329  td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
330  if (!td->block_structure)
331  return AVERROR(ENOMEM);
332  }
333  } else {
334  for (i = 1; i < s->active_tile_cols; i++)
335  vp9_tile_data_free(&s->td[i]);
336 
337  for (i = 0; i < s->active_tile_cols; i++) {
338  s->td[i].b_base = av_malloc(sizeof(VP9Block));
339  s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
340  16 * 16 + 2 * chroma_eobs);
341  if (!s->td[i].b_base || !s->td[i].block_base)
342  return AVERROR(ENOMEM);
343  s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
344  s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
345  s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
346  s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
347  s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
348 
349  if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
350  s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
351  if (!s->td[i].block_structure)
352  return AVERROR(ENOMEM);
353  }
354  }
355  }
356  s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
357 
358  return 0;
359 }
360 
361 // The sign bit is at the end, not the start, of a bit sequence
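// For example, with n == 6 the bits 000101 followed by a set sign bit decode
// to -5: the magnitude is read first and the trailing bit only selects the
// sign, unlike the MSB-first two's-complement convention of get_sbits().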
362 static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
363 {
364  int v = get_bits(gb, n);
365  return get_bits1(gb) ? -v : v;
366 }
367 
368 static av_always_inline int inv_recenter_nonneg(int v, int m)
369 {
370  if (v > 2 * m)
371  return v;
372  if (v & 1)
373  return m - ((v + 1) >> 1);
374  return m + (v >> 1);
375 }
376 
377 // differential forward probability updates
378 static int update_prob(VPXRangeCoder *c, int p)
379 {
380  static const uint8_t inv_map_table[255] = {
381  7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
382  189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
383  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
384  25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
385  40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
386  55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
387  70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
388  86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
389  101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
390  116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
391  131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
392  146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
393  161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
394  177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
395  192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
396  207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
397  222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
398  237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
399  252, 253, 253,
400  };
401  int d;
402 
403  /* This code is trying to do a differential probability update. For a
404  * current probability A in the range [1, 255], the difference to a new
405  * probability of any value can be expressed differentially as 1-A, 255-A
406  * where some part of this (absolute range) exists both in positive as
407  * well as the negative part, whereas another part only exists in one
408  * half. We're trying to code this shared part differentially, i.e.
409  * times two where the value of the lowest bit specifies the sign, and
410  * the single part is then coded on top of this. This absolute difference
411  * then again has a value of [0, 254], but a bigger value in this range
412  * indicates that we're further away from the original value A, so we
413  * can code this as a VLC code, since higher values are increasingly
414  * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
415  * updates vs. the 'fine, exact' updates further down the range, which
416  * adds one extra dimension to this differential update model. */
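 /* Concretely, the branches below decode the absolute distance d as follows:
  * the first three branches cover d in [0, 15], [16, 31] and [32, 63] with
  * 4-, 4- and 5-bit payloads, while the last branch covers d in [64, 254] by
  * doubling 7-bit payloads >= 65 and appending one extra bit, so larger
  * (less likely) distances cost progressively more bits. */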
417 
418  if (!vp89_rac_get(c)) {
419  d = vp89_rac_get_uint(c, 4) + 0;
420  } else if (!vp89_rac_get(c)) {
421  d = vp89_rac_get_uint(c, 4) + 16;
422  } else if (!vp89_rac_get(c)) {
423  d = vp89_rac_get_uint(c, 5) + 32;
424  } else {
425  d = vp89_rac_get_uint(c, 7);
426  if (d >= 65)
427  d = (d << 1) - 65 + vp89_rac_get(c);
428  d += 64;
429  av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
430  }
431 
432  return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
433  255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
434 }
435 
436 static int read_colorspace_details(AVCodecContext *avctx)
437 {
438  static const enum AVColorSpace colorspaces[8] = {
439  AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
440  AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
441  };
442  VP9Context *s = avctx->priv_data;
443  int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
444 
445  s->bpp_index = bits;
446  s->s.h.bpp = 8 + bits * 2;
447  s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
448  avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
449  if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
450  static const enum AVPixelFormat pix_fmt_rgb[3] = {
451  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12
452  };
453  s->ss_h = s->ss_v = 0;
454  avctx->color_range = AVCOL_RANGE_JPEG;
455  s->pix_fmt = pix_fmt_rgb[bits];
456  if (avctx->profile & 1) {
457  if (get_bits1(&s->gb)) {
458  av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
459  return AVERROR_INVALIDDATA;
460  }
461  } else {
462  av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
463  avctx->profile);
464  return AVERROR_INVALIDDATA;
465  }
466  } else {
467  static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
468  { { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P },
469  { AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV420P } },
470  { { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
471  { AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
472  { { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
473  { AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }
474  };
475  avctx->color_range = AVCOL_RANGE_MPEG;
476  if (avctx->profile & 1) {
477  s->ss_h = get_bits1(&s->gb);
478  s->ss_v = get_bits1(&s->gb);
479  s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
480  if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
481  av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
482  avctx->profile);
483  return AVERROR_INVALIDDATA;
484  } else if (get_bits1(&s->gb)) {
485  av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
486  avctx->profile);
487  return AVERROR_INVALIDDATA;
488  }
489  } else {
490  s->ss_h = s->ss_v = 1;
491  s->pix_fmt = pix_fmt_for_ss[bits][1][1];
492  }
493  }
494 
495  return 0;
496 }
497 
498 static int decode_frame_header(AVCodecContext *avctx,
499  const uint8_t *data, int size, int *ref)
500 {
501  VP9Context *s = avctx->priv_data;
502  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
503  int last_invisible;
504  const uint8_t *data2;
505 
506  /* general header */
507  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
508  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
509  return ret;
510  }
511  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
512  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
513  return AVERROR_INVALIDDATA;
514  }
515  avctx->profile = get_bits1(&s->gb);
516  avctx->profile |= get_bits1(&s->gb) << 1;
517  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
518  if (avctx->profile > 3) {
519  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
520  return AVERROR_INVALIDDATA;
521  }
522  s->s.h.profile = avctx->profile;
523  if (get_bits1(&s->gb)) {
524  *ref = get_bits(&s->gb, 3);
525  return 0;
526  }
527 
528  s->last_keyframe = s->s.h.keyframe;
529  s->s.h.keyframe = !get_bits1(&s->gb);
530 
531  last_invisible = s->s.h.invisible;
532  s->s.h.invisible = !get_bits1(&s->gb);
533  s->s.h.errorres = get_bits1(&s->gb);
534  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
535 
536  if (s->s.h.keyframe) {
537  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
538  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
539  return AVERROR_INVALIDDATA;
540  }
541  if ((ret = read_colorspace_details(avctx)) < 0)
542  return ret;
543  // for profile 1, here follows the subsampling bits
544  s->s.h.refreshrefmask = 0xff;
545  w = get_bits(&s->gb, 16) + 1;
546  h = get_bits(&s->gb, 16) + 1;
547  if (get_bits1(&s->gb)) // display size
548  skip_bits(&s->gb, 32);
549  } else {
550  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
551  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
552  if (s->s.h.intraonly) {
553  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
554  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
555  return AVERROR_INVALIDDATA;
556  }
557  if (avctx->profile >= 1) {
558  if ((ret = read_colorspace_details(avctx)) < 0)
559  return ret;
560  } else {
561  s->ss_h = s->ss_v = 1;
562  s->s.h.bpp = 8;
563  s->bpp_index = 0;
564  s->bytesperpixel = 1;
565  s->pix_fmt = AV_PIX_FMT_YUV420P;
566  avctx->colorspace = AVCOL_SPC_BT470BG;
567  avctx->color_range = AVCOL_RANGE_MPEG;
568  }
569  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
570  w = get_bits(&s->gb, 16) + 1;
571  h = get_bits(&s->gb, 16) + 1;
572  if (get_bits1(&s->gb)) // display size
573  skip_bits(&s->gb, 32);
574  } else {
575  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
576  s->s.h.refidx[0] = get_bits(&s->gb, 3);
577  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
578  s->s.h.refidx[1] = get_bits(&s->gb, 3);
579  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
580  s->s.h.refidx[2] = get_bits(&s->gb, 3);
581  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
582  if (!s->s.refs[s->s.h.refidx[0]].f ||
583  !s->s.refs[s->s.h.refidx[1]].f ||
584  !s->s.refs[s->s.h.refidx[2]].f) {
585  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
586  return AVERROR_INVALIDDATA;
587  }
588  if (get_bits1(&s->gb)) {
589  w = s->s.refs[s->s.h.refidx[0]].f->width;
590  h = s->s.refs[s->s.h.refidx[0]].f->height;
591  } else if (get_bits1(&s->gb)) {
592  w = s->s.refs[s->s.h.refidx[1]].f->width;
593  h = s->s.refs[s->s.h.refidx[1]].f->height;
594  } else if (get_bits1(&s->gb)) {
595  w = s->s.refs[s->s.h.refidx[2]].f->width;
596  h = s->s.refs[s->s.h.refidx[2]].f->height;
597  } else {
598  w = get_bits(&s->gb, 16) + 1;
599  h = get_bits(&s->gb, 16) + 1;
600  }
601  // Note that in this code, "CUR_FRAME" is actually before we
602  // have formally allocated a frame, and thus actually represents
603  // the _last_ frame
604  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f &&
605  s->s.frames[CUR_FRAME].tf.f->width == w &&
606  s->s.frames[CUR_FRAME].tf.f->height == h;
607  if (get_bits1(&s->gb)) // display size
608  skip_bits(&s->gb, 32);
609  s->s.h.highprecisionmvs = get_bits1(&s->gb);
610  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
611  get_bits(&s->gb, 2);
612  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
613  s->s.h.signbias[0] != s->s.h.signbias[2];
614  if (s->s.h.allowcompinter) {
615  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
616  s->s.h.fixcompref = 2;
617  s->s.h.varcompref[0] = 0;
618  s->s.h.varcompref[1] = 1;
619  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
620  s->s.h.fixcompref = 1;
621  s->s.h.varcompref[0] = 0;
622  s->s.h.varcompref[1] = 2;
623  } else {
624  s->s.h.fixcompref = 0;
625  s->s.h.varcompref[0] = 1;
626  s->s.h.varcompref[1] = 2;
627  }
628  }
629  }
630  }
631  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
632  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
633  s->s.h.framectxid = c = get_bits(&s->gb, 2);
634  if (s->s.h.keyframe || s->s.h.intraonly)
635  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
636 
637  /* loopfilter header data */
638  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
639  // reset loopfilter defaults
640  s->s.h.lf_delta.ref[0] = 1;
641  s->s.h.lf_delta.ref[1] = 0;
642  s->s.h.lf_delta.ref[2] = -1;
643  s->s.h.lf_delta.ref[3] = -1;
644  s->s.h.lf_delta.mode[0] = 0;
645  s->s.h.lf_delta.mode[1] = 0;
646  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
647  }
648  s->s.h.filter.level = get_bits(&s->gb, 6);
649  sharp = get_bits(&s->gb, 3);
650  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
651  // the old cache values since they are still valid
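 // e.g. sharpness 4 halves the base limit and caps it at 5 (9 - sharpness)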
652  if (s->s.h.filter.sharpness != sharp) {
653  for (i = 1; i <= 63; i++) {
654  int limit = i;
655 
656  if (sharp > 0) {
657  limit >>= (sharp + 3) >> 2;
658  limit = FFMIN(limit, 9 - sharp);
659  }
660  limit = FFMAX(limit, 1);
661 
662  s->filter_lut.lim_lut[i] = limit;
663  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
664  }
665  }
666  s->s.h.filter.sharpness = sharp;
667  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
668  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
669  for (i = 0; i < 4; i++)
670  if (get_bits1(&s->gb))
671  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
672  for (i = 0; i < 2; i++)
673  if (get_bits1(&s->gb))
674  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
675  }
676  }
677 
678  /* quantization header data */
679  s->s.h.yac_qi = get_bits(&s->gb, 8);
680  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
681  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
682  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
683  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
684  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
685 #if FF_API_CODEC_PROPS
686 FF_DISABLE_DEPRECATION_WARNINGS
687  if (s->s.h.lossless)
688  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
689 FF_ENABLE_DEPRECATION_WARNINGS
690 #endif
691 
692  /* segmentation header info */
693  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
694  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
695  for (i = 0; i < 7; i++)
696  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
697  get_bits(&s->gb, 8) : 255;
698  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
699  for (i = 0; i < 3; i++)
700  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
701  get_bits(&s->gb, 8) : 255;
702  }
703 
704  if (get_bits1(&s->gb)) {
705  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
706  for (i = 0; i < 8; i++) {
707  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
708  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
709  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
710  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
711  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
712  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
713  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
714  }
715  }
716  } else {
717  // Reset fields under segmentation switch if segmentation is disabled.
718  // This is necessary because some hwaccels don't ignore these fields
719  // if segmentation is disabled.
720  s->s.h.segmentation.temporal = 0;
721  s->s.h.segmentation.update_map = 0;
722  }
723 
724  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
725  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
726  int qyac, qydc, quvac, quvdc, lflvl, sh;
727 
728  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
729  if (s->s.h.segmentation.absolute_vals)
730  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
731  else
732  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
733  } else {
734  qyac = s->s.h.yac_qi;
735  }
736  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
737  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
738  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
739  qyac = av_clip_uintp2(qyac, 8);
740 
741  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
742  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
743  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
744  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
745 
746  sh = s->s.h.filter.level >= 32;
747  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
748  if (s->s.h.segmentation.absolute_vals)
749  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
750  else
751  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
752  } else {
753  lflvl = s->s.h.filter.level;
754  }
755  if (s->s.h.lf_delta.enabled) {
756  s->s.h.segmentation.feat[i].lflvl[0][0] =
757  s->s.h.segmentation.feat[i].lflvl[0][1] =
758  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
759  for (j = 1; j < 4; j++) {
760  s->s.h.segmentation.feat[i].lflvl[j][0] =
761  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
762  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
763  s->s.h.segmentation.feat[i].lflvl[j][1] =
764  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
765  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
766  }
767  } else {
768  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
769  sizeof(s->s.h.segmentation.feat[i].lflvl));
770  }
771  }
772 
773  /* tiling info */
774  if ((ret = update_size(avctx, w, h)) < 0) {
775  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
776  w, h, s->pix_fmt);
777  return ret;
778  }
779  for (s->s.h.tiling.log2_tile_cols = 0;
780  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
781  s->s.h.tiling.log2_tile_cols++) ;
782  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
783  max = FFMAX(0, max - 1);
784  while (max > s->s.h.tiling.log2_tile_cols) {
785  if (get_bits1(&s->gb))
786  s->s.h.tiling.log2_tile_cols++;
787  else
788  break;
789  }
790  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
791  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
792  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
793  int n_range_coders;
794  VPXRangeCoder *rc;
795 
796  if (s->td) {
797  for (i = 0; i < s->active_tile_cols; i++)
798  vp9_tile_data_free(&s->td[i]);
799  av_freep(&s->td);
800  }
801 
802  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
803  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
804  s->s.h.tiling.tile_cols : 1;
805  vp9_alloc_entries(avctx, s->sb_rows);
806  if (avctx->active_thread_type == FF_THREAD_SLICE) {
807  n_range_coders = 4; // max_tile_rows
808  } else {
809  n_range_coders = s->s.h.tiling.tile_cols;
810  }
811  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
812  n_range_coders * sizeof(VPXRangeCoder));
813  if (!s->td)
814  return AVERROR(ENOMEM);
815  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
816  for (i = 0; i < s->active_tile_cols; i++) {
817  s->td[i].s = s;
818  s->td[i].c_b = rc;
819  rc += n_range_coders;
820  }
821  }
822 
823  /* check reference frames */
824  if (!s->s.h.keyframe && !s->s.h.intraonly) {
825  int valid_ref_frame = 0;
826  for (i = 0; i < 3; i++) {
827  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
828  int refw = ref->width, refh = ref->height;
829 
830  if (ref->format != avctx->pix_fmt) {
831  av_log(avctx, AV_LOG_ERROR,
832  "Ref pixfmt (%s) did not match current frame (%s)",
833  av_get_pix_fmt_name(ref->format),
834  av_get_pix_fmt_name(avctx->pix_fmt));
835  return AVERROR_INVALIDDATA;
836  } else if (refw == w && refh == h) {
837  s->mvscale[i][0] = s->mvscale[i][1] = 0;
838  } else {
839  /* Check to make sure at least one of the frames that */
840  /* this frame references has valid dimensions */
841  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
842  av_log(avctx, AV_LOG_WARNING,
843  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
844  refw, refh, w, h);
845  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
846  continue;
847  }
848  s->mvscale[i][0] = (refw << 14) / w;
849  s->mvscale[i][1] = (refh << 14) / h;
850  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
851  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
852  }
853  valid_ref_frame++;
854  }
855  if (!valid_ref_frame) {
856  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
857  return AVERROR_INVALIDDATA;
858  }
859  }
860 
861  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
862  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
863  s->prob_ctx[3].p = ff_vp9_default_probs;
864  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
865  sizeof(ff_vp9_default_coef_probs));
866  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
867  sizeof(ff_vp9_default_coef_probs));
868  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
869  sizeof(ff_vp9_default_coef_probs));
870  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
871  sizeof(ff_vp9_default_coef_probs));
872  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
873  s->prob_ctx[c].p = ff_vp9_default_probs;
874  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
875  sizeof(ff_vp9_default_coef_probs));
876  }
877 
878  // next 16 bits is size of the rest of the header (arith-coded)
879  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
880  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
881 
882  data2 = align_get_bits(&s->gb);
883  if (size2 > size - (data2 - data)) {
884  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
885  return AVERROR_INVALIDDATA;
886  }
887  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
888  if (ret < 0)
889  return ret;
890 
891  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
892  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
893  return AVERROR_INVALIDDATA;
894  }
895 
896  for (i = 0; i < s->active_tile_cols; i++) {
897  if (s->s.h.keyframe || s->s.h.intraonly) {
898  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
899  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
900  } else {
901  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
902  }
903  s->td[i].nb_block_structure = 0;
904  }
905 
906  /* FIXME is it faster to not copy here, but do it down in the fw updates
907  * as explicit copies if the fw update is missing (and skip the copy upon
908  * fw update)? */
909  s->prob.p = s->prob_ctx[c].p;
910 
911  // txfm updates
912  if (s->s.h.lossless) {
913  s->s.h.txfmmode = TX_4X4;
914  } else {
915  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
916  if (s->s.h.txfmmode == 3)
917  s->s.h.txfmmode += vp89_rac_get(&s->c);
918 
919  if (s->s.h.txfmmode == TX_SWITCHABLE) {
920  for (i = 0; i < 2; i++)
921  if (vpx_rac_get_prob_branchy(&s->c, 252))
922  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
923  for (i = 0; i < 2; i++)
924  for (j = 0; j < 2; j++)
925  if (vpx_rac_get_prob_branchy(&s->c, 252))
926  s->prob.p.tx16p[i][j] =
927  update_prob(&s->c, s->prob.p.tx16p[i][j]);
928  for (i = 0; i < 2; i++)
929  for (j = 0; j < 3; j++)
930  if (vpx_rac_get_prob_branchy(&s->c, 252))
931  s->prob.p.tx32p[i][j] =
932  update_prob(&s->c, s->prob.p.tx32p[i][j]);
933  }
934  }
935 
936  // coef updates
937  for (i = 0; i < 4; i++) {
938  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
939  if (vp89_rac_get(&s->c)) {
940  for (j = 0; j < 2; j++)
941  for (k = 0; k < 2; k++)
942  for (l = 0; l < 6; l++)
943  for (m = 0; m < 6; m++) {
944  uint8_t *p = s->prob.coef[i][j][k][l][m];
945  uint8_t *r = ref[j][k][l][m];
946  if (m >= 3 && l == 0) // dc only has 3 pt
947  break;
948  for (n = 0; n < 3; n++) {
949  if (vpx_rac_get_prob_branchy(&s->c, 252))
950  p[n] = update_prob(&s->c, r[n]);
951  else
952  p[n] = r[n];
953  }
954  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
955  }
956  } else {
957  for (j = 0; j < 2; j++)
958  for (k = 0; k < 2; k++)
959  for (l = 0; l < 6; l++)
960  for (m = 0; m < 6; m++) {
961  uint8_t *p = s->prob.coef[i][j][k][l][m];
962  uint8_t *r = ref[j][k][l][m];
963  if (m > 3 && l == 0) // dc only has 3 pt
964  break;
965  memcpy(p, r, 3);
966  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
967  }
968  }
969  if (s->s.h.txfmmode == i)
970  break;
971  }
972 
973  // mode updates
974  for (i = 0; i < 3; i++)
975  if (vpx_rac_get_prob_branchy(&s->c, 252))
976  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
977  if (!s->s.h.keyframe && !s->s.h.intraonly) {
978  for (i = 0; i < 7; i++)
979  for (j = 0; j < 3; j++)
980  if (vpx_rac_get_prob_branchy(&s->c, 252))
981  s->prob.p.mv_mode[i][j] =
982  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
983 
984  if (s->s.h.filtermode == FILTER_SWITCHABLE)
985  for (i = 0; i < 4; i++)
986  for (j = 0; j < 2; j++)
987  if (vpx_rac_get_prob_branchy(&s->c, 252))
988  s->prob.p.filter[i][j] =
989  update_prob(&s->c, s->prob.p.filter[i][j]);
990 
991  for (i = 0; i < 4; i++)
992  if (vpx_rac_get_prob_branchy(&s->c, 252))
993  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
994 
995  if (s->s.h.allowcompinter) {
996  s->s.h.comppredmode = vp89_rac_get(&s->c);
997  if (s->s.h.comppredmode)
998  s->s.h.comppredmode += vp89_rac_get(&s->c);
999  if (s->s.h.comppredmode == PRED_SWITCHABLE)
1000  for (i = 0; i < 5; i++)
1001  if (vpx_rac_get_prob_branchy(&s->c, 252))
1002  s->prob.p.comp[i] =
1003  update_prob(&s->c, s->prob.p.comp[i]);
1004  } else {
1005  s->s.h.comppredmode = PRED_SINGLEREF;
1006  }
1007 
1008  if (s->s.h.comppredmode != PRED_COMPREF) {
1009  for (i = 0; i < 5; i++) {
1010  if (vpx_rac_get_prob_branchy(&s->c, 252))
1011  s->prob.p.single_ref[i][0] =
1012  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1013  if (vpx_rac_get_prob_branchy(&s->c, 252))
1014  s->prob.p.single_ref[i][1] =
1015  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1016  }
1017  }
1018 
1019  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1020  for (i = 0; i < 5; i++)
1021  if (vpx_rac_get_prob_branchy(&s->c, 252))
1022  s->prob.p.comp_ref[i] =
1023  update_prob(&s->c, s->prob.p.comp_ref[i]);
1024  }
1025 
1026  for (i = 0; i < 4; i++)
1027  for (j = 0; j < 9; j++)
1028  if (vpx_rac_get_prob_branchy(&s->c, 252))
1029  s->prob.p.y_mode[i][j] =
1030  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1031 
1032  for (i = 0; i < 4; i++)
1033  for (j = 0; j < 4; j++)
1034  for (k = 0; k < 3; k++)
1035  if (vpx_rac_get_prob_branchy(&s->c, 252))
1036  s->prob.p.partition[3 - i][j][k] =
1037  update_prob(&s->c,
1038  s->prob.p.partition[3 - i][j][k]);
1039 
1040  // mv fields don't use the update_prob subexp model for some reason
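 // (each replacement is a raw 7-bit value scaled to the odd range 1..255
 // via (x << 1) | 1, rather than a delta against the previous probability)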
1041  for (i = 0; i < 3; i++)
1042  if (vpx_rac_get_prob_branchy(&s->c, 252))
1043  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1044 
1045  for (i = 0; i < 2; i++) {
1046  if (vpx_rac_get_prob_branchy(&s->c, 252))
1047  s->prob.p.mv_comp[i].sign =
1048  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1049 
1050  for (j = 0; j < 10; j++)
1051  if (vpx_rac_get_prob_branchy(&s->c, 252))
1052  s->prob.p.mv_comp[i].classes[j] =
1053  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1054 
1055  if (vpx_rac_get_prob_branchy(&s->c, 252))
1056  s->prob.p.mv_comp[i].class0 =
1057  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1058 
1059  for (j = 0; j < 10; j++)
1060  if (vpx_rac_get_prob_branchy(&s->c, 252))
1061  s->prob.p.mv_comp[i].bits[j] =
1062  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1063  }
1064 
1065  for (i = 0; i < 2; i++) {
1066  for (j = 0; j < 2; j++)
1067  for (k = 0; k < 3; k++)
1068  if (vpx_rac_get_prob_branchy(&s->c, 252))
1069  s->prob.p.mv_comp[i].class0_fp[j][k] =
1070  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1071 
1072  for (j = 0; j < 3; j++)
1073  if (vpx_rac_get_prob_branchy(&s->c, 252))
1074  s->prob.p.mv_comp[i].fp[j] =
1075  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1076  }
1077 
1078  if (s->s.h.highprecisionmvs) {
1079  for (i = 0; i < 2; i++) {
1080  if (vpx_rac_get_prob_branchy(&s->c, 252))
1081  s->prob.p.mv_comp[i].class0_hp =
1082  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1083 
1084  if (vpx_rac_get_prob_branchy(&s->c, 252))
1085  s->prob.p.mv_comp[i].hp =
1086  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1087  }
1088  }
1089  }
1090 
1091  return (data2 - data) + size2;
1092 }
1093 
1094 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1095  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1096 {
1097  const VP9Context *s = td->s;
1098  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1099  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1100  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1101  s->prob.p.partition[bl][c];
1102  enum BlockPartition bp;
1103  ptrdiff_t hbs = 4 >> bl;
1104  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1105  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1106  int bytesperpixel = s->bytesperpixel;
1107 
1108  if (bl == BL_8X8) {
1109  bp = vp89_rac_get_tree(td->c, ff_vp9_partition_tree, p);
1110  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1111  } else if (col + hbs < s->cols) { // FIXME why not <=?
1112  if (row + hbs < s->rows) { // FIXME why not <=?
1113  bp = vp89_rac_get_tree(td->c, ff_vp9_partition_tree, p);
1114  switch (bp) {
1115  case PARTITION_NONE:
1116  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1117  break;
1118  case PARTITION_H:
1119  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1120  yoff += hbs * 8 * y_stride;
1121  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1122  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1123  break;
1124  case PARTITION_V:
1125  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1126  yoff += hbs * 8 * bytesperpixel;
1127  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1128  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1129  break;
1130  case PARTITION_SPLIT:
1131  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1132  decode_sb(td, row, col + hbs, lflvl,
1133  yoff + 8 * hbs * bytesperpixel,
1134  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1135  yoff += hbs * 8 * y_stride;
1136  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1137  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1138  decode_sb(td, row + hbs, col + hbs, lflvl,
1139  yoff + 8 * hbs * bytesperpixel,
1140  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1141  break;
1142  default:
1143  av_unreachable("ff_vp9_partition_tree only has "
1144  "the four PARTITION_* terminal codes");
1145  }
1146  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1147  bp = PARTITION_SPLIT;
1148  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1149  decode_sb(td, row, col + hbs, lflvl,
1150  yoff + 8 * hbs * bytesperpixel,
1151  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1152  } else {
1153  bp = PARTITION_H;
1154  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1155  }
1156  } else if (row + hbs < s->rows) { // FIXME why not <=?
1157  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1158  bp = PARTITION_SPLIT;
1159  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1160  yoff += hbs * 8 * y_stride;
1161  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1162  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1163  } else {
1164  bp = PARTITION_V;
1165  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1166  }
1167  } else {
1168  bp = PARTITION_SPLIT;
1169  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1170  }
1171  td->counts.partition[bl][c][bp]++;
1172 }
1173 
1174 static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1175  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1176 {
1177  const VP9Context *s = td->s;
1178  VP9Block *b = td->b;
1179  ptrdiff_t hbs = 4 >> bl;
1180  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1181  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1182  int bytesperpixel = s->bytesperpixel;
1183 
1184  if (bl == BL_8X8) {
1185  av_assert2(b->bl == BL_8X8);
1186  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1187  } else if (td->b->bl == bl) {
1188  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1189  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1190  yoff += hbs * 8 * y_stride;
1191  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1192  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1193  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1194  yoff += hbs * 8 * bytesperpixel;
1195  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1196  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1197  }
1198  } else {
1199  decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
1200  if (col + hbs < s->cols) { // FIXME why not <=?
1201  if (row + hbs < s->rows) {
1202  decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1203  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1204  yoff += hbs * 8 * y_stride;
1205  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1206  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1207  decode_sb_mem(td, row + hbs, col + hbs, lflvl,
1208  yoff + 8 * hbs * bytesperpixel,
1209  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1210  } else {
1211  yoff += hbs * 8 * bytesperpixel;
1212  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1213  decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1214  }
1215  } else if (row + hbs < s->rows) {
1216  yoff += hbs * 8 * y_stride;
1217  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1218  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1219  }
1220  }
1221 }
1222 
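// Maps tile index idx to a [start, end) range in 8x8-block units: e.g. with
// log2_n == 1 and n == 10 sb64 columns, tile 0 covers sb64 columns [0, 5) and
// tile 1 covers [5, 10), each bound then scaled by << 3 into block coordinates.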
1223 static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
1224 {
1225  int sb_start = ( idx * n) >> log2_n;
1226  int sb_end = ((idx + 1) * n) >> log2_n;
1227  *start = FFMIN(sb_start, n) << 3;
1228  *end = FFMIN(sb_end, n) << 3;
1229 }
1230 
1231 static void free_buffers(VP9Context *s)
1232 {
1233  int i;
1234 
1235  av_freep(&s->intra_pred_data[0]);
1236  for (i = 0; i < s->active_tile_cols; i++)
1237  vp9_tile_data_free(&s->td[i]);
1238 }
1239 
1240 static av_cold int vp9_decode_free(AVCodecContext *avctx)
1241 {
1242  VP9Context *s = avctx->priv_data;
1243  int i;
1244 
1245  for (int i = 0; i < 3; i++)
1246  vp9_frame_unref(&s->s.frames[i]);
1247  av_refstruct_pool_uninit(&s->frame_extradata_pool);
1248  for (i = 0; i < 8; i++) {
1249  ff_progress_frame_unref(&s->s.refs[i]);
1250  ff_progress_frame_unref(&s->next_refs[i]);
1251  }
1252 
1253  free_buffers(s);
1254 #if HAVE_THREADS
1255  av_freep(&s->entries);
1256  ff_pthread_free(s, vp9_context_offsets);
1257 #endif
1258  av_freep(&s->td);
1259  return 0;
1260 }
1261 
1262 static int decode_tiles(AVCodecContext *avctx,
1263  const uint8_t *data, int size)
1264 {
1265  VP9Context *s = avctx->priv_data;
1266  VP9TileData *td = &s->td[0];
1267  int row, col, tile_row, tile_col, ret;
1268  int bytesperpixel;
1269  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1270  AVFrame *f;
1271  ptrdiff_t yoff, uvoff, ls_y, ls_uv;
1272 
1273  f = s->s.frames[CUR_FRAME].tf.f;
1274  ls_y = f->linesize[0];
1275  ls_uv =f->linesize[1];
1276  bytesperpixel = s->bytesperpixel;
1277 
1278  yoff = uvoff = 0;
1279  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1280  set_tile_offset(&tile_row_start, &tile_row_end,
1281  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1282 
1283  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1284  int64_t tile_size;
1285 
1286  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1287  tile_row == s->s.h.tiling.tile_rows - 1) {
1288  tile_size = size;
1289  } else {
1290  tile_size = AV_RB32(data);
1291  data += 4;
1292  size -= 4;
1293  }
1294  if (tile_size > size)
1295  return AVERROR_INVALIDDATA;
1296  ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
1297  if (ret < 0)
1298  return ret;
1299  if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
1300  return AVERROR_INVALIDDATA;
1301  data += tile_size;
1302  size -= tile_size;
1303  }
1304 
1305  for (row = tile_row_start; row < tile_row_end;
1306  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1307  VP9Filter *lflvl_ptr = s->lflvl;
1308  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1309 
1310  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1311  set_tile_offset(&tile_col_start, &tile_col_end,
1312  tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1313  td->tile_col_start = tile_col_start;
1314  if (s->pass != 2) {
1315  memset(td->left_partition_ctx, 0, 8);
1316  memset(td->left_skip_ctx, 0, 8);
1317  if (s->s.h.keyframe || s->s.h.intraonly) {
1318  memset(td->left_mode_ctx, DC_PRED, 16);
1319  } else {
1320  memset(td->left_mode_ctx, NEARESTMV, 8);
1321  }
1322  memset(td->left_y_nnz_ctx, 0, 16);
1323  memset(td->left_uv_nnz_ctx, 0, 32);
1324  memset(td->left_segpred_ctx, 0, 8);
1325 
1326  td->c = &td->c_b[tile_col];
1327  }
1328 
1329  for (col = tile_col_start;
1330  col < tile_col_end;
1331  col += 8, yoff2 += 64 * bytesperpixel,
1332  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1333  // FIXME integrate with lf code (i.e. zero after each
1334  // use, similar to invtxfm coefficients, or similar)
1335  if (s->pass != 1) {
1336  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1337  }
1338 
1339  if (s->pass == 2) {
1340  decode_sb_mem(td, row, col, lflvl_ptr,
1341  yoff2, uvoff2, BL_64X64);
1342  } else {
1343  if (vpx_rac_is_end(td->c)) {
1344  return AVERROR_INVALIDDATA;
1345  }
1346  decode_sb(td, row, col, lflvl_ptr,
1347  yoff2, uvoff2, BL_64X64);
1348  }
1349  }
1350  }
1351 
1352  if (s->pass == 1)
1353  continue;
1354 
1355  // backup pre-loopfilter reconstruction data for intra
1356  // prediction of next row of sb64s
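 // (specifically the bottom pixel row of this sb64 row, copied before the
 // loop filter modifies it, so the next row's intra predictors see the
 // unfiltered reconstruction)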
1357  if (row + 8 < s->rows) {
1358  memcpy(s->intra_pred_data[0],
1359  f->data[0] + yoff + 63 * ls_y,
1360  8 * s->cols * bytesperpixel);
1361  memcpy(s->intra_pred_data[1],
1362  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1363  8 * s->cols * bytesperpixel >> s->ss_h);
1364  memcpy(s->intra_pred_data[2],
1365  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1366  8 * s->cols * bytesperpixel >> s->ss_h);
1367  }
1368 
1369  // loopfilter one row
1370  if (s->s.h.filter.level) {
1371  yoff2 = yoff;
1372  uvoff2 = uvoff;
1373  lflvl_ptr = s->lflvl;
1374  for (col = 0; col < s->cols;
1375  col += 8, yoff2 += 64 * bytesperpixel,
1376  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1377  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
1378  yoff2, uvoff2);
1379  }
1380  }
1381 
1382  // FIXME maybe we can make this more finegrained by running the
1383  // loopfilter per-block instead of after each sbrow
1384  // In fact that would also make intra pred left preparation easier?
1385  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, row >> 3);
1386  }
1387  }
1388  return 0;
1389 }
1390 
1391 #if HAVE_THREADS
1392 static av_always_inline
1393 int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
1394  int threadnr)
1395 {
1396  VP9Context *s = avctx->priv_data;
1397  VP9TileData *td = &s->td[jobnr];
1398  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1399  int bytesperpixel = s->bytesperpixel, row, col, tile_row;
1400  unsigned tile_cols_len;
1401  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1402  VP9Filter *lflvl_ptr_base;
1403  AVFrame *f;
1404 
1405  f = s->s.frames[CUR_FRAME].tf.f;
1406  ls_y = f->linesize[0];
1407  ls_uv =f->linesize[1];
1408 
1409  set_tile_offset(&tile_col_start, &tile_col_end,
1410  jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1411  td->tile_col_start = tile_col_start;
1412  uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
1413  yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
1414  lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);
1415 
1416  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1417  set_tile_offset(&tile_row_start, &tile_row_end,
1418  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1419 
1420  td->c = &td->c_b[tile_row];
1421  for (row = tile_row_start; row < tile_row_end;
1422  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1423  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1424  VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);
1425 
1426  memset(td->left_partition_ctx, 0, 8);
1427  memset(td->left_skip_ctx, 0, 8);
1428  if (s->s.h.keyframe || s->s.h.intraonly) {
1429  memset(td->left_mode_ctx, DC_PRED, 16);
1430  } else {
1431  memset(td->left_mode_ctx, NEARESTMV, 8);
1432  }
1433  memset(td->left_y_nnz_ctx, 0, 16);
1434  memset(td->left_uv_nnz_ctx, 0, 32);
1435  memset(td->left_segpred_ctx, 0, 8);
1436 
1437  for (col = tile_col_start;
1438  col < tile_col_end;
1439  col += 8, yoff2 += 64 * bytesperpixel,
1440  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1441  // FIXME integrate with lf code (i.e. zero after each
1442  // use, similar to invtxfm coefficients, or similar)
1443  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1444  decode_sb(td, row, col, lflvl_ptr,
1445  yoff2, uvoff2, BL_64X64);
1446  }
1447 
1448  // backup pre-loopfilter reconstruction data for intra
1449  // prediction of next row of sb64s
1450  tile_cols_len = tile_col_end - tile_col_start;
1451  if (row + 8 < s->rows) {
1452  memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
1453  f->data[0] + yoff + 63 * ls_y,
1454  8 * tile_cols_len * bytesperpixel);
1455  memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1456  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1457  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1458  memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1459  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1460  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1461  }
1462 
1463  vp9_report_tile_progress(s, row >> 3, 1);
1464  }
1465  }
1466  return 0;
1467 }
1468 
1469 static av_always_inline
1470 int loopfilter_proc(AVCodecContext *avctx)
1471 {
1472  VP9Context *s = avctx->priv_data;
1473  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1474  VP9Filter *lflvl_ptr;
1475  int bytesperpixel = s->bytesperpixel, col, i;
1476  AVFrame *f;
1477 
1478  f = s->s.frames[CUR_FRAME].tf.f;
1479  ls_y = f->linesize[0];
1480  ls_uv =f->linesize[1];
1481 
1482  for (i = 0; i < s->sb_rows; i++) {
1483  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1484 
1485  if (s->s.h.filter.level) {
1486  yoff = (ls_y * 64)*i;
1487  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1488  lflvl_ptr = s->lflvl+s->sb_cols*i;
1489  for (col = 0; col < s->cols;
1490  col += 8, yoff += 64 * bytesperpixel,
1491  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1492  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1493  yoff, uvoff);
1494  }
1495  }
1496  }
1497  return 0;
1498 }
1499 #endif
1500 
1501 static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
1502 {
1503  AVVideoEncParams *par;
1504  unsigned int tile, nb_blocks = 0;
1505 
1506  if (s->s.h.segmentation.enabled) {
1507  for (tile = 0; tile < s->active_tile_cols; tile++)
1508  nb_blocks += s->td[tile].nb_block_structure;
1509  }
1510 
1511  par = av_video_enc_params_create_side_data(frame->tf.f,
1512  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1513  if (!par)
1514  return AVERROR(ENOMEM);
1515 
1516  par->qp = s->s.h.yac_qi;
1517  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1518  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1519  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1520  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1521  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1522 
1523  if (nb_blocks) {
1524  unsigned int block = 0;
1525  unsigned int tile, block_tile;
1526 
1527  for (tile = 0; tile < s->active_tile_cols; tile++) {
1528  VP9TileData *td = &s->td[tile];
1529 
1530  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1531  AVVideoBlockParams *b = av_video_enc_params_block(par, block++);
1532  unsigned int row = td->block_structure[block_tile].row;
1533  unsigned int col = td->block_structure[block_tile].col;
1534  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1535 
1536  b->src_x = col * 8;
1537  b->src_y = row * 8;
1538  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1539  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1540 
1541  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1542  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1543  if (s->s.h.segmentation.absolute_vals)
1544  b->delta_qp -= par->qp;
1545  }
1546  }
1547  }
1548  }
1549 
1550  return 0;
1551 }
1552 
1553 static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
1554  int *got_frame, AVPacket *pkt)
1555 {
1556  const uint8_t *data = pkt->data;
1557  int size = pkt->size;
1558  VP9Context *s = avctx->priv_data;
1559  int ret, i, j, ref;
1560  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1561  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1562  const VP9Frame *src;
1563  AVFrame *f;
1564 
1565  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1566  return ret;
1567  } else if (ret == 0) {
1568  if (!s->s.refs[ref].f) {
1569  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1570  return AVERROR_INVALIDDATA;
1571  }
1572  for (int i = 0; i < 8; i++)
1573  ff_progress_frame_replace(&s->next_refs[i], &s->s.refs[i]);
1574  ff_thread_finish_setup(avctx);
1575  ff_progress_frame_await(&s->s.refs[ref], INT_MAX);
1576 
1577  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1578  return ret;
1579  frame->pts = pkt->pts;
1580  frame->pkt_dts = pkt->dts;
1581  *got_frame = 1;
1582  return pkt->size;
1583  }
1584  data += ret;
1585  size -= ret;
1586 
1587  src = !s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres ?
1588  &s->s.frames[CUR_FRAME] : &s->s.frames[BLANK_FRAME];
1589  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly)
1590  vp9_frame_replace(&s->s.frames[REF_FRAME_SEGMAP], src);
1591  vp9_frame_replace(&s->s.frames[REF_FRAME_MVPAIR], src);
1592  vp9_frame_unref(&s->s.frames[CUR_FRAME]);
1593  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1594  return ret;
1595  f = s->s.frames[CUR_FRAME].tf.f;
1596  if (s->s.h.keyframe)
1597  f->flags |= AV_FRAME_FLAG_KEY;
1598  else
1599  f->flags &= ~AV_FRAME_FLAG_KEY;
1600  if (s->s.h.lossless)
1601  f->flags |= AV_FRAME_FLAG_LOSSLESS;
1602  else
1603  f->flags &= ~AV_FRAME_FLAG_LOSSLESS;
1604  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1605 
1606  // Non-existent frames have the implicit dimension 0x0 != CUR_FRAME
1607  if (!s->s.frames[REF_FRAME_MVPAIR].tf.f ||
1608  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1609  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1610  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1611  }
1612 
1613  // ref frame setup
1614  for (i = 0; i < 8; i++) {
1615  ff_progress_frame_replace(&s->next_refs[i],
1616  s->s.h.refreshrefmask & (1 << i) ?
1617  &s->s.frames[CUR_FRAME].tf : &s->s.refs[i]);
1618  }
1619 
1620  if (avctx->hwaccel) {
1621  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
1622  ret = hwaccel->start_frame(avctx, pkt->buf, pkt->data, pkt->size);
1623  if (ret < 0)
1624  return ret;
1625  ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1626  if (ret < 0)
1627  return ret;
1628  ret = hwaccel->end_frame(avctx);
1629  if (ret < 0)
1630  return ret;
1631  goto finish;
1632  }
1633 
1634  // main tile decode loop
1635  memset(s->above_partition_ctx, 0, s->cols);
1636  memset(s->above_skip_ctx, 0, s->cols);
1637  if (s->s.h.keyframe || s->s.h.intraonly) {
1638  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1639  } else {
1640  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1641  }
1642  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1643  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1644  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1645  memset(s->above_segpred_ctx, 0, s->cols);
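  // Frame threading combined with adaptive context updates (refreshctx and
  // not parallelmode) uses a two-pass decode: pass 1 parses the bitstream and
  // adapts the probabilities, pass 2 reconstructs, so the next frame thread
  // can start as soon as the adapted context is available.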
1646  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1647  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1648  if ((ret = update_block_buffers(avctx)) < 0) {
1649  av_log(avctx, AV_LOG_ERROR,
1650  "Failed to allocate block buffers\n");
1651  return ret;
1652  }
1653  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1654  int j, k, l, m;
1655 
1656  for (i = 0; i < 4; i++) {
1657  for (j = 0; j < 2; j++)
1658  for (k = 0; k < 2; k++)
1659  for (l = 0; l < 6; l++)
1660  for (m = 0; m < 6; m++)
1661  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1662  s->prob.coef[i][j][k][l][m], 3);
1663  if (s->s.h.txfmmode == i)
1664  break;
1665  }
1666  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1667  ff_thread_finish_setup(avctx);
1668  } else if (!s->s.h.refreshctx) {
1669  ff_thread_finish_setup(avctx);
1670  }
1671 
1672 #if HAVE_THREADS
1673  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1674  for (i = 0; i < s->sb_rows; i++)
1675  atomic_init(&s->entries[i], 0);
1676  }
1677 #endif
1678 
1679  do {
1680  for (i = 0; i < s->active_tile_cols; i++) {
1681  s->td[i].b = s->td[i].b_base;
1682  s->td[i].block = s->td[i].block_base;
1683  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1684  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1685  s->td[i].eob = s->td[i].eob_base;
1686  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1687  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1688  s->td[i].error_info = 0;
1689  }
1690 
1691 #if HAVE_THREADS
1692  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1693  int tile_row, tile_col;
1694 
1695  av_assert1(!s->pass);
1696 
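  // Each tile except the last one is prefixed with its size as a 32-bit
  // big-endian integer; the last tile uses the remaining packet bytes.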
1697  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1698  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1699  int64_t tile_size;
1700 
1701  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1702  tile_row == s->s.h.tiling.tile_rows - 1) {
1703  tile_size = size;
1704  } else {
1705  tile_size = AV_RB32(data);
1706  data += 4;
1707  size -= 4;
1708  }
1709  if (tile_size > size)
1710  return AVERROR_INVALIDDATA;
1711  ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1712  if (ret < 0)
1713  return ret;
1714  if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1715  return AVERROR_INVALIDDATA;
1716  data += tile_size;
1717  size -= tile_size;
1718  }
1719  }
1720 
1721  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1722  } else
1723 #endif
1724  {
1725  ret = decode_tiles(avctx, data, size);
1726  if (ret < 0)
1727  goto fail;
1728  }
1729 
1730  // Sum all counts fields into td[0].counts for tile threading
1731  if (avctx->active_thread_type == FF_THREAD_SLICE)
1732  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1733  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1734  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1735 
1736  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1737  ff_vp9_adapt_probs(s);
1738  ff_thread_finish_setup(avctx);
1739  }
1740  } while (s->pass++ == 1);
1741 
1742  if (s->td->error_info < 0) {
1743  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1744  s->td->error_info = 0;
1745  ret = AVERROR_INVALIDDATA;
1746  goto fail;
1747  }
1748  if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
1749  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1750  if (ret < 0)
1751  goto fail;
1752  }
1753 
1754 finish:
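  // Mark the frame as fully decoded so that frame threads waiting on it are
  // unblocked, then promote the pending reference slots.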
1755  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, INT_MAX);
1756  // ref frame setup
1757  for (int i = 0; i < 8; i++)
1758  ff_progress_frame_replace(&s->s.refs[i], &s->next_refs[i]);
1759 
1760  if (!s->s.h.invisible) {
1761  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1762  return ret;
1763  *got_frame = 1;
1764  }
1765 
1766  return pkt->size;
1767 fail:
1768  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, INT_MAX);
1769  return ret;
1770 }
1771 
1772 static void vp9_decode_flush(AVCodecContext *avctx)
1773 {
1774  VP9Context *s = avctx->priv_data;
1775  int i;
1776 
1777  for (i = 0; i < 3; i++)
1778  vp9_frame_unref(&s->s.frames[i]);
1779  for (i = 0; i < 8; i++)
1780  ff_progress_frame_unref(&s->s.refs[i]);
1781 
1782  if (FF_HW_HAS_CB(avctx, flush))
1783  FF_HW_SIMPLE_CALL(avctx, flush);
1784 }
1785 
1786 static av_cold int vp9_decode_init(AVCodecContext *avctx)
1787 {
1788  VP9Context *s = avctx->priv_data;
1789  int ret;
1790 
1791  s->last_bpp = 0;
1792  s->s.h.filter.sharpness = -1;
1793 
1794 #if HAVE_THREADS
1795  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1796  ret = ff_pthread_init(s, vp9_context_offsets);
1797  if (ret < 0)
1798  return ret;
1799  }
1800 #endif
1801 
1802  return 0;
1803 }
1804 
1805 #if HAVE_THREADS
1806 static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1807 {
1808  VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
1809 
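  // Copy the state a future frame thread needs before it can start decoding:
  // frame and reference buffers, probability contexts, and the header fields
  // that persist across frames.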
1810  for (int i = 0; i < 3; i++)
1811  vp9_frame_replace(&s->s.frames[i], &ssrc->s.frames[i]);
1812  for (int i = 0; i < 8; i++)
1813  ff_progress_frame_replace(&s->s.refs[i], &ssrc->next_refs[i]);
1814  av_refstruct_replace(&s->frame_extradata_pool, ssrc->frame_extradata_pool);
1815  s->frame_extradata_pool_size = ssrc->frame_extradata_pool_size;
1816 
1817  s->s.h.invisible = ssrc->s.h.invisible;
1818  s->s.h.keyframe = ssrc->s.h.keyframe;
1819  s->s.h.intraonly = ssrc->s.h.intraonly;
1820  s->ss_v = ssrc->ss_v;
1821  s->ss_h = ssrc->ss_h;
1822  s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
1823  s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
1824  s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
1825  s->bytesperpixel = ssrc->bytesperpixel;
1826  s->gf_fmt = ssrc->gf_fmt;
1827  s->w = ssrc->w;
1828  s->h = ssrc->h;
1829  s->s.h.bpp = ssrc->s.h.bpp;
1830  s->bpp_index = ssrc->bpp_index;
1831  s->pix_fmt = ssrc->pix_fmt;
1832  memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
1833  memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
1834  memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
1835  sizeof(s->s.h.segmentation.feat));
1836 
1837  return 0;
1838 }
1839 #endif
1840 
1841 const FFCodec ff_vp9_decoder = {
1842  .p.name = "vp9",
1843  CODEC_LONG_NAME("Google VP9"),
1844  .p.type = AVMEDIA_TYPE_VIDEO,
1845  .p.id = AV_CODEC_ID_VP9,
1846  .priv_data_size = sizeof(VP9Context),
1847  .init = vp9_decode_init,
1848  .close = vp9_decode_free,
1849  FF_CODEC_DECODE_CB(vp9_decode_frame),
1850  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
1851  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1852  FF_CODEC_CAP_SLICE_THREAD_HAS_MF |
1853  FF_CODEC_CAP_USES_PROGRESSFRAMES,
1854  .flush = vp9_decode_flush,
1855  UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
1856  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1857  .bsfs = "vp9_superframe_split",
1858  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1859 #if CONFIG_VP9_DXVA2_HWACCEL
1860  HWACCEL_DXVA2(vp9),
1861 #endif
1862 #if CONFIG_VP9_D3D11VA_HWACCEL
1863  HWACCEL_D3D11VA(vp9),
1864 #endif
1865 #if CONFIG_VP9_D3D11VA2_HWACCEL
1866  HWACCEL_D3D11VA2(vp9),
1867 #endif
1868 #if CONFIG_VP9_D3D12VA_HWACCEL
1869  HWACCEL_D3D12VA(vp9),
1870 #endif
1871 #if CONFIG_VP9_NVDEC_HWACCEL
1872  HWACCEL_NVDEC(vp9),
1873 #endif
1874 #if CONFIG_VP9_VAAPI_HWACCEL
1875  HWACCEL_VAAPI(vp9),
1876 #endif
1877 #if CONFIG_VP9_VDPAU_HWACCEL
1878  HWACCEL_VDPAU(vp9),
1879 #endif
1880 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1881  HWACCEL_VIDEOTOOLBOX(vp9),
1882 #endif
1883  NULL
1884  },
1885 };