FFmpeg
webp.c
Go to the documentation of this file.
1 /*
2  * WebP (.webp) image decoder
3  * Copyright (c) 2013 Aneesh Dogra <aneesh@sugarlabs.org>
4  * Copyright (c) 2013 Justin Ruggles <justin.ruggles@gmail.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * WebP image decoder
26  *
27  * @author Aneesh Dogra <aneesh@sugarlabs.org>
28  * Container and Lossy decoding
29  *
30  * @author Justin Ruggles <justin.ruggles@gmail.com>
31  * Lossless decoder
32  * Compressed alpha for lossy
33  *
34  * @author James Almer <jamrial@gmail.com>
35  * Exif metadata
36  * ICC profile
37  *
38  * Unimplemented:
39  * - Animation
40  * - XMP metadata
41  */
42 
43 #include "libavutil/imgutils.h"
44 #include "libavutil/mem.h"
45 
46 #define BITSTREAM_READER_LE
47 #include "avcodec.h"
48 #include "bytestream.h"
49 #include "codec_internal.h"
50 #include "decode.h"
51 #include "exif_internal.h"
52 #include "get_bits.h"
53 #include "thread.h"
54 #include "tiff_common.h"
55 #include "vp8.h"
56 
57 #define VP8X_FLAG_ANIMATION 0x02
58 #define VP8X_FLAG_XMP_METADATA 0x04
59 #define VP8X_FLAG_EXIF_METADATA 0x08
60 #define VP8X_FLAG_ALPHA 0x10
61 #define VP8X_FLAG_ICC 0x20
62 
63 #define MAX_PALETTE_SIZE 256
64 #define MAX_CACHE_BITS 11
65 #define NUM_CODE_LENGTH_CODES 19
66 #define HUFFMAN_CODES_PER_META_CODE 5
67 #define NUM_LITERAL_CODES 256
68 #define NUM_LENGTH_CODES 24
69 #define NUM_DISTANCE_CODES 40
70 #define NUM_SHORT_DISTANCES 120
71 #define MAX_HUFFMAN_CODE_LENGTH 15
72 
73 static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE] = {
77 };
78 
80  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
81 };
82 
/* (dx, dy) neighbour offsets for the first NUM_SHORT_DISTANCES LZ77
 * distance codes. A short distance code d (1-based) is mapped to the linear
 * pixel distance max(1, dx + dy * width), i.e. a nearby pixel in the
 * already-decoded area; see the distance handling in
 * decode_entropy_coded_image(). */
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2] = {
    {  0, 1 }, {  1, 0 }, {  1, 1 }, { -1, 1 }, {  0, 2 }, {  2, 0 }, {  1, 2 }, { -1, 2 },
    {  2, 1 }, { -2, 1 }, {  2, 2 }, { -2, 2 }, {  0, 3 }, {  3, 0 }, {  1, 3 }, { -1, 3 },
    {  3, 1 }, { -3, 1 }, {  2, 3 }, { -2, 3 }, {  3, 2 }, { -3, 2 }, {  0, 4 }, {  4, 0 },
    {  1, 4 }, { -1, 4 }, {  4, 1 }, { -4, 1 }, {  3, 3 }, { -3, 3 }, {  2, 4 }, { -2, 4 },
    {  4, 2 }, { -4, 2 }, {  0, 5 }, {  3, 4 }, { -3, 4 }, {  4, 3 }, { -4, 3 }, {  5, 0 },
    {  1, 5 }, { -1, 5 }, {  5, 1 }, { -5, 1 }, {  2, 5 }, { -2, 5 }, {  5, 2 }, { -5, 2 },
    {  4, 4 }, { -4, 4 }, {  3, 5 }, { -3, 5 }, {  5, 3 }, { -5, 3 }, {  0, 6 }, {  6, 0 },
    {  1, 6 }, { -1, 6 }, {  6, 1 }, { -6, 1 }, {  2, 6 }, { -2, 6 }, {  6, 2 }, { -6, 2 },
    {  4, 5 }, { -4, 5 }, {  5, 4 }, { -5, 4 }, {  3, 6 }, { -3, 6 }, {  6, 3 }, { -6, 3 },
    {  0, 7 }, {  7, 0 }, {  1, 7 }, { -1, 7 }, {  5, 5 }, { -5, 5 }, {  7, 1 }, { -7, 1 },
    {  4, 6 }, { -4, 6 }, {  6, 4 }, { -6, 4 }, {  2, 7 }, { -2, 7 }, {  7, 2 }, { -7, 2 },
    {  3, 7 }, { -3, 7 }, {  7, 3 }, { -7, 3 }, {  5, 6 }, { -5, 6 }, {  6, 5 }, { -6, 5 },
    {  8, 0 }, {  4, 7 }, { -4, 7 }, {  7, 4 }, { -7, 4 }, {  8, 1 }, {  8, 2 }, {  6, 6 },
    { -6, 6 }, {  8, 3 }, {  5, 7 }, { -5, 7 }, {  7, 5 }, { -7, 5 }, {  8, 4 }, {  6, 7 },
    { -6, 7 }, {  7, 6 }, { -7, 6 }, {  8, 5 }, {  7, 7 }, { -7, 7 }, {  8, 6 }, {  8, 7 }
};
100 
104 };
105 
111 };
112 
118 };
119 
135 };
136 
143 };
144 
145 /* The structure of WebP lossless is an optional series of transformation data,
146  * followed by the primary image. The primary image also optionally contains
147  * an entropy group mapping if there are multiple entropy groups. There is a
148  * basic image type called an "entropy coded image" that is used for all of
149  * these. The type of each entropy coded image is referred to by the
150  * specification as its role. */
151 enum ImageRole {
152  /* Primary Image: Stores the actual pixels of the image. */
154 
155  /* Entropy Image: Defines which Huffman group to use for different areas of
156  * the primary image. */
158 
159  /* Predictors: Defines which predictor type to use for different areas of
160  * the primary image. */
162 
163  /* Color Transform Data: Defines the color transformation for different
164  * areas of the primary image. */
166 
167  /* Color Index: Stored as an image of height == 1. */
169 
171 };
172 
/* Reader for one Huffman code of a meta-code group. Codes with only one or
 * two symbols ("simple" mode) are decoded without a VLC table: one symbol is
 * returned directly, two symbols are selected by a single bit
 * (see huff_reader_get_symbol()). */
typedef struct HuffReader {
    VLC vlc;                    /* Huffman decoder context */
    int simple;                 /* whether to use simple mode */
    int nb_symbols;             /* number of coded symbols (1 or 2 in simple mode) */
    uint16_t simple_symbols[2]; /* symbols for simple mode */
} HuffReader;
179 
180 typedef struct ImageContext {
181  enum ImageRole role; /* role of this image */
182  AVFrame *frame; /* AVFrame for data */
183  int color_cache_bits; /* color cache size, log2 */
184  uint32_t *color_cache; /* color cache data */
185  int nb_huffman_groups; /* number of huffman groups */
186  HuffReader *huffman_groups; /* reader for each huffman group */
187  /* relative size compared to primary image, log2.
188  * for IMAGE_ROLE_COLOR_INDEXING with <= 16 colors, this is log2 of the
189  * number of pixels per byte in the primary image (pixel packing) */
192 } ImageContext;
193 
194 typedef struct WebPContext {
195  VP8Context v; /* VP8 Context used for lossy decoding */
196  GetBitContext gb; /* bitstream reader for main image chunk */
197  AVFrame *alpha_frame; /* AVFrame for alpha data decompressed from VP8L */
198  AVPacket *pkt; /* AVPacket to be passed to the underlying VP8 decoder */
199  AVCodecContext *avctx; /* parent AVCodecContext */
200  int initialized; /* set once the VP8 context is initialized */
201  int has_alpha; /* has a separate alpha chunk */
202  enum AlphaCompression alpha_compression; /* compression type for alpha chunk */
203  enum AlphaFilter alpha_filter; /* filtering method for alpha chunk */
204  const uint8_t *alpha_data; /* alpha chunk data */
205  int alpha_data_size; /* alpha chunk data size */
206  int has_exif; /* set after an EXIF chunk has been processed */
207  int has_iccp; /* set after an ICCP chunk has been processed */
208  int width; /* image width */
209  int height; /* image height */
210  int lossless; /* indicates lossless or lossy */
211 
212  int nb_transforms; /* number of transforms */
213  enum TransformType transforms[4]; /* transformations used in the image, in order */
214  /* reduced width when using a color indexing transform with <= 16 colors (pixel packing)
215  * before pixels are unpacked, or same as width otherwise. */
217  int nb_huffman_groups; /* number of huffman groups in the primary image */
218  ImageContext image[IMAGE_ROLE_NB]; /* image context for each role */
219 } WebPContext;
220 
/* Address of the 4-byte pixel at (x, y) in an AV_PIX_FMT_ARGB frame.
 * Byte layout within a pixel: 0 = alpha, 1 = red, 2 = green, 3 = blue
 * (this matches the component indices used throughout the decoder).
 * All macro arguments are fully parenthesized so the macros stay correct
 * for any argument expression (the original left `frame` and `c`
 * unparenthesized inside the expansion). */
#define GET_PIXEL(frame, x, y) \
    ((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x))

/* lvalue of a single component c of the pixel at (x, y). */
#define GET_PIXEL_COMP(frame, x, y, c) \
    (*((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x) + (c)))
226 
228 {
229  int i, j;
230 
231  av_free(img->color_cache);
232  if (img->role != IMAGE_ROLE_ARGB && !img->is_alpha_primary)
233  av_frame_free(&img->frame);
234  if (img->huffman_groups) {
235  for (i = 0; i < img->nb_huffman_groups; i++) {
236  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++)
237  ff_vlc_free(&img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE + j].vlc);
238  }
239  av_free(img->huffman_groups);
240  }
241  memset(img, 0, sizeof(*img));
242 }
243 
245 {
246  if (r->simple) {
247  if (r->nb_symbols == 1)
248  return r->simple_symbols[0];
249  else
250  return r->simple_symbols[get_bits1(gb)];
251  } else
252  return get_vlc2(gb, r->vlc.table, 8, 2);
253 }
254 
/**
 * Initialize a HuffReader from canonical per-symbol code lengths.
 *
 * Symbols are sorted by (code length, symbol value) with a counting sort,
 * which is the order ff_vlc_init_from_lengths() expects for canonical codes.
 *
 * @param r             reader to initialize
 * @param code_lengths  code length of each symbol; 0 means the symbol is unused
 * @param len_counts    on entry: number of symbols per code length for
 *                      indices 1..MAX_HUFFMAN_CODE_LENGTH; clobbered (reused
 *                      as running offsets for the counting sort)
 * @param lens          output scratch: sorted code lengths, one per used symbol
 * @param syms          output scratch: symbol values in the same order as lens
 * @param alphabet_size number of entries in code_lengths
 * @param logctx        logging context passed to the VLC initializer
 * @return 0 on success, AVERROR_INVALIDDATA if no symbol is coded, or the
 *         error from ff_vlc_init_from_lengths()
 */
static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths,
                                       uint16_t len_counts[MAX_HUFFMAN_CODE_LENGTH + 1],
                                       uint8_t lens[], uint16_t syms[],
                                       int alphabet_size, void *logctx)
{
    unsigned nb_codes = 0;
    int ret;

    // Count the number of symbols of each length and transform len_counts
    // into an array of offsets.
    for (int len = 1; len <= MAX_HUFFMAN_CODE_LENGTH; ++len) {
        unsigned cnt = len_counts[len];
        len_counts[len] = nb_codes;
        nb_codes += cnt;
    }

    // Scatter symbols to their slot: len_counts[l] is the next free index
    // for length l, so the result is sorted by length, then by symbol.
    for (int sym = 0; sym < alphabet_size; ++sym) {
        if (code_lengths[sym]) {
            unsigned idx = len_counts[code_lengths[sym]]++;
            syms[idx] = sym;
            lens[idx] = code_lengths[sym];
        }
    }

    if (nb_codes == 0) {
        // No symbols
        return AVERROR_INVALIDDATA;
    }
    if (nb_codes == 1) {
        // Special-case 1 symbol since the VLC reader cannot handle it
        r->nb_symbols = 1;
        r->simple = 1;
        r->simple_symbols[0] = syms[0];
        return 0;
    }

    ret = ff_vlc_init_from_lengths(&r->vlc, 8, nb_codes, lens, 1,
                                   syms, 2, 2, 0, VLC_INIT_OUTPUT_LE, logctx);
    if (ret < 0)
        return ret;
    r->simple = 0;

    return 0;
}
299 
301 {
302  hc->nb_symbols = get_bits1(&s->gb) + 1;
303 
304  if (get_bits1(&s->gb))
305  hc->simple_symbols[0] = get_bits(&s->gb, 8);
306  else
307  hc->simple_symbols[0] = get_bits1(&s->gb);
308 
309  if (hc->nb_symbols == 2)
310  hc->simple_symbols[1] = get_bits(&s->gb, 8);
311 
312  hc->simple = 1;
313 }
314 
316  int alphabet_size)
317 {
318  HuffReader code_len_hc = { { 0 }, 0, 0, { 0 } };
319  uint8_t *code_lengths;
320  uint8_t code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
321  uint8_t reordered_code_length_code_lengths[NUM_CODE_LENGTH_CODES];
322  uint16_t reordered_code_length_syms[NUM_CODE_LENGTH_CODES];
323  uint16_t len_counts[MAX_HUFFMAN_CODE_LENGTH + 1] = { 0 };
324  int symbol, max_symbol, prev_code_len, ret;
325  int num_codes = 4 + get_bits(&s->gb, 4);
326 
327  av_assert1(num_codes <= NUM_CODE_LENGTH_CODES);
328 
329  for (int i = 0; i < num_codes; i++) {
330  unsigned len = get_bits(&s->gb, 3);
331  code_length_code_lengths[code_length_code_order[i]] = len;
332  len_counts[len]++;
333  }
334 
335  if (get_bits1(&s->gb)) {
336  int bits = 2 + 2 * get_bits(&s->gb, 3);
337  max_symbol = 2 + get_bits(&s->gb, bits);
338  if (max_symbol > alphabet_size) {
339  av_log(s->avctx, AV_LOG_ERROR, "max symbol %d > alphabet size %d\n",
340  max_symbol, alphabet_size);
341  return AVERROR_INVALIDDATA;
342  }
343  } else {
344  max_symbol = alphabet_size;
345  }
346 
347  ret = huff_reader_build_canonical(&code_len_hc, code_length_code_lengths, len_counts,
348  reordered_code_length_code_lengths,
349  reordered_code_length_syms,
350  NUM_CODE_LENGTH_CODES, s->avctx);
351  if (ret < 0)
352  return ret;
353 
354  code_lengths = av_malloc_array(alphabet_size, 2 * sizeof(uint8_t) + sizeof(uint16_t));
355  if (!code_lengths) {
356  ret = AVERROR(ENOMEM);
357  goto finish;
358  }
359 
360  prev_code_len = 8;
361  symbol = 0;
362  memset(len_counts, 0, sizeof(len_counts));
363  while (symbol < alphabet_size) {
364  int code_len;
365 
366  if (!max_symbol--)
367  break;
368  code_len = huff_reader_get_symbol(&code_len_hc, &s->gb);
369  if (code_len < 16U) {
370  /* Code length code [0..15] indicates literal code lengths. */
371  code_lengths[symbol++] = code_len;
372  len_counts[code_len]++;
373  if (code_len)
374  prev_code_len = code_len;
375  } else {
376  int repeat = 0, length = 0;
377  switch (code_len) {
378  default:
380  goto finish;
381  case 16:
382  /* Code 16 repeats the previous non-zero value [3..6] times,
383  * i.e., 3 + ReadBits(2) times. If code 16 is used before a
384  * non-zero value has been emitted, a value of 8 is repeated. */
385  repeat = 3 + get_bits(&s->gb, 2);
386  length = prev_code_len;
387  len_counts[length] += repeat;
388  break;
389  case 17:
390  /* Code 17 emits a streak of zeros [3..10], i.e.,
391  * 3 + ReadBits(3) times. */
392  repeat = 3 + get_bits(&s->gb, 3);
393  break;
394  case 18:
395  /* Code 18 emits a streak of zeros of length [11..138], i.e.,
396  * 11 + ReadBits(7) times. */
397  repeat = 11 + get_bits(&s->gb, 7);
398  break;
399  }
400  if (symbol + repeat > alphabet_size) {
401  av_log(s->avctx, AV_LOG_ERROR,
402  "invalid symbol %d + repeat %d > alphabet size %d\n",
403  symbol, repeat, alphabet_size);
405  goto finish;
406  }
407  while (repeat-- > 0)
408  code_lengths[symbol++] = length;
409  }
410  }
411 
412  ret = huff_reader_build_canonical(hc, code_lengths, len_counts,
413  code_lengths + symbol,
414  (uint16_t*)(code_lengths + 2 * symbol),
415  symbol, s->avctx);
416 
417 finish:
418  ff_vlc_free(&code_len_hc.vlc);
419  av_free(code_lengths);
420  return ret;
421 }
422 
423 static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
424  int w, int h);
425 
/* Read a 3-bit size code from s->gb and derive the sub-block grid that
 * covers a w x h image with blocks of 1 << block_bits pixels per side.
 * NOTE: this macro assigns the local variables block_bits, blocks_w and
 * blocks_h of the invoking function, and reads `s` from its scope. */
#define PARSE_BLOCK_SIZE(w, h) do { \
    block_bits = get_bits(&s->gb, 3) + 2; \
    blocks_w = FFALIGN((w), 1 << block_bits) >> block_bits; \
    blocks_h = FFALIGN((h), 1 << block_bits) >> block_bits; \
} while (0)
431 
433 {
434  ImageContext *img;
435  int ret, block_bits, blocks_w, blocks_h, x, y, max;
436 
437  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
438 
439  ret = decode_entropy_coded_image(s, IMAGE_ROLE_ENTROPY, blocks_w, blocks_h);
440  if (ret < 0)
441  return ret;
442 
443  img = &s->image[IMAGE_ROLE_ENTROPY];
444  img->size_reduction = block_bits;
445 
446  /* the number of huffman groups is determined by the maximum group number
447  * coded in the entropy image */
448  max = 0;
449  for (y = 0; y < img->frame->height; y++) {
450  for (x = 0; x < img->frame->width; x++) {
451  int p0 = GET_PIXEL_COMP(img->frame, x, y, 1);
452  int p1 = GET_PIXEL_COMP(img->frame, x, y, 2);
453  int p = p0 << 8 | p1;
454  max = FFMAX(max, p);
455  }
456  }
457  s->nb_huffman_groups = max + 1;
458 
459  return 0;
460 }
461 
463 {
464  int block_bits, blocks_w, blocks_h, ret;
465 
466  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
467 
469  blocks_h);
470  if (ret < 0)
471  return ret;
472 
473  s->image[IMAGE_ROLE_PREDICTOR].size_reduction = block_bits;
474 
475  return 0;
476 }
477 
479 {
480  int block_bits, blocks_w, blocks_h, ret;
481 
482  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
483 
485  blocks_h);
486  if (ret < 0)
487  return ret;
488 
489  s->image[IMAGE_ROLE_COLOR_TRANSFORM].size_reduction = block_bits;
490 
491  return 0;
492 }
493 
495 {
496  ImageContext *img;
497  int width_bits, index_size, ret, x;
498  uint8_t *ct;
499 
500  index_size = get_bits(&s->gb, 8) + 1;
501 
502  if (index_size <= 2)
503  width_bits = 3;
504  else if (index_size <= 4)
505  width_bits = 2;
506  else if (index_size <= 16)
507  width_bits = 1;
508  else
509  width_bits = 0;
510 
512  index_size, 1);
513  if (ret < 0)
514  return ret;
515 
516  img = &s->image[IMAGE_ROLE_COLOR_INDEXING];
517  img->size_reduction = width_bits;
518  if (width_bits > 0)
519  s->reduced_width = (s->width + ((1 << width_bits) - 1)) >> width_bits;
520 
521  /* color index values are delta-coded */
522  ct = img->frame->data[0] + 4;
523  for (x = 4; x < img->frame->width * 4; x++, ct++)
524  ct[0] += ct[-4];
525 
526  return 0;
527 }
528 
530  int x, int y)
531 {
532  ImageContext *gimg = &s->image[IMAGE_ROLE_ENTROPY];
533  int group = 0;
534 
535  if (gimg->size_reduction > 0) {
536  int group_x = x >> gimg->size_reduction;
537  int group_y = y >> gimg->size_reduction;
538  int g0 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 1);
539  int g1 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 2);
540  group = g0 << 8 | g1;
541  }
542 
543  return &img->huffman_groups[group * HUFFMAN_CODES_PER_META_CODE];
544 }
545 
547 {
548  uint32_t cache_idx = (0x1E35A7BD * c) >> (32 - img->color_cache_bits);
549  img->color_cache[cache_idx] = c;
550 }
551 
553  int w, int h)
554 {
555  ImageContext *img;
556  HuffReader *hg;
557  int i, j, ret, x, y, width;
558 
559  img = &s->image[role];
560  img->role = role;
561 
562  if (!img->frame) {
563  img->frame = av_frame_alloc();
564  if (!img->frame)
565  return AVERROR(ENOMEM);
566  }
567 
568  img->frame->format = AV_PIX_FMT_ARGB;
569  img->frame->width = w;
570  img->frame->height = h;
571 
572  if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
573  ret = ff_thread_get_buffer(s->avctx, img->frame, 0);
574  } else
575  ret = av_frame_get_buffer(img->frame, 1);
576  if (ret < 0)
577  return ret;
578 
579  if (get_bits1(&s->gb)) {
580  img->color_cache_bits = get_bits(&s->gb, 4);
581  if (img->color_cache_bits < 1 || img->color_cache_bits > 11) {
582  av_log(s->avctx, AV_LOG_ERROR, "invalid color cache bits: %d\n",
583  img->color_cache_bits);
584  return AVERROR_INVALIDDATA;
585  }
586  img->color_cache = av_calloc(1 << img->color_cache_bits,
587  sizeof(*img->color_cache));
588  if (!img->color_cache)
589  return AVERROR(ENOMEM);
590  } else {
591  img->color_cache_bits = 0;
592  }
593 
594  img->nb_huffman_groups = 1;
595  if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) {
597  if (ret < 0)
598  return ret;
599  img->nb_huffman_groups = s->nb_huffman_groups;
600  }
601  img->huffman_groups = av_calloc(img->nb_huffman_groups,
603  sizeof(*img->huffman_groups));
604  if (!img->huffman_groups)
605  return AVERROR(ENOMEM);
606 
607  for (i = 0; i < img->nb_huffman_groups; i++) {
608  hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE];
609  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) {
610  int alphabet_size = alphabet_sizes[j];
611  if (!j && img->color_cache_bits > 0)
612  alphabet_size += 1 << img->color_cache_bits;
613 
614  if (get_bits1(&s->gb)) {
615  read_huffman_code_simple(s, &hg[j]);
616  } else {
617  ret = read_huffman_code_normal(s, &hg[j], alphabet_size);
618  if (ret < 0)
619  return ret;
620  }
621  }
622  }
623 
624  width = img->frame->width;
625  if (role == IMAGE_ROLE_ARGB)
626  width = s->reduced_width;
627 
628  x = 0; y = 0;
629  while (y < img->frame->height) {
630  int v;
631 
632  if (get_bits_left(&s->gb) < 0)
633  return AVERROR_INVALIDDATA;
634 
635  hg = get_huffman_group(s, img, x, y);
636  v = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb);
637  if (v < NUM_LITERAL_CODES) {
638  /* literal pixel values */
639  uint8_t *p = GET_PIXEL(img->frame, x, y);
640  p[2] = v;
641  p[1] = huff_reader_get_symbol(&hg[HUFF_IDX_RED], &s->gb);
642  p[3] = huff_reader_get_symbol(&hg[HUFF_IDX_BLUE], &s->gb);
643  p[0] = huff_reader_get_symbol(&hg[HUFF_IDX_ALPHA], &s->gb);
644  if (img->color_cache_bits)
646  x++;
647  if (x == width) {
648  x = 0;
649  y++;
650  }
651  } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) {
652  /* LZ77 backwards mapping */
653  int prefix_code, length, distance, ref_x, ref_y;
654 
655  /* parse length and distance */
656  prefix_code = v - NUM_LITERAL_CODES;
657  if (prefix_code < 4) {
658  length = prefix_code + 1;
659  } else {
660  int extra_bits = (prefix_code - 2) >> 1;
661  int offset = 2 + (prefix_code & 1) << extra_bits;
662  length = offset + get_bits(&s->gb, extra_bits) + 1;
663  }
664  prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
665  if (prefix_code > 39U) {
666  av_log(s->avctx, AV_LOG_ERROR,
667  "distance prefix code too large: %d\n", prefix_code);
668  return AVERROR_INVALIDDATA;
669  }
670  if (prefix_code < 4) {
671  distance = prefix_code + 1;
672  } else {
673  int extra_bits = prefix_code - 2 >> 1;
674  int offset = 2 + (prefix_code & 1) << extra_bits;
675  distance = offset + get_bits(&s->gb, extra_bits) + 1;
676  }
677 
678  /* find reference location */
679  if (distance <= NUM_SHORT_DISTANCES) {
680  int xi = lz77_distance_offsets[distance - 1][0];
681  int yi = lz77_distance_offsets[distance - 1][1];
682  distance = FFMAX(1, xi + yi * width);
683  } else {
685  }
686  ref_x = x;
687  ref_y = y;
688  if (distance <= x) {
689  ref_x -= distance;
690  distance = 0;
691  } else {
692  ref_x = 0;
693  distance -= x;
694  }
695  while (distance >= width) {
696  ref_y--;
697  distance -= width;
698  }
699  if (distance > 0) {
700  ref_x = width - distance;
701  ref_y--;
702  }
703  ref_x = FFMAX(0, ref_x);
704  ref_y = FFMAX(0, ref_y);
705 
706  if (ref_y == y && ref_x >= x)
707  return AVERROR_INVALIDDATA;
708 
709  /* copy pixels
710  * source and dest regions can overlap and wrap lines, so just
711  * copy per-pixel */
712  for (i = 0; i < length; i++) {
713  uint8_t *p_ref = GET_PIXEL(img->frame, ref_x, ref_y);
714  uint8_t *p = GET_PIXEL(img->frame, x, y);
715 
716  AV_COPY32(p, p_ref);
717  if (img->color_cache_bits)
719  x++;
720  ref_x++;
721  if (x == width) {
722  x = 0;
723  y++;
724  }
725  if (ref_x == width) {
726  ref_x = 0;
727  ref_y++;
728  }
729  if (y == img->frame->height || ref_y == img->frame->height)
730  break;
731  }
732  } else {
733  /* read from color cache */
734  uint8_t *p = GET_PIXEL(img->frame, x, y);
735  int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);
736 
737  if (!img->color_cache_bits) {
738  av_log(s->avctx, AV_LOG_ERROR, "color cache not found\n");
739  return AVERROR_INVALIDDATA;
740  }
741  if (cache_idx >= 1 << img->color_cache_bits) {
742  av_log(s->avctx, AV_LOG_ERROR,
743  "color cache index out-of-bounds\n");
744  return AVERROR_INVALIDDATA;
745  }
746  AV_WB32(p, img->color_cache[cache_idx]);
747  x++;
748  if (x == width) {
749  x = 0;
750  y++;
751  }
752  }
753  }
754 
755  return 0;
756 }
757 
/* PRED_MODE_BLACK: predict opaque black (alpha = 0xFF, R = G = B = 0);
 * the neighbour pointers are unused. */
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_WB32(p, 0xFF000000);
}
764 
/* PRED_MODE_L: predict the left neighbour pixel. */
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_l);
}
771 
/* PRED_MODE_T: predict the top neighbour pixel. */
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_t);
}
778 
/* PRED_MODE_TR: predict the top-right neighbour pixel. */
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_tr);
}
785 
/* PRED_MODE_TL: predict the top-left neighbour pixel. */
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_tl);
}
792 
/* PRED_MODE_AVG_T_AVG_L_TR: per component, average the top neighbour with
 * the average of the left and top-right neighbours.
 * (All averages truncate, matching C's `a + b >> 1` precedence.) */
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++) {
        int avg_l_tr = (p_l[i] + p_tr[i]) >> 1;
        p[i] = (p_t[i] + avg_l_tr) >> 1;
    }
}
802 
/* PRED_MODE_AVG_L_TL: per component, truncating average of the left and
 * top-left neighbours. */
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_l[i] + p_tl[i]) >> 1;
}
812 
/* PRED_MODE_AVG_L_T: per component, truncating average of the left and
 * top neighbours. */
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_l[i] + p_t[i]) >> 1;
}
822 
/* PRED_MODE_AVG_TL_T: per component, truncating average of the top-left and
 * top neighbours. */
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_tl[i] + p_t[i]) >> 1;
}
832 
/* PRED_MODE_AVG_T_TR: per component, truncating average of the top and
 * top-right neighbours. */
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_t[i] + p_tr[i]) >> 1;
}
842 
/* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR: per component, average of two averages:
 * avg(left, top-left) and avg(top, top-right), each truncating. */
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++) {
        int avg_l_tl = (p_l[i] + p_tl[i]) >> 1;
        int avg_t_tr = (p_t[i] + p_tr[i]) >> 1;
        p[i] = (avg_l_tl + avg_t_tr) >> 1;
    }
}
852 
/* PRED_MODE_SELECT: choose whichever of the left or top neighbours is
 * closer (sum of absolute per-component differences) to the top-left
 * neighbour's complement direction: diff <= 0 means top wins. */
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    /* (distance of left to top-left) minus (distance of top to top-left),
     * summed over all four components */
    int diff = (FFABS(p_l[0] - p_tl[0]) - FFABS(p_t[0] - p_tl[0])) +
               (FFABS(p_l[1] - p_tl[1]) - FFABS(p_t[1] - p_tl[1])) +
               (FFABS(p_l[2] - p_tl[2]) - FFABS(p_t[2] - p_tl[2])) +
               (FFABS(p_l[3] - p_tl[3]) - FFABS(p_t[3] - p_tl[3]));
    if (diff <= 0)
        AV_COPY32(p, p_t);
    else
        AV_COPY32(p, p_l);
}
866 
/* PRED_MODE_ADD_SUBTRACT_FULL: per-component gradient predictor
 * left + top - top-left, clipped to [0, 255]. */
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = av_clip_uint8(p_l[0] + p_t[0] - p_tl[0]);
    p[1] = av_clip_uint8(p_l[1] + p_t[1] - p_tl[1]);
    p[2] = av_clip_uint8(p_l[2] + p_t[2] - p_tl[2]);
    p[3] = av_clip_uint8(p_l[3] + p_t[3] - p_tl[3]);
}
876 
/* Truncating average of a and b, nudged half-way away from c, clipped to
 * [0, 255]. Helper for PRED_MODE_ADD_SUBTRACT_HALF. */
static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
{
    int d = a + b >> 1; /* == (a + b) >> 1: '+' binds tighter than '>>' */
    return av_clip_uint8(d + (d - c) / 2);
}
882 
/* PRED_MODE_ADD_SUBTRACT_HALF: per component, clamp_add_subtract_half of
 * (left, top, top-left). */
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = clamp_add_subtract_half(p_l[0], p_t[0], p_tl[0]);
    p[1] = clamp_add_subtract_half(p_l[1], p_t[1], p_tl[1]);
    p[2] = clamp_add_subtract_half(p_l[2], p_t[2], p_tl[2]);
    p[3] = clamp_add_subtract_half(p_l[3], p_t[3], p_tl[3]);
}
892 
893 typedef void (*inv_predict_func)(uint8_t *p, const uint8_t *p_l,
894  const uint8_t *p_tl, const uint8_t *p_t,
895  const uint8_t *p_tr);
896 
897 static const inv_predict_func inverse_predict[14] = {
902 };
903 
904 static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
905 {
906  uint8_t *dec, *p_l, *p_tl, *p_t, *p_tr;
907  uint8_t p[4];
908 
909  dec = GET_PIXEL(frame, x, y);
910  p_l = GET_PIXEL(frame, x - 1, y);
911  p_tl = GET_PIXEL(frame, x - 1, y - 1);
912  p_t = GET_PIXEL(frame, x, y - 1);
913  if (x == frame->width - 1)
914  p_tr = GET_PIXEL(frame, 0, y);
915  else
916  p_tr = GET_PIXEL(frame, x + 1, y - 1);
917 
918  inverse_predict[m](p, p_l, p_tl, p_t, p_tr);
919 
920  dec[0] += p[0];
921  dec[1] += p[1];
922  dec[2] += p[2];
923  dec[3] += p[3];
924 }
925 
927 {
928  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
929  ImageContext *pimg = &s->image[IMAGE_ROLE_PREDICTOR];
930  int x, y;
931 
932  for (y = 0; y < img->frame->height; y++) {
933  for (x = 0; x < s->reduced_width; x++) {
934  int tx = x >> pimg->size_reduction;
935  int ty = y >> pimg->size_reduction;
936  enum PredictionMode m = GET_PIXEL_COMP(pimg->frame, tx, ty, 2);
937 
938  if (x == 0) {
939  if (y == 0)
940  m = PRED_MODE_BLACK;
941  else
942  m = PRED_MODE_T;
943  } else if (y == 0)
944  m = PRED_MODE_L;
945 
946  if (m > 13) {
947  av_log(s->avctx, AV_LOG_ERROR,
948  "invalid predictor mode: %d\n", m);
949  return AVERROR_INVALIDDATA;
950  }
951  inverse_prediction(img->frame, m, x, y);
952  }
953  }
954  return 0;
955 }
956 
/* Cross-component delta for the color transform: the product of the two
 * inputs reinterpreted as signed 8-bit values, scaled down by 32 (>> 5).
 * The (int) cast widens before the shift. */
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred,
                                                      uint8_t color)
{
    return (int)ff_u8_to_s8(color_pred) * ff_u8_to_s8(color) >> 5;
}
962 
964 {
965  ImageContext *img, *cimg;
966  int x, y, cx, cy;
967  uint8_t *p, *cp;
968 
969  img = &s->image[IMAGE_ROLE_ARGB];
970  cimg = &s->image[IMAGE_ROLE_COLOR_TRANSFORM];
971 
972  for (y = 0; y < img->frame->height; y++) {
973  for (x = 0; x < s->reduced_width; x++) {
974  cx = x >> cimg->size_reduction;
975  cy = y >> cimg->size_reduction;
976  cp = GET_PIXEL(cimg->frame, cx, cy);
977  p = GET_PIXEL(img->frame, x, y);
978 
979  p[1] += color_transform_delta(cp[3], p[2]);
980  p[3] += color_transform_delta(cp[2], p[2]) +
981  color_transform_delta(cp[1], p[1]);
982  }
983  }
984  return 0;
985 }
986 
988 {
989  int x, y;
990  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
991 
992  for (y = 0; y < img->frame->height; y++) {
993  for (x = 0; x < s->reduced_width; x++) {
994  uint8_t *p = GET_PIXEL(img->frame, x, y);
995  p[1] += p[2];
996  p[3] += p[2];
997  }
998  }
999  return 0;
1000 }
1001 
1003 {
1004  ImageContext *img;
1005  ImageContext *pal;
1006  int i, x, y;
1007  uint8_t *p;
1008 
1009  img = &s->image[IMAGE_ROLE_ARGB];
1010  pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
1011 
1012  if (pal->size_reduction > 0) { // undo pixel packing
1013  GetBitContext gb_g;
1014  uint8_t *line;
1015  int pixel_bits = 8 >> pal->size_reduction;
1016 
1017  line = av_malloc(img->frame->linesize[0] + AV_INPUT_BUFFER_PADDING_SIZE);
1018  if (!line)
1019  return AVERROR(ENOMEM);
1020 
1021  for (y = 0; y < img->frame->height; y++) {
1022  p = GET_PIXEL(img->frame, 0, y);
1023  memcpy(line, p, img->frame->linesize[0]);
1024  init_get_bits(&gb_g, line, img->frame->linesize[0] * 8);
1025  skip_bits(&gb_g, 16);
1026  i = 0;
1027  for (x = 0; x < img->frame->width; x++) {
1028  p = GET_PIXEL(img->frame, x, y);
1029  p[2] = get_bits(&gb_g, pixel_bits);
1030  i++;
1031  if (i == 1 << pal->size_reduction) {
1032  skip_bits(&gb_g, 24);
1033  i = 0;
1034  }
1035  }
1036  }
1037  av_free(line);
1038  s->reduced_width = s->width; // we are back to full size
1039  }
1040 
1041  // switch to local palette if it's worth initializing it
1042  if (img->frame->height * img->frame->width > 300) {
1043  uint8_t palette[256 * 4];
1044  const int size = pal->frame->width * 4;
1045  av_assert0(size <= 1024U);
1046  memcpy(palette, GET_PIXEL(pal->frame, 0, 0), size); // copy palette
1047  // set extra entries to transparent black
1048  memset(palette + size, 0, 256 * 4 - size);
1049  for (y = 0; y < img->frame->height; y++) {
1050  for (x = 0; x < img->frame->width; x++) {
1051  p = GET_PIXEL(img->frame, x, y);
1052  i = p[2];
1053  AV_COPY32(p, &palette[i * 4]);
1054  }
1055  }
1056  } else {
1057  for (y = 0; y < img->frame->height; y++) {
1058  for (x = 0; x < img->frame->width; x++) {
1059  p = GET_PIXEL(img->frame, x, y);
1060  i = p[2];
1061  if (i >= pal->frame->width) {
1062  AV_WB32(p, 0x00000000);
1063  } else {
1064  const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
1065  AV_COPY32(p, pi);
1066  }
1067  }
1068  }
1069  }
1070 
1071  return 0;
1072 }
1073 
1074 static void update_canvas_size(AVCodecContext *avctx, int w, int h)
1075 {
1076  WebPContext *s = avctx->priv_data;
1077  if (s->width && s->width != w) {
1078  av_log(avctx, AV_LOG_WARNING, "Width mismatch. %d != %d\n",
1079  s->width, w);
1080  }
1081  s->width = w;
1082  if (s->height && s->height != h) {
1083  av_log(avctx, AV_LOG_WARNING, "Height mismatch. %d != %d\n",
1084  s->height, h);
1085  }
1086  s->height = h;
1087 }
1088 
1090  int *got_frame, const uint8_t *data_start,
1091  unsigned int data_size, int is_alpha_chunk)
1092 {
1093  WebPContext *s = avctx->priv_data;
1094  int w, h, ret, i, used;
1095 
1096  if (!is_alpha_chunk) {
1097  s->lossless = 1;
1098  avctx->pix_fmt = AV_PIX_FMT_ARGB;
1099  }
1100 
1101  ret = init_get_bits8(&s->gb, data_start, data_size);
1102  if (ret < 0)
1103  return ret;
1104 
1105  if (!is_alpha_chunk) {
1106  if (get_bits(&s->gb, 8) != 0x2F) {
1107  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless signature\n");
1108  return AVERROR_INVALIDDATA;
1109  }
1110 
1111  w = get_bits(&s->gb, 14) + 1;
1112  h = get_bits(&s->gb, 14) + 1;
1113 
1114  update_canvas_size(avctx, w, h);
1115 
1116  ret = ff_set_dimensions(avctx, s->width, s->height);
1117  if (ret < 0)
1118  return ret;
1119 
1120  s->has_alpha = get_bits1(&s->gb);
1121 
1122  if (get_bits(&s->gb, 3) != 0x0) {
1123  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless version\n");
1124  return AVERROR_INVALIDDATA;
1125  }
1126  } else {
1127  if (!s->width || !s->height)
1128  return AVERROR_BUG;
1129  w = s->width;
1130  h = s->height;
1131  }
1132 
1133  /* parse transformations */
1134  s->nb_transforms = 0;
1135  s->reduced_width = s->width;
1136  used = 0;
1137  while (get_bits1(&s->gb)) {
1138  enum TransformType transform = get_bits(&s->gb, 2);
1139  if (used & (1 << transform)) {
1140  av_log(avctx, AV_LOG_ERROR, "Transform %d used more than once\n",
1141  transform);
1143  goto free_and_return;
1144  }
1145  used |= (1 << transform);
1146  s->transforms[s->nb_transforms++] = transform;
1147  switch (transform) {
1148  case PREDICTOR_TRANSFORM:
1150  break;
1151  case COLOR_TRANSFORM:
1153  break;
1156  break;
1157  }
1158  if (ret < 0)
1159  goto free_and_return;
1160  }
1161 
1162  /* decode primary image */
1163  s->image[IMAGE_ROLE_ARGB].frame = p;
1164  if (is_alpha_chunk)
1165  s->image[IMAGE_ROLE_ARGB].is_alpha_primary = 1;
1167  if (ret < 0)
1168  goto free_and_return;
1169 
1170  /* apply transformations */
1171  for (i = s->nb_transforms - 1; i >= 0; i--) {
1172  switch (s->transforms[i]) {
1173  case PREDICTOR_TRANSFORM:
1175  break;
1176  case COLOR_TRANSFORM:
1178  break;
1179  case SUBTRACT_GREEN:
1181  break;
1184  break;
1185  }
1186  if (ret < 0)
1187  goto free_and_return;
1188  }
1189 
1190  *got_frame = 1;
1192  p->flags |= AV_FRAME_FLAG_KEY;
1194  ret = data_size;
1195 
1196 free_and_return:
1197  for (i = 0; i < IMAGE_ROLE_NB; i++)
1198  image_ctx_free(&s->image[i]);
1199 
1200  return ret;
1201 }
1202 
1204 {
1205  int x, y, ls;
1206  uint8_t *dec;
1207 
1208  ls = frame->linesize[3];
1209 
1210  /* filter first row using horizontal filter */
1211  dec = frame->data[3] + 1;
1212  for (x = 1; x < frame->width; x++, dec++)
1213  *dec += *(dec - 1);
1214 
1215  /* filter first column using vertical filter */
1216  dec = frame->data[3] + ls;
1217  for (y = 1; y < frame->height; y++, dec += ls)
1218  *dec += *(dec - ls);
1219 
1220  /* filter the rest using the specified filter */
1221  switch (m) {
1223  for (y = 1; y < frame->height; y++) {
1224  dec = frame->data[3] + y * ls + 1;
1225  for (x = 1; x < frame->width; x++, dec++)
1226  *dec += *(dec - 1);
1227  }
1228  break;
1229  case ALPHA_FILTER_VERTICAL:
1230  for (y = 1; y < frame->height; y++) {
1231  dec = frame->data[3] + y * ls + 1;
1232  for (x = 1; x < frame->width; x++, dec++)
1233  *dec += *(dec - ls);
1234  }
1235  break;
1236  case ALPHA_FILTER_GRADIENT:
1237  for (y = 1; y < frame->height; y++) {
1238  dec = frame->data[3] + y * ls + 1;
1239  for (x = 1; x < frame->width; x++, dec++)
1240  dec[0] += av_clip_uint8(*(dec - 1) + *(dec - ls) - *(dec - ls - 1));
1241  }
1242  break;
1243  }
1244 }
1245 
1247  const uint8_t *data_start,
1248  unsigned int data_size)
1249 {
1250  WebPContext *s = avctx->priv_data;
1251  int x, y, ret;
1252 
1253  if (s->alpha_compression == ALPHA_COMPRESSION_NONE) {
1254  GetByteContext gb;
1255 
1256  bytestream2_init(&gb, data_start, data_size);
1257  for (y = 0; y < s->height; y++)
1258  bytestream2_get_buffer(&gb, p->data[3] + p->linesize[3] * y,
1259  s->width);
1260  } else if (s->alpha_compression == ALPHA_COMPRESSION_VP8L) {
1261  uint8_t *ap, *pp;
1262  int alpha_got_frame = 0;
1263 
1264  s->alpha_frame = av_frame_alloc();
1265  if (!s->alpha_frame)
1266  return AVERROR(ENOMEM);
1267 
1268  ret = vp8_lossless_decode_frame(avctx, s->alpha_frame, &alpha_got_frame,
1269  data_start, data_size, 1);
1270  if (ret < 0) {
1271  av_frame_free(&s->alpha_frame);
1272  return ret;
1273  }
1274  if (!alpha_got_frame) {
1275  av_frame_free(&s->alpha_frame);
1276  return AVERROR_INVALIDDATA;
1277  }
1278 
1279  /* copy green component of alpha image to alpha plane of primary image */
1280  for (y = 0; y < s->height; y++) {
1281  ap = GET_PIXEL(s->alpha_frame, 0, y) + 2;
1282  pp = p->data[3] + p->linesize[3] * y;
1283  for (x = 0; x < s->width; x++) {
1284  *pp = *ap;
1285  pp++;
1286  ap += 4;
1287  }
1288  }
1289  av_frame_free(&s->alpha_frame);
1290  }
1291 
1292  /* apply alpha filtering */
1293  if (s->alpha_filter)
1294  alpha_inverse_prediction(p, s->alpha_filter);
1295 
1296  return 0;
1297 }
1298 
1300  int *got_frame, uint8_t *data_start,
1301  unsigned int data_size)
1302 {
1303  WebPContext *s = avctx->priv_data;
1304  int ret;
1305 
1306  if (!s->initialized) {
1307  ff_vp8_decode_init(avctx);
1308  s->initialized = 1;
1309  s->v.actually_webp = 1;
1310  }
1311  avctx->pix_fmt = s->has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
1312  s->lossless = 0;
1313 
1314  if (data_size > INT_MAX) {
1315  av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n");
1316  return AVERROR_PATCHWELCOME;
1317  }
1318 
1319  av_packet_unref(s->pkt);
1320  s->pkt->data = data_start;
1321  s->pkt->size = data_size;
1322 
1323  ret = ff_vp8_decode_frame(avctx, p, got_frame, s->pkt);
1324  if (ret < 0)
1325  return ret;
1326 
1327  if (!*got_frame)
1328  return AVERROR_INVALIDDATA;
1329 
1330  update_canvas_size(avctx, avctx->width, avctx->height);
1331 
1332  if (s->has_alpha) {
1333  ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data,
1334  s->alpha_data_size);
1335  if (ret < 0)
1336  return ret;
1337  }
1338  return ret;
1339 }
1340 
1342  int *got_frame, AVPacket *avpkt)
1343 {
1344  WebPContext *s = avctx->priv_data;
1345  GetByteContext gb;
1346  int ret;
1347  uint32_t chunk_type, chunk_size;
1348  int vp8x_flags = 0;
1349 
1350  s->avctx = avctx;
1351  s->width = 0;
1352  s->height = 0;
1353  *got_frame = 0;
1354  s->has_alpha = 0;
1355  s->has_exif = 0;
1356  s->has_iccp = 0;
1357  bytestream2_init(&gb, avpkt->data, avpkt->size);
1358 
1359  if (bytestream2_get_bytes_left(&gb) < 12)
1360  return AVERROR_INVALIDDATA;
1361 
1362  if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
1363  av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
1364  return AVERROR_INVALIDDATA;
1365  }
1366 
1367  chunk_size = bytestream2_get_le32(&gb);
1368  if (bytestream2_get_bytes_left(&gb) < chunk_size)
1369  return AVERROR_INVALIDDATA;
1370 
1371  if (bytestream2_get_le32(&gb) != MKTAG('W', 'E', 'B', 'P')) {
1372  av_log(avctx, AV_LOG_ERROR, "missing WEBP tag\n");
1373  return AVERROR_INVALIDDATA;
1374  }
1375 
1376  while (bytestream2_get_bytes_left(&gb) > 8) {
1377  char chunk_str[5] = { 0 };
1378 
1379  chunk_type = bytestream2_get_le32(&gb);
1380  chunk_size = bytestream2_get_le32(&gb);
1381  if (chunk_size == UINT32_MAX)
1382  return AVERROR_INVALIDDATA;
1383  chunk_size += chunk_size & 1;
1384 
1385  if (bytestream2_get_bytes_left(&gb) < chunk_size) {
1386  /* we seem to be running out of data, but it could also be that the
1387  bitstream has trailing junk leading to bogus chunk_size. */
1388  break;
1389  }
1390 
1391  switch (chunk_type) {
1392  case MKTAG('V', 'P', '8', ' '):
1393  if (!*got_frame) {
1394  ret = vp8_lossy_decode_frame(avctx, p, got_frame,
1395  avpkt->data + bytestream2_tell(&gb),
1396  chunk_size);
1397  if (ret < 0)
1398  return ret;
1399  }
1400  bytestream2_skip(&gb, chunk_size);
1401  break;
1402  case MKTAG('V', 'P', '8', 'L'):
1403  if (!*got_frame) {
1404  ret = vp8_lossless_decode_frame(avctx, p, got_frame,
1405  avpkt->data + bytestream2_tell(&gb),
1406  chunk_size, 0);
1407  if (ret < 0)
1408  return ret;
1409 #if FF_API_CODEC_PROPS
1413 #endif
1414  }
1415  bytestream2_skip(&gb, chunk_size);
1416  break;
1417  case MKTAG('V', 'P', '8', 'X'):
1418  if (s->width || s->height || *got_frame) {
1419  av_log(avctx, AV_LOG_ERROR, "Canvas dimensions are already set\n");
1420  return AVERROR_INVALIDDATA;
1421  }
1422  vp8x_flags = bytestream2_get_byte(&gb);
1423  bytestream2_skip(&gb, 3);
1424  s->width = bytestream2_get_le24(&gb) + 1;
1425  s->height = bytestream2_get_le24(&gb) + 1;
1426  ret = av_image_check_size(s->width, s->height, 0, avctx);
1427  if (ret < 0)
1428  return ret;
1429  break;
1430  case MKTAG('A', 'L', 'P', 'H'): {
1431  int alpha_header, filter_m, compression;
1432 
1433  if (!(vp8x_flags & VP8X_FLAG_ALPHA)) {
1434  av_log(avctx, AV_LOG_WARNING,
1435  "ALPHA chunk present, but alpha bit not set in the "
1436  "VP8X header\n");
1437  }
1438  if (chunk_size == 0) {
1439  av_log(avctx, AV_LOG_ERROR, "invalid ALPHA chunk size\n");
1440  return AVERROR_INVALIDDATA;
1441  }
1442  alpha_header = bytestream2_get_byte(&gb);
1443  s->alpha_data = avpkt->data + bytestream2_tell(&gb);
1444  s->alpha_data_size = chunk_size - 1;
1445  bytestream2_skip(&gb, s->alpha_data_size);
1446 
1447  filter_m = (alpha_header >> 2) & 0x03;
1448  compression = alpha_header & 0x03;
1449 
1450  if (compression > ALPHA_COMPRESSION_VP8L) {
1451  av_log(avctx, AV_LOG_VERBOSE,
1452  "skipping unsupported ALPHA chunk\n");
1453  } else {
1454  s->has_alpha = 1;
1455  s->alpha_compression = compression;
1456  s->alpha_filter = filter_m;
1457  }
1458 
1459  break;
1460  }
1461  case MKTAG('E', 'X', 'I', 'F'): {
1462  AVBufferRef *exif_buf = NULL;
1463 
1464  if (s->has_exif) {
1465  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra EXIF chunk\n");
1466  goto exif_end;
1467  }
1468 
1469  if (!(vp8x_flags & VP8X_FLAG_EXIF_METADATA))
1470  av_log(avctx, AV_LOG_WARNING,
1471  "EXIF chunk present, but Exif bit not set in the "
1472  "VP8X header\n");
1473 
1474  exif_buf = av_buffer_alloc(chunk_size);
1475  if (!exif_buf) {
1476  av_log(avctx, AV_LOG_WARNING, "unable to allocate EXIF buffer\n");
1477  goto exif_end;
1478  }
1479  s->has_exif = 1;
1480  memcpy(exif_buf->data, gb.buffer, chunk_size);
1481 
1482  ret = ff_decode_exif_attach_buffer(avctx, p, &exif_buf, AV_EXIF_TIFF_HEADER);
1483  if (ret < 0)
1484  av_log(avctx, AV_LOG_WARNING, "unable to attach EXIF buffer\n");
1485 
1486 exif_end:
1487  bytestream2_skip(&gb, chunk_size);
1488  break;
1489  }
1490  case MKTAG('I', 'C', 'C', 'P'): {
1491  AVFrameSideData *sd;
1492 
1493  if (s->has_iccp) {
1494  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra ICCP chunk\n");
1495  bytestream2_skip(&gb, chunk_size);
1496  break;
1497  }
1498  if (!(vp8x_flags & VP8X_FLAG_ICC))
1499  av_log(avctx, AV_LOG_WARNING,
1500  "ICCP chunk present, but ICC Profile bit not set in the "
1501  "VP8X header\n");
1502 
1503  s->has_iccp = 1;
1504 
1505  ret = ff_frame_new_side_data(avctx, p, AV_FRAME_DATA_ICC_PROFILE, chunk_size, &sd);
1506  if (ret < 0)
1507  return ret;
1508 
1509  if (sd) {
1510  bytestream2_get_buffer(&gb, sd->data, chunk_size);
1511  } else {
1512  bytestream2_skip(&gb, chunk_size);
1513  }
1514  break;
1515  }
1516  case MKTAG('A', 'N', 'I', 'M'):
1517  case MKTAG('A', 'N', 'M', 'F'):
1518  case MKTAG('X', 'M', 'P', ' '):
1519  AV_WL32(chunk_str, chunk_type);
1520  av_log(avctx, AV_LOG_WARNING, "skipping unsupported chunk: %s\n",
1521  chunk_str);
1522  bytestream2_skip(&gb, chunk_size);
1523  break;
1524  default:
1525  AV_WL32(chunk_str, chunk_type);
1526  av_log(avctx, AV_LOG_VERBOSE, "skipping unknown chunk: %s\n",
1527  chunk_str);
1528  bytestream2_skip(&gb, chunk_size);
1529  break;
1530  }
1531  }
1532 
1533  if (!*got_frame) {
1534  av_log(avctx, AV_LOG_ERROR, "image data not found\n");
1535  return AVERROR_INVALIDDATA;
1536  }
1537 
1538  return avpkt->size;
1539 }
1540 
1542 {
1543  WebPContext *s = avctx->priv_data;
1544 
1545  s->pkt = av_packet_alloc();
1546  if (!s->pkt)
1547  return AVERROR(ENOMEM);
1548 
1549  return 0;
1550 }
1551 
1553 {
1554  WebPContext *s = avctx->priv_data;
1555 
1556  av_packet_free(&s->pkt);
1557 
1558  if (s->initialized)
1559  return ff_vp8_decode_free(avctx);
1560 
1561  return 0;
1562 }
1563 
1565  .p.name = "webp",
1566  CODEC_LONG_NAME("WebP image"),
1567  .p.type = AVMEDIA_TYPE_VIDEO,
1568  .p.id = AV_CODEC_ID_WEBP,
1569  .priv_data_size = sizeof(WebPContext),
1572  .close = webp_decode_close,
1573  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1574  .caps_internal = FF_CODEC_CAP_ICC_PROFILES |
1576 };
WebPContext::width
int width
Definition: webp.c:208
WebPContext::alpha_frame
AVFrame * alpha_frame
Definition: webp.c:197
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:433
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
ff_vp8_decode_free
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2813
HuffReader::vlc
VLC vlc
Definition: webp.c:174
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
inv_predict_12
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:868
ff_vlc_init_from_lengths
int ff_vlc_init_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:306
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:689
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
NUM_SHORT_DISTANCES
#define NUM_SHORT_DISTANCES
Definition: webp.c:70
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:422
vp8_lossy_decode_frame
static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1299
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:205
color
Definition: vf_paletteuse.c:513
PRED_MODE_AVG_T_AVG_L_TR
@ PRED_MODE_AVG_T_AVG_L_TR
Definition: webp.c:126
ALPHA_FILTER_HORIZONTAL
@ ALPHA_FILTER_HORIZONTAL
Definition: webp.c:108
HuffReader::simple_symbols
uint16_t simple_symbols[2]
Definition: webp.c:177
GetByteContext
Definition: bytestream.h:33
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_u8_to_s8
static int8_t ff_u8_to_s8(uint8_t a)
Definition: mathops.h:244
block_bits
static const uint8_t block_bits[]
Definition: imm4.c:103
PRED_MODE_BLACK
@ PRED_MODE_BLACK
Definition: webp.c:121
inv_predict_4
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:787
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
inv_predict_2
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:773
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AVFrame::width
int width
Definition: frame.h:499
w
uint8_t w
Definition: llviddspenc.c:38
GET_PIXEL_COMP
#define GET_PIXEL_COMP(frame, x, y, c)
Definition: webp.c:224
AVPacket::data
uint8_t * data
Definition: packet.h:558
PRED_MODE_ADD_SUBTRACT_FULL
@ PRED_MODE_ADD_SUBTRACT_FULL
Definition: webp.c:133
COLOR_INDEXING_TRANSFORM
@ COLOR_INDEXING_TRANSFORM
Definition: webp.c:117
b
#define b
Definition: input.c:42
SUBTRACT_GREEN
@ SUBTRACT_GREEN
Definition: webp.c:116
ImageContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:185
parse_transform_color
static int parse_transform_color(WebPContext *s)
Definition: webp.c:478
FFCodec
Definition: codec_internal.h:127
PRED_MODE_AVG_TL_T
@ PRED_MODE_AVG_TL_T
Definition: webp.c:129
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
max
#define max(a, b)
Definition: cuda_runtime.h:33
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:671
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:512
thread.h
WebPContext::transforms
enum TransformType transforms[4]
Definition: webp.c:213
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:75
PRED_MODE_TR
@ PRED_MODE_TR
Definition: webp.c:124
PRED_MODE_AVG_L_T
@ PRED_MODE_AVG_L_T
Definition: webp.c:128
vp8_lossless_decode_frame
static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, const uint8_t *data_start, unsigned int data_size, int is_alpha_chunk)
Definition: webp.c:1089
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
HuffReader::simple
int simple
Definition: webp.c:175
PRED_MODE_TL
@ PRED_MODE_TL
Definition: webp.c:125
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:379
WebPContext::alpha_compression
enum AlphaCompression alpha_compression
Definition: webp.c:202
inv_predict_10
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:844
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
inv_predict_8
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:824
WebPContext::avctx
AVCodecContext * avctx
Definition: webp.c:199
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
ALPHA_COMPRESSION_NONE
@ ALPHA_COMPRESSION_NONE
Definition: webp.c:102
WebPContext::nb_transforms
int nb_transforms
Definition: webp.c:212
GetBitContext
Definition: get_bits.h:109
update_canvas_size
static void update_canvas_size(AVCodecContext *avctx, int w, int h)
Definition: webp.c:1074
WebPContext::alpha_data_size
int alpha_data_size
Definition: webp.c:205
inv_predict_func
void(* inv_predict_func)(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:893
COLOR_TRANSFORM
@ COLOR_TRANSFORM
Definition: webp.c:115
VP8X_FLAG_EXIF_METADATA
#define VP8X_FLAG_EXIF_METADATA
Definition: webp.c:59
inv_predict_3
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:780
ff_webp_decoder
const FFCodec ff_webp_decoder
Definition: webp.c:1564
color_transform_delta
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred, uint8_t color)
Definition: webp.c:957
decode_entropy_coded_image
static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role, int w, int h)
Definition: webp.c:552
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
HUFF_IDX_GREEN
@ HUFF_IDX_GREEN
Definition: webp.c:138
WebPContext::has_exif
int has_exif
Definition: webp.c:206
read_huffman_code_normal
static int read_huffman_code_normal(WebPContext *s, HuffReader *hc, int alphabet_size)
Definition: webp.c:315
WebPContext::has_alpha
int has_alpha
Definition: webp.c:201
PredictionMode
PredictionMode
Definition: webp.c:120
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:68
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:90
ImageContext::frame
AVFrame * frame
Definition: webp.c:182
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1638
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
inverse_prediction
static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
Definition: webp.c:904
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:341
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
TransformType
TransformType
Definition: webp.c:113
PRED_MODE_AVG_T_TR
@ PRED_MODE_AVG_T_TR
Definition: webp.c:130
transform
static const int8_t transform[32][32]
Definition: dsp.c:27
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1048
HUFFMAN_CODES_PER_META_CODE
#define HUFFMAN_CODES_PER_META_CODE
Definition: webp.c:66
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
code_length_code_order
static const uint8_t code_length_code_order[NUM_CODE_LENGTH_CODES]
Definition: webp.c:79
color_cache_put
static av_always_inline void color_cache_put(ImageContext *img, uint32_t c)
Definition: webp.c:546
bits
uint8_t bits
Definition: vp3data.h:128
NUM_DISTANCE_CODES
#define NUM_DISTANCE_CODES
Definition: webp.c:69
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
inv_predict_11
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:854
NUM_CODE_LENGTH_CODES
#define NUM_CODE_LENGTH_CODES
Definition: webp.c:65
ImageContext
Definition: webp.c:180
decode.h
get_bits.h
xi
#define xi(width, name, var, range_min, range_max, subs,...)
Definition: cbs_h2645.c:418
ImageContext::color_cache
uint32_t * color_cache
Definition: webp.c:184
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
GET_PIXEL
#define GET_PIXEL(frame, x, y)
Definition: webp.c:221
ImageContext::is_alpha_primary
int is_alpha_primary
Definition: webp.c:191
PRED_MODE_AVG_L_TL
@ PRED_MODE_AVG_L_TL
Definition: webp.c:127
webp_decode_close
static av_cold int webp_decode_close(AVCodecContext *avctx)
Definition: webp.c:1552
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
ImageContext::huffman_groups
HuffReader * huffman_groups
Definition: webp.c:186
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
ff_vp8_decode_init
int ff_vp8_decode_init(AVCodecContext *avctx)
apply_subtract_green_transform
static int apply_subtract_green_transform(WebPContext *s)
Definition: webp.c:987
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
HuffReader::nb_symbols
int nb_symbols
Definition: webp.c:176
WebPContext::height
int height
Definition: webp.c:209
ALPHA_FILTER_NONE
@ ALPHA_FILTER_NONE
Definition: webp.c:107
clamp_add_subtract_half
static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
Definition: webp.c:877
HUFF_IDX_DIST
@ HUFF_IDX_DIST
Definition: webp.c:142
NULL
#define NULL
Definition: coverity.c:32
exif_internal.h
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
inverse_predict
static const inv_predict_func inverse_predict[14]
Definition: webp.c:897
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
tiff_common.h
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:386
ImageContext::color_cache_bits
int color_cache_bits
Definition: webp.c:183
parse_transform_color_indexing
static int parse_transform_color_indexing(WebPContext *s)
Definition: webp.c:494
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
webp_decode_init
static av_cold int webp_decode_init(AVCodecContext *avctx)
Definition: webp.c:1541
WebPContext::v
VP8Context v
Definition: webp.c:195
bytestream2_get_buffer
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:267
alphabet_sizes
static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE]
Definition: webp.c:73
NUM_LITERAL_CODES
#define NUM_LITERAL_CODES
Definition: webp.c:67
IMAGE_ROLE_PREDICTOR
@ IMAGE_ROLE_PREDICTOR
Definition: webp.c:161
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:646
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
vp8.h
alpha_inverse_prediction
static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
Definition: webp.c:1203
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:415
IMAGE_ROLE_COLOR_INDEXING
@ IMAGE_ROLE_COLOR_INDEXING
Definition: webp.c:168
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:519
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
inv_predict_0
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:759
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
IMAGE_ROLE_NB
@ IMAGE_ROLE_NB
Definition: webp.c:170
VP8X_FLAG_ICC
#define VP8X_FLAG_ICC
Definition: webp.c:61
AVPacket::size
int size
Definition: packet.h:559
ff_decode_exif_attach_buffer
int ff_decode_exif_attach_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferRef **pbuf, enum AVExifHeaderMode header_mode)
Attach the data buffer to the frame.
Definition: decode.c:2414
codec_internal.h
AlphaCompression
AlphaCompression
Definition: webp.c:101
PREDICTOR_TRANSFORM
@ PREDICTOR_TRANSFORM
Definition: webp.c:114
ImageContext::size_reduction
int size_reduction
Definition: webp.c:190
size
int size
Definition: twinvq_data.h:10344
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2112
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
ImageContext::role
enum ImageRole role
Definition: webp.c:181
decode_entropy_image
static int decode_entropy_image(WebPContext *s)
Definition: webp.c:432
apply_color_transform
static int apply_color_transform(WebPContext *s)
Definition: webp.c:963
VP8X_FLAG_ALPHA
#define VP8X_FLAG_ALPHA
Definition: webp.c:60
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
img
#define img
Definition: vf_colormatrix.c:114
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:64
HuffReader
Definition: webp.c:173
parse_transform_predictor
static int parse_transform_predictor(WebPContext *s)
Definition: webp.c:462
av_buffer_alloc
AVBufferRef * av_buffer_alloc(size_t size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:77
PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
@ PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
Definition: webp.c:131
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:99
ALPHA_FILTER_GRADIENT
@ ALPHA_FILTER_GRADIENT
Definition: webp.c:110
WebPContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:217
inv_predict_5
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:794
WebPContext::lossless
int lossless
Definition: webp.c:210
WebPContext::reduced_width
int reduced_width
Definition: webp.c:216
NUM_LENGTH_CODES
#define NUM_LENGTH_CODES
Definition: webp.c:68
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
WebPContext::pkt
AVPacket * pkt
Definition: webp.c:198
AlphaFilter
AlphaFilter
Definition: webp.c:106
PRED_MODE_SELECT
@ PRED_MODE_SELECT
Definition: webp.c:132
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
lz77_distance_offsets
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2]
Definition: webp.c:83
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
WebPContext::gb
GetBitContext gb
Definition: webp.c:196
apply_predictor_transform
static int apply_predictor_transform(WebPContext *s)
Definition: webp.c:926
av_always_inline
#define av_always_inline
Definition: attributes.h:49
HuffmanIndex
HuffmanIndex
Definition: webp.c:137
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:634
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AV_CODEC_ID_WEBP
@ AV_CODEC_ID_WEBP
Definition: codec_id.h:226
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
inv_predict_7
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:814
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
huff_reader_get_symbol
static int huff_reader_get_symbol(HuffReader *r, GetBitContext *gb)
Definition: webp.c:244
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
avcodec.h
inv_predict_13
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:884
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
WebPContext::image
ImageContext image[IMAGE_ROLE_NB]
Definition: webp.c:218
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_vp8_decode_frame
int ff_vp8_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
inv_predict_6
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:804
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
U
#define U(x)
Definition: vpx_arith.h:37
vp8_lossy_decode_alpha
static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p, const uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1246
AVCodecContext
main external API structure.
Definition: avcodec.h:431
HUFF_IDX_BLUE
@ HUFF_IDX_BLUE
Definition: webp.c:140
IMAGE_ROLE_ENTROPY
@ IMAGE_ROLE_ENTROPY
Definition: webp.c:157
VLC
Definition: vlc.h:50
webp_decode_frame
static int webp_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: webp.c:1341
image_ctx_free
static void image_ctx_free(ImageContext *img)
Definition: webp.c:227
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
WebPContext::initialized
int initialized
Definition: webp.c:200
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
apply_color_indexing_transform
static int apply_color_indexing_transform(WebPContext *s)
Definition: webp.c:1002
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
WebPContext::alpha_data
const uint8_t * alpha_data
Definition: webp.c:204
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
VLC_INIT_OUTPUT_LE
#define VLC_INIT_OUTPUT_LE
Definition: vlc.h:196
MAX_HUFFMAN_CODE_LENGTH
#define MAX_HUFFMAN_CODE_LENGTH
Definition: webp.c:71
ALPHA_FILTER_VERTICAL
@ ALPHA_FILTER_VERTICAL
Definition: webp.c:109
PARSE_BLOCK_SIZE
#define PARSE_BLOCK_SIZE(w, h)
Definition: webp.c:426
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
PRED_MODE_L
@ PRED_MODE_L
Definition: webp.c:122
WebPContext
Definition: webp.c:194
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:535
VP8Context
Definition: vp8.h:161
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
ImageRole
ImageRole
Definition: webp.c:151
bytestream.h
distance
static float distance(float x, float y, int band)
Definition: nellymoserenc.c:231
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1637
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
read_huffman_code_simple
static void read_huffman_code_simple(WebPContext *s, HuffReader *hc)
Definition: webp.c:300
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
HUFF_IDX_ALPHA
@ HUFF_IDX_ALPHA
Definition: webp.c:141
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
huff_reader_build_canonical
static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths, uint16_t len_counts[MAX_HUFFMAN_CODE_LENGTH+1], uint8_t lens[], uint16_t syms[], int alphabet_size, void *logctx)
Definition: webp.c:255
h
h
Definition: vp9dsp_template.c:2070
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
WebPContext::has_iccp
int has_iccp
Definition: webp.c:207
get_huffman_group
static HuffReader * get_huffman_group(WebPContext *s, ImageContext *img, int x, int y)
Definition: webp.c:529
width
#define width
Definition: dsp.h:89
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
inv_predict_9
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:834
ALPHA_COMPRESSION_VP8L
@ ALPHA_COMPRESSION_VP8L
Definition: webp.c:103
inv_predict_1
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:766
PRED_MODE_T
@ PRED_MODE_T
Definition: webp.c:123
line
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted line
Definition: swscale.txt:40
WebPContext::alpha_filter
enum AlphaFilter alpha_filter
Definition: webp.c:203
HUFF_IDX_RED
@ HUFF_IDX_RED
Definition: webp.c:139
IMAGE_ROLE_ARGB
@ IMAGE_ROLE_ARGB
Definition: webp.c:153
PRED_MODE_ADD_SUBTRACT_HALF
@ PRED_MODE_ADD_SUBTRACT_HALF
Definition: webp.c:134
IMAGE_ROLE_COLOR_TRANSFORM
@ IMAGE_ROLE_COLOR_TRANSFORM
Definition: webp.c:165