FFmpeg
huffyuvenc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * see https://multimedia.cx/huffyuv.txt for a description of
5  * the algorithm used
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  *
23  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
24  */
25 
26 /**
27  * @file
28  * huffyuv encoder
29  */
30 
31 #include "config_components.h"
32 
33 #include "avcodec.h"
34 #include "bswapdsp.h"
35 #include "codec_internal.h"
36 #include "encode.h"
37 #include "huffyuv.h"
38 #include "huffman.h"
39 #include "huffyuvencdsp.h"
40 #include "lossless_videoencdsp.h"
41 #include "put_bits.h"
42 #include "libavutil/mem.h"
43 #include "libavutil/opt.h"
44 #include "libavutil/pixdesc.h"
45 
46 typedef struct HYuvEncContext {
47  AVClass *class;
50  /* Predictor, use int for AVOption */
51  int predictor;
55  int version;
56  int bps;
57  unsigned mask; // (1<<bps)-1
58  int vlc_n; // number of vlc codes (FFMIN(1<<bps, MAX_VLC_N))
59  int alpha;
60  int chroma;
61  int yuv;
64  int flags;
65  int context;
67 
68  union {
69  uint8_t *temp[3];
70  uint16_t *temp16[3];
71  };
72  uint64_t stats[4][MAX_VLC_N];
73  uint8_t len[4][MAX_VLC_N];
74  uint32_t bits[4][MAX_VLC_N];
78  int non_determ; // non-deterministic, multi-threaded encoder allowed
80 
81 static inline void diff_bytes(HYuvEncContext *s, uint8_t *dst,
82  const uint8_t *src0, const uint8_t *src1, int w)
83 {
84  if (s->bps <= 8) {
85  s->llvidencdsp.diff_bytes(dst, src0, src1, w);
86  } else {
87  s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->mask, w);
88  }
89 }
90 
91 static inline int sub_left_prediction(HYuvEncContext *s, uint8_t *dst,
92  const uint8_t *src, int w, int left)
93 {
94  int i;
95  int min_width = FFMIN(w, 32);
96 
97  if (s->bps <= 8) {
98  for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
99  const int temp = src[i];
100  dst[i] = temp - left;
101  left = temp;
102  }
103  if (w < 32)
104  return left;
105  s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
106  return src[w-1];
107  } else {
108  const uint16_t *src16 = (const uint16_t *)src;
109  uint16_t *dst16 = ( uint16_t *)dst;
110  for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
111  const int temp = src16[i];
112  dst16[i] = temp - left;
113  left = temp;
114  }
115  if (w < 32)
116  return left;
117  s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->mask, w - 32);
118  return src16[w-1];
119  }
120 }
121 
122 static inline void sub_left_prediction_bgr32(HYuvEncContext *s, uint8_t *dst,
123  const uint8_t *src, int w,
124  int *red, int *green, int *blue,
125  int *alpha)
126 {
127  int i;
128  int r, g, b, a;
129  int min_width = FFMIN(w, 8);
130  r = *red;
131  g = *green;
132  b = *blue;
133  a = *alpha;
134 
135  for (i = 0; i < min_width; i++) {
136  const int rt = src[i * 4 + R];
137  const int gt = src[i * 4 + G];
138  const int bt = src[i * 4 + B];
139  const int at = src[i * 4 + A];
140  dst[i * 4 + R] = rt - r;
141  dst[i * 4 + G] = gt - g;
142  dst[i * 4 + B] = bt - b;
143  dst[i * 4 + A] = at - a;
144  r = rt;
145  g = gt;
146  b = bt;
147  a = at;
148  }
149 
150  s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32);
151 
152  *red = src[(w - 1) * 4 + R];
153  *green = src[(w - 1) * 4 + G];
154  *blue = src[(w - 1) * 4 + B];
155  *alpha = src[(w - 1) * 4 + A];
156 }
157 
158 static inline void sub_left_prediction_rgb24(HYuvEncContext *s, uint8_t *dst,
159  const uint8_t *src, int w,
160  int *red, int *green, int *blue)
161 {
162  int i;
163  int r, g, b;
164  r = *red;
165  g = *green;
166  b = *blue;
167  for (i = 0; i < FFMIN(w, 16); i++) {
168  const int rt = src[i * 3 + 0];
169  const int gt = src[i * 3 + 1];
170  const int bt = src[i * 3 + 2];
171  dst[i * 3 + 0] = rt - r;
172  dst[i * 3 + 1] = gt - g;
173  dst[i * 3 + 2] = bt - b;
174  r = rt;
175  g = gt;
176  b = bt;
177  }
178 
179  s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
180 
181  *red = src[(w - 1) * 3 + 0];
182  *green = src[(w - 1) * 3 + 1];
183  *blue = src[(w - 1) * 3 + 2];
184 }
185 
186 static void sub_median_prediction(HYuvEncContext *s, uint8_t *dst,
187  const uint8_t *src1, const uint8_t *src2,
188  int w, int *left, int *left_top)
189 {
190  if (s->bps <= 8) {
191  s->llvidencdsp.sub_median_pred(dst, src1, src2, w , left, left_top);
192  } else {
193  s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1,
194  (const uint16_t *)src2, s->mask, w, left, left_top);
195  }
196 }
197 
198 static int store_table(HYuvEncContext *s, const uint8_t *len, uint8_t *buf)
199 {
200  int i;
201  int index = 0;
202  int n = s->vlc_n;
203 
204  for (i = 0; i < n;) {
205  int val = len[i];
206  int repeat = 0;
207 
208  for (; i < n && len[i] == val && repeat < 255; i++)
209  repeat++;
210 
211  av_assert0(val < 32 && val >0 && repeat < 256 && repeat>0);
212  if (repeat > 7) {
213  buf[index++] = val;
214  buf[index++] = repeat;
215  } else {
216  buf[index++] = val | (repeat << 5);
217  }
218  }
219 
220  return index;
221 }
222 
223 static int store_huffman_tables(HYuvEncContext *s, uint8_t *buf)
224 {
225  int i, ret;
226  int size = 0;
227  int count = 3;
228 
229  if (s->version > 2)
230  count = 1 + s->alpha + 2*s->chroma;
231 
232  for (i = 0; i < count; i++) {
233  if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
234  return ret;
235 
236  ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n);
237  if (ret < 0)
238  return ret;
239 
240  size += store_table(s, s->len[i], buf + size);
241  }
242  return size;
243 }
244 
246 {
247  HYuvEncContext *s = avctx->priv_data;
248  int i, j;
249  int ret;
250  const AVPixFmtDescriptor *desc;
251 
252  s->avctx = avctx;
253  s->flags = avctx->flags;
254 
255  ff_bswapdsp_init(&s->bdsp);
256  ff_llvidencdsp_init(&s->llvidencdsp);
257 
258  avctx->extradata = av_mallocz(3*MAX_N + 4);
259  if (!avctx->extradata)
260  return AVERROR(ENOMEM);
261  if (s->flags&AV_CODEC_FLAG_PASS1) {
262 #define STATS_OUT_SIZE 21*MAX_N*3 + 4
263  avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
264  if (!avctx->stats_out)
265  return AVERROR(ENOMEM);
266  }
267  s->version = 2;
268 
269  desc = av_pix_fmt_desc_get(avctx->pix_fmt);
270  s->bps = desc->comp[0].depth;
271  s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
272  s->chroma = desc->nb_components > 2;
273  s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
274  s->chroma_h_shift = desc->log2_chroma_w;
275  s->chroma_v_shift = desc->log2_chroma_h;
276 
277  s->mask = (1 << s->bps) - 1;
278  s->vlc_n = FFMIN(1 << s->bps, MAX_VLC_N);
279 
280  ff_huffyuvencdsp_init(&s->hencdsp, s->bps, avctx->width >> s->chroma_h_shift);
281 
282  switch (avctx->pix_fmt) {
283  case AV_PIX_FMT_YUV420P:
284  case AV_PIX_FMT_YUV422P:
285  if (avctx->width & 1) {
286  av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
287  return AVERROR(EINVAL);
288  }
289  s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
290  break;
291  case AV_PIX_FMT_YUV444P:
292  case AV_PIX_FMT_YUV410P:
293  case AV_PIX_FMT_YUV411P:
294  case AV_PIX_FMT_YUV440P:
295  case AV_PIX_FMT_GBRP:
296  case AV_PIX_FMT_GBRP9:
297  case AV_PIX_FMT_GBRP10:
298  case AV_PIX_FMT_GBRP12:
299  case AV_PIX_FMT_GBRP14:
300  case AV_PIX_FMT_GBRP16:
301  case AV_PIX_FMT_GRAY8:
302  case AV_PIX_FMT_GRAY16:
303  case AV_PIX_FMT_YUVA444P:
304  case AV_PIX_FMT_YUVA420P:
305  case AV_PIX_FMT_YUVA422P:
306  case AV_PIX_FMT_GBRAP:
307  case AV_PIX_FMT_YUV420P9:
312  case AV_PIX_FMT_YUV422P9:
317  case AV_PIX_FMT_YUV444P9:
331  s->version = 3;
332  break;
333  case AV_PIX_FMT_RGB32:
334  s->bitstream_bpp = 32;
335  break;
336  case AV_PIX_FMT_RGB24:
337  s->bitstream_bpp = 24;
338  break;
339  default:
340  av_unreachable("Already checked via CODEC_PIXFMTS");
341  }
342 
343  avctx->bits_per_coded_sample = s->bitstream_bpp;
344  s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
345  s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
346  if (s->context) {
347  if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
348  av_log(avctx, AV_LOG_ERROR,
349  "context=1 is not compatible with "
350  "2 pass huffyuv encoding\n");
351  return AVERROR(EINVAL);
352  }
353  }
354 
355  if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
356  if (s->interlaced != ( avctx->height > 288 ))
357  av_log(avctx, AV_LOG_INFO,
358  "using huffyuv 2.2.0 or newer interlacing flag\n");
359  }
360 
361  if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
362  av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
363  "Use vstrict=-2 / -strict -2 to use it anyway.\n");
364  return AVERROR(EINVAL);
365  }
366 
367  if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
368  av_log(avctx, AV_LOG_ERROR,
369  "Error: RGB is incompatible with median predictor\n");
370  return AVERROR(EINVAL);
371  }
372 
373  avctx->extradata[0] = s->predictor | (s->decorrelate << 6);
374  avctx->extradata[2] = s->interlaced ? 0x10 : 0x20;
375  if (s->context)
376  avctx->extradata[2] |= 0x40;
377  if (s->version < 3) {
378  avctx->extradata[1] = s->bitstream_bpp;
379  avctx->extradata[3] = 0;
380  } else {
381  avctx->extradata[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
382  if (s->chroma)
383  avctx->extradata[2] |= s->yuv ? 1 : 2;
384  if (s->alpha)
385  avctx->extradata[2] |= 4;
386  avctx->extradata[3] = 1;
387  }
388  avctx->extradata_size = 4;
389 
390  if (avctx->stats_in) {
391  char *p = avctx->stats_in;
392 
393  for (i = 0; i < 4; i++)
394  for (j = 0; j < s->vlc_n; j++)
395  s->stats[i][j] = 1;
396 
397  for (;;) {
398  for (i = 0; i < 4; i++) {
399  char *next;
400 
401  for (j = 0; j < s->vlc_n; j++) {
402  s->stats[i][j] += strtol(p, &next, 0);
403  if (next == p) return -1;
404  p = next;
405  }
406  }
407  if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
408  }
409  } else {
410  for (i = 0; i < 4; i++)
411  for (j = 0; j < s->vlc_n; j++) {
412  int d = FFMIN(j, s->vlc_n - j);
413 
414  s->stats[i][j] = 100000000 / (d*d + 1);
415  }
416  }
417 
418  ret = store_huffman_tables(s, avctx->extradata + avctx->extradata_size);
419  if (ret < 0)
420  return ret;
421  avctx->extradata_size += ret;
422 
423  if (s->context) {
424  for (i = 0; i < 4; i++) {
425  int pels = avctx->width * avctx->height / (i ? 40 : 10);
426  for (j = 0; j < s->vlc_n; j++) {
427  int d = FFMIN(j, s->vlc_n - j);
428  s->stats[i][j] = pels/(d*d + 1);
429  }
430  }
431  } else {
432  for (i = 0; i < 4; i++)
433  for (j = 0; j < s->vlc_n; j++)
434  s->stats[i][j]= 0;
435  }
436 
437  s->picture_number=0;
438 
439  for (int i = 0; i < 3; i++) {
440  s->temp[i] = av_malloc(4 * avctx->width + 16);
441  if (!s->temp[i])
442  return AVERROR(ENOMEM);
443  }
444 
445  return 0;
446 }
/*
 * Entropy-code one 4:2:2 line of pre-predicted samples from s->temp[]
 * (Y in temp[0], U in temp[1], V in temp[2]); offset is the starting luma
 * position, count the number of luma samples (must be even here).
 * Returns 0 on success, -1 if the packet buffer would overflow.
 */
static int encode_422_bitstream(HYuvEncContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    /* worst-case output bound: 4 symbols per pixel pair, 2 bytes each */
    if (put_bytes_left(&s->pb, 0) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* Fetch one Y/Y/U/V symbol group for pixel pair i. */
#define LOAD4\
    int y0 = y[2 * i];\
    int y1 = y[2 * i + 1];\
    int u0 = u[i];\
    int v0 = v[i];

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        /* first pass: only accumulate symbol statistics */
        for(i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        /* adaptive-context mode: keep updating stats while writing codes */
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        /* fixed tables: write codes only */
        for(i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
501 
/*
 * Entropy-code one pre-predicted plane line of `width` samples for the
 * version-3 (ffvhuff) path. Samples are read from s->temp[0] (<= 8 bpc)
 * or s->temp16[0] (> 8 bpc); for bps > 14 each sample is split into a
 * VLC-coded high part (y >> 2) plus 2 raw low bits.
 * Returns 0 on success, -1 if the packet buffer would overflow.
 */
static int encode_plane_bitstream(HYuvEncContext *s, int width, int plane)
{
    int count = width/2;

    if (put_bytes_left(&s->pb, 0) < count * s->bps / 2) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* *_14 variants mask samples to bps bits; *_16 variants split off 2 raw LSBs. */
#define LOADEND\
    int y0 = s->temp[0][width-1];
#define LOADEND_14\
    int y0 = s->temp16[0][width-1] & mask;
#define LOADEND_16\
    int y0 = s->temp16[0][width-1];
#define STATEND\
    s->stats[plane][y0]++;
#define STATEND_16\
    s->stats[plane][y0>>2]++;
#define WRITEEND\
    put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
#define WRITEEND_16\
    put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
    put_bits(&s->pb, 2, y0&3);

#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define LOAD2_14\
    int y0 = s->temp16[0][2 * i] & mask;\
    int y1 = s->temp16[0][2 * i + 1] & mask;
#define LOAD2_16\
    int y0 = s->temp16[0][2 * i];\
    int y1 = s->temp16[0][2 * i + 1];
#define STAT2\
    s->stats[plane][y0]++;\
    s->stats[plane][y1]++;
#define STAT2_16\
    s->stats[plane][y0>>2]++;\
    s->stats[plane][y1>>2]++;
#define WRITE2\
    put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
    put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
#define WRITE2_16\
    put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
    put_bits(&s->pb, 2, y0&3);\
    put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
    put_bits(&s->pb, 2, y1&3);

/*
 * Shared pass structure: stats-only for two-pass first pass, stats+write in
 * context mode, write-only otherwise; the *END variants handle an odd final
 * sample when width is odd.
 */
#define ENCODE_PLANE(LOAD, LOADEND, WRITE, WRITEEND, STAT, STATEND) \
do { \
    if (s->flags & AV_CODEC_FLAG_PASS1) { \
        for (int i = 0; i < count; i++) { \
            LOAD; \
            STAT; \
        } \
        if (width & 1) { \
            LOADEND; \
            STATEND; \
        } \
    } \
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) \
        return 0; \
    \
    if (s->context) { \
        for (int i = 0; i < count; i++) { \
            LOAD; \
            STAT; \
            WRITE; \
        } \
        if (width & 1) { \
            LOADEND; \
            STATEND; \
            WRITEEND; \
        } \
    } else { \
        for (int i = 0; i < count; i++) { \
            LOAD; \
            WRITE; \
        } \
        if (width & 1) { \
            LOADEND; \
            WRITEEND; \
        } \
    } \
} while (0)

    /* NOTE(review): the ENCODE_PLANE(...) invocations that belong inside
     * these three branches appear to have been elided from this listing —
     * confirm against the upstream source before relying on this block. */
    if (s->bps <= 8) {
    } else if (s->bps <= 14) {
        unsigned mask = s->mask;

    } else {
    }
#undef LOAD2
#undef STAT2
#undef WRITE2
    return 0;
}
603 
/*
 * Entropy-code one grayscale line of pre-predicted samples from s->temp[0];
 * count is the number of samples (halved below to iterate over pairs).
 * Returns 0 on success, -1 if the packet buffer would overflow.
 */
static int encode_gray_bitstream(HYuvEncContext *s, int count)
{
    int i;

    /* worst-case output bound for this line */
    if (put_bytes_left(&s->pb, 0) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* Fetch one sample pair; gray uses table 0 only. */
#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        /* first pass: statistics only */
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        /* adaptive-context mode: update stats while writing */
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
648 
/*
 * Entropy-code one packed RGB(A) line of `count` pixels from s->temp[0];
 * planes is 3 (RGB24) or 4 (RGB32). Red and blue are coded as differences
 * against green (decorrelation), masked back to 8 bits.
 * Returns 0 on success, -1 if the packet buffer would overflow.
 */
static inline int encode_bgra_bitstream(HYuvEncContext *s, int count, int planes)
{
    int i;

    if (put_bytes_left(&s->pb, 0) < 4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* Load one pixel; b and r are green-decorrelated. For planes == 3 the `a`
 * load reads the next pixel's first byte but is unused in that case. */
#define LOAD_GBRA \
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
    int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
    int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
    int a = s->temp[0][planes * i + A];

/* Alpha shares table 2 with the red channel. */
#define STAT_BGRA \
    s->stats[0][b]++; \
    s->stats[1][g]++; \
    s->stats[2][r]++; \
    if (planes == 4) \
        s->stats[2][a]++;

#define WRITE_GBRA \
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
    if (planes == 4) \
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & AV_CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        /* first pass with suppressed output: statistics only */
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
        }
    } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
        /* update stats and emit codes */
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
            WRITE_GBRA;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            WRITE_GBRA;
        }
    }
    return 0;
}
698 
700  const AVFrame *p, int *got_packet)
701 {
702  HYuvEncContext *s = avctx->priv_data;
703  const int width = avctx->width;
704  const int width2 = avctx->width >> 1;
705  const int height = avctx->height;
706  const int fake_ystride = (1 + s->interlaced) * p->linesize[0];
707  const int fake_ustride = (1 + s->interlaced) * p->linesize[1];
708  const int fake_vstride = (1 + s->interlaced) * p->linesize[2];
709  int i, j, size = 0, ret;
710 
711  if ((ret = ff_alloc_packet(avctx, pkt, width * height * 3 * 4 + FF_INPUT_BUFFER_MIN_SIZE)) < 0)
712  return ret;
713 
714  if (s->context) {
716  if (size < 0)
717  return size;
718 
719  for (i = 0; i < 4; i++)
720  for (j = 0; j < s->vlc_n; j++)
721  s->stats[i][j] >>= 1;
722  }
723 
724  init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
725 
726  if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
727  avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
728  int lefty, leftu, leftv, y, cy;
729 
730  put_bits(&s->pb, 8, leftv = p->data[2][0]);
731  put_bits(&s->pb, 8, lefty = p->data[0][1]);
732  put_bits(&s->pb, 8, leftu = p->data[1][0]);
733  put_bits(&s->pb, 8, p->data[0][0]);
734 
735  lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
736  leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
737  leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
738 
740 
741  if (s->predictor==MEDIAN) {
742  int lefttopy, lefttopu, lefttopv;
743  cy = y = 1;
744  if (s->interlaced) {
745  lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
746  leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
747  leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
748 
750  y++; cy++;
751  }
752 
753  lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
754  leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
755  leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
756 
757  encode_422_bitstream(s, 0, 4);
758 
759  lefttopy = p->data[0][3];
760  lefttopu = p->data[1][1];
761  lefttopv = p->data[2][1];
762  s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
763  s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
764  s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
765  encode_422_bitstream(s, 0, width - 4);
766  y++; cy++;
767 
768  for (; y < height; y++,cy++) {
769  const uint8_t *ydst, *udst, *vdst;
770 
771  if (s->bitstream_bpp == 12) {
772  while (2 * cy > y) {
773  ydst = p->data[0] + p->linesize[0] * y;
774  s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
776  y++;
777  }
778  if (y >= height) break;
779  }
780  ydst = p->data[0] + p->linesize[0] * y;
781  udst = p->data[1] + p->linesize[1] * cy;
782  vdst = p->data[2] + p->linesize[2] * cy;
783 
784  s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
785  s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
786  s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
787 
789  }
790  } else {
791  for (cy = y = 1; y < height; y++, cy++) {
792  const uint8_t *ydst, *udst, *vdst;
793 
794  /* encode a luma only line & y++ */
795  if (s->bitstream_bpp == 12) {
796  ydst = p->data[0] + p->linesize[0] * y;
797 
798  if (s->predictor == PLANE && s->interlaced < y) {
799  s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
800 
801  lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
802  } else {
803  lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
804  }
806  y++;
807  if (y >= height) break;
808  }
809 
810  ydst = p->data[0] + p->linesize[0] * y;
811  udst = p->data[1] + p->linesize[1] * cy;
812  vdst = p->data[2] + p->linesize[2] * cy;
813 
814  if (s->predictor == PLANE && s->interlaced < cy) {
815  s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
816  s->llvidencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
817  s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
818 
819  lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
820  leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
821  leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
822  } else {
823  lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
824  leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
825  leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
826  }
827 
829  }
830  }
831  } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
832  const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
833  const int stride = -p->linesize[0];
834  const int fake_stride = -fake_ystride;
835  int leftr, leftg, leftb, lefta;
836 
837  put_bits(&s->pb, 8, lefta = data[A]);
838  put_bits(&s->pb, 8, leftr = data[R]);
839  put_bits(&s->pb, 8, leftg = data[G]);
840  put_bits(&s->pb, 8, leftb = data[B]);
841 
842  sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
843  &leftr, &leftg, &leftb, &lefta);
844  encode_bgra_bitstream(s, width - 1, 4);
845 
846  for (int y = 1; y < height; y++) {
847  const uint8_t *dst = data + y*stride;
848  if (s->predictor == PLANE && s->interlaced < y) {
849  s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
850  sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
851  &leftr, &leftg, &leftb, &lefta);
852  } else {
853  sub_left_prediction_bgr32(s, s->temp[0], dst, width,
854  &leftr, &leftg, &leftb, &lefta);
855  }
857  }
858  } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
859  const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
860  const int stride = -p->linesize[0];
861  const int fake_stride = -fake_ystride;
862  int leftr, leftg, leftb;
863 
864  put_bits(&s->pb, 8, leftr = data[0]);
865  put_bits(&s->pb, 8, leftg = data[1]);
866  put_bits(&s->pb, 8, leftb = data[2]);
867  put_bits(&s->pb, 8, 0);
868 
869  sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
870  &leftr, &leftg, &leftb);
872 
873  for (int y = 1; y < height; y++) {
874  const uint8_t *dst = data + y * stride;
875  if (s->predictor == PLANE && s->interlaced < y) {
876  s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
877  width * 3);
878  sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
879  &leftr, &leftg, &leftb);
880  } else {
881  sub_left_prediction_rgb24(s, s->temp[0], dst, width,
882  &leftr, &leftg, &leftb);
883  }
885  }
886  } else if (s->version > 2) {
887  int plane;
888  for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
889  int left, y;
890  int w = width;
891  int h = height;
892  int fake_stride = fake_ystride;
893 
894  if (s->chroma && (plane == 1 || plane == 2)) {
895  w >>= s->chroma_h_shift;
896  h >>= s->chroma_v_shift;
897  fake_stride = plane == 1 ? fake_ustride : fake_vstride;
898  }
899 
900  left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);
901 
902  encode_plane_bitstream(s, w, plane);
903 
904  if (s->predictor==MEDIAN) {
905  int lefttop;
906  y = 1;
907  if (s->interlaced) {
908  left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);
909 
910  encode_plane_bitstream(s, w, plane);
911  y++;
912  }
913 
914  lefttop = p->data[plane][0];
915 
916  for (; y < h; y++) {
917  const uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
918 
919  sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);
920 
921  encode_plane_bitstream(s, w, plane);
922  }
923  } else {
924  for (y = 1; y < h; y++) {
925  const uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
926 
927  if (s->predictor == PLANE && s->interlaced < y) {
928  diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);
929 
930  left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
931  } else {
932  left = sub_left_prediction(s, s->temp[0], dst, w , left);
933  }
934 
935  encode_plane_bitstream(s, w, plane);
936  }
937  }
938  }
939  } else {
940  av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
941  }
942 
943  size += (put_bits_count(&s->pb) + 31) / 8;
944  put_bits(&s->pb, 16, 0);
945  put_bits(&s->pb, 15, 0);
946  size /= 4;
947 
948  if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
949  int j;
950  char *p = avctx->stats_out;
951  char *end = p + STATS_OUT_SIZE;
952  for (i = 0; i < 4; i++) {
953  for (j = 0; j < s->vlc_n; j++) {
954  snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
955  p += strlen(p);
956  s->stats[i][j]= 0;
957  }
958  snprintf(p, end-p, "\n");
959  p++;
960  if (end <= p)
961  return AVERROR(ENOMEM);
962  }
963  } else if (avctx->stats_out)
964  avctx->stats_out[0] = '\0';
965  if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
966  flush_put_bits(&s->pb);
967  s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
968  }
969 
970  s->picture_number++;
971 
972  pkt->size = size * 4;
973  *got_packet = 1;
974 
975  return 0;
976 }
977 
979 {
980  HYuvEncContext *s = avctx->priv_data;
981 
982  av_freep(&avctx->stats_out);
983 
984  for (int i = 0; i < 3; i++)
985  av_freep(&s->temp[i]);
986 
987  return 0;
988 }
989 
990 #define OFFSET(x) offsetof(HYuvEncContext, x)
991 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
992 
/* Option table shared by both encoder variants; the plain huffyuv AVClass
 * points at options + 1 so that the first entry stays ffvhuff-only. */
static const AVOption options[] = {
    /* ffvhuff-only options */
    { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    /* Common options */
    { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism",
      OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 0 },
      0, 1, VE },
    { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, .unit = "pred" },
    { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT }, INT_MIN, INT_MAX, VE, .unit = "pred" },
    { "plane", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE }, INT_MIN, INT_MAX, VE, .unit = "pred" },
    { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, .unit = "pred" },
    { NULL },
};
1006 
/* AVClass for the plain huffyuv encoder; options + 1 skips the first
 * (ffvhuff-only) entry of the shared option table. */
static const AVClass normal_class = {
    .class_name = "huffyuv",
    .item_name = av_default_item_name,
    .option = options + 1,
    .version = LIBAVUTIL_VERSION_INT,
};
1013 
1015  .p.name = "huffyuv",
1016  CODEC_LONG_NAME("Huffyuv / HuffYUV"),
1017  .p.type = AVMEDIA_TYPE_VIDEO,
1018  .p.id = AV_CODEC_ID_HUFFYUV,
1019  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
1021  .priv_data_size = sizeof(HYuvEncContext),
1022  .init = encode_init,
1024  .close = encode_end,
1025  .p.priv_class = &normal_class,
1027  .color_ranges = AVCOL_RANGE_MPEG,
1028  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1029 };
1030 
1031 #if CONFIG_FFVHUFF_ENCODER
/* AVClass for the ffvhuff encoder; exposes the full option table,
 * including the ffvhuff-only "context" option. */
static const AVClass ff_class = {
    .class_name = "ffvhuff",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
1038 
1039 const FFCodec ff_ffvhuff_encoder = {
1040  .p.name = "ffvhuff",
1041  CODEC_LONG_NAME("Huffyuv FFmpeg variant"),
1042  .p.type = AVMEDIA_TYPE_VIDEO,
1043  .p.id = AV_CODEC_ID_FFVHUFF,
1044  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
1046  .priv_data_size = sizeof(HYuvEncContext),
1047  .init = encode_init,
1049  .close = encode_end,
1050  .p.priv_class = &ff_class,
1051  CODEC_PIXFMTS(
1067  .color_ranges = AVCOL_RANGE_MPEG,
1068  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1069 };
1070 #endif
HYuvEncContext::chroma_h_shift
int chroma_h_shift
Definition: huffyuvenc.c:62
STATEND_16
#define STATEND_16
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:596
A
#define A(x)
Definition: vpx_arith.h:28
CODEC_PIXFMTS
#define CODEC_PIXFMTS(...)
Definition: codec_internal.h:392
bswapdsp.h
HYuvEncContext::flags
int flags
Definition: huffyuvenc.c:64
store_huffman_tables
static int store_huffman_tables(HYuvEncContext *s, uint8_t *buf)
Definition: huffyuvenc.c:223
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
WRITE2_16
#define WRITE2_16
AV_CODEC_ID_HUFFYUV
@ AV_CODEC_ID_HUFFYUV
Definition: codec_id.h:77
encode_422_bitstream
static int encode_422_bitstream(HYuvEncContext *s, int offset, int count)
Definition: huffyuvenc.c:447
HYuvEncContext::stats
uint64_t stats[4][MAX_VLC_N]
Definition: huffyuvenc.c:72
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
sub_left_prediction_bgr32
static void sub_left_prediction_bgr32(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha)
Definition: huffyuvenc.c:122
src1
const pixel * src1
Definition: h264pred_template.c:420
HYuvEncContext::chroma_v_shift
int chroma_v_shift
Definition: huffyuvenc.c:63
MAX_VLC_N
#define MAX_VLC_N
Definition: huffyuv.h:50
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
OFFSET
#define OFFSET(x)
Definition: huffyuvenc.c:990
encode_gray_bitstream
static int encode_gray_bitstream(HYuvEncContext *s, int count)
Definition: huffyuvenc.c:604
encode_bgra_bitstream
static int encode_bgra_bitstream(HYuvEncContext *s, int count, int planes)
Definition: huffyuvenc.c:649
mask
int mask
Definition: mediacodecdec_common.c:154
encode_init
static av_cold int encode_init(AVCodecContext *avctx)
Definition: huffyuvenc.c:245
store_table
static int store_table(HYuvEncContext *s, const uint8_t *len, uint8_t *buf)
Definition: huffyuvenc.c:198
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:588
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:434
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
pixdesc.h
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:595
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
AVPacket::data
uint8_t * data
Definition: packet.h:588
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:590
AVOption
AVOption.
Definition: opt.h:429
HYuvEncContext::interlaced
int interlaced
Definition: huffyuvenc.c:52
encode.h
b
#define b
Definition: input.c:42
data
const char data[16]
Definition: mxf.c:149
R
#define R
Definition: huffyuv.h:44
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:539
FFCodec
Definition: codec_internal.h:127
STATS_OUT_SIZE
#define STATS_OUT_SIZE
encode_plane_bitstream
static int encode_plane_bitstream(HYuvEncContext *s, int width, int plane)
Definition: huffyuvenc.c:502
WRITEEND_16
#define WRITEEND_16
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
HYuvEncContext::temp16
uint16_t * temp16[3]
Definition: huffyuvenc.c:70
HYuvEncContext::yuv
int yuv
Definition: huffyuvenc.c:61
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:591
HYuvEncContext::bitstream_bpp
int bitstream_bpp
Definition: huffyuvenc.c:54
HYuvEncContext::decorrelate
int decorrelate
Definition: huffyuvenc.c:53
FF_INPUT_BUFFER_MIN_SIZE
#define FF_INPUT_BUFFER_MIN_SIZE
Used by some encoders as upper bound for the length of headers.
Definition: encode.h:33
LOADEND_14
#define LOADEND_14
STAT2_16
#define STAT2_16
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:587
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:560
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
sub_left_prediction_rgb24
static void sub_left_prediction_rgb24(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue)
Definition: huffyuvenc.c:158
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:558
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:597
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:537
HYuvEncContext::vlc_n
int vlc_n
Definition: huffyuvenc.c:58
HYuvEncContext::non_determ
int non_determ
Definition: huffyuvenc.c:78
LOAD2
#define LOAD2
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
AV_CODEC_FLAG2_NO_OUTPUT
#define AV_CODEC_FLAG2_NO_OUTPUT
Skip bitstream encoding.
Definition: avcodec.h:341
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:359
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:145
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:542
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:111
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:551
STATEND
#define STATEND
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
WRITE2
#define WRITE2
ff_huffyuvencdsp_init
av_cold void ff_huffyuvencdsp_init(HuffYUVEncDSPContext *c, int bpp, int width)
Definition: huffyuvencdsp.c:87
s
#define s(width, name)
Definition: cbs_vp9.c:198
AVCodecContext::stats_in
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:1332
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:552
g
const char * g
Definition: vf_curves.c:128
HYuvEncContext::pb
PutBitContext pb
Definition: huffyuvenc.c:49
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:144
MEDIAN
#define MEDIAN(x)
STAT2
#define STAT2
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
B
#define B
Definition: huffyuv.h:42
normal_class
static const AVClass normal_class
Definition: huffyuvenc.c:1007
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:536
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:550
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
HuffYUVEncDSPContext
Definition: huffyuvencdsp.h:24
huffyuvencdsp.h
AV_CODEC_ID_FFVHUFF
@ AV_CODEC_ID_FFVHUFF
Definition: codec_id.h:119
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
WRITE_GBRA
#define WRITE_GBRA
MAX_N
#define MAX_N
Definition: huffyuv.h:49
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
if
if(ret)
Definition: filter_design.txt:179
LOADEND_16
#define LOADEND_16
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:561
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
LOADEND
#define LOADEND
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:37
NULL
#define NULL
Definition: coverity.c:32
WRITEEND
#define WRITEEND
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:116
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
ff_huff_gen_len_table
int ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats, int stats_size, int skip0)
Definition: huffman.c:63
options
Definition: swscale.c:45
ff_huffyuv_encoder
const FFCodec ff_huffyuv_encoder
Definition: huffyuvenc.c:1014
ff_huffyuv_generate_bits_table
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:40
HYuvEncContext::context
int context
Definition: huffyuvenc.c:65
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:540
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:557
HYuvEncContext::len
uint8_t len[4][MAX_VLC_N]
Definition: huffyuvenc.c:73
index
int index
Definition: gxfenc.c:90
encode_end
static av_cold int encode_end(AVCodecContext *avctx)
Definition: huffyuvenc.c:978
ff_llvidencdsp_init
av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
Definition: lossless_videoencdsp.c:100
LOAD2_16
#define LOAD2_16
encode_frame
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *p, int *got_packet)
Definition: huffyuvenc.c:699
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1324
HYuvEncContext::temp
uint8_t * temp[3]
Definition: huffyuvenc.c:69
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:589
height
#define height
Definition: dsp.h:89
codec_internal.h
LOAD4
#define LOAD4
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
HYuvEncContext
Definition: huffyuvenc.c:46
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:544
size
int size
Definition: twinvq_data.h:10344
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:546
LEFT
#define LEFT
Definition: cdgraphics.c:168
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:592
planes
static const struct @585 planes[]
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
VE
#define VE
Definition: huffyuvenc.c:991
HYuvEncContext::chroma
int chroma
Definition: huffyuvenc.c:60
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
lossless_videoencdsp.h
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1558
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:522
HYuvEncContext::alpha
int alpha
Definition: huffyuvenc.c:59
sub_left_prediction
static int sub_left_prediction(HYuvEncContext *s, uint8_t *dst, const uint8_t *src, int w, int left)
Definition: huffyuvenc.c:91
src2
const pixel * src2
Definition: h264pred_template.c:421
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:559
HYuvEncContext::bps
int bps
Definition: huffyuvenc.c:56
HYuvEncContext::version
int version
Definition: huffyuvenc.c:55
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
HYuvEncContext::bdsp
BswapDSPContext bdsp
Definition: huffyuvenc.c:75
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
len
int len
Definition: vorbis_enc_data.h:426
PLANE
@ PLANE
Definition: huffyuv.h:54
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
LLVidEncDSPContext
Definition: lossless_videoencdsp.h:25
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:538
avcodec.h
sub_median_prediction
static void sub_median_prediction(HYuvEncContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
Definition: huffyuvenc.c:186
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1369
HYuvEncContext::hencdsp
HuffYUVEncDSPContext hencdsp
Definition: huffyuvenc.c:76
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:589
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:543
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:548
HYuvEncContext::mask
unsigned mask
Definition: huffyuvenc.c:57
HYuvEncContext::picture_number
int picture_number
Definition: huffyuvenc.c:66
AVCodecContext
main external API structure.
Definition: avcodec.h:439
LOAD_GBRA
#define LOAD_GBRA
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
huffman.h
temp
else temp
Definition: vf_mcdeint.c:271
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
G
#define G
Definition: huffyuv.h:43
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
src0
const pixel *const src0
Definition: h264pred_template.c:419
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
desc
const char * desc
Definition: libsvtav1.c:82
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
LOAD2_14
#define LOAD2_14
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
w
uint8_t w
Definition: llvidencdsp.c:39
ff_ffvhuff_encoder
const FFCodec ff_ffvhuff_encoder
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
HYuvEncContext::llvidencdsp
LLVidEncDSPContext llvidencdsp
Definition: huffyuvenc.c:77
BswapDSPContext
Definition: bswapdsp.h:24
h
h
Definition: vp9dsp_template.c:2070
options
static const AVOption options[]
Definition: huffyuvenc.c:993
HYuvEncContext::predictor
int predictor
Definition: huffyuvenc.c:51
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:549
stride
#define stride
Definition: h264pred_template.c:536
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
huffyuv.h
width
#define width
Definition: dsp.h:89
diff_bytes
static void diff_bytes(HYuvEncContext *s, uint8_t *dst, const uint8_t *src0, const uint8_t *src1, int w)
Definition: huffyuvenc.c:81
ENCODE_PLANE
#define ENCODE_PLANE(LOAD, LOADEND, WRITE, WRITEEND, STAT, STATEND)
put_bits.h
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
snprintf
#define snprintf
Definition: snprintf.h:34
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
HYuvEncContext::avctx
AVCodecContext * avctx
Definition: huffyuvenc.c:48
src
#define src
Definition: vp8dsp.c:248
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:173
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:547
STAT_BGRA
#define STAT_BGRA
HYuvEncContext::bits
uint32_t bits[4][MAX_VLC_N]
Definition: huffyuvenc.c:74
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290