FFmpeg
vc2enc.c
1 /*
2  * Copyright (C) 2016 Open Broadcast Systems Ltd.
3  * Author 2016 Rostislav Pehlivanov <atomnuker@gmail.com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/mem.h"
23 #include "libavutil/pixdesc.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/version.h"
26 #include "codec_internal.h"
27 #include "dirac.h"
28 #include "encode.h"
29 #include "put_bits.h"
30 #include "version.h"
31 
32 #include "vc2enc_dwt.h"
33 #include "diractab.h"
34 
35 /* The limited size resolution of each slice forces us to do this */
36 #define SSIZE_ROUND(b) (FFALIGN((b), s->size_scaler) + 4 + s->prefix_bytes)
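/* A sketch of the overhead the "+ 4" above covers (inferred from encode_hq_slice()
 * further below, not stated here): one quantisation-index byte plus one
 * slice-size length byte per plane, on top of prefix_bytes and the
 * size_scaler alignment. */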
37 
38 /* Decides the cutoff point in # of slices to distribute the leftover bytes */
39 #define SLICE_REDIST_TOTAL 150
40 
41 typedef struct VC2BaseVideoFormat {
42     enum AVPixelFormat pix_fmt;
43     AVRational time_base;
44     int width, height, interlaced, level;
45     const char *name;
46 } VC2BaseVideoFormat;
47 
48 static const VC2BaseVideoFormat base_video_fmts[] = {
49     { 0 }, /* Custom format, here just to make indexing equal to base_vf */
50  { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 176, 120, 0, 1, "QSIF525" },
51  { AV_PIX_FMT_YUV420P, { 2, 25 }, 176, 144, 0, 1, "QCIF" },
52  { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 352, 240, 0, 1, "SIF525" },
53  { AV_PIX_FMT_YUV420P, { 2, 25 }, 352, 288, 0, 1, "CIF" },
54  { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 704, 480, 0, 1, "4SIF525" },
55  { AV_PIX_FMT_YUV420P, { 2, 25 }, 704, 576, 0, 1, "4CIF" },
56 
57  { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 720, 480, 1, 2, "SD480I-60" },
58  { AV_PIX_FMT_YUV422P10, { 1, 25 }, 720, 576, 1, 2, "SD576I-50" },
59 
60  { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 1280, 720, 0, 3, "HD720P-60" },
61  { AV_PIX_FMT_YUV422P10, { 1, 50 }, 1280, 720, 0, 3, "HD720P-50" },
62  { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 1920, 1080, 1, 3, "HD1080I-60" },
63  { AV_PIX_FMT_YUV422P10, { 1, 25 }, 1920, 1080, 1, 3, "HD1080I-50" },
64  { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 1920, 1080, 0, 3, "HD1080P-60" },
65  { AV_PIX_FMT_YUV422P10, { 1, 50 }, 1920, 1080, 0, 3, "HD1080P-50" },
66 
67  { AV_PIX_FMT_YUV444P12, { 1, 24 }, 2048, 1080, 0, 4, "DC2K" },
68  { AV_PIX_FMT_YUV444P12, { 1, 24 }, 4096, 2160, 0, 5, "DC4K" },
69 
70  { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 3840, 2160, 0, 6, "UHDTV 4K-60" },
71  { AV_PIX_FMT_YUV422P10, { 1, 50 }, 3840, 2160, 0, 6, "UHDTV 4K-50" },
72 
73  { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 7680, 4320, 0, 7, "UHDTV 8K-60" },
74  { AV_PIX_FMT_YUV422P10, { 1, 50 }, 7680, 4320, 0, 7, "UHDTV 8K-50" },
75 
76  { AV_PIX_FMT_YUV422P10, { 1001, 24000 }, 1920, 1080, 0, 3, "HD1080P-24" },
77  { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 720, 486, 1, 2, "SD Pro486" },
78 };
79 static const int base_video_fmts_len = FF_ARRAY_ELEMS(base_video_fmts);
80 
81 enum VC2_QM {
82     VC2_QM_DEF = 0,
83     VC2_QM_COL,
84     VC2_QM_FLAT,
85 
86     VC2_QM_NB
87 };
88 
89 typedef struct SubBand {
90     dwtcoef *buf;
91  ptrdiff_t stride;
92  int width;
93  int height;
94 } SubBand;
95 
96 typedef struct Plane {
97     SubBand band[MAX_DWT_LEVELS][4];
98     dwtcoef *coef_buf;
99     int width;
100     int height;
101     int dwt_width;
102     int dwt_height;
103     ptrdiff_t coef_stride;
104 } Plane;
105 
106 typedef struct SliceArgs {
107     PutBitContext pb;
108     int cache[DIRAC_MAX_QUANT_INDEX];
109     void *ctx;
110     int x;
111     int y;
112     int quant_idx;
113     int bits_ceil;
114     int bits_floor;
115     int bytes;
116 } SliceArgs;
117 
118 typedef struct TransformArgs {
119  void *ctx;
120     Plane *plane;
121     const void *idata;
122     ptrdiff_t istride;
123     int field;
124     VC2TransformContext t;
125 } TransformArgs;
126 
127 typedef struct VC2EncContext {
128     AVClass *av_class;
129     PutBitContext pb;
130     Plane plane[3];
131     AVCodecContext *avctx;
132     DiracVersionInfo ver;
133 
134     SliceArgs *slice_args;
135     TransformArgs transform_args[3];
136 
137     /* For conversion from unsigned pixel values to signed */
138     int diff_offset;
139     int bpp;
140     int bpp_idx;
141 
142     /* Picture number */
143     uint32_t picture_number;
144 
145     /* Base video format */
146     int base_vf;
147     int level;
148     int profile;
149 
150     /* Quantization matrix */
151     uint8_t quant[MAX_DWT_LEVELS][4];
152     int custom_quant_matrix;
153 
154     /* Division LUT */
155     uint32_t qmagic_lut[116][2];
156 
157     int num_x; /* #slices horizontally */
158     int num_y; /* #slices vertically */
159     int prefix_bytes;
160     int size_scaler;
161     int chroma_x_shift;
162     int chroma_y_shift;
163 
164     /* Rate control stuff */
165     int frame_max_bytes;
166     int slice_max_bytes;
167     int slice_min_bytes;
168     int q_ceil;
169     int q_avg;
170 
171     /* Options */
172     double tolerance;
173     int wavelet_idx;
174     int wavelet_depth;
175     int strict_compliance;
176     int slice_height;
177     int slice_width;
178     int interlaced;
179     enum VC2_QM quant_matrix;
180 
181     /* Parse code state */
182     uint32_t next_parse_offset;
183     enum DiracParseCodes last_parse_code;
184 } VC2EncContext;
185 
186 static av_always_inline void put_vc2_ue_uint(PutBitContext *pb, uint32_t val)
187 {
188  int i;
189  int pbits = 0, bits = 0, topbit = 1, maxval = 1;
190 
191  if (!val++) {
192  put_bits(pb, 1, 1);
193  return;
194  }
195 
196  while (val > maxval) {
197  topbit <<= 1;
198  maxval <<= 1;
199  maxval |= 1;
200  }
201 
202  bits = ff_log2(topbit);
203 
204  for (i = 0; i < bits; i++) {
205  topbit >>= 1;
206  pbits <<= 2;
207  if (val & topbit)
208  pbits |= 0x1;
209  }
210 
211  put_bits(pb, bits*2 + 1, (pbits << 1) | 1);
212 }
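/* Worked illustration of the interleaved exp-Golomb coding above (example
 * only): put_vc2_ue_uint() codes val+1 by emitting a '0' follow bit before
 * every data bit below the top bit of val+1 and terminating with a '1':
 *     val = 0 -> val+1 =   1b -> "1"
 *     val = 1 -> val+1 =  10b -> "001"
 *     val = 2 -> val+1 =  11b -> "011"
 *     val = 3 -> val+1 = 100b -> "00001"
 * which is exactly the (pbits << 1) | 1 pattern assembled in the loop. */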
213 
214 static av_always_inline int count_vc2_ue_uint(uint32_t val)
215 {
216  int topbit = 1, maxval = 1;
217 
218  if (!val++)
219  return 1;
220 
221  while (val > maxval) {
222  topbit <<= 1;
223  maxval <<= 1;
224  maxval |= 1;
225  }
226 
227  return ff_log2(topbit)*2 + 1;
228 }
229 
230 /* VC-2 10.4 - parse_info() */
231 static void encode_parse_info(VC2EncContext *s, enum DiracParseCodes pcode)
232 {
233  uint32_t cur_pos, dist;
234 
235  align_put_bits(&s->pb);
236 
237  cur_pos = put_bytes_count(&s->pb, 0);
238 
239  /* Magic string */
240  ff_put_string(&s->pb, "BBCD", 0);
241 
242  /* Parse code */
243  put_bits(&s->pb, 8, pcode);
244 
245  /* Next parse offset */
246  dist = cur_pos - s->next_parse_offset;
247  AV_WB32(s->pb.buf + s->next_parse_offset + 5, dist);
248  s->next_parse_offset = cur_pos;
249  put_bits32(&s->pb, pcode == DIRAC_PCODE_END_SEQ ? 13 : 0);
250 
251  /* Last parse offset */
252  put_bits32(&s->pb, s->last_parse_code == DIRAC_PCODE_END_SEQ ? 13 : dist);
253 
254  s->last_parse_code = pcode;
255 }
256 
257 /* VC-2 11.1 - parse_parameters()
258  * The level dictates what the decoder should expect in terms of resolution
259  * and allows it to quickly reject whatever it can't support. Remember,
260  * this codec kinda targets cheapo FPGAs without much memory. Unfortunately
261  * it also limits us greatly in our choice of formats, hence the flag to disable
262  * strict_compliance */
263 static void encode_parse_params(VC2EncContext *s)
264 {
265  put_vc2_ue_uint(&s->pb, s->ver.major); /* VC-2 demands this to be 2 */
266  put_vc2_ue_uint(&s->pb, s->ver.minor); /* ^^ and this to be 0 */
267  put_vc2_ue_uint(&s->pb, s->profile); /* 3 to signal HQ profile */
268  put_vc2_ue_uint(&s->pb, s->level); /* 3 - 1080/720, 6 - 4K */
269 }
270 
271 /* VC-2 11.3 - frame_size() */
272 static void encode_frame_size(VC2EncContext *s)
273 {
274  put_bits(&s->pb, 1, !s->strict_compliance);
275  if (!s->strict_compliance) {
276  AVCodecContext *avctx = s->avctx;
277  put_vc2_ue_uint(&s->pb, avctx->width);
278  put_vc2_ue_uint(&s->pb, avctx->height);
279  }
280 }
281 
282 /* VC-2 11.3.3 - color_diff_sampling_format() */
284 {
285  put_bits(&s->pb, 1, !s->strict_compliance);
286  if (!s->strict_compliance) {
287  int idx;
288  if (s->chroma_x_shift == 1 && s->chroma_y_shift == 0)
289  idx = 1; /* 422 */
290  else if (s->chroma_x_shift == 1 && s->chroma_y_shift == 1)
291  idx = 2; /* 420 */
292  else
293  idx = 0; /* 444 */
294  put_vc2_ue_uint(&s->pb, idx);
295  }
296 }
297 
298 /* VC-2 11.3.4 - scan_format() */
299 static void encode_scan_format(VC2EncContext *s)
300 {
301  put_bits(&s->pb, 1, !s->strict_compliance);
302  if (!s->strict_compliance)
303  put_vc2_ue_uint(&s->pb, s->interlaced);
304 }
305 
306 /* VC-2 11.3.5 - frame_rate() */
307 static void encode_frame_rate(VC2EncContext *s)
308 {
309  put_bits(&s->pb, 1, !s->strict_compliance);
310  if (!s->strict_compliance) {
311  AVCodecContext *avctx = s->avctx;
312  put_vc2_ue_uint(&s->pb, 0);
313  put_vc2_ue_uint(&s->pb, avctx->time_base.den);
314  put_vc2_ue_uint(&s->pb, avctx->time_base.num);
315  }
316 }
317 
318 /* VC-2 11.3.6 - aspect_ratio() */
319 static void encode_aspect_ratio(VC2EncContext *s)
320 {
321  put_bits(&s->pb, 1, !s->strict_compliance);
322  if (!s->strict_compliance) {
323  AVCodecContext *avctx = s->avctx;
324     put_vc2_ue_uint(&s->pb, 0);
325     put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.num);
326     put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.den);
327     }
328 }
329 
330 /* VC-2 11.3.7 - clean_area() */
331 static void encode_clean_area(VC2EncContext *s)
332 {
333  put_bits(&s->pb, 1, 0);
334 }
335 
336 /* VC-2 11.3.8 - signal_range() */
338 {
339  put_bits(&s->pb, 1, !s->strict_compliance);
340  if (!s->strict_compliance)
341  put_vc2_ue_uint(&s->pb, s->bpp_idx);
342 }
343 
344 /* VC-2 11.3.9 - color_spec() */
345 static void encode_color_spec(VC2EncContext *s)
346 {
347  AVCodecContext *avctx = s->avctx;
348  put_bits(&s->pb, 1, !s->strict_compliance);
349  if (!s->strict_compliance) {
350  int val;
351  put_vc2_ue_uint(&s->pb, 0);
352 
353  /* primaries */
354  put_bits(&s->pb, 1, 1);
355  if (avctx->color_primaries == AVCOL_PRI_BT470BG)
356  val = 2;
357  else if (avctx->color_primaries == AVCOL_PRI_SMPTE170M)
358  val = 1;
359  else if (avctx->color_primaries == AVCOL_PRI_SMPTE240M)
360  val = 1;
361  else
362  val = 0;
363  put_vc2_ue_uint(&s->pb, val);
364 
365  /* color matrix */
366  put_bits(&s->pb, 1, 1);
367  if (avctx->colorspace == AVCOL_SPC_RGB)
368  val = 3;
369  else if (avctx->colorspace == AVCOL_SPC_YCOCG)
370  val = 2;
371  else if (avctx->colorspace == AVCOL_SPC_BT470BG)
372  val = 1;
373  else
374  val = 0;
375  put_vc2_ue_uint(&s->pb, val);
376 
377  /* transfer function */
378  put_bits(&s->pb, 1, 1);
379  if (avctx->color_trc == AVCOL_TRC_LINEAR)
380  val = 2;
381  else if (avctx->color_trc == AVCOL_TRC_BT1361_ECG)
382  val = 1;
383  else
384  val = 0;
385  put_vc2_ue_uint(&s->pb, val);
386  }
387 }
388 
389 /* VC-2 11.3 - source_parameters() */
390 static void encode_source_params(VC2EncContext *s)
391 {
400 }
401 
402 /* VC-2 11 - sequence_header() */
404 {
405  align_put_bits(&s->pb);
407  put_vc2_ue_uint(&s->pb, s->base_vf);
409  put_vc2_ue_uint(&s->pb, s->interlaced); /* Frames or fields coding */
410 }
411 
412 /* VC-2 12.1 - picture_header() */
413 static void encode_picture_header(VC2EncContext *s)
414 {
415  align_put_bits(&s->pb);
416  put_bits32(&s->pb, s->picture_number++);
417 }
418 
419 /* VC-2 12.3.4.1 - slice_parameters() */
420 static void encode_slice_params(VC2EncContext *s)
421 {
422  put_vc2_ue_uint(&s->pb, s->num_x);
423  put_vc2_ue_uint(&s->pb, s->num_y);
424  put_vc2_ue_uint(&s->pb, s->prefix_bytes);
425  put_vc2_ue_uint(&s->pb, s->size_scaler);
426 }
427 
428 /* 1st idx = LL, second - vertical, third - horizontal, fourth - total */
429 static const uint8_t vc2_qm_col_tab[][4] = {
430  {20, 9, 15, 4},
431  { 0, 6, 6, 4},
432  { 0, 3, 3, 5},
433  { 0, 3, 5, 1},
434  { 0, 11, 10, 11}
435 };
436 
437 static const uint8_t vc2_qm_flat_tab[][4] = {
438  { 0, 0, 0, 0},
439  { 0, 0, 0, 0},
440  { 0, 0, 0, 0},
441  { 0, 0, 0, 0},
442  { 0, 0, 0, 0}
443 };
444 
445 static void init_quant_matrix(VC2EncContext *s)
446 {
447  int level, orientation;
448 
449  if (s->wavelet_depth <= 4 && s->quant_matrix == VC2_QM_DEF) {
450  s->custom_quant_matrix = 0;
451  for (level = 0; level < s->wavelet_depth; level++) {
452  s->quant[level][0] = ff_dirac_default_qmat[s->wavelet_idx][level][0];
453  s->quant[level][1] = ff_dirac_default_qmat[s->wavelet_idx][level][1];
454  s->quant[level][2] = ff_dirac_default_qmat[s->wavelet_idx][level][2];
455  s->quant[level][3] = ff_dirac_default_qmat[s->wavelet_idx][level][3];
456  }
457  return;
458  }
459 
460  s->custom_quant_matrix = 1;
461 
462  if (s->quant_matrix == VC2_QM_DEF) {
463  for (level = 0; level < s->wavelet_depth; level++) {
464  for (orientation = 0; orientation < 4; orientation++) {
465  if (level <= 3)
466  s->quant[level][orientation] = ff_dirac_default_qmat[s->wavelet_idx][level][orientation];
467  else
468  s->quant[level][orientation] = vc2_qm_col_tab[level][orientation];
469  }
470  }
471  } else if (s->quant_matrix == VC2_QM_COL) {
472  for (level = 0; level < s->wavelet_depth; level++) {
473  for (orientation = 0; orientation < 4; orientation++) {
474  s->quant[level][orientation] = vc2_qm_col_tab[level][orientation];
475  }
476  }
477  } else {
478  for (level = 0; level < s->wavelet_depth; level++) {
479  for (orientation = 0; orientation < 4; orientation++) {
480  s->quant[level][orientation] = vc2_qm_flat_tab[level][orientation];
481  }
482  }
483  }
484 }
485 
486 /* VC-2 12.3.4.2 - quant_matrix() */
487 static void encode_quant_matrix(VC2EncContext *s)
488 {
489  int level;
490  put_bits(&s->pb, 1, s->custom_quant_matrix);
491  if (s->custom_quant_matrix) {
492  put_vc2_ue_uint(&s->pb, s->quant[0][0]);
493  for (level = 0; level < s->wavelet_depth; level++) {
494  put_vc2_ue_uint(&s->pb, s->quant[level][1]);
495  put_vc2_ue_uint(&s->pb, s->quant[level][2]);
496  put_vc2_ue_uint(&s->pb, s->quant[level][3]);
497  }
498  }
499 }
500 
501 /* VC-2 12.3 - transform_parameters() */
502 static void encode_transform_params(VC2EncContext *s)
503 {
504  put_vc2_ue_uint(&s->pb, s->wavelet_idx);
505  put_vc2_ue_uint(&s->pb, s->wavelet_depth);
506 
509 }
510 
511 /* VC-2 12.2 - wavelet_transform() */
512 static void encode_wavelet_transform(VC2EncContext *s)
513 {
515  align_put_bits(&s->pb);
516 }
517 
518 /* VC-2 12 - picture_parse() */
519 static void encode_picture_start(VC2EncContext *s)
520 {
521     align_put_bits(&s->pb);
522     encode_picture_header(s);
523     align_put_bits(&s->pb);
524     encode_wavelet_transform(s);
525 }
526 
527 #define QUANT(c, mul, add, shift) (((mul) * (c) + (add)) >> (shift))
528 
529 /* VC-2 13.5.5.2 - slice_band() */
530 static void encode_subband(VC2EncContext *s, PutBitContext *pb, int sx, int sy,
531  SubBand *b, int quant)
532 {
533  int x, y;
534 
535  const int left = b->width * (sx+0) / s->num_x;
536  const int right = b->width * (sx+1) / s->num_x;
537  const int top = b->height * (sy+0) / s->num_y;
538  const int bottom = b->height * (sy+1) / s->num_y;
539 
540  dwtcoef *coeff = b->buf + top * b->stride;
541  const uint64_t q_m = ((uint64_t)(s->qmagic_lut[quant][0])) << 2;
542  const uint64_t q_a = s->qmagic_lut[quant][1];
543  const int q_s = av_log2(ff_dirac_qscale_tab[quant]) + 32;
544 
545  for (y = top; y < bottom; y++) {
546  for (x = left; x < right; x++) {
547  uint32_t c_abs = QUANT(FFABS(coeff[x]), q_m, q_a, q_s);
548  put_vc2_ue_uint(pb, c_abs);
549  if (c_abs)
550  put_bits(pb, 1, coeff[x] < 0);
551  }
552  coeff += b->stride;
553  }
554 }
555 
556 static int count_hq_slice(SliceArgs *slice, int quant_idx)
557 {
558  int x, y;
559  uint8_t quants[MAX_DWT_LEVELS][4];
560  int bits = 0, p, level, orientation;
561  VC2EncContext *s = slice->ctx;
562 
563  if (slice->cache[quant_idx])
564  return slice->cache[quant_idx];
565 
566  bits += 8*s->prefix_bytes;
567  bits += 8; /* quant_idx */
568 
569  for (level = 0; level < s->wavelet_depth; level++)
570  for (orientation = !!level; orientation < 4; orientation++)
571  quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0);
572 
573  for (p = 0; p < 3; p++) {
574  int bytes_start, bytes_len, pad_s, pad_c;
575  bytes_start = bits >> 3;
576  bits += 8;
577  for (level = 0; level < s->wavelet_depth; level++) {
578  for (orientation = !!level; orientation < 4; orientation++) {
579  SubBand *b = &s->plane[p].band[level][orientation];
580 
581  const int q_idx = quants[level][orientation];
582  const uint64_t q_m = ((uint64_t)s->qmagic_lut[q_idx][0]) << 2;
583  const uint64_t q_a = s->qmagic_lut[q_idx][1];
584  const int q_s = av_log2(ff_dirac_qscale_tab[q_idx]) + 32;
585 
586  const int left = b->width * slice->x / s->num_x;
587  const int right = b->width *(slice->x+1) / s->num_x;
588  const int top = b->height * slice->y / s->num_y;
589  const int bottom = b->height *(slice->y+1) / s->num_y;
590 
591  dwtcoef *buf = b->buf + top * b->stride;
592 
593  for (y = top; y < bottom; y++) {
594  for (x = left; x < right; x++) {
595  uint32_t c_abs = QUANT(FFABS(buf[x]), q_m, q_a, q_s);
596  bits += count_vc2_ue_uint(c_abs);
597  bits += !!c_abs;
598  }
599  buf += b->stride;
600  }
601  }
602  }
603  bits += FFALIGN(bits, 8) - bits;
604  bytes_len = (bits >> 3) - bytes_start - 1;
605  pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler;
606  pad_c = (pad_s*s->size_scaler) - bytes_len;
607  bits += pad_c*8;
608  }
609 
610  slice->cache[quant_idx] = bits;
611 
612  return bits;
613 }
614 
615 /* Approaches the best possible quantizer asymptotically; it's kinda exhaustive
616  * but we have a LUT to get the coefficient size in bits. Guaranteed to never
617  * overshoot, which is apparently very important when streaming */
618 static int rate_control(AVCodecContext *avctx, void *arg)
619 {
620  SliceArgs *slice_dat = arg;
621  VC2EncContext *s = slice_dat->ctx;
622  const int top = slice_dat->bits_ceil;
623  const int bottom = slice_dat->bits_floor;
624  int quant_buf[2] = {-1, -1};
625  int quant = slice_dat->quant_idx, step = 1;
626  int bits_last, bits = count_hq_slice(slice_dat, quant);
627  while ((bits > top) || (bits < bottom)) {
628  const int signed_step = bits > top ? +step : -step;
629  quant = av_clip(quant + signed_step, 0, s->q_ceil-1);
630  bits = count_hq_slice(slice_dat, quant);
631  if (quant_buf[1] == quant) {
632  quant = FFMAX(quant_buf[0], quant);
633  bits = quant == quant_buf[0] ? bits_last : bits;
634  break;
635  }
636  step = av_clip(step/2, 1, (s->q_ceil-1)/2);
637  quant_buf[1] = quant_buf[0];
638  quant_buf[0] = quant;
639  bits_last = bits;
640  }
641  slice_dat->quant_idx = av_clip(quant, 0, s->q_ceil-1);
642  slice_dat->bytes = SSIZE_ROUND(bits >> 3);
643  return 0;
644 }
645 
647 {
648  int i, j, slice_x, slice_y, bytes_left = 0;
649  int bytes_top[SLICE_REDIST_TOTAL] = {0};
650  int64_t total_bytes_needed = 0;
651  int slice_redist_range = FFMIN(SLICE_REDIST_TOTAL, s->num_x*s->num_y);
652  SliceArgs *enc_args = s->slice_args;
653  SliceArgs *top_loc[SLICE_REDIST_TOTAL] = {NULL};
654 
656 
657  for (slice_y = 0; slice_y < s->num_y; slice_y++) {
658  for (slice_x = 0; slice_x < s->num_x; slice_x++) {
659  SliceArgs *args = &enc_args[s->num_x*slice_y + slice_x];
660  args->ctx = s;
661  args->x = slice_x;
662  args->y = slice_y;
663  args->bits_ceil = s->slice_max_bytes << 3;
664  args->bits_floor = s->slice_min_bytes << 3;
665  memset(args->cache, 0, s->q_ceil*sizeof(*args->cache));
666  }
667  }
668 
669  /* First pass - determine baseline slice sizes w.r.t. max_slice_size */
670  s->avctx->execute(s->avctx, rate_control, enc_args, NULL, s->num_x*s->num_y,
671  sizeof(SliceArgs));
672 
673  for (i = 0; i < s->num_x*s->num_y; i++) {
674  SliceArgs *args = &enc_args[i];
675  bytes_left += args->bytes;
676  for (j = 0; j < slice_redist_range; j++) {
677  if (args->bytes > bytes_top[j]) {
678  bytes_top[j] = args->bytes;
679  top_loc[j] = args;
680  break;
681  }
682  }
683  }
684 
685  bytes_left = s->frame_max_bytes - bytes_left;
686 
687  /* Second pass - distribute leftover bytes */
688  while (bytes_left > 0) {
689  int distributed = 0;
690  for (i = 0; i < slice_redist_range; i++) {
691  SliceArgs *args;
692  int bits, bytes, diff, prev_bytes, new_idx;
693  if (bytes_left <= 0)
694  break;
695  if (!top_loc[i] || !top_loc[i]->quant_idx)
696  break;
697  args = top_loc[i];
698  prev_bytes = args->bytes;
699  new_idx = FFMAX(args->quant_idx - 1, 0);
700  bits = count_hq_slice(args, new_idx);
701  bytes = SSIZE_ROUND(bits >> 3);
702  diff = bytes - prev_bytes;
703  if ((bytes_left - diff) > 0) {
704  args->quant_idx = new_idx;
705  args->bytes = bytes;
706  bytes_left -= diff;
707  distributed++;
708  }
709  }
710  if (!distributed)
711  break;
712  }
713 
714  for (i = 0; i < s->num_x*s->num_y; i++) {
715  SliceArgs *args = &enc_args[i];
716  total_bytes_needed += args->bytes;
717  s->q_avg = (s->q_avg + args->quant_idx)/2;
718  }
719 
720  return total_bytes_needed;
721 }
722 
723 /* VC-2 13.5.3 - hq_slice */
724 static int encode_hq_slice(AVCodecContext *avctx, void *arg)
725 {
726  SliceArgs *slice_dat = arg;
727  VC2EncContext *s = slice_dat->ctx;
728  PutBitContext *pb = &slice_dat->pb;
729  const int slice_x = slice_dat->x;
730  const int slice_y = slice_dat->y;
731  const int quant_idx = slice_dat->quant_idx;
732  const int slice_bytes_max = slice_dat->bytes;
733  uint8_t quants[MAX_DWT_LEVELS][4];
734  int p, level, orientation;
735 
736  /* The reference decoder ignores it, and its typical length is 0 */
737  memset(put_bits_ptr(pb), 0, s->prefix_bytes);
738  skip_put_bytes(pb, s->prefix_bytes);
739 
740  put_bits(pb, 8, quant_idx);
741 
742  /* Slice quantization (slice_quantizers() in the specs) */
743  for (level = 0; level < s->wavelet_depth; level++)
744  for (orientation = !!level; orientation < 4; orientation++)
745  quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0);
746 
747  /* Luma + 2 Chroma planes */
748  for (p = 0; p < 3; p++) {
749  int bytes_start, bytes_len, pad_s, pad_c;
750  bytes_start = put_bytes_count(pb, 0);
751  put_bits(pb, 8, 0);
752  for (level = 0; level < s->wavelet_depth; level++) {
753  for (orientation = !!level; orientation < 4; orientation++) {
754  encode_subband(s, pb, slice_x, slice_y,
755  &s->plane[p].band[level][orientation],
756  quants[level][orientation]);
757  }
758  }
759  flush_put_bits(pb);
760  bytes_len = put_bytes_output(pb) - bytes_start - 1;
761  if (p == 2) {
762  int len_diff = slice_bytes_max - put_bytes_output(pb);
763  pad_s = FFALIGN((bytes_len + len_diff), s->size_scaler)/s->size_scaler;
764  pad_c = (pad_s*s->size_scaler) - bytes_len;
765  } else {
766  pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler;
767  pad_c = (pad_s*s->size_scaler) - bytes_len;
768  }
769  pb->buf[bytes_start] = pad_s;
770     /* vc2-reference uses padding that decodes to '0' coeffs */
771  memset(put_bits_ptr(pb), 0xFF, pad_c);
772  skip_put_bytes(pb, pad_c);
773  }
774 
775  return 0;
776 }
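/* Resulting HQ slice byte layout, as assembled above (illustration only):
 *   [prefix_bytes][1 byte quant_idx]
 *   then for each of Y, Cb, Cr:
 *   [1 byte length in size_scaler units][coefficient data + 0xFF padding]
 * which is also the per-slice overhead SSIZE_ROUND() accounts for. */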
777 
778 /* VC-2 13.5.1 - low_delay_transform_data() */
779 static int encode_slices(VC2EncContext *s)
780 {
781  uint8_t *buf;
782  int slice_x, slice_y, skip = 0;
783  SliceArgs *enc_args = s->slice_args;
784 
785  flush_put_bits(&s->pb);
786  buf = put_bits_ptr(&s->pb);
787 
788  for (slice_y = 0; slice_y < s->num_y; slice_y++) {
789  for (slice_x = 0; slice_x < s->num_x; slice_x++) {
790  SliceArgs *args = &enc_args[s->num_x*slice_y + slice_x];
791  init_put_bits(&args->pb, buf + skip, args->bytes+s->prefix_bytes);
792  skip += args->bytes;
793  }
794  }
795 
796  s->avctx->execute(s->avctx, encode_hq_slice, enc_args, NULL, s->num_x*s->num_y,
797  sizeof(SliceArgs));
798 
799  skip_put_bytes(&s->pb, skip);
800 
801  return 0;
802 }
803 
804 /*
805  * Transform basics for a 3 level transform
806  * |---------------------------------------------------------------------|
807  * |  LL-0  |  HL-0  |                 |                                 |
808  * |--------|--------|      HL-1       |                                 |
809  * |  LH-0  |  HH-0  |                 |                                 |
810  * |-----------------|-----------------|              HL-2               |
811  * |                 |                 |                                 |
812  * |      LH-1       |      HH-1       |                                 |
813  * |                 |                 |                                 |
814  * |-----------------------------------|---------------------------------|
815  * |                                   |                                 |
816  * |                                   |                                 |
817  * |                                   |                                 |
818  * |               LH-2                |              HH-2               |
819  * |                                   |                                 |
820  * |                                   |                                 |
821  * |                                   |                                 |
822  * |---------------------------------------------------------------------|
823  *
824  * DWT transforms are generally applied by splitting the image in two vertically
825  * and applying a low pass transform on the left part and a corresponding high
826  * pass transform on the right hand side. This is known as the horizontal filter
827  * stage.
828  * After that, the same operation is performed except the image is divided
829  * horizontally, with the high pass on the lower and the low pass on the higher
830  * side.
831  * Therefore, you're left with 4 subdivisions - known as low-low, low-high,
832  * high-low and high-high. They're referred to as orientations in the decoder
833  * and encoder.
834  *
835  * The LL (low-low) area contains the original image downsampled by the amount
836  * of levels. The rest of the areas can be thought as the details needed
837  * to restore the image perfectly to its original size.
838  */
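/* A small worked example of the layout above (illustrative, assuming a
 * 1920x1080 luma plane, which is already a multiple of 2^3, and
 * wavelet_depth = 3, following how vc2_encode_init() sizes the bands below):
 * the band[2] subbands are 960x540, band[1] 480x270 and band[0] (which
 * includes LL-0) 240x135, so LL-0 is the original image downsampled by
 * 2^3 = 8 in each dimension. */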
839 static int dwt_plane(AVCodecContext *avctx, void *arg)
840 {
841  TransformArgs *transform_dat = arg;
842  VC2EncContext *s = transform_dat->ctx;
843  const void *frame_data = transform_dat->idata;
844  const ptrdiff_t linesize = transform_dat->istride;
845  const int field = transform_dat->field;
846  const Plane *p = transform_dat->plane;
847  VC2TransformContext *t = &transform_dat->t;
848  dwtcoef *buf = p->coef_buf;
849  const int idx = s->wavelet_idx;
850  const int skip = 1 + s->interlaced;
851 
852  int x, y, level, offset;
853  ptrdiff_t pix_stride = linesize >> (s->bpp - 1);
854 
855  if (field == 1) {
856  offset = 0;
857  pix_stride <<= 1;
858  } else if (field == 2) {
859  offset = pix_stride;
860  pix_stride <<= 1;
861  } else {
862  offset = 0;
863  }
864 
865  if (s->bpp == 1) {
866  const uint8_t *pix = (const uint8_t *)frame_data + offset;
867  for (y = 0; y < p->height*skip; y+=skip) {
868  for (x = 0; x < p->width; x++) {
869  buf[x] = pix[x] - s->diff_offset;
870  }
871  memset(&buf[x], 0, (p->coef_stride - p->width)*sizeof(dwtcoef));
872  buf += p->coef_stride;
873  pix += pix_stride;
874  }
875  } else {
876  const uint16_t *pix = (const uint16_t *)frame_data + offset;
877  for (y = 0; y < p->height*skip; y+=skip) {
878  for (x = 0; x < p->width; x++) {
879  buf[x] = pix[x] - s->diff_offset;
880  }
881  memset(&buf[x], 0, (p->coef_stride - p->width)*sizeof(dwtcoef));
882  buf += p->coef_stride;
883  pix += pix_stride;
884  }
885  }
886 
887  memset(buf, 0, p->coef_stride * (p->dwt_height - p->height) * sizeof(dwtcoef));
888 
889  for (level = s->wavelet_depth-1; level >= 0; level--) {
890  const SubBand *b = &p->band[level][0];
891  t->vc2_subband_dwt[idx](t, p->coef_buf, p->coef_stride,
892  b->width, b->height);
893  }
894 
895  return 0;
896 }
897 
898 static int encode_frame(VC2EncContext *s, AVPacket *avpkt, const AVFrame *frame,
899  const char *aux_data, const int header_size, int field)
900 {
901  int i, ret;
902  int64_t max_frame_bytes;
903 
904  /* Threaded DWT transform */
905  for (i = 0; i < 3; i++) {
906  s->transform_args[i].ctx = s;
907  s->transform_args[i].field = field;
908  s->transform_args[i].plane = &s->plane[i];
909  s->transform_args[i].idata = frame->data[i];
910  s->transform_args[i].istride = frame->linesize[i];
911  }
912  s->avctx->execute(s->avctx, dwt_plane, s->transform_args, NULL, 3,
913  sizeof(TransformArgs));
914 
915  /* Calculate per-slice quantizers and sizes */
916  max_frame_bytes = header_size + calc_slice_sizes(s);
917 
918  if (field < 2) {
919  ret = ff_get_encode_buffer(s->avctx, avpkt,
920  max_frame_bytes << s->interlaced, 0);
921  if (ret) {
922  av_log(s->avctx, AV_LOG_ERROR, "Error getting output packet.\n");
923  return ret;
924  }
925  init_put_bits(&s->pb, avpkt->data, avpkt->size);
926  }
927 
928  /* Sequence header */
931 
932  /* Encoder version */
933     if (aux_data) {
934         encode_parse_info(s, DIRAC_PCODE_AUX);
935         ff_put_string(&s->pb, aux_data, 1);
936     }
937 
938  /* Picture header */
941 
942  /* Encode slices */
943  encode_slices(s);
944 
945     /* End sequence */
946     encode_parse_info(s, DIRAC_PCODE_END_SEQ);
947 
948  return 0;
949 }
950 
951 static av_cold int vc2_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
952                                     const AVFrame *frame, int *got_packet)
953 {
954  int ret = 0;
955  int slice_ceil, sig_size = 256;
956  VC2EncContext *s = avctx->priv_data;
957  const int bitexact = avctx->flags & AV_CODEC_FLAG_BITEXACT;
958  const char *aux_data = bitexact ? "Lavc" : LIBAVCODEC_IDENT;
959  const int aux_data_size = bitexact ? sizeof("Lavc") : sizeof(LIBAVCODEC_IDENT);
960  const int header_size = 100 + aux_data_size;
961  int64_t r_bitrate = avctx->bit_rate >> (s->interlaced);
962 
963  s->avctx = avctx;
964  s->size_scaler = 2;
965  s->prefix_bytes = 0;
966  s->last_parse_code = 0;
967  s->next_parse_offset = 0;
968 
969  /* Rate control */
970  s->frame_max_bytes = (av_rescale(r_bitrate, s->avctx->time_base.num,
971  s->avctx->time_base.den) >> 3) - header_size;
972  s->slice_max_bytes = slice_ceil = av_rescale(s->frame_max_bytes, 1, s->num_x*s->num_y);
973 
974  /* Find an appropriate size scaler */
975  while (sig_size > 255) {
976  int r_size = SSIZE_ROUND(s->slice_max_bytes);
977  if (r_size > slice_ceil) {
978  s->slice_max_bytes -= r_size - slice_ceil;
979  r_size = SSIZE_ROUND(s->slice_max_bytes);
980  }
981     sig_size = r_size/s->size_scaler; /* Signalled slice size */
982  s->size_scaler <<= 1;
983  }
984 
985  s->slice_min_bytes = s->slice_max_bytes - s->slice_max_bytes*(s->tolerance/100.0f);
986  if (s->slice_min_bytes < 0)
987  return AVERROR(EINVAL);
988 
989  ret = encode_frame(s, avpkt, frame, aux_data, header_size, s->interlaced);
990  if (ret)
991  return ret;
992  if (s->interlaced) {
993  ret = encode_frame(s, avpkt, frame, aux_data, header_size, 2);
994  if (ret)
995  return ret;
996  }
997 
998  flush_put_bits(&s->pb);
999  av_shrink_packet(avpkt, put_bytes_output(&s->pb));
1000 
1001  *got_packet = 1;
1002 
1003  return 0;
1004 }
1005 
1006 static av_cold int vc2_encode_end(AVCodecContext *avctx)
1007 {
1008  int i;
1009  VC2EncContext *s = avctx->priv_data;
1010 
1011  av_log(avctx, AV_LOG_INFO, "Qavg: %i\n", s->q_avg);
1012 
1013  for (i = 0; i < 3; i++) {
1014  ff_vc2enc_free_transforms(&s->transform_args[i].t);
1015  av_freep(&s->plane[i].coef_buf);
1016  }
1017 
1018  av_freep(&s->slice_args);
1019 
1020  return 0;
1021 }
1022 
1023 static av_cold int vc2_encode_init(AVCodecContext *avctx)
1024 {
1025  Plane *p;
1026  SubBand *b;
1027  int i, level, o, shift, ret;
1028  const AVPixFmtDescriptor *fmt = av_pix_fmt_desc_get(avctx->pix_fmt);
1029  const int depth = fmt->comp[0].depth;
1030  VC2EncContext *s = avctx->priv_data;
1031 
1032  s->picture_number = 0;
1033 
1034  /* Total allowed quantization range */
1035  s->q_ceil = DIRAC_MAX_QUANT_INDEX;
1036 
1037  s->ver.major = 2;
1038  s->ver.minor = 0;
1039  s->profile = 3;
1040  s->level = 3;
1041 
1042  s->base_vf = -1;
1043  s->strict_compliance = 1;
1044 
1045  s->q_avg = 0;
1046  s->slice_max_bytes = 0;
1047  s->slice_min_bytes = 0;
1048 
1049  /* Mark unknown as progressive */
1050  s->interlaced = !((avctx->field_order == AV_FIELD_UNKNOWN) ||
1051  (avctx->field_order == AV_FIELD_PROGRESSIVE));
1052 
1053  for (i = 0; i < base_video_fmts_len; i++) {
1054  const VC2BaseVideoFormat *fmt = &base_video_fmts[i];
1055  if (avctx->pix_fmt != fmt->pix_fmt)
1056  continue;
1057  if (avctx->time_base.num != fmt->time_base.num)
1058  continue;
1059  if (avctx->time_base.den != fmt->time_base.den)
1060  continue;
1061  if (avctx->width != fmt->width)
1062  continue;
1063  if (avctx->height != fmt->height)
1064  continue;
1065  if (s->interlaced != fmt->interlaced)
1066  continue;
1067  s->base_vf = i;
1068  s->level = base_video_fmts[i].level;
1069  break;
1070  }
1071 
1072  if (s->interlaced)
1073  av_log(avctx, AV_LOG_WARNING, "Interlacing enabled!\n");
1074 
1075  if ((s->slice_width & (s->slice_width - 1)) ||
1076  (s->slice_height & (s->slice_height - 1))) {
1077  av_log(avctx, AV_LOG_ERROR, "Slice size is not a power of two!\n");
1078  return AVERROR_UNKNOWN;
1079  }
1080 
1081  if ((s->slice_width > avctx->width) ||
1082  (s->slice_height > avctx->height)) {
1083  av_log(avctx, AV_LOG_ERROR, "Slice size is bigger than the image!\n");
1084  return AVERROR_UNKNOWN;
1085  }
1086 
1087     if (s->base_vf <= 0) {
1088         if (avctx->strict_std_compliance < FF_COMPLIANCE_STRICT) {
1089             s->strict_compliance = s->base_vf = 0;
1090  av_log(avctx, AV_LOG_WARNING, "Format does not strictly comply with VC2 specs\n");
1091  } else {
1092  av_log(avctx, AV_LOG_WARNING, "Given format does not strictly comply with "
1093  "the specifications, decrease strictness to use it.\n");
1094  return AVERROR_UNKNOWN;
1095  }
1096  } else {
1097  av_log(avctx, AV_LOG_INFO, "Selected base video format = %i (%s)\n",
1098  s->base_vf, base_video_fmts[s->base_vf].name);
1099  }
1100 
1101  /* Chroma subsampling */
1102  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
1103  if (ret)
1104  return ret;
1105 
1106  /* Bit depth and color range index */
1107  if (depth == 8 && avctx->color_range == AVCOL_RANGE_JPEG) {
1108  s->bpp = 1;
1109  s->bpp_idx = 1;
1110  s->diff_offset = 128;
1111  } else if (depth == 8 && (avctx->color_range == AVCOL_RANGE_MPEG ||
1112  avctx->color_range == AVCOL_RANGE_UNSPECIFIED)) {
1113  s->bpp = 1;
1114  s->bpp_idx = 2;
1115  s->diff_offset = 128;
1116  } else if (depth == 10) {
1117  s->bpp = 2;
1118  s->bpp_idx = 3;
1119  s->diff_offset = 512;
1120  } else {
1121  s->bpp = 2;
1122  s->bpp_idx = 4;
1123  s->diff_offset = 2048;
1124  }
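    /* For reference (an assumption based on the VC-2 preset signal ranges,
     * not spelled out here): bpp_idx 1..4 correspond to the spec's 8-bit full
     * range, 8-bit video range, 10-bit video range and 12-bit video range
     * presets, i.e. the value written by the signal_range() writer above
     * (VC-2 11.3.8) when strict compliance is disabled. */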
1125 
1126  /* Planes initialization */
1127  for (i = 0; i < 3; i++) {
1128  int w, h;
1129  p = &s->plane[i];
1130  p->width = avctx->width >> (i ? s->chroma_x_shift : 0);
1131  p->height = avctx->height >> (i ? s->chroma_y_shift : 0);
1132  if (s->interlaced)
1133  p->height >>= 1;
1134  p->dwt_width = w = FFALIGN(p->width, (1 << s->wavelet_depth));
1135  p->dwt_height = h = FFALIGN(p->height, (1 << s->wavelet_depth));
1136  p->coef_stride = FFALIGN(p->dwt_width, 32);
1137  p->coef_buf = av_mallocz(p->coef_stride*p->dwt_height*sizeof(dwtcoef));
1138  if (!p->coef_buf)
1139  return AVERROR(ENOMEM);
1140  for (level = s->wavelet_depth-1; level >= 0; level--) {
1141  w = w >> 1;
1142  h = h >> 1;
1143  for (o = 0; o < 4; o++) {
1144  b = &p->band[level][o];
1145  b->width = w;
1146  b->height = h;
1147  b->stride = p->coef_stride;
1148  shift = (o > 1)*b->height*b->stride + (o & 1)*b->width;
1149  b->buf = p->coef_buf + shift;
1150  }
1151  }
1152 
1153  /* DWT init */
1154  if (ff_vc2enc_init_transforms(&s->transform_args[i].t,
1155  s->plane[i].coef_stride,
1156  s->plane[i].dwt_height,
1157  s->slice_width, s->slice_height))
1158  return AVERROR(ENOMEM);
1159  }
1160 
1161  /* Slices */
1162  s->num_x = s->plane[0].dwt_width/s->slice_width;
1163  s->num_y = s->plane[0].dwt_height/s->slice_height;
1164 
1165  s->slice_args = av_calloc(s->num_x*s->num_y, sizeof(SliceArgs));
1166  if (!s->slice_args)
1167  return AVERROR(ENOMEM);
1168 
1169  for (i = 0; i < 116; i++) {
1170  const uint64_t qf = ff_dirac_qscale_tab[i];
1171  const uint32_t m = av_log2(qf);
1172  const uint32_t t = (1ULL << (m + 32)) / qf;
1173  const uint32_t r = (t*qf + qf) & UINT32_MAX;
1174  if (!(qf & (qf - 1))) {
1175  s->qmagic_lut[i][0] = 0xFFFFFFFF;
1176  s->qmagic_lut[i][1] = 0xFFFFFFFF;
1177  } else if (r <= 1 << m) {
1178  s->qmagic_lut[i][0] = t + 1;
1179  s->qmagic_lut[i][1] = 0;
1180  } else {
1181  s->qmagic_lut[i][0] = t;
1182  s->qmagic_lut[i][1] = t;
1183  }
1184  }
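    /* Sketch of what the LUT above buys us (illustration only): for a
     * non power-of-two quantisation factor qf, with m = av_log2(qf) and
     * t ~= 2^(m+32)/qf, the QUANT() macro computes
     *     (c*(t << 2) + add) >> (m + 32)  ~=  (c * 4) / qf
     * so encode_subband() and count_hq_slice() divide coefficients by the
     * effective quantisation factor qf/4 using only a multiply, an add and
     * a shift instead of an integer division. */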
1185 
1186  return 0;
1187 }
1188 
1189 #define VC2ENC_FLAGS (AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
1190 static const AVOption vc2enc_options[] = {
1191  {"tolerance", "Max undershoot in percent", offsetof(VC2EncContext, tolerance), AV_OPT_TYPE_DOUBLE, {.dbl = 5.0f}, 0.0f, 45.0f, VC2ENC_FLAGS, .unit = "tolerance"},
1192  {"slice_width", "Slice width", offsetof(VC2EncContext, slice_width), AV_OPT_TYPE_INT, {.i64 = 32}, 32, 1024, VC2ENC_FLAGS, .unit = "slice_width"},
1193  {"slice_height", "Slice height", offsetof(VC2EncContext, slice_height), AV_OPT_TYPE_INT, {.i64 = 16}, 8, 1024, VC2ENC_FLAGS, .unit = "slice_height"},
1194  {"wavelet_depth", "Transform depth", offsetof(VC2EncContext, wavelet_depth), AV_OPT_TYPE_INT, {.i64 = 4}, 1, 5, VC2ENC_FLAGS, .unit = "wavelet_depth"},
1195  {"wavelet_type", "Transform type", offsetof(VC2EncContext, wavelet_idx), AV_OPT_TYPE_INT, {.i64 = VC2_TRANSFORM_9_7}, 0, VC2_TRANSFORMS_NB, VC2ENC_FLAGS, .unit = "wavelet_idx"},
1196  {"9_7", "Deslauriers-Dubuc (9,7)", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_9_7}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "wavelet_idx"},
1197  {"5_3", "LeGall (5,3)", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_5_3}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "wavelet_idx"},
1198  {"haar", "Haar (with shift)", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_HAAR_S}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "wavelet_idx"},
1199  {"haar_noshift", "Haar (without shift)", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_HAAR}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "wavelet_idx"},
1200  {"qm", "Custom quantization matrix", offsetof(VC2EncContext, quant_matrix), AV_OPT_TYPE_INT, {.i64 = VC2_QM_DEF}, 0, VC2_QM_NB, VC2ENC_FLAGS, .unit = "quant_matrix"},
1201  {"default", "Default from the specifications", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_DEF}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "quant_matrix"},
1202  {"color", "Prevents low bitrate discoloration", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_COL}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "quant_matrix"},
1203  {"flat", "Optimize for PSNR", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_FLAT}, INT_MIN, INT_MAX, VC2ENC_FLAGS, .unit = "quant_matrix"},
1204  {NULL}
1205 };
1206 
1207 static const AVClass vc2enc_class = {
1208  .class_name = "SMPTE VC-2 encoder",
1209  .category = AV_CLASS_CATEGORY_ENCODER,
1210  .option = vc2enc_options,
1211  .item_name = av_default_item_name,
1212  .version = LIBAVUTIL_VERSION_INT
1213 };
1214 
1215 static const FFCodecDefault vc2enc_defaults[] = {
1216     { "b", "600000000" },
1217  { NULL },
1218 };
1219 
1220 static const enum AVPixelFormat allowed_pix_fmts[] = {
1225 };
1226 
1227 const FFCodec ff_vc2_encoder = {
1228     .p.name = "vc2",
1229  CODEC_LONG_NAME("SMPTE VC-2"),
1230  .p.type = AVMEDIA_TYPE_VIDEO,
1231  .p.id = AV_CODEC_ID_DIRAC,
1232     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS |
1233                       AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
1234     .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1235  .priv_data_size = sizeof(VC2EncContext),
1236  .init = vc2_encode_init,
1237     .close = vc2_encode_end,
1238     FF_CODEC_ENCODE_CB(vc2_encode_frame),
1239     .p.priv_class = &vc2enc_class,
1240  .defaults = vc2enc_defaults,
1241  .p.pix_fmts = allowed_pix_fmts
1242 };
From the Snow bitstream notes on neighbouring blocks: left and top are set to the respective neighbouring blocks unless they are outside of the image, in which case they are set to the Null block; the scaled motion vectors of those neighbours are then used for motion vector prediction.
Definition: snow.txt:386
DIRAC_PCODE_END_SEQ
@ DIRAC_PCODE_END_SEQ
Definition: dirac.h:63
AVCodecContext
main external API structure.
Definition: avcodec.h:445
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:377
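A hedged sketch of the pattern put_bits_ptr() supports, together with flush_put_bits() and skip_put_bytes() listed elsewhere on this page: flush to a byte boundary, write raw bytes at the returned pointer, then tell the bit writer how many bytes were consumed. The buffer size and values are illustrative only.

#include <stdint.h>
#include "put_bits.h"

/* Illustrative only: mix bit-level and byte-level writes in one PutBitContext. */
static void write_mixed(uint8_t *buf, int buf_size)
{
    PutBitContext pb;
    uint8_t *raw;

    init_put_bits(&pb, buf, buf_size);
    put_bits(&pb, 13, 0x155);     /* some bit-level payload */
    flush_put_bits(&pb);          /* pad to a byte boundary with zero bits */

    raw = put_bits_ptr(&pb);      /* next byte the writer would touch */
    raw[0] = 0x42;                /* write a byte directly */
    skip_put_bytes(&pb, 1);       /* account for the byte written above */
}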
SliceArgs::cache
int cache[DIRAC_MAX_QUANT_INDEX]
Definition: vc2enc.c:108
ff_get_encode_buffer
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
Definition: encode.c:106
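A hedged sketch of how an encoder's encode callback typically asks for its output buffer with ff_get_encode_buffer(); the worst-case size here is a placeholder, not this codec's real bound.

#include "avcodec.h"
#include "encode.h"

/* Illustrative only: request an output packet big enough for the worst case,
 * then write the bitstream for *frame into avpkt->data. */
static int encode_frame_sketch(AVCodecContext *avctx, AVPacket *avpkt,
                               const AVFrame *frame, int *got_packet)
{
    int64_t max_size = 1 << 20; /* placeholder worst-case coded size */
    int ret = ff_get_encode_buffer(avctx, avpkt, max_size, 0);
    if (ret < 0)
        return ret;
    /* ... fill avpkt->data and adjust avpkt->size if less was written ... */
    *got_packet = 1;
    return 0;
}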
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
encode_seq_header
static void encode_seq_header(VC2EncContext *s)
Definition: vc2enc.c:403
skip_put_bytes
static void skip_put_bytes(PutBitContext *s, int n)
Skip the given number of bytes.
Definition: put_bits.h:386
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
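A hedged sketch of how these component descriptors are usually consulted, e.g. to derive bit depth and chroma subsampling from the input pixel format; the log message is illustrative only.

#include "libavutil/log.h"
#include "libavutil/pixdesc.h"

/* Illustrative only: query bit depth and subsampling from the format descriptor. */
static void describe_pix_fmt(enum AVPixelFormat fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
    int depth = desc->comp[0].depth; /* e.g. 10 for yuv422p10 */
    av_log(NULL, AV_LOG_INFO, "depth %d, chroma subsampling %dx%d\n",
           depth, 1 << desc->log2_chroma_w, 1 << desc->log2_chroma_h);
}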
VC2_TRANSFORM_HAAR
@ VC2_TRANSFORM_HAAR
Definition: vc2enc_dwt.h:34
VC2ENC_FLAGS
#define VC2ENC_FLAGS
Definition: vc2enc.c:1189
Plane
Definition: cfhd.h:117
VC2EncContext::strict_compliance
int strict_compliance
Definition: vc2enc.c:175
put_vc2_ue_uint
static av_always_inline void put_vc2_ue_uint(PutBitContext *pb, uint32_t val)
Definition: vc2enc.c:186
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
QUANT
#define QUANT(c, mul, add, shift)
Definition: vc2enc.c:527
Plane::dwt_height
int dwt_height
Definition: vc2enc.c:102
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
count_vc2_ue_uint
static av_always_inline int count_vc2_ue_uint(uint32_t val)
Definition: vc2enc.c:214
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
Plane::band
SubBand band[DWT_LEVELS_3D][4]
Definition: cfhd.h:130
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:342
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
TransformArgs::field
int field
Definition: vc2enc.c:123
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
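For reference, a small hedged sketch of the alignment arithmetic FFALIGN() performs; the helper name is illustrative only.

#include "libavutil/macros.h"

/* Illustrative only: FFALIGN(x, a) rounds x up to the next multiple of a
 * (a must be a power of two), e.g. FFALIGN(13, 8) == 16, FFALIGN(16, 8) == 16,
 * FFALIGN(17, 8) == 24. */
static inline int round_up(int bytes, int align)
{
    return FFALIGN(bytes, align);
}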
calc_slice_sizes
static int calc_slice_sizes(VC2EncContext *s)
Definition: vc2enc.c:646
VC2EncContext::base_vf
int base_vf
Definition: vc2enc.c:146
VC2EncContext::slice_height
int slice_height
Definition: vc2enc.c:176
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AVPacket
This structure stores compressed data.
Definition: packet.h:501
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
VC2EncContext::plane
Plane plane[3]
Definition: vc2enc.c:130
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
VC2_QM_DEF
@ VC2_QM_DEF
Definition: vc2enc.c:82
AV_CLASS_CATEGORY_ENCODER
@ AV_CLASS_CATEGORY_ENCODER
Definition: log.h:34
h
h
Definition: vp9dsp_template.c:2038
DIRAC_PCODE_PICTURE_HQ
@ DIRAC_PCODE_PICTURE_HQ
Definition: dirac.h:69
TransformArgs::istride
ptrdiff_t istride
Definition: vc2enc.c:122
put_bits.h
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
SLICE_REDIST_TOTAL
#define SLICE_REDIST_TOTAL
Definition: vc2enc.c:39
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
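As a brief hedged note, av_log2(v) returns the index of the highest set bit, i.e. floor(log2(v)) for v > 0; the helper below is illustrative only.

#include "libavutil/common.h"

/* Illustrative only: av_log2(1) == 0, av_log2(16) == 4, av_log2(17) == 4. */
static int bits_needed(unsigned v)
{
    return v ? av_log2(v) + 1 : 0; /* number of bits needed to represent v */
}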
VC2_TRANSFORM_HAAR_S
@ VC2_TRANSFORM_HAAR_S
Definition: vc2enc_dwt.h:35
encode_sample_fmt
static void encode_sample_fmt(VC2EncContext *s)
Definition: vc2enc.c:283
VC2EncContext::frame_max_bytes
int frame_max_bytes
Definition: vc2enc.c:165
VC2BaseVideoFormat::width
int width
Definition: vc2enc.c:44
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:642
VC2_TRANSFORM_5_3
@ VC2_TRANSFORM_5_3
Definition: vc2enc_dwt.h:32
encode_signal_range
static void encode_signal_range(VC2EncContext *s)
Definition: vc2enc.c:337
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:375
TransformArgs::idata
const void * idata
Definition: vc2enc.c:121
SubBand::height
int height
Definition: cfhd.h:113
VC2EncContext::bpp_idx
int bpp_idx
Definition: vc2enc.c:140