/* FFmpeg — cfhd.c (Cineform HD decoder) */
1 /*
2  * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Cineform HD video decoder
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/common.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/intreadwrite.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/pixdesc.h"
32 
33 #include "avcodec.h"
34 #include "bytestream.h"
35 #include "codec_internal.h"
36 #include "decode.h"
37 #include "get_bits.h"
38 #include "internal.h"
39 #include "thread.h"
40 #include "cfhd.h"
41 
42 #define ALPHA_COMPAND_DC_OFFSET 256
43 #define ALPHA_COMPAND_GAIN 9400
44 
45 static av_cold int cfhd_init(AVCodecContext *avctx)
46 {
47  CFHDContext *s = avctx->priv_data;
48 
49  s->avctx = avctx;
50 
51  for (int i = 0; i < 64; i++) {
52  int val = i;
53 
54  if (val >= 40) {
55  if (val >= 54) {
56  val -= 54;
57  val <<= 2;
58  val += 54;
59  }
60 
61  val -= 40;
62  val <<= 2;
63  val += 40;
64  }
65 
66  s->lut[0][i] = val;
67  }
68 
69  for (int i = 0; i < 256; i++)
70  s->lut[1][i] = i + ((768LL * i * i * i) / (256 * 256 * 256));
71 
72  return ff_cfhd_init_vlcs(s);
73 }
74 
76 {
77  s->subband_num = 0;
78  s->level = 0;
79  s->subband_num_actual = 0;
80 }
81 
83 {
84  s->peak.level = 0;
85  s->peak.offset = 0;
86  memset(&s->peak.base, 0, sizeof(s->peak.base));
87 }
88 
90 {
91  s->coded_width = 0;
92  s->coded_height = 0;
93  s->coded_format = AV_PIX_FMT_YUV422P10;
94  s->cropped_height = 0;
95  s->bpc = 10;
96  s->channel_cnt = 3;
97  s->subband_cnt = SUBBAND_COUNT;
98  s->channel_num = 0;
99  s->lowpass_precision = 16;
100  s->quantisation = 1;
101  s->codebook = 0;
102  s->difference_coding = 0;
103  s->frame_type = 0;
104  s->sample_type = 0;
105  if (s->transform_type != 2)
106  s->transform_type = -1;
109 }
110 
111 static inline int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
112 {
113  if (codebook == 0 || codebook == 1) {
114  return s->lut[codebook][abs(level)] * FFSIGN(level) * quantisation;
115  } else
116  return level * quantisation;
117 }
118 
/* Rows hold horizontal deltas; integrate each row in place so every
 * sample becomes the running sum of the samples to its left. */
static inline void difference_coding(int16_t *band, int width, int height)
{
    int16_t *row = band;

    for (int y = 0; y < height; y++) {
        for (int x = 1; x < width; x++)
            row[x] += row[x - 1];
        row += width;
    }
}
128 
129 static inline void peak_table(int16_t *band, Peak *peak, int length)
130 {
131  for (int i = 0; i < length; i++)
132  if (abs(band[i]) > peak->level)
133  band[i] = bytestream2_get_le16(&peak->base);
134 }
135 
136 static inline void process_alpha(int16_t *alpha, int width)
137 {
138  for (int i = 0; i < width; i++) {
139  int channel = alpha[i];
141  channel <<= 3;
143  channel >>= 16;
145  alpha[i] = channel;
146  }
147 }
148 
149 static inline void process_bayer(AVFrame *frame, int bpc)
150 {
151  const int linesize = frame->linesize[0];
152  uint16_t *r = (uint16_t *)frame->data[0];
153  uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
154  uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
155  uint16_t *b = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
156  const int mid = 1 << (bpc - 1);
157  const int factor = 1 << (16 - bpc);
158 
159  for (int y = 0; y < frame->height >> 1; y++) {
160  for (int x = 0; x < frame->width; x += 2) {
161  int R, G1, G2, B;
162  int g, rg, bg, gd;
163 
164  g = r[x];
165  rg = g1[x];
166  bg = g2[x];
167  gd = b[x];
168  gd -= mid;
169 
170  R = (rg - mid) * 2 + g;
171  G1 = g + gd;
172  G2 = g - gd;
173  B = (bg - mid) * 2 + g;
174 
175  R = av_clip_uintp2(R * factor, 16);
176  G1 = av_clip_uintp2(G1 * factor, 16);
177  G2 = av_clip_uintp2(G2 * factor, 16);
178  B = av_clip_uintp2(B * factor, 16);
179 
180  r[x] = R;
181  g1[x] = G1;
182  g2[x] = G2;
183  b[x] = B;
184  }
185 
186  r += linesize;
187  g1 += linesize;
188  g2 += linesize;
189  b += linesize;
190  }
191 }
192 
/* Reconstruct two interlaced output rows (even and odd field lines)
 * from one row pair of low/high band coefficients, clipped to 10 bits.
 * The plane argument is unused but kept for the caller's signature. */
static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
                                              int width, int linesize, int plane)
{
    for (int x = 0; x < width; x++) {
        int16_t top    = (low[x] - high[x]) / 2;
        int16_t bottom = (low[x] + high[x]) / 2;

        output[x]            = av_clip_uintp2(top, 10);
        output[x + linesize] = av_clip_uintp2(bottom, 10);
    }
}
203 
/* In-place temporal unlift: low becomes the even frame (difference/2),
 * high becomes the odd frame (sum/2). */
static inline void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
{
    for (int i = 0; i < width; i++) {
        const int diff = low[i] - high[i];
        const int sum  = low[i] + high[i];

        low[i]  = diff / 2;
        high[i] = sum / 2;
    }
}
214 
216 {
217  for (size_t i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
218  Plane *p = &s->plane[i];
219  av_freep(&s->plane[i].idwt_buf);
220  av_freep(&s->plane[i].idwt_tmp);
221  s->plane[i].idwt_size = 0;
222 
223  for (int j = 0; j < SUBBAND_COUNT_3D; j++)
224  s->plane[i].subband[j] = NULL;
225 
226  for (int j = 0; j < 10; j++)
227  s->plane[i].l_h[j] = NULL;
228 
229  for (int j = 0; j < DWT_LEVELS_3D; j++)
230  p->band[j][0].read_ok =
231  p->band[j][1].read_ok =
232  p->band[j][2].read_ok =
233  p->band[j][3].read_ok = 0;
234  }
235  s->a_height = 0;
236  s->a_width = 0;
237  s->a_transform_type = INT_MIN;
238 }
239 
/* Allocate the per-plane IDWT working buffers and lay out the subband and
 * intermediate (l_h) pointers inside them for the current coded geometry.
 * On success the current geometry is cached in a_width/a_height/a_format/
 * a_transform_type so later frames can detect when reallocation is needed.
 * Returns 0 on success or a negative AVERROR code. */
static int alloc_buffers(AVCodecContext *avctx)
{
    CFHDContext *s = avctx->priv_data;
    int ret, planes, bayer = 0;
    int chroma_x_shift, chroma_y_shift;

    if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
        return ret;
    avctx->pix_fmt = s->coded_format;

    ff_cfhddsp_init(&s->dsp, s->bpc, avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);

    if ((ret = av_pix_fmt_get_chroma_sub_sample(s->coded_format,
                                                &chroma_x_shift,
                                                &chroma_y_shift)) < 0)
        return ret;
    planes = av_pix_fmt_count_planes(s->coded_format);
    if (s->coded_format == AV_PIX_FMT_BAYER_RGGB16) {
        /* Bayer data is stored as four half-resolution component planes. */
        planes = 4;
        chroma_x_shift = 1;
        chroma_y_shift = 1;
        bayer = 1;
    }

    for (int i = 0; i < planes; i++) {
        int w8, h8, w4, h4, w2, h2;
        /* Plane 0 is full size for non-Bayer formats; chroma (and all
         * Bayer component planes) are subsampled. */
        int width  = (i || bayer) ? s->coded_width >> chroma_x_shift : s->coded_width;
        int height = (i || bayer) ? s->coded_height >> chroma_y_shift : s->coded_height;
        /* Stride in samples, rounded so the 8:1 lowest decomposition level
         * stays 8-aligned, with extra padding columns. */
        ptrdiff_t stride = (FFALIGN(width / 8, 8) + 64) * 8;

        if ((ret = av_image_check_size2(stride, height, avctx->max_pixels, s->coded_format, 0, avctx)) < 0)
            return ret;

        if (chroma_y_shift && !bayer)
            height = FFALIGN(height / 8, 2) * 8;
        s->plane[i].width  = width;
        s->plane[i].height = height;
        s->plane[i].stride = stride;

        /* Dimensions of the 8:1, 4:1 and 2:1 decomposition levels. */
        w8 = FFALIGN(s->plane[i].width / 8, 8) + 64;
        h8 = FFALIGN(height, 8) / 8;
        w4 = w8 * 2;
        h4 = h8 * 2;
        w2 = w4 * 2;
        h2 = h4 * 2;

        if (s->transform_type == 0) {
            s->plane[i].idwt_size = FFALIGN(height, 8) * stride;
            s->plane[i].idwt_buf =
                av_calloc(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
            s->plane[i].idwt_tmp =
                av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
        } else {
            /* 3D transform carries a second temporal frame: double size. */
            s->plane[i].idwt_size = FFALIGN(height, 8) * stride * 2;
            s->plane[i].idwt_buf =
                av_calloc(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
            s->plane[i].idwt_tmp =
                av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
        }

        if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
            return AVERROR(ENOMEM);

        /* Subband layout inside idwt_buf: index 0 is the lowpass band,
         * 1-3 the level-1 highpass bands, 4-6 level 2, and so on. Offsets
         * 1/2/3 * w*h select the HL/LH/HH quadrants at each level. */
        s->plane[i].subband[0] = s->plane[i].idwt_buf;
        s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
        s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
        s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
        s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
        s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
        s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
        if (s->transform_type == 0) {
            s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
            s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
            s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;
        } else {
            /* 3D transform: bands 7-13 live in the second (temporal)
             * frame half of the buffer; 14-16 in the first half. Note the
             * chained assignment — frame2 aliases subband[7]. */
            int16_t *frame2 =
            s->plane[i].subband[7] = s->plane[i].idwt_buf + 4 * w2 * h2;
            s->plane[i].subband[8] = frame2 + 2 * w4 * h4;
            s->plane[i].subband[9] = frame2 + 1 * w4 * h4;
            s->plane[i].subband[10] = frame2 + 3 * w4 * h4;
            s->plane[i].subband[11] = frame2 + 2 * w2 * h2;
            s->plane[i].subband[12] = frame2 + 1 * w2 * h2;
            s->plane[i].subband[13] = frame2 + 3 * w2 * h2;
            s->plane[i].subband[14] = s->plane[i].idwt_buf + 2 * w2 * h2;
            s->plane[i].subband[15] = s->plane[i].idwt_buf + 1 * w2 * h2;
            s->plane[i].subband[16] = s->plane[i].idwt_buf + 3 * w2 * h2;
        }

        /* Per-band allocated dimensions used later for bounds checking. */
        if (s->transform_type == 0) {
            for (int j = 0; j < DWT_LEVELS; j++) {
                for (unsigned k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
                    s->plane[i].band[j][k].a_width  = w8 << j;
                    s->plane[i].band[j][k].a_height = h8 << j;
                }
            }
        } else {
            for (int j = 0; j < DWT_LEVELS_3D; j++) {
                /* Levels 0, 1-2 and 3+ map to the 8:1, 4:1 and 2:1 sizes. */
                int t = j < 1 ? 0 : (j < 3 ? 1 : 2);

                for (unsigned k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
                    s->plane[i].band[j][k].a_width  = w8 << t;
                    s->plane[i].band[j][k].a_height = h8 << t;
                }
            }
        }

        /* ll2 and ll1 commented out because they are done in-place */
        s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
        // s->plane[i].l_h[2] = ll2;
        s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
        // s->plane[i].l_h[5] = ll1;
        s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
        if (s->transform_type != 0) {
            int16_t *frame2 = s->plane[i].idwt_tmp + 4 * w2 * h2;

            s->plane[i].l_h[8] = frame2;
            s->plane[i].l_h[9] = frame2 + 2 * w2 * h2;
        }
    }

    /* Cache the geometry this allocation was made for. */
    s->a_transform_type = s->transform_type;
    s->a_height = s->coded_height;
    s->a_width  = s->coded_width;
    s->a_format = s->coded_format;

    return 0;
}
370 
371 static int cfhd_decode(AVCodecContext *avctx, AVFrame *pic,
372  int *got_frame, AVPacket *avpkt)
373 {
374  CFHDContext *s = avctx->priv_data;
375  CFHDDSPContext *dsp = &s->dsp;
376  GetByteContext gb;
377  int ret = 0, got_buffer = 0;
378 
380  s->planes = av_pix_fmt_count_planes(s->coded_format);
381 
382  bytestream2_init(&gb, avpkt->data, avpkt->size);
383 
384  while (bytestream2_get_bytes_left(&gb) >= 4) {
385  /* Bit weird but implement the tag parsing as the spec says */
386  uint16_t tagu = bytestream2_get_be16(&gb);
387  int16_t tag = (int16_t)tagu;
388  int8_t tag8 = (int8_t)(tagu >> 8);
389  uint16_t abstag = abs(tag);
390  int8_t abs_tag8 = abs(tag8);
391  uint16_t data = bytestream2_get_be16(&gb);
392  int16_t *coeff_data;
393 
394  if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
395  av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
396  } else if (tag == SampleFlags) {
397  av_log(avctx, AV_LOG_DEBUG, "Progressive? %"PRIu16"\n", data);
398  s->progressive = data & 0x0001;
399  } else if (tag == FrameType) {
400  s->frame_type = data;
401  av_log(avctx, AV_LOG_DEBUG, "Frame type %"PRIu16"\n", data);
402  } else if (abstag == VersionMajor) {
403  av_log(avctx, AV_LOG_DEBUG, "Version major %"PRIu16"\n", data);
404  } else if (abstag == VersionMinor) {
405  av_log(avctx, AV_LOG_DEBUG, "Version minor %"PRIu16"\n", data);
406  } else if (abstag == VersionRevision) {
407  av_log(avctx, AV_LOG_DEBUG, "Version revision %"PRIu16"\n", data);
408  } else if (abstag == VersionEdit) {
409  av_log(avctx, AV_LOG_DEBUG, "Version edit %"PRIu16"\n", data);
410  } else if (abstag == Version) {
411  av_log(avctx, AV_LOG_DEBUG, "Version %"PRIu16"\n", data);
412  } else if (tag == ImageWidth) {
413  av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
414  s->coded_width = data;
415  } else if (tag == ImageHeight) {
416  av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
417  s->coded_height = data;
418  } else if (tag == ChannelCount) {
419  av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
420  s->channel_cnt = data;
421  if (data > 4) {
422  av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
424  goto end;
425  }
426  } else if (tag == SubbandCount) {
427  av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
428  if (data != SUBBAND_COUNT && data != SUBBAND_COUNT_3D) {
429  av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
431  goto end;
432  }
433  } else if (tag == ChannelNumber) {
434  s->channel_num = data;
435  av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
436  if (s->channel_num >= s->planes) {
437  av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
438  ret = AVERROR(EINVAL);
439  goto end;
440  }
442  } else if (tag == SubbandNumber) {
443  if (s->subband_num != 0 && data == 1 && (s->transform_type == 0 || s->transform_type == 2)) // hack
444  s->level++;
445  av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
446  s->subband_num = data;
447  if ((s->transform_type == 0 && s->level >= DWT_LEVELS) ||
448  (s->transform_type == 2 && s->level >= DWT_LEVELS_3D)) {
449  av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
450  ret = AVERROR(EINVAL);
451  goto end;
452  }
453  if (s->subband_num > 3) {
454  av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
455  ret = AVERROR(EINVAL);
456  goto end;
457  }
458  } else if (tag == SubbandBand) {
459  av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
460  if ((s->transform_type == 0 && data >= SUBBAND_COUNT) ||
461  (s->transform_type == 2 && data >= SUBBAND_COUNT_3D && data != 255)) {
462  av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
463  ret = AVERROR(EINVAL);
464  goto end;
465  }
466  if (s->transform_type == 0 || s->transform_type == 2)
467  s->subband_num_actual = data;
468  else
469  av_log(avctx, AV_LOG_WARNING, "Ignoring subband num actual %"PRIu16"\n", data);
470  } else if (tag == LowpassPrecision)
471  av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
472  else if (tag == Quantization) {
473  s->quantisation = data;
474  av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
475  } else if (tag == PrescaleTable) {
476  for (int i = 0; i < 8; i++)
477  s->prescale_table[i] = (data >> (14 - i * 2)) & 0x3;
478  av_log(avctx, AV_LOG_DEBUG, "Prescale table: %x\n", data);
479  } else if (tag == BandEncoding) {
480  if (!data || data > 5) {
481  av_log(avctx, AV_LOG_ERROR, "Invalid band encoding\n");
482  ret = AVERROR(EINVAL);
483  goto end;
484  }
485  s->band_encoding = data;
486  av_log(avctx, AV_LOG_DEBUG, "Encode Method for Subband %d : %x\n", s->subband_num_actual, data);
487  } else if (tag == LowpassWidth) {
488  av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
489  s->plane[s->channel_num].band[0][0].width = data;
490  s->plane[s->channel_num].band[0][0].stride = data;
491  } else if (tag == LowpassHeight) {
492  av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
493  s->plane[s->channel_num].band[0][0].height = data;
494  } else if (tag == SampleType) {
495  s->sample_type = data;
496  av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
497  } else if (tag == TransformType) {
498  if (data > 2) {
499  av_log(avctx, AV_LOG_ERROR, "Invalid transform type\n");
500  ret = AVERROR(EINVAL);
501  goto end;
502  } else if (data == 1) {
503  av_log(avctx, AV_LOG_ERROR, "unsupported transform type\n");
505  goto end;
506  }
507  if (s->transform_type == -1) {
508  s->transform_type = data;
509  av_log(avctx, AV_LOG_DEBUG, "Transform type %"PRIu16"\n", data);
510  } else {
511  av_log(avctx, AV_LOG_DEBUG, "Ignoring additional transform type %"PRIu16"\n", data);
512  }
513  } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
514  if (abstag == 0x4001)
515  s->peak.level = 0;
516  av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
517  bytestream2_skipu(&gb, data * 4);
518  } else if (tag == FrameIndex) {
519  av_log(avctx, AV_LOG_DEBUG, "Frame index %"PRIu16"\n", data);
520  s->frame_index = data;
521  } else if (tag == SampleIndexTable) {
522  av_log(avctx, AV_LOG_DEBUG, "Sample index table - skipping %i values\n", data);
523  if (data > bytestream2_get_bytes_left(&gb) / 4) {
524  av_log(avctx, AV_LOG_ERROR, "too many values (%d)\n", data);
526  goto end;
527  }
528  for (int i = 0; i < data; i++) {
529  uint32_t offset = bytestream2_get_be32(&gb);
530  av_log(avctx, AV_LOG_DEBUG, "Offset = %"PRIu32"\n", offset);
531  }
532  } else if (tag == HighpassWidth) {
533  av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
534  if (data < 3) {
535  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
536  ret = AVERROR(EINVAL);
537  goto end;
538  }
539  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
540  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
541  } else if (tag == HighpassHeight) {
542  av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
543  if (data < 3) {
544  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
545  ret = AVERROR(EINVAL);
546  goto end;
547  }
548  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
549  } else if (tag == BandWidth) {
550  av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
551  if (data < 3) {
552  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
553  ret = AVERROR(EINVAL);
554  goto end;
555  }
556  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
557  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
558  } else if (tag == BandHeight) {
559  av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
560  if (data < 3) {
561  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
562  ret = AVERROR(EINVAL);
563  goto end;
564  }
565  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
566  } else if (tag == InputFormat) {
567  av_log(avctx, AV_LOG_DEBUG, "Input format %i\n", data);
568  if (s->coded_format == AV_PIX_FMT_NONE ||
569  s->coded_format == AV_PIX_FMT_YUV422P10) {
570  if (data >= 100 && data <= 105) {
571  s->coded_format = AV_PIX_FMT_BAYER_RGGB16;
572  } else if (data >= 122 && data <= 128) {
573  s->coded_format = AV_PIX_FMT_GBRP12;
574  } else if (data == 30) {
575  s->coded_format = AV_PIX_FMT_GBRAP12;
576  } else {
577  s->coded_format = AV_PIX_FMT_YUV422P10;
578  }
579  s->planes = s->coded_format == AV_PIX_FMT_BAYER_RGGB16 ? 4 : av_pix_fmt_count_planes(s->coded_format);
580  }
581  } else if (tag == BandCodingFlags) {
582  s->codebook = data & 0xf;
583  s->difference_coding = (data >> 4) & 1;
584  av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
585  } else if (tag == Precision) {
586  av_log(avctx, AV_LOG_DEBUG, "Precision %i\n", data);
587  if (!(data == 10 || data == 12)) {
588  av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
589  ret = AVERROR(EINVAL);
590  goto end;
591  }
592  avctx->bits_per_raw_sample = s->bpc = data;
593  } else if (tag == EncodedFormat) {
594  av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
595  if (data == 1) {
596  s->coded_format = AV_PIX_FMT_YUV422P10;
597  } else if (data == 2) {
598  s->coded_format = AV_PIX_FMT_BAYER_RGGB16;
599  } else if (data == 3) {
600  s->coded_format = AV_PIX_FMT_GBRP12;
601  } else if (data == 4) {
602  s->coded_format = AV_PIX_FMT_GBRAP12;
603  } else {
604  avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
606  goto end;
607  }
608  s->planes = data == 2 ? 4 : av_pix_fmt_count_planes(s->coded_format);
609  } else if (tag == -DisplayHeight) {
610  av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
611  s->cropped_height = data;
612  } else if (tag == -PeakOffsetLow) {
613  s->peak.offset &= ~0xffff;
614  s->peak.offset |= (data & 0xffff);
615  s->peak.base = gb;
616  s->peak.level = 0;
617  } else if (tag == -PeakOffsetHigh) {
618  s->peak.offset &= 0xffff;
619  s->peak.offset |= (data & 0xffffU)<<16;
620  s->peak.base = gb;
621  s->peak.level = 0;
622  } else if (tag == -PeakLevel && s->peak.offset) {
623  s->peak.level = data;
624  if (s->peak.offset < 4 - bytestream2_tell(&s->peak.base) ||
625  s->peak.offset > 4 + bytestream2_get_bytes_left(&s->peak.base)
626  ) {
628  goto end;
629  }
630  bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
631  } else
632  av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
633 
635  s->coded_format != AV_PIX_FMT_NONE) {
636  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
637  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
638  int factor = s->coded_format == AV_PIX_FMT_BAYER_RGGB16 ? 2 : 1;
639 
640  if (s->coded_width) {
641  s->coded_width *= factor;
642  }
643 
644  if (s->coded_height) {
645  s->coded_height *= factor;
646  }
647 
648  if (!s->a_width && !s->coded_width) {
649  s->coded_width = lowpass_width * factor * 8;
650  }
651 
652  if (!s->a_height && !s->coded_height) {
653  s->coded_height = lowpass_height * factor * 8;
654  }
655 
656  if (s->a_width && !s->coded_width)
657  s->coded_width = s->a_width;
658  if (s->a_height && !s->coded_height)
659  s->coded_height = s->a_height;
660 
661  if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
662  s->a_format != s->coded_format ||
663  s->transform_type != s->a_transform_type) {
664  free_buffers(s);
665  if ((ret = alloc_buffers(avctx)) < 0) {
666  free_buffers(s);
667  return ret;
668  }
669  }
670  ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
671  if (ret < 0)
672  return ret;
673  if (s->cropped_height) {
674  unsigned height = s->cropped_height << (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);
675  if (avctx->height < height)
676  return AVERROR_INVALIDDATA;
677  avctx->height = height;
678  }
679  pic->width = pic->height = 0;
680 
681  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
682  return ret;
683 
684  s->coded_width = 0;
685  s->coded_height = 0;
686  s->coded_format = AV_PIX_FMT_NONE;
687  got_buffer = 1;
688  } else if (tag == FrameIndex && data == 1 && s->sample_type == 1 && s->frame_type == 2) {
689  pic->width = pic->height = 0;
690 
691  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
692  return ret;
693  s->coded_width = 0;
694  s->coded_height = 0;
695  s->coded_format = AV_PIX_FMT_NONE;
696  got_buffer = 1;
697  }
698 
699  if (s->subband_num_actual == 255)
700  goto finish;
701 
702  if (tag == BitstreamMarker && data == CoefficientSegment || tag == BandHeader || tag == BandSecondPass || s->peak.level)
703  if (s->transform_type != s->a_transform_type)
704  return AVERROR_PATCHWELCOME;
705 
706  coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
707 
708  /* Lowpass coefficients */
710  int lowpass_height, lowpass_width, lowpass_a_height, lowpass_a_width;
711 
712  if (!s->a_width || !s->a_height) {
714  goto end;
715  }
716 
717  lowpass_height = s->plane[s->channel_num].band[0][0].height;
718  lowpass_width = s->plane[s->channel_num].band[0][0].width;
719  lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
720  lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
721 
722  if (lowpass_width < 3 ||
723  lowpass_width > lowpass_a_width) {
724  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
725  ret = AVERROR(EINVAL);
726  goto end;
727  }
728 
729  if (lowpass_height < 3 ||
730  lowpass_height > lowpass_a_height) {
731  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
732  ret = AVERROR(EINVAL);
733  goto end;
734  }
735 
736  if (!got_buffer) {
737  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
738  ret = AVERROR(EINVAL);
739  goto end;
740  }
741 
742  if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
743  lowpass_width * lowpass_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
744  av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
745  ret = AVERROR(EINVAL);
746  goto end;
747  }
748 
749  av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
750  for (int i = 0; i < lowpass_height; i++) {
751  for (int j = 0; j < lowpass_width; j++)
752  coeff_data[j] = bytestream2_get_be16u(&gb);
753 
754  coeff_data += lowpass_width;
755  }
756 
757  /* Align to mod-4 position to continue reading tags */
758  bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
759 
760  /* Copy last line of coefficients if odd height */
761  if (lowpass_height & 1) {
762  memcpy(&coeff_data[lowpass_height * lowpass_width],
763  &coeff_data[(lowpass_height - 1) * lowpass_width],
764  lowpass_width * sizeof(*coeff_data));
765  }
766 
767  s->plane[s->channel_num].band[0][0].read_ok = 1;
768 
769  av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
770  }
771 
772  av_assert0(s->subband_num_actual != 255);
773  if (tag == BandHeader || tag == BandSecondPass) {
774  int highpass_height, highpass_width, highpass_a_width, highpass_a_height, highpass_stride, a_expected;
775  int expected;
776  GetBitContext gbit;
777  int count = 0, bytes;
778 
779  if (!s->a_width || !s->a_height) {
781  goto end;
782  }
783 
784  highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
785  highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
786  highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
787  highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
788  highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
789  a_expected = highpass_a_height * highpass_a_width;
790 
791  if (!got_buffer) {
792  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
793  ret = AVERROR(EINVAL);
794  goto end;
795  }
796 
797  if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
798  av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
799  ret = AVERROR(EINVAL);
800  goto end;
801  }
802  expected = highpass_height * highpass_stride;
803 
804  av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
805 
807  if (ret < 0)
808  goto end;
809  {
810  OPEN_READER(re, &gbit);
811 
812  const int lossless = s->band_encoding == 5;
813 
814  if (s->codebook == 0 && s->transform_type == 2 && s->subband_num_actual == 7)
815  s->codebook = 1;
816  if (!s->codebook) {
817  while (1) {
818  int level, run, coeff;
819 
820  UPDATE_CACHE(re, &gbit);
821  GET_RL_VLC(level, run, re, &gbit, s->table_9_rl_vlc,
822  VLC_BITS, 3, 1);
823 
824  /* escape */
825  if (!run)
826  break;
827 
828  count += run;
829 
830  if (count > expected)
831  break;
832 
833  if (!lossless)
834  coeff = dequant_and_decompand(s, level, s->quantisation, 0);
835  else
836  coeff = level;
837  if (tag == BandSecondPass) {
838  const uint16_t q = s->quantisation;
839 
840  for (int i = 0; i < run; i++) {
841  *coeff_data |= coeff * 256U;
842  *coeff_data++ *= q;
843  }
844  } else {
845  for (int i = 0; i < run; i++)
846  *coeff_data++ = coeff;
847  }
848  }
849  } else {
850  while (1) {
851  int level, run, coeff;
852 
853  UPDATE_CACHE(re, &gbit);
854  GET_RL_VLC(level, run, re, &gbit, s->table_18_rl_vlc,
855  VLC_BITS, 3, 1);
856 
857  /* escape */
858  if (!run)
859  break;
860 
861  count += run;
862 
863  if (count > expected)
864  break;
865 
866  if (!lossless)
867  coeff = dequant_and_decompand(s, level, s->quantisation, s->codebook);
868  else
869  coeff = level;
870  if (tag == BandSecondPass) {
871  const uint16_t q = s->quantisation;
872 
873  for (int i = 0; i < run; i++) {
874  *coeff_data |= coeff * 256U;
875  *coeff_data++ *= q;
876  }
877  } else {
878  for (int i = 0; i < run; i++)
879  *coeff_data++ = coeff;
880  }
881  }
882  }
883  CLOSE_READER(re, &gbit);
884  }
885 
886  if (count > expected) {
887  av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
888  ret = AVERROR(EINVAL);
889  goto end;
890  }
891  if (s->peak.level)
892  peak_table(coeff_data - count, &s->peak, count);
893  if (s->difference_coding)
894  difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);
895 
896  bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&gbit), 3), 4);
897  if (bytes > bytestream2_get_bytes_left(&gb)) {
898  av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
899  ret = AVERROR(EINVAL);
900  goto end;
901  } else
902  bytestream2_seek(&gb, bytes, SEEK_CUR);
903 
904  av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
905  s->plane[s->channel_num].band[s->level][s->subband_num].read_ok = 1;
906 finish:
907  if (s->subband_num_actual != 255)
908  s->codebook = 0;
909  }
910  }
911 
912  s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);
913  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
914  s->progressive = 1;
915  s->planes = 4;
916  }
917 
918  ff_thread_finish_setup(avctx);
919 
920  if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
921  s->a_transform_type == INT_MIN ||
922  s->coded_width || s->coded_height || s->coded_format != AV_PIX_FMT_NONE) {
923  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
924  ret = AVERROR(EINVAL);
925  goto end;
926  }
927 
928  if (!got_buffer) {
929  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
930  ret = AVERROR(EINVAL);
931  goto end;
932  }
933 
934  for (int plane = 0; plane < s->planes; plane++) {
935  for (int level = 0; level < (s->transform_type == 0 ? DWT_LEVELS : DWT_LEVELS_3D) ; level++) {
936  if (s->transform_type == 2)
937  if (level == 2 || level == 5)
938  continue;
939  for (int o = !!level; o < 4 ; o++) {
940  if (!s->plane[plane].band[level][o].read_ok) {
942  goto end;
943  }
944  }
945  }
946  }
947 
948  if (s->transform_type == 0 && s->sample_type != 1) {
949  for (int plane = 0; plane < s->planes && !ret; plane++) {
950  /* level 1 */
951  int lowpass_height = s->plane[plane].band[0][0].height;
952  int output_stride = s->plane[plane].band[0][0].a_width;
953  int lowpass_width = s->plane[plane].band[0][0].width;
954  int highpass_stride = s->plane[plane].band[0][1].stride;
955  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
956  ptrdiff_t dst_linesize;
957  int16_t *low, *high, *output, *dst;
958 
959  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
960  act_plane = 0;
961  dst_linesize = pic->linesize[act_plane];
962  } else {
963  dst_linesize = pic->linesize[act_plane] / 2;
964  }
965 
966  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
967  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width ||
968  lowpass_width < 3 || lowpass_height < 3) {
969  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
970  ret = AVERROR(EINVAL);
971  goto end;
972  }
973 
974  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
975 
976  low = s->plane[plane].subband[0];
977  high = s->plane[plane].subband[2];
978  output = s->plane[plane].l_h[0];
979  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
980 
981  low = s->plane[plane].subband[1];
982  high = s->plane[plane].subband[3];
983  output = s->plane[plane].l_h[1];
984 
985  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
986 
987  low = s->plane[plane].l_h[0];
988  high = s->plane[plane].l_h[1];
989  output = s->plane[plane].subband[0];
990  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
991  if (s->bpc == 12) {
992  output = s->plane[plane].subband[0];
993  for (int i = 0; i < lowpass_height * 2; i++) {
994  for (int j = 0; j < lowpass_width * 2; j++)
995  output[j] *= 4;
996 
997  output += output_stride * 2;
998  }
999  }
1000 
1001  /* level 2 */
1002  lowpass_height = s->plane[plane].band[1][1].height;
1003  output_stride = s->plane[plane].band[1][1].a_width;
1004  lowpass_width = s->plane[plane].band[1][1].width;
1005  highpass_stride = s->plane[plane].band[1][1].stride;
1006 
1007  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
1008  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width ||
1009  lowpass_width < 3 || lowpass_height < 3) {
1010  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1011  ret = AVERROR(EINVAL);
1012  goto end;
1013  }
1014 
1015  av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1016 
1017  low = s->plane[plane].subband[0];
1018  high = s->plane[plane].subband[5];
1019  output = s->plane[plane].l_h[3];
1020  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1021 
1022  low = s->plane[plane].subband[4];
1023  high = s->plane[plane].subband[6];
1024  output = s->plane[plane].l_h[4];
1025  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1026 
1027  low = s->plane[plane].l_h[3];
1028  high = s->plane[plane].l_h[4];
1029  output = s->plane[plane].subband[0];
1030  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1031 
1032  output = s->plane[plane].subband[0];
1033  for (int i = 0; i < lowpass_height * 2; i++) {
1034  for (int j = 0; j < lowpass_width * 2; j++)
1035  output[j] *= 4;
1036 
1037  output += output_stride * 2;
1038  }
1039 
1040  /* level 3 */
1041  lowpass_height = s->plane[plane].band[2][1].height;
1042  output_stride = s->plane[plane].band[2][1].a_width;
1043  lowpass_width = s->plane[plane].band[2][1].width;
1044  highpass_stride = s->plane[plane].band[2][1].stride;
1045 
1046  if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
1047  !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width ||
1048  lowpass_height < 3 || lowpass_width < 3 || lowpass_width * 2 > s->plane[plane].width) {
1049  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1050  ret = AVERROR(EINVAL);
1051  goto end;
1052  }
1053 
1054  av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1055  if (s->progressive) {
1056  low = s->plane[plane].subband[0];
1057  high = s->plane[plane].subband[8];
1058  output = s->plane[plane].l_h[6];
1059  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1060 
1061  low = s->plane[plane].subband[7];
1062  high = s->plane[plane].subband[9];
1063  output = s->plane[plane].l_h[7];
1064  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1065 
1066  dst = (int16_t *)pic->data[act_plane];
1067  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1068  if (plane & 1)
1069  dst++;
1070  if (plane > 1)
1071  dst += pic->linesize[act_plane] >> 1;
1072  }
1073  low = s->plane[plane].l_h[6];
1074  high = s->plane[plane].l_h[7];
1075 
1076  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1077  (lowpass_height * 2 > avctx->coded_height / 2 ||
1078  lowpass_width * 2 > avctx->coded_width / 2 )
1079  ) {
1081  goto end;
1082  }
1083 
1084  for (int i = 0; i < s->plane[act_plane].height; i++) {
1085  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1086  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
1087  process_alpha(dst, lowpass_width * 2);
1088  low += output_stride;
1089  high += output_stride;
1090  dst += dst_linesize;
1091  }
1092  } else {
1093  av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", !!(pic->flags & AV_FRAME_FLAG_INTERLACED));
1095  low = s->plane[plane].subband[0];
1096  high = s->plane[plane].subband[7];
1097  output = s->plane[plane].l_h[6];
1098  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1099 
1100  low = s->plane[plane].subband[8];
1101  high = s->plane[plane].subband[9];
1102  output = s->plane[plane].l_h[7];
1103  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1104 
1105  dst = (int16_t *)pic->data[act_plane];
1106  low = s->plane[plane].l_h[6];
1107  high = s->plane[plane].l_h[7];
1108  for (int i = 0; i < s->plane[act_plane].height / 2; i++) {
1109  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1110  low += output_stride * 2;
1111  high += output_stride * 2;
1112  dst += pic->linesize[act_plane];
1113  }
1114  }
1115  }
1116  } else if (s->transform_type == 2 && (avctx->internal->is_copy || s->frame_index == 1 || s->sample_type != 1)) {
1117  for (int plane = 0; plane < s->planes && !ret; plane++) {
1118  int lowpass_height = s->plane[plane].band[0][0].height;
1119  int output_stride = s->plane[plane].band[0][0].a_width;
1120  int lowpass_width = s->plane[plane].band[0][0].width;
1121  int highpass_stride = s->plane[plane].band[0][1].stride;
1122  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1123  int16_t *low, *high, *output, *dst;
1124  ptrdiff_t dst_linesize;
1125 
1126  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1127  act_plane = 0;
1128  dst_linesize = pic->linesize[act_plane];
1129  } else {
1130  dst_linesize = pic->linesize[act_plane] / 2;
1131  }
1132 
1133  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
1134  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width ||
1135  lowpass_width < 3 || lowpass_height < 3) {
1136  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1137  ret = AVERROR(EINVAL);
1138  goto end;
1139  }
1140 
1141  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1142 
1143  low = s->plane[plane].subband[0];
1144  high = s->plane[plane].subband[2];
1145  output = s->plane[plane].l_h[0];
1146  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
1147 
1148  low = s->plane[plane].subband[1];
1149  high = s->plane[plane].subband[3];
1150  output = s->plane[plane].l_h[1];
1151  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1152 
1153  low = s->plane[plane].l_h[0];
1154  high = s->plane[plane].l_h[1];
1155  output = s->plane[plane].l_h[7];
1156  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1157  if (s->bpc == 12) {
1158  output = s->plane[plane].l_h[7];
1159  for (int i = 0; i < lowpass_height * 2; i++) {
1160  for (int j = 0; j < lowpass_width * 2; j++)
1161  output[j] *= 4;
1162 
1163  output += output_stride * 2;
1164  }
1165  }
1166 
1167  lowpass_height = s->plane[plane].band[1][1].height;
1168  output_stride = s->plane[plane].band[1][1].a_width;
1169  lowpass_width = s->plane[plane].band[1][1].width;
1170  highpass_stride = s->plane[plane].band[1][1].stride;
1171 
1172  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
1173  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width ||
1174  lowpass_width < 3 || lowpass_height < 3) {
1175  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1176  ret = AVERROR(EINVAL);
1177  goto end;
1178  }
1179 
1180  av_log(avctx, AV_LOG_DEBUG, "Level 2 lowpass plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1181 
1182  low = s->plane[plane].l_h[7];
1183  high = s->plane[plane].subband[5];
1184  output = s->plane[plane].l_h[3];
1185  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1186 
1187  low = s->plane[plane].subband[4];
1188  high = s->plane[plane].subband[6];
1189  output = s->plane[plane].l_h[4];
1190  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1191 
1192  low = s->plane[plane].l_h[3];
1193  high = s->plane[plane].l_h[4];
1194  output = s->plane[plane].l_h[7];
1195  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1196 
1197  output = s->plane[plane].l_h[7];
1198  for (int i = 0; i < lowpass_height * 2; i++) {
1199  for (int j = 0; j < lowpass_width * 2; j++)
1200  output[j] *= 4;
1201  output += output_stride * 2;
1202  }
1203 
1204  low = s->plane[plane].subband[7];
1205  high = s->plane[plane].subband[9];
1206  output = s->plane[plane].l_h[3];
1207  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1208 
1209  low = s->plane[plane].subband[8];
1210  high = s->plane[plane].subband[10];
1211  output = s->plane[plane].l_h[4];
1212  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1213 
1214  low = s->plane[plane].l_h[3];
1215  high = s->plane[plane].l_h[4];
1216  output = s->plane[plane].l_h[9];
1217  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1218 
1219  lowpass_height = s->plane[plane].band[4][1].height;
1220  output_stride = s->plane[plane].band[4][1].a_width;
1221  lowpass_width = s->plane[plane].band[4][1].width;
1222  highpass_stride = s->plane[plane].band[4][1].stride;
1223  av_log(avctx, AV_LOG_DEBUG, "temporal level %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1224 
1225  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1226  !highpass_stride || s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width ||
1227  lowpass_width < 3 || lowpass_height < 3) {
1228  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1229  ret = AVERROR(EINVAL);
1230  goto end;
1231  }
1232 
1233  low = s->plane[plane].l_h[7];
1234  high = s->plane[plane].l_h[9];
1235  output = s->plane[plane].l_h[7];
1236  for (int i = 0; i < lowpass_height; i++) {
1237  inverse_temporal_filter(low, high, lowpass_width);
1238  low += output_stride;
1239  high += output_stride;
1240  }
1241  if (s->progressive) {
1242  low = s->plane[plane].l_h[7];
1243  high = s->plane[plane].subband[15];
1244  output = s->plane[plane].l_h[6];
1245  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1246 
1247  low = s->plane[plane].subband[14];
1248  high = s->plane[plane].subband[16];
1249  output = s->plane[plane].l_h[7];
1250  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1251 
1252  low = s->plane[plane].l_h[9];
1253  high = s->plane[plane].subband[12];
1254  output = s->plane[plane].l_h[8];
1255  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1256 
1257  low = s->plane[plane].subband[11];
1258  high = s->plane[plane].subband[13];
1259  output = s->plane[plane].l_h[9];
1260  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1261 
1262  if (s->sample_type == 1)
1263  continue;
1264 
1265  dst = (int16_t *)pic->data[act_plane];
1266  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1267  if (plane & 1)
1268  dst++;
1269  if (plane > 1)
1270  dst += pic->linesize[act_plane] >> 1;
1271  }
1272 
1273  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1274  (lowpass_height * 2 > avctx->coded_height / 2 ||
1275  lowpass_width * 2 > avctx->coded_width / 2 )
1276  ) {
1278  goto end;
1279  }
1280 
1281  low = s->plane[plane].l_h[6];
1282  high = s->plane[plane].l_h[7];
1283  for (int i = 0; i < s->plane[act_plane].height; i++) {
1284  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1285  low += output_stride;
1286  high += output_stride;
1287  dst += dst_linesize;
1288  }
1289  } else {
1291  low = s->plane[plane].l_h[7];
1292  high = s->plane[plane].subband[14];
1293  output = s->plane[plane].l_h[6];
1294  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1295 
1296  low = s->plane[plane].subband[15];
1297  high = s->plane[plane].subband[16];
1298  output = s->plane[plane].l_h[7];
1299  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1300 
1301  low = s->plane[plane].l_h[9];
1302  high = s->plane[plane].subband[11];
1303  output = s->plane[plane].l_h[8];
1304  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1305 
1306  low = s->plane[plane].subband[12];
1307  high = s->plane[plane].subband[13];
1308  output = s->plane[plane].l_h[9];
1309  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1310 
1311  if (s->sample_type == 1)
1312  continue;
1313 
1314  dst = (int16_t *)pic->data[act_plane];
1315  low = s->plane[plane].l_h[6];
1316  high = s->plane[plane].l_h[7];
1317  for (int i = 0; i < s->plane[act_plane].height / 2; i++) {
1318  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1319  low += output_stride * 2;
1320  high += output_stride * 2;
1321  dst += pic->linesize[act_plane];
1322  }
1323  }
1324  }
1325  }
1326 
1327  if (s->transform_type == 2 && s->sample_type == 1) {
1328  int16_t *low, *high, *dst;
1329  int output_stride, lowpass_height, lowpass_width;
1330  ptrdiff_t dst_linesize;
1331 
1332  for (int plane = 0; plane < s->planes; plane++) {
1333  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1334 
1335  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1336  act_plane = 0;
1337  dst_linesize = pic->linesize[act_plane];
1338  } else {
1339  dst_linesize = pic->linesize[act_plane] / 2;
1340  }
1341 
1342  lowpass_height = s->plane[plane].band[4][1].height;
1343  output_stride = s->plane[plane].band[4][1].a_width;
1344  lowpass_width = s->plane[plane].band[4][1].width;
1345 
1346  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1347  s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width ||
1348  lowpass_width < 3 || lowpass_height < 3) {
1349  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1350  ret = AVERROR(EINVAL);
1351  goto end;
1352  }
1353 
1354  if (s->progressive) {
1355  dst = (int16_t *)pic->data[act_plane];
1356  low = s->plane[plane].l_h[8];
1357  high = s->plane[plane].l_h[9];
1358 
1359  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1360  if (plane & 1)
1361  dst++;
1362  if (plane > 1)
1363  dst += pic->linesize[act_plane] >> 1;
1364  }
1365 
1366  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1367  (lowpass_height * 2 > avctx->coded_height / 2 ||
1368  lowpass_width * 2 > avctx->coded_width / 2 )
1369  ) {
1371  goto end;
1372  }
1373 
1374  for (int i = 0; i < s->plane[act_plane].height; i++) {
1375  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1376  low += output_stride;
1377  high += output_stride;
1378  dst += dst_linesize;
1379  }
1380  } else {
1381  dst = (int16_t *)pic->data[act_plane];
1382  low = s->plane[plane].l_h[8];
1383  high = s->plane[plane].l_h[9];
1384  for (int i = 0; i < s->plane[act_plane].height / 2; i++) {
1385  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1386  low += output_stride * 2;
1387  high += output_stride * 2;
1388  dst += pic->linesize[act_plane];
1389  }
1390  }
1391  }
1392  }
1393 
1394  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
1395  process_bayer(pic, s->bpc);
1396 end:
1397  if (ret < 0)
1398  return ret;
1399 
1400  *got_frame = 1;
1401  return avpkt->size;
1402 }
1403 
1405 {
1406  CFHDContext *s = avctx->priv_data;
1407 
1408  free_buffers(s);
1409 
1410  return 0;
1411 }
1412 
1413 #if HAVE_THREADS
1415 {
1416  CFHDContext *psrc = src->priv_data;
1417  CFHDContext *pdst = dst->priv_data;
1418  int ret;
1419 
1420  if (dst == src || psrc->transform_type == 0)
1421  return 0;
1422 
1423  if (pdst->plane[0].idwt_size != psrc->plane[0].idwt_size ||
1424  pdst->a_format != psrc->a_format ||
1425  pdst->a_width != psrc->a_width ||
1426  pdst->a_height != psrc->a_height ||
1427  pdst->a_transform_type != psrc->a_transform_type)
1428  free_buffers(pdst);
1429 
1430  pdst->a_format = psrc->a_format;
1431  pdst->a_width = psrc->a_width;
1432  pdst->a_height = psrc->a_height;
1433  pdst->a_transform_type = psrc->a_transform_type;
1434  pdst->transform_type = psrc->transform_type;
1435  pdst->progressive = psrc->progressive;
1436  pdst->planes = psrc->planes;
1437 
1438  if (!pdst->plane[0].idwt_buf) {
1439  pdst->coded_width = pdst->a_width;
1440  pdst->coded_height = pdst->a_height;
1441  pdst->coded_format = pdst->a_format;
1442  pdst->transform_type = pdst->a_transform_type;
1443  ret = alloc_buffers(dst);
1444  if (ret < 0)
1445  return ret;
1446  }
1447 
1448  for (int plane = 0; plane < pdst->planes; plane++) {
1449  memcpy(pdst->plane[plane].band, psrc->plane[plane].band, sizeof(pdst->plane[plane].band));
1450  memcpy(pdst->plane[plane].idwt_buf, psrc->plane[plane].idwt_buf,
1451  pdst->plane[plane].idwt_size * sizeof(int16_t));
1452  }
1453 
1454  return 0;
1455 }
1456 #endif
1457 
1459  .p.name = "cfhd",
1460  CODEC_LONG_NAME("GoPro CineForm HD"),
1461  .p.type = AVMEDIA_TYPE_VIDEO,
1462  .p.id = AV_CODEC_ID_CFHD,
1463  .priv_data_size = sizeof(CFHDContext),
1464  .init = cfhd_init,
1465  .close = cfhd_close,
1468  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1469  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1470 };
ChannelNumber
@ ChannelNumber
Definition: cfhd.h:76
ChannelCount
@ ChannelCount
Definition: cfhd.h:40
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
BandSecondPass
@ BandSecondPass
Definition: cfhd.h:86
level
uint8_t level
Definition: svq3.c:208
Precision
@ Precision
Definition: cfhd.h:79
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
Peak::level
int level
Definition: cfhd.h:142
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
BandHeader
@ BandHeader
Definition: cfhd.h:74
PrescaleTable
@ PrescaleTable
Definition: cfhd.h:87
GetByteContext
Definition: bytestream.h:33
G2
#define G2(m)
Definition: itx_1d.c:64
CFHDContext::progressive
int progressive
Definition: cfhd.h:164
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
BandHeight
@ BandHeight
Definition: cfhd.h:69
ff_cfhd_decoder
const FFCodec ff_cfhd_decoder
Definition: cfhd.c:1458
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:226
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:254
SampleType
int32_t SampleType
Definition: ac3enc.h:65
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
pixdesc.h
AVFrame::width
int width
Definition: frame.h:499
CFHDDSPContext
Definition: cfhddsp.h:25
CFHDDSPContext::horiz_filter_clip
void(* horiz_filter_clip)(int16_t *output, const int16_t *low, const int16_t *high, int width, int bpc)
Definition: cfhddsp.h:36
internal.h
even
Tag MUST be even
Definition: snow.txt:206
AVPacket::data
uint8_t * data
Definition: packet.h:588
CFHDContext::a_format
int a_format
Definition: cfhd.h:168
b
#define b
Definition: input.c:42
HighpassWidth
@ HighpassWidth
Definition: cfhd.h:61
data
const char data[16]
Definition: mxf.c:149
R
#define R
Definition: huffyuv.h:44
high
int high
Definition: dovi_rpuenc.c:39
FFCodec
Definition: codec_internal.h:127
ALPHA_COMPAND_DC_OFFSET
#define ALPHA_COMPAND_DC_OFFSET
Definition: cfhd.c:42
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:213
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:671
LowpassWidth
@ LowpassWidth
Definition: cfhd.h:52
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
cfhd_init
static av_cold int cfhd_init(AVCodecContext *avctx)
Definition: cfhd.c:45
difference_coding
static void difference_coding(int16_t *band, int width, int height)
Definition: cfhd.c:119
thread.h
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
ff_cfhd_init_vlcs
int ff_cfhd_init_vlcs(CFHDContext *s)
Definition: cfhddata.c:181
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
AVCodecInternal::is_copy
int is_copy
When using frame-threaded decoding, this field is set for the first worker thread (e....
Definition: internal.h:54
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
VersionMajor
@ VersionMajor
Definition: cfhd.h:34
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
init_peak_table_defaults
static void init_peak_table_defaults(CFHDContext *s)
Definition: cfhd.c:82
cfhd.h
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
GetBitContext
Definition: get_bits.h:109
DWT_LEVELS
#define DWT_LEVELS
Definition: cfhd.h:113
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
LowpassPrecision
@ LowpassPrecision
Definition: cfhd.h:56
Quantization
@ Quantization
Definition: cfhd.h:72
av_image_check_size2
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:289
BandEncoding
@ BandEncoding
Definition: cfhd.h:71
SubbandNumber
@ SubbandNumber
Definition: cfhd.h:67
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
peak_table
static void peak_table(int16_t *band, Peak *peak, int length)
Definition: cfhd.c:129
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
Plane::idwt_size
int idwt_size
Definition: cfhd.h:132
process_alpha
static void process_alpha(int16_t *alpha, int width)
Definition: cfhd.c:136
AV_CODEC_ID_CFHD
@ AV_CODEC_ID_CFHD
Definition: codec_id.h:271
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:189
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
intreadwrite.h
Version
@ Version
Definition: cfhd.h:85
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:563
ALPHA_COMPAND_GAIN
#define ALPHA_COMPAND_GAIN
Definition: cfhd.c:43
TransformType
TransformType
Definition: webp.c:113
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
PeakOffsetHigh
@ PeakOffsetHigh
Definition: cfhd.h:84
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1043
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
VersionMinor
@ VersionMinor
Definition: cfhd.h:35
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
B
#define B
Definition: huffyuv.h:42
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1561
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
decode.h
get_bits.h
DisplayHeight
@ DisplayHeight
Definition: cfhd.h:89
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1790
process_bayer
static void process_bayer(AVFrame *frame, int bpc)
Definition: cfhd.c:149
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
FrameType
FrameType
G723.1 frame types.
Definition: g723_1.h:63
if
if(ret)
Definition: filter_design.txt:179
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
FrameIndex
@ FrameIndex
Definition: cfhd.h:49
InputFormat
@ InputFormat
Definition: cfhd.h:80
dequant_and_decompand
static int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
Definition: cfhd.c:111
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
CFHDContext::planes
int planes
Definition: cfhd.h:155
CFHDDSPContext::vert_filter
void(* vert_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:31
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
CFHDDSPContext::horiz_filter
void(* horiz_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:26
Peak
Definition: cfhd.h:141
abs
#define abs(x)
Definition: cuda_runtime.h:35
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:341
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:540
VLC_BITS
#define VLC_BITS
Definition: cfhd.h:103
HighpassHeight
@ HighpassHeight
Definition: cfhd.h:62
free_buffers
static void free_buffers(CFHDContext *s)
Definition: cfhd.c:215
SUBBAND_COUNT
#define SUBBAND_COUNT
Definition: cfhd.h:104
alloc_buffers
static int alloc_buffers(AVCodecContext *avctx)
Definition: cfhd.c:240
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
Peak::base
GetByteContext base
Definition: cfhd.h:144
AVPacket::size
int size
Definition: packet.h:589
VersionRevision
@ VersionRevision
Definition: cfhd.h:36
height
#define height
Definition: dsp.h:89
codec_internal.h
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
BitstreamMarker
@ BitstreamMarker
Definition: cfhd.h:33
cfhd_decode
static int cfhd_decode(AVCodecContext *avctx, AVFrame *pic, int *got_frame, AVPacket *avpkt)
Definition: cfhd.c:371
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
CFHDContext::a_width
int a_width
Definition: cfhd.h:166
CFHDContext::coded_height
int coded_height
Definition: cfhd.h:161
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
VersionEdit
@ VersionEdit
Definition: cfhd.h:37
CFHDContext::coded_format
enum AVPixelFormat coded_format
Definition: cfhd.h:163
CFHDContext::transform_type
int transform_type
Definition: cfhd.h:159
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
SubbandCount
@ SubbandCount
Definition: cfhd.h:42
CFHDContext::plane
Plane plane[4]
Definition: cfhd.h:186
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:559
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
CFHDContext
Definition: cfhd.h:147
common.h
SubbandBand
@ SubbandBand
Definition: cfhd.h:70
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
SUBBAND_COUNT_3D
#define SUBBAND_COUNT_3D
Definition: cfhd.h:105
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
ImageWidth
@ ImageWidth
Definition: cfhd.h:47
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
avcodec.h
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:605
tag
uint32_t tag
Definition: movenc.c:2032
ret
ret
Definition: filter_design.txt:187
CFHDContext::coded_width
int coded_width
Definition: cfhd.h:160
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
inverse_temporal_filter
static void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
Definition: cfhd.c:204
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
PeakLevel
@ PeakLevel
Definition: cfhd.h:82
BandCodingFlags
@ BandCodingFlags
Definition: cfhd.h:81
U
#define U(x)
Definition: vpx_arith.h:37
interlaced_vertical_filter
static void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high, int width, int linesize, int plane)
Definition: cfhd.c:193
EncodedFormat
@ EncodedFormat
Definition: cfhd.h:88
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVFrame::height
int height
Definition: frame.h:499
init_plane_defaults
static void init_plane_defaults(CFHDContext *s)
Definition: cfhd.c:75
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
CFHDContext::a_transform_type
int a_transform_type
Definition: cfhd.h:169
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. Use ff_thread_get_buffer()(or ff_progress_frame_get_buffer() in case you have inter-frame dependencies and use the ProgressFrame API) to allocate frame buffers. Call ff_progress_frame_report() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
ff_cfhddsp_init
av_cold void ff_cfhddsp_init(CFHDDSPContext *c, int depth, int bayer)
Definition: cfhddsp.c:105
ImageHeight
@ ImageHeight
Definition: cfhd.h:48
planes
static const struct @554 planes[]
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
Plane
Definition: cfhd.h:125
BandWidth
@ BandWidth
Definition: cfhd.h:68
factor
static const int factor[16]
Definition: vf_pp7.c:80
SampleFlags
@ SampleFlags
Definition: cfhd.h:77
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
Plane::idwt_buf
int16_t * idwt_buf
Definition: cfhd.h:130
DWT_LEVELS_3D
#define DWT_LEVELS_3D
Definition: cfhd.h:114
mem.h
Plane::band
SubBand band[DWT_LEVELS_3D][4]
Definition: cfhd.h:138
LowpassHeight
@ LowpassHeight
Definition: cfhd.h:53
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
CoefficientSegment
@ CoefficientSegment
Definition: cfhd.h:100
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
stride
#define stride
Definition: h264pred_template.c:536
width
#define width
Definition: dsp.h:89
AV_PIX_FMT_BAYER_RGGB16
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:572
SampleIndexTable
@ SampleIndexTable
Definition: cfhd.h:32
cfhd_close
static av_cold int cfhd_close(AVCodecContext *avctx)
Definition: cfhd.c:1404
src
#define src
Definition: vp8dsp.c:248
init_frame_defaults
static void init_frame_defaults(CFHDContext *s)
Definition: cfhd.c:89
channel
channel
Definition: ebur128.h:39
PeakOffsetLow
@ PeakOffsetLow
Definition: cfhd.h:83
codebook
static const unsigned codebook[256][2]
Definition: cfhdenc.c:41
CFHDContext::a_height
int a_height
Definition: cfhd.h:167