FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
cfhd.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Cineform HD video decoder
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/common.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/intreadwrite.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/pixdesc.h"
32 
33 #include "avcodec.h"
34 #include "bytestream.h"
35 #include "codec_internal.h"
36 #include "decode.h"
37 #include "get_bits.h"
38 #include "internal.h"
39 #include "thread.h"
40 #include "cfhd.h"
41 
42 #define ALPHA_COMPAND_DC_OFFSET 256
43 #define ALPHA_COMPAND_GAIN 9400
44 
45 static av_cold int cfhd_init(AVCodecContext *avctx)
46 {
47  CFHDContext *s = avctx->priv_data;
48 
49  s->avctx = avctx;
50 
51  for (int i = 0; i < 64; i++) {
52  int val = i;
53 
54  if (val >= 40) {
55  if (val >= 54) {
56  val -= 54;
57  val <<= 2;
58  val += 54;
59  }
60 
61  val -= 40;
62  val <<= 2;
63  val += 40;
64  }
65 
66  s->lut[0][i] = val;
67  }
68 
69  for (int i = 0; i < 256; i++)
70  s->lut[1][i] = i + ((768LL * i * i * i) / (256 * 256 * 256));
71 
72  return ff_cfhd_init_vlcs(s);
73 }
74 
{
    /* Reset the per-channel bitstream tracking state before a new channel
     * is parsed: current subband index, wavelet decomposition level, and
     * the "actual" subband number signalled in the stream.
     * NOTE(review): the function signature is outside this view
     * (presumably something like init_plane_defaults(CFHDContext *s)) —
     * confirm against the full file. */
    s->subband_num = 0;
    s->level = 0;
    s->subband_num_actual = 0;
}
81 
{
    /* Clear peak-table state: level 0 disables the magnitude substitution
     * in peak_table(), offset 0 means "no peak offset tag seen yet", and
     * the saved bytestream reader is zeroed so it cannot be used stale.
     * NOTE(review): the function signature is outside this view —
     * presumably init_peak_table(CFHDContext *s); confirm. */
    s->peak.level = 0;
    s->peak.offset = 0;
    memset(&s->peak.base, 0, sizeof(s->peak.base));
}
88 
{
    /* Restore the per-frame header defaults before parsing a new sample.
     * Values here are overwritten by the corresponding bitstream tags when
     * present; these are the fallbacks (classic 10-bit 4:2:2 stream).
     * NOTE(review): the function signature is outside this view, and the
     * embedded listing numbering jumps (106 -> 109) before the closing
     * brace, so one or two trailing statements (likely calls to the plane
     * and peak-table reset helpers) appear to be missing here — confirm
     * against the full file. */
    s->coded_width = 0;
    s->coded_height = 0;
    s->coded_format = AV_PIX_FMT_YUV422P10;
    s->cropped_height = 0;
    s->bpc = 10;                       /* bits per channel */
    s->channel_cnt = 3;
    s->subband_cnt = SUBBAND_COUNT;
    s->channel_num = 0;
    s->lowpass_precision = 16;
    s->quantisation = 1;
    s->codebook = 0;
    s->difference_coding = 0;
    s->frame_type = 0;
    s->sample_type = 0;
    /* transform_type 2 (3D transform) persists across samples of a group;
     * anything else is reset to -1 = "not yet signalled". */
    if (s->transform_type != 2)
        s->transform_type = -1;
}
110 
111 static inline int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
112 {
113  if (codebook == 0 || codebook == 1) {
114  return s->lut[codebook][abs(level)] * FFSIGN(level) * quantisation;
115  } else
116  return level * quantisation;
117 }
118 
/**
 * Undo horizontal differential coding in place.
 *
 * Each row of the band stores deltas relative to the previous sample;
 * convert them back to absolute values with a running prefix sum per row.
 */
static inline void difference_coding(int16_t *band, int width, int height)
{
    for (int row = 0; row < height; row++) {
        int16_t *line = band + (ptrdiff_t)row * width;

        for (int col = 1; col < width; col++)
            line[col] += line[col - 1];
    }
}
128 
129 static inline void peak_table(int16_t *band, Peak *peak, int length)
130 {
131  for (int i = 0; i < length; i++)
132  if (abs(band[i]) > peak->level)
133  band[i] = bytestream2_get_le16(&peak->base);
134 }
135 
/* Fallback definitions keep this unit self-contained if the constants at
 * the top of the file are unavailable; identical values, no redefinition. */
#ifndef ALPHA_COMPAND_DC_OFFSET
#define ALPHA_COMPAND_DC_OFFSET 256
#define ALPHA_COMPAND_GAIN 9400
#endif

/**
 * Restore one row of alpha samples from companded form, in place.
 *
 * The listing as scraped only shifted the value left 3 then right 16,
 * which collapses almost every sample to zero and leaves the
 * ALPHA_COMPAND_* constants unused — the DC-offset subtraction, gain
 * multiply and 12-bit clip were clearly lost.  Restored here:
 * out = clip12(((in - 256) * 8 * 9400) >> 16), computed in 64-bit so no
 * signed overflow/UB is possible for any int16_t input.
 *
 * @param alpha row of companded alpha coefficients, overwritten
 * @param width number of samples in the row
 */
static inline void process_alpha(int16_t *alpha, int width)
{
    for (int i = 0; i < width; i++) {
        int channel = alpha[i];
        /* "* 8" instead of "<< 3": well-defined for negative values */
        int64_t scaled = (int64_t)(channel - ALPHA_COMPAND_DC_OFFSET) * 8
                         * ALPHA_COMPAND_GAIN;

        channel = (int)(scaled >> 16);      /* 16.16 fixed point -> int */
        /* clip to unsigned 12 bits */
        if (channel < 0)
            channel = 0;
        else if (channel > 4095)
            channel = 4095;
        alpha[i] = channel;
    }
}
148 
/**
 * Convert decoded Bayer-domain coefficients back to raw RGGB samples,
 * in place, scaling from bpc-bit to 16-bit range.
 *
 * The stored quad is (g, rg, bg, gd): a green average, two mid-biased
 * colour differences and a green delta; this reconstructs R, G1, G2, B.
 */
static inline void process_bayer(AVFrame *frame, int bpc)
{
    const int linesize = frame->linesize[0];   /* in bytes */
    /* r/g1 point into the even row, g2/b into the odd row of each quad. */
    uint16_t *r = (uint16_t *)frame->data[0];
    uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
    uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
    uint16_t *b = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
    const int mid = 1 << (bpc - 1);       /* bias on the difference terms */
    const int factor = 1 << (16 - bpc);   /* scale bpc-bit values to 16 bits */

    /* Two source rows (one RGGB quad row) are handled per iteration. */
    for (int y = 0; y < frame->height >> 1; y++) {
        for (int x = 0; x < frame->width; x += 2) {
            int R, G1, G2, B;
            int g, rg, bg, gd;

            g = r[x];
            rg = g1[x];
            bg = g2[x];
            gd = b[x];
            gd -= mid;

            R = (rg - mid) * 2 + g;
            G1 = g + gd;
            G2 = g - gd;
            B = (bg - mid) * 2 + g;

            R = av_clip_uintp2(R * factor, 16);
            G1 = av_clip_uintp2(G1 * factor, 16);
            G2 = av_clip_uintp2(G2 * factor, 16);
            B = av_clip_uintp2(B * factor, 16);

            r[x] = R;
            g1[x] = G1;
            g2[x] = G2;
            b[x] = B;
        }

        /* NOTE: linesize is in BYTES but these are uint16_t pointers, so
         * "+= linesize" advances 2*linesize bytes = two pixel rows, which
         * matches the height/2 loop above.  Deliberate, do not "fix". */
        r += linesize;
        g1 += linesize;
        g2 += linesize;
        b += linesize;
    }
}
192 
/**
 * Reconstruct two interlaced output rows from one low/high band pair:
 * top field = (low - high) / 2, bottom field = (low + high) / 2, both
 * clipped to unsigned 10 bits.  @p plane is unused (kept for signature
 * compatibility with callers).
 */
static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
                                              int width, int linesize, int plane)
{
    for (int x = 0; x < width; x++) {
        /* int16_t intermediates on purpose: matches the truncation the
         * rest of the pipeline expects before clipping. */
        int16_t top = (low[x] - high[x]) / 2;
        int16_t bottom = (low[x] + high[x]) / 2;

        output[x] = av_clip_uintp2(top, 10);
        output[x + linesize] = av_clip_uintp2(bottom, 10);
    }
}
203 
/**
 * Invert the temporal transform in place: after the call the low band
 * holds the first (even) frame's samples and the high band the second
 * (odd) frame's samples.
 */
static inline void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
{
    for (int x = 0; x < width; x++) {
        const int diff = low[x] - high[x];
        const int sum = low[x] + high[x];

        low[x] = diff / 2;
        high[x] = sum / 2;
    }
}
214 
{
    /* Release all per-plane IDWT buffers and reset derived state so the
     * next frame forces a fresh alloc_buffers() pass.
     * NOTE(review): the function signature is outside this view —
     * presumably static void free_buffers(CFHDContext *s); confirm. */
    for (size_t i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
        Plane *p = &s->plane[i];

        av_freep(&s->plane[i].idwt_buf);
        av_freep(&s->plane[i].idwt_tmp);
        s->plane[i].idwt_size = 0;

        /* subband[] and l_h[] alias into idwt_buf/idwt_tmp and are now
         * dangling — clear every entry. */
        for (int j = 0; j < SUBBAND_COUNT_3D; j++)
            s->plane[i].subband[j] = NULL;

        for (int j = 0; j < 10; j++)
            s->plane[i].l_h[j] = NULL;

        /* Mark every band un-read so stale coefficients cannot be used
         * after a reallocation. */
        for (int j = 0; j < DWT_LEVELS_3D; j++)
            p->band[j][0].read_ok =
            p->band[j][1].read_ok =
            p->band[j][2].read_ok =
            p->band[j][3].read_ok = 0;
    }
    /* Invalidate the cached ("a_" = allocated) geometry. */
    s->a_height = 0;
    s->a_width = 0;
    s->a_transform_type = INT_MIN;
}
239 
/**
 * Allocate the per-plane IDWT coefficient and scratch buffers for the
 * current coded geometry/format, and lay out the subband and intermediate
 * (l_h) pointers inside them.
 *
 * The layout depends on the transform type: type 0 (2D) uses subbands
 * 0..9 in one buffer; other types (3D) double the buffer and place a
 * second frame's subbands (7..13) after the first, with 14..16 aliasing
 * the first frame's level-1 region.
 *
 * On success caches the allocated geometry in s->a_* so cfhd_decode()
 * can detect when a reallocation is needed.
 *
 * @return 0 on success, negative AVERROR on failure (ENOMEM or invalid
 *         dimensions).  Caller is expected to free_buffers() on failure.
 */
static int alloc_buffers(AVCodecContext *avctx)
{
    CFHDContext *s = avctx->priv_data;
    int ret, planes, bayer = 0;
    int chroma_x_shift, chroma_y_shift;

    if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
        return ret;
    avctx->pix_fmt = s->coded_format;

    ff_cfhddsp_init(&s->dsp, s->bpc, avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);

    if ((ret = av_pix_fmt_get_chroma_sub_sample(s->coded_format,
                                                &chroma_x_shift,
                                                &chroma_y_shift)) < 0)
        return ret;
    planes = av_pix_fmt_count_planes(s->coded_format);
    /* Bayer is coded as 4 half-resolution component planes (R, G1, G2, B)
     * even though the pixel format reports a single packed plane. */
    if (s->coded_format == AV_PIX_FMT_BAYER_RGGB16) {
        planes = 4;
        chroma_x_shift = 1;
        chroma_y_shift = 1;
        bayer = 1;
    }

    for (int i = 0; i < planes; i++) {
        int w8, h8, w4, h4, w2, h2;
        int width = (i || bayer) ? s->coded_width >> chroma_x_shift : s->coded_width;
        int height = (i || bayer) ? s->coded_height >> chroma_y_shift : s->coded_height;
        /* Stride of the full-resolution plane, built from the level-3
         * (1/8) width rounded up; the +64 padding presumably guards
         * against overreads in the DSP routines — TODO confirm. */
        ptrdiff_t stride = (FFALIGN(width / 8, 8) + 64) * 8;

        if ((ret = av_image_check_size2(stride, height, avctx->max_pixels, s->coded_format, 0, avctx)) < 0)
            return ret;

        if (chroma_y_shift && !bayer)
            height = FFALIGN(height / 8, 2) * 8;
        s->plane[i].width = width;
        s->plane[i].height = height;
        s->plane[i].stride = stride;

        /* Band geometry per decomposition level: w8/h8 = 1/8 resolution
         * (level 3), w4/h4 = 1/4 (level 2), w2/h2 = 1/2 (level 1). */
        w8 = FFALIGN(s->plane[i].width / 8, 8) + 64;
        h8 = FFALIGN(height, 8) / 8;
        w4 = w8 * 2;
        h4 = h8 * 2;
        w2 = w4 * 2;
        h2 = h4 * 2;

        if (s->transform_type == 0) {
            s->plane[i].idwt_size = FFALIGN(height, 8) * stride;
            s->plane[i].idwt_buf =
                av_calloc(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
            s->plane[i].idwt_tmp =
                av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
        } else {
            /* 3D transform carries two frames worth of coefficients. */
            s->plane[i].idwt_size = FFALIGN(height, 8) * stride * 2;
            s->plane[i].idwt_buf =
                av_calloc(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_buf));
            s->plane[i].idwt_tmp =
                av_malloc_array(s->plane[i].idwt_size, sizeof(*s->plane[i].idwt_tmp));
        }

        if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
            return AVERROR(ENOMEM);

        /* Subband layout inside idwt_buf.  Per level the order in memory
         * is LL, HL(1), LH(2), HH(3); the LL of each finer level reuses
         * the concatenation of the coarser level's four bands. */
        s->plane[i].subband[0] = s->plane[i].idwt_buf;
        s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
        s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
        s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
        s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
        s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
        s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
        if (s->transform_type == 0) {
            s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
            s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
            s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;
        } else {
            /* Second frame's region starts at 4*w2*h2 (the full first
             * frame); note the chained assignment also sets subband[7]. */
            int16_t *frame2 =
            s->plane[i].subband[7] = s->plane[i].idwt_buf + 4 * w2 * h2;
            s->plane[i].subband[8] = frame2 + 2 * w4 * h4;
            s->plane[i].subband[9] = frame2 + 1 * w4 * h4;
            s->plane[i].subband[10] = frame2 + 3 * w4 * h4;
            s->plane[i].subband[11] = frame2 + 2 * w2 * h2;
            s->plane[i].subband[12] = frame2 + 1 * w2 * h2;
            s->plane[i].subband[13] = frame2 + 3 * w2 * h2;
            /* 14..16 alias the first frame's level-1 high bands. */
            s->plane[i].subband[14] = s->plane[i].idwt_buf + 2 * w2 * h2;
            s->plane[i].subband[15] = s->plane[i].idwt_buf + 1 * w2 * h2;
            s->plane[i].subband[16] = s->plane[i].idwt_buf + 3 * w2 * h2;
        }

        if (s->transform_type == 0) {
            /* 2D: each of the 3 levels doubles the band geometry. */
            for (int j = 0; j < DWT_LEVELS; j++) {
                for (unsigned k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
                    s->plane[i].band[j][k].a_width = w8 << j;
                    s->plane[i].band[j][k].a_height = h8 << j;
                }
            }
        } else {
            /* 3D: levels 0 / 1-2 / 3+ map to spatial scales 0 / 1 / 2. */
            for (int j = 0; j < DWT_LEVELS_3D; j++) {
                int t = j < 1 ? 0 : (j < 3 ? 1 : 2);

                for (unsigned k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
                    s->plane[i].band[j][k].a_width = w8 << t;
                    s->plane[i].band[j][k].a_height = h8 << t;
                }
            }
        }

        /* Intermediate low/high buffers per level, carved from idwt_tmp.
         * ll2 and ll1 commented out because they are done in-place. */
        s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
        // s->plane[i].l_h[2] = ll2;
        s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
        // s->plane[i].l_h[5] = ll1;
        s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
        if (s->transform_type != 0) {
            int16_t *frame2 = s->plane[i].idwt_tmp + 4 * w2 * h2;

            s->plane[i].l_h[8] = frame2;
            s->plane[i].l_h[9] = frame2 + 2 * w2 * h2;
        }
    }

    /* Cache allocated geometry so cfhd_decode() can detect changes. */
    s->a_transform_type = s->transform_type;
    s->a_height = s->coded_height;
    s->a_width = s->coded_width;
    s->a_format = s->coded_format;

    return 0;
}
370 
371 static int cfhd_decode(AVCodecContext *avctx, AVFrame *pic,
372  int *got_frame, AVPacket *avpkt)
373 {
374  CFHDContext *s = avctx->priv_data;
375  CFHDDSPContext *dsp = &s->dsp;
376  GetByteContext gb;
377  int ret = 0, got_buffer = 0;
378 
380  s->planes = av_pix_fmt_count_planes(s->coded_format);
381 
382  bytestream2_init(&gb, avpkt->data, avpkt->size);
383 
384  while (bytestream2_get_bytes_left(&gb) >= 4) {
385  /* Bit weird but implement the tag parsing as the spec says */
386  uint16_t tagu = bytestream2_get_be16(&gb);
387  int16_t tag = (int16_t)tagu;
388  int8_t tag8 = (int8_t)(tagu >> 8);
389  uint16_t abstag = abs(tag);
390  int8_t abs_tag8 = abs(tag8);
391  uint16_t data = bytestream2_get_be16(&gb);
392  int16_t *coeff_data;
393 
394  if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
395  av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
396  } else if (tag == SampleFlags) {
397  av_log(avctx, AV_LOG_DEBUG, "Progressive? %"PRIu16"\n", data);
398  s->progressive = data & 0x0001;
399  } else if (tag == FrameType) {
400  s->frame_type = data;
401  av_log(avctx, AV_LOG_DEBUG, "Frame type %"PRIu16"\n", data);
402  } else if (abstag == VersionMajor) {
403  av_log(avctx, AV_LOG_DEBUG, "Version major %"PRIu16"\n", data);
404  } else if (abstag == VersionMinor) {
405  av_log(avctx, AV_LOG_DEBUG, "Version minor %"PRIu16"\n", data);
406  } else if (abstag == VersionRevision) {
407  av_log(avctx, AV_LOG_DEBUG, "Version revision %"PRIu16"\n", data);
408  } else if (abstag == VersionEdit) {
409  av_log(avctx, AV_LOG_DEBUG, "Version edit %"PRIu16"\n", data);
410  } else if (abstag == Version) {
411  av_log(avctx, AV_LOG_DEBUG, "Version %"PRIu16"\n", data);
412  } else if (tag == ImageWidth) {
413  av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
414  s->coded_width = data;
415  } else if (tag == ImageHeight) {
416  av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
417  s->coded_height = data;
418  } else if (tag == ChannelCount) {
419  av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
420  s->channel_cnt = data;
421  if (data > 4) {
422  av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
424  goto end;
425  }
426  } else if (tag == SubbandCount) {
427  av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
428  if (data != SUBBAND_COUNT && data != SUBBAND_COUNT_3D) {
429  av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
431  goto end;
432  }
433  } else if (tag == ChannelNumber) {
434  s->channel_num = data;
435  av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
436  if (s->channel_num >= s->planes) {
437  av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
438  ret = AVERROR(EINVAL);
439  goto end;
440  }
442  } else if (tag == SubbandNumber) {
443  if (s->subband_num != 0 && data == 1 && (s->transform_type == 0 || s->transform_type == 2)) // hack
444  s->level++;
445  av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
446  s->subband_num = data;
447  if ((s->transform_type == 0 && s->level >= DWT_LEVELS) ||
448  (s->transform_type == 2 && s->level >= DWT_LEVELS_3D)) {
449  av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
450  ret = AVERROR(EINVAL);
451  goto end;
452  }
453  if (s->subband_num > 3) {
454  av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
455  ret = AVERROR(EINVAL);
456  goto end;
457  }
458  } else if (tag == SubbandBand) {
459  av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
460  if ((s->transform_type == 0 && data >= SUBBAND_COUNT) ||
461  (s->transform_type == 2 && data >= SUBBAND_COUNT_3D && data != 255)) {
462  av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
463  ret = AVERROR(EINVAL);
464  goto end;
465  }
466  if (s->transform_type == 0 || s->transform_type == 2)
467  s->subband_num_actual = data;
468  else
469  av_log(avctx, AV_LOG_WARNING, "Ignoring subband num actual %"PRIu16"\n", data);
470  } else if (tag == LowpassPrecision)
471  av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
472  else if (tag == Quantization) {
473  s->quantisation = data;
474  av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
475  } else if (tag == PrescaleTable) {
476  for (int i = 0; i < 8; i++)
477  s->prescale_table[i] = (data >> (14 - i * 2)) & 0x3;
478  av_log(avctx, AV_LOG_DEBUG, "Prescale table: %x\n", data);
479  } else if (tag == BandEncoding) {
480  if (!data || data > 5) {
481  av_log(avctx, AV_LOG_ERROR, "Invalid band encoding\n");
482  ret = AVERROR(EINVAL);
483  goto end;
484  }
485  s->band_encoding = data;
486  av_log(avctx, AV_LOG_DEBUG, "Encode Method for Subband %d : %x\n", s->subband_num_actual, data);
487  } else if (tag == LowpassWidth) {
488  av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
489  s->plane[s->channel_num].band[0][0].width = data;
490  s->plane[s->channel_num].band[0][0].stride = data;
491  } else if (tag == LowpassHeight) {
492  av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
493  s->plane[s->channel_num].band[0][0].height = data;
494  } else if (tag == SampleType) {
495  s->sample_type = data;
496  av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
497  } else if (tag == TransformType) {
498  if (data > 2) {
499  av_log(avctx, AV_LOG_ERROR, "Invalid transform type\n");
500  ret = AVERROR(EINVAL);
501  goto end;
502  } else if (data == 1) {
503  av_log(avctx, AV_LOG_ERROR, "unsupported transform type\n");
505  goto end;
506  }
507  if (s->transform_type == -1) {
508  s->transform_type = data;
509  av_log(avctx, AV_LOG_DEBUG, "Transform type %"PRIu16"\n", data);
510  } else {
511  av_log(avctx, AV_LOG_DEBUG, "Ignoring additional transform type %"PRIu16"\n", data);
512  }
513  } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
514  if (abstag == 0x4001)
515  s->peak.level = 0;
516  av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
517  bytestream2_skipu(&gb, data * 4);
518  } else if (tag == FrameIndex) {
519  av_log(avctx, AV_LOG_DEBUG, "Frame index %"PRIu16"\n", data);
520  s->frame_index = data;
521  } else if (tag == SampleIndexTable) {
522  av_log(avctx, AV_LOG_DEBUG, "Sample index table - skipping %i values\n", data);
523  if (data > bytestream2_get_bytes_left(&gb) / 4) {
524  av_log(avctx, AV_LOG_ERROR, "too many values (%d)\n", data);
526  goto end;
527  }
528  for (int i = 0; i < data; i++) {
529  uint32_t offset = bytestream2_get_be32(&gb);
530  av_log(avctx, AV_LOG_DEBUG, "Offset = %"PRIu32"\n", offset);
531  }
532  } else if (tag == HighpassWidth) {
533  av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
534  if (data < 3) {
535  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
536  ret = AVERROR(EINVAL);
537  goto end;
538  }
539  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
540  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
541  } else if (tag == HighpassHeight) {
542  av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
543  if (data < 3) {
544  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
545  ret = AVERROR(EINVAL);
546  goto end;
547  }
548  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
549  } else if (tag == BandWidth) {
550  av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
551  if (data < 3) {
552  av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
553  ret = AVERROR(EINVAL);
554  goto end;
555  }
556  s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
557  s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
558  } else if (tag == BandHeight) {
559  av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
560  if (data < 3) {
561  av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
562  ret = AVERROR(EINVAL);
563  goto end;
564  }
565  s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
566  } else if (tag == InputFormat) {
567  av_log(avctx, AV_LOG_DEBUG, "Input format %i\n", data);
568  if (s->coded_format == AV_PIX_FMT_NONE ||
569  s->coded_format == AV_PIX_FMT_YUV422P10) {
570  if (data >= 100 && data <= 105) {
571  s->coded_format = AV_PIX_FMT_BAYER_RGGB16;
572  } else if (data >= 122 && data <= 128) {
573  s->coded_format = AV_PIX_FMT_GBRP12;
574  } else if (data == 30) {
575  s->coded_format = AV_PIX_FMT_GBRAP12;
576  } else {
577  s->coded_format = AV_PIX_FMT_YUV422P10;
578  }
579  s->planes = s->coded_format == AV_PIX_FMT_BAYER_RGGB16 ? 4 : av_pix_fmt_count_planes(s->coded_format);
580  }
581  } else if (tag == BandCodingFlags) {
582  s->codebook = data & 0xf;
583  s->difference_coding = (data >> 4) & 1;
584  av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
585  } else if (tag == Precision) {
586  av_log(avctx, AV_LOG_DEBUG, "Precision %i\n", data);
587  if (!(data == 10 || data == 12)) {
588  av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
589  ret = AVERROR(EINVAL);
590  goto end;
591  }
592  avctx->bits_per_raw_sample = s->bpc = data;
593  } else if (tag == EncodedFormat) {
594  av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
595  if (data == 1) {
596  s->coded_format = AV_PIX_FMT_YUV422P10;
597  } else if (data == 2) {
598  s->coded_format = AV_PIX_FMT_BAYER_RGGB16;
599  } else if (data == 3) {
600  s->coded_format = AV_PIX_FMT_GBRP12;
601  } else if (data == 4) {
602  s->coded_format = AV_PIX_FMT_GBRAP12;
603  } else {
604  avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
606  goto end;
607  }
608  s->planes = data == 2 ? 4 : av_pix_fmt_count_planes(s->coded_format);
609  } else if (tag == -DisplayHeight) {
610  av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
611  s->cropped_height = data;
612  } else if (tag == -PeakOffsetLow) {
613  s->peak.offset &= ~0xffff;
614  s->peak.offset |= (data & 0xffff);
615  s->peak.base = gb;
616  s->peak.level = 0;
617  } else if (tag == -PeakOffsetHigh) {
618  s->peak.offset &= 0xffff;
619  s->peak.offset |= (data & 0xffffU)<<16;
620  s->peak.base = gb;
621  s->peak.level = 0;
622  } else if (tag == -PeakLevel && s->peak.offset) {
623  s->peak.level = data;
624  if (s->peak.offset < 4 - bytestream2_tell(&s->peak.base) ||
625  s->peak.offset > 4 + bytestream2_get_bytes_left(&s->peak.base)
626  ) {
628  goto end;
629  }
630  bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
631  } else
632  av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);
633 
634  if (tag == BitstreamMarker && data == 0xf0f &&
635  s->coded_format != AV_PIX_FMT_NONE) {
636  int lowpass_height = s->plane[s->channel_num].band[0][0].height;
637  int lowpass_width = s->plane[s->channel_num].band[0][0].width;
638  int factor = s->coded_format == AV_PIX_FMT_BAYER_RGGB16 ? 2 : 1;
639 
640  if (s->coded_width) {
641  s->coded_width *= factor;
642  }
643 
644  if (s->coded_height) {
645  s->coded_height *= factor;
646  }
647 
648  if (!s->a_width && !s->coded_width) {
649  s->coded_width = lowpass_width * factor * 8;
650  }
651 
652  if (!s->a_height && !s->coded_height) {
653  s->coded_height = lowpass_height * factor * 8;
654  }
655 
656  if (s->a_width && !s->coded_width)
657  s->coded_width = s->a_width;
658  if (s->a_height && !s->coded_height)
659  s->coded_height = s->a_height;
660 
661  if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
662  s->a_format != s->coded_format ||
663  s->transform_type != s->a_transform_type) {
664  free_buffers(s);
665  if ((ret = alloc_buffers(avctx)) < 0) {
666  free_buffers(s);
667  return ret;
668  }
669  }
670  ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
671  if (ret < 0)
672  return ret;
673  if (s->cropped_height) {
674  unsigned height = s->cropped_height << (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);
675  if (avctx->height < height)
676  return AVERROR_INVALIDDATA;
677  avctx->height = height;
678  }
679  pic->width = pic->height = 0;
680 
681  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
682  return ret;
683 
684  s->coded_width = 0;
685  s->coded_height = 0;
686  s->coded_format = AV_PIX_FMT_NONE;
687  got_buffer = 1;
688  } else if (tag == FrameIndex && data == 1 && s->sample_type == 1 && s->frame_type == 2) {
689  pic->width = pic->height = 0;
690 
691  if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
692  return ret;
693  s->coded_width = 0;
694  s->coded_height = 0;
695  s->coded_format = AV_PIX_FMT_NONE;
696  got_buffer = 1;
697  }
698 
699  if (s->subband_num_actual == 255)
700  goto finish;
701  coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];
702 
703  /* Lowpass coefficients */
704  if (tag == BitstreamMarker && data == 0xf0f) {
705  int lowpass_height, lowpass_width, lowpass_a_height, lowpass_a_width;
706 
707  if (!s->a_width || !s->a_height) {
709  goto end;
710  }
711 
712  lowpass_height = s->plane[s->channel_num].band[0][0].height;
713  lowpass_width = s->plane[s->channel_num].band[0][0].width;
714  lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
715  lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
716 
717  if (lowpass_width < 3 ||
718  lowpass_width > lowpass_a_width) {
719  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
720  ret = AVERROR(EINVAL);
721  goto end;
722  }
723 
724  if (lowpass_height < 3 ||
725  lowpass_height > lowpass_a_height) {
726  av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
727  ret = AVERROR(EINVAL);
728  goto end;
729  }
730 
731  if (!got_buffer) {
732  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
733  ret = AVERROR(EINVAL);
734  goto end;
735  }
736 
737  if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
738  lowpass_width * lowpass_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
739  av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
740  ret = AVERROR(EINVAL);
741  goto end;
742  }
743 
744  av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
745  for (int i = 0; i < lowpass_height; i++) {
746  for (int j = 0; j < lowpass_width; j++)
747  coeff_data[j] = bytestream2_get_be16u(&gb);
748 
749  coeff_data += lowpass_width;
750  }
751 
752  /* Align to mod-4 position to continue reading tags */
753  bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
754 
755  /* Copy last line of coefficients if odd height */
756  if (lowpass_height & 1) {
757  memcpy(&coeff_data[lowpass_height * lowpass_width],
758  &coeff_data[(lowpass_height - 1) * lowpass_width],
759  lowpass_width * sizeof(*coeff_data));
760  }
761 
762  s->plane[s->channel_num].band[0][0].read_ok = 1;
763 
764  av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
765  }
766 
767  av_assert0(s->subband_num_actual != 255);
768  if (tag == BandHeader || tag == BandSecondPass) {
769  int highpass_height, highpass_width, highpass_a_width, highpass_a_height, highpass_stride, a_expected;
770  int expected;
771  GetBitContext gbit;
772  int count = 0, bytes;
773 
774  if (!s->a_width || !s->a_height) {
776  goto end;
777  }
778 
779  highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
780  highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
781  highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
782  highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
783  highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
784  a_expected = highpass_a_height * highpass_a_width;
785 
786  if (!got_buffer) {
787  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
788  ret = AVERROR(EINVAL);
789  goto end;
790  }
791 
792  if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
793  av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
794  ret = AVERROR(EINVAL);
795  goto end;
796  }
797  expected = highpass_height * highpass_stride;
798 
799  av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
800 
802  if (ret < 0)
803  goto end;
804  {
805  OPEN_READER(re, &gbit);
806 
807  const int lossless = s->band_encoding == 5;
808 
809  if (s->codebook == 0 && s->transform_type == 2 && s->subband_num_actual == 7)
810  s->codebook = 1;
811  if (!s->codebook) {
812  while (1) {
813  int level, run, coeff;
814 
815  UPDATE_CACHE(re, &gbit);
816  GET_RL_VLC(level, run, re, &gbit, s->table_9_rl_vlc,
817  VLC_BITS, 3, 1);
818 
819  /* escape */
820  if (!run)
821  break;
822 
823  count += run;
824 
825  if (count > expected)
826  break;
827 
828  if (!lossless)
829  coeff = dequant_and_decompand(s, level, s->quantisation, 0);
830  else
831  coeff = level;
832  if (tag == BandSecondPass) {
833  const uint16_t q = s->quantisation;
834 
835  for (int i = 0; i < run; i++) {
836  *coeff_data |= coeff * 256U;
837  *coeff_data++ *= q;
838  }
839  } else {
840  for (int i = 0; i < run; i++)
841  *coeff_data++ = coeff;
842  }
843  }
844  } else {
845  while (1) {
846  int level, run, coeff;
847 
848  UPDATE_CACHE(re, &gbit);
849  GET_RL_VLC(level, run, re, &gbit, s->table_18_rl_vlc,
850  VLC_BITS, 3, 1);
851 
852  /* escape */
853  if (!run)
854  break;
855 
856  count += run;
857 
858  if (count > expected)
859  break;
860 
861  if (!lossless)
862  coeff = dequant_and_decompand(s, level, s->quantisation, s->codebook);
863  else
864  coeff = level;
865  if (tag == BandSecondPass) {
866  const uint16_t q = s->quantisation;
867 
868  for (int i = 0; i < run; i++) {
869  *coeff_data |= coeff * 256U;
870  *coeff_data++ *= q;
871  }
872  } else {
873  for (int i = 0; i < run; i++)
874  *coeff_data++ = coeff;
875  }
876  }
877  }
878  CLOSE_READER(re, &gbit);
879  }
880 
881  if (count > expected) {
882  av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
883  ret = AVERROR(EINVAL);
884  goto end;
885  }
886  if (s->peak.level)
887  peak_table(coeff_data - count, &s->peak, count);
888  if (s->difference_coding)
889  difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);
890 
891  bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&gbit), 3), 4);
892  if (bytes > bytestream2_get_bytes_left(&gb)) {
893  av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
894  ret = AVERROR(EINVAL);
895  goto end;
896  } else
897  bytestream2_seek(&gb, bytes, SEEK_CUR);
898 
899  av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
900  s->plane[s->channel_num].band[s->level][s->subband_num].read_ok = 1;
901 finish:
902  if (s->subband_num_actual != 255)
903  s->codebook = 0;
904  }
905  }
906 
907  s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);
908  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
909  s->progressive = 1;
910  s->planes = 4;
911  }
912 
913  ff_thread_finish_setup(avctx);
914 
915  if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
916  s->a_transform_type == INT_MIN ||
917  s->coded_width || s->coded_height || s->coded_format != AV_PIX_FMT_NONE) {
918  av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
919  ret = AVERROR(EINVAL);
920  goto end;
921  }
922 
923  if (!got_buffer) {
924  av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
925  ret = AVERROR(EINVAL);
926  goto end;
927  }
928 
929  for (int plane = 0; plane < s->planes; plane++) {
930  for (int level = 0; level < (s->transform_type == 0 ? DWT_LEVELS : DWT_LEVELS_3D) ; level++) {
931  if (s->transform_type == 2)
932  if (level == 2 || level == 5)
933  continue;
934  for (int o = !!level; o < 4 ; o++) {
935  if (!s->plane[plane].band[level][o].read_ok) {
937  goto end;
938  }
939  }
940  }
941  }
942 
943  if (s->transform_type == 0 && s->sample_type != 1) {
944  for (int plane = 0; plane < s->planes && !ret; plane++) {
945  /* level 1 */
946  int lowpass_height = s->plane[plane].band[0][0].height;
947  int output_stride = s->plane[plane].band[0][0].a_width;
948  int lowpass_width = s->plane[plane].band[0][0].width;
949  int highpass_stride = s->plane[plane].band[0][1].stride;
950  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
951  ptrdiff_t dst_linesize;
952  int16_t *low, *high, *output, *dst;
953 
954  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
955  act_plane = 0;
956  dst_linesize = pic->linesize[act_plane];
957  } else {
958  dst_linesize = pic->linesize[act_plane] / 2;
959  }
960 
961  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
962  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width ||
963  lowpass_width < 3 || lowpass_height < 3) {
964  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
965  ret = AVERROR(EINVAL);
966  goto end;
967  }
968 
969  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
970 
971  low = s->plane[plane].subband[0];
972  high = s->plane[plane].subband[2];
973  output = s->plane[plane].l_h[0];
974  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
975 
976  low = s->plane[plane].subband[1];
977  high = s->plane[plane].subband[3];
978  output = s->plane[plane].l_h[1];
979 
980  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
981 
982  low = s->plane[plane].l_h[0];
983  high = s->plane[plane].l_h[1];
984  output = s->plane[plane].subband[0];
985  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
986  if (s->bpc == 12) {
987  output = s->plane[plane].subband[0];
988  for (int i = 0; i < lowpass_height * 2; i++) {
989  for (int j = 0; j < lowpass_width * 2; j++)
990  output[j] *= 4;
991 
992  output += output_stride * 2;
993  }
994  }
995 
996  /* level 2 */
997  lowpass_height = s->plane[plane].band[1][1].height;
998  output_stride = s->plane[plane].band[1][1].a_width;
999  lowpass_width = s->plane[plane].band[1][1].width;
1000  highpass_stride = s->plane[plane].band[1][1].stride;
1001 
1002  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
1003  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width ||
1004  lowpass_width < 3 || lowpass_height < 3) {
1005  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1006  ret = AVERROR(EINVAL);
1007  goto end;
1008  }
1009 
1010  av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1011 
1012  low = s->plane[plane].subband[0];
1013  high = s->plane[plane].subband[5];
1014  output = s->plane[plane].l_h[3];
1015  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1016 
1017  low = s->plane[plane].subband[4];
1018  high = s->plane[plane].subband[6];
1019  output = s->plane[plane].l_h[4];
1020  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1021 
1022  low = s->plane[plane].l_h[3];
1023  high = s->plane[plane].l_h[4];
1024  output = s->plane[plane].subband[0];
1025  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1026 
1027  output = s->plane[plane].subband[0];
1028  for (int i = 0; i < lowpass_height * 2; i++) {
1029  for (int j = 0; j < lowpass_width * 2; j++)
1030  output[j] *= 4;
1031 
1032  output += output_stride * 2;
1033  }
1034 
1035  /* level 3 */
1036  lowpass_height = s->plane[plane].band[2][1].height;
1037  output_stride = s->plane[plane].band[2][1].a_width;
1038  lowpass_width = s->plane[plane].band[2][1].width;
1039  highpass_stride = s->plane[plane].band[2][1].stride;
1040 
1041  if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
1042  !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width ||
1043  lowpass_height < 3 || lowpass_width < 3 || lowpass_width * 2 > s->plane[plane].width) {
1044  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1045  ret = AVERROR(EINVAL);
1046  goto end;
1047  }
1048 
1049  av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1050  if (s->progressive) {
1051  low = s->plane[plane].subband[0];
1052  high = s->plane[plane].subband[8];
1053  output = s->plane[plane].l_h[6];
1054  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1055 
1056  low = s->plane[plane].subband[7];
1057  high = s->plane[plane].subband[9];
1058  output = s->plane[plane].l_h[7];
1059  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1060 
1061  dst = (int16_t *)pic->data[act_plane];
1062  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1063  if (plane & 1)
1064  dst++;
1065  if (plane > 1)
1066  dst += pic->linesize[act_plane] >> 1;
1067  }
1068  low = s->plane[plane].l_h[6];
1069  high = s->plane[plane].l_h[7];
1070 
1071  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1072  (lowpass_height * 2 > avctx->coded_height / 2 ||
1073  lowpass_width * 2 > avctx->coded_width / 2 )
1074  ) {
1076  goto end;
1077  }
1078 
1079  for (int i = 0; i < s->plane[act_plane].height; i++) {
1080  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1081  if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
1082  process_alpha(dst, lowpass_width * 2);
1083  low += output_stride;
1084  high += output_stride;
1085  dst += dst_linesize;
1086  }
1087  } else {
1088  av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", !!(pic->flags & AV_FRAME_FLAG_INTERLACED));
1090  low = s->plane[plane].subband[0];
1091  high = s->plane[plane].subband[7];
1092  output = s->plane[plane].l_h[6];
1093  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1094 
1095  low = s->plane[plane].subband[8];
1096  high = s->plane[plane].subband[9];
1097  output = s->plane[plane].l_h[7];
1098  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1099 
1100  dst = (int16_t *)pic->data[act_plane];
1101  low = s->plane[plane].l_h[6];
1102  high = s->plane[plane].l_h[7];
1103  for (int i = 0; i < s->plane[act_plane].height / 2; i++) {
1104  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1105  low += output_stride * 2;
1106  high += output_stride * 2;
1107  dst += pic->linesize[act_plane];
1108  }
1109  }
1110  }
1111  } else if (s->transform_type == 2 && (avctx->internal->is_copy || s->frame_index == 1 || s->sample_type != 1)) {
1112  for (int plane = 0; plane < s->planes && !ret; plane++) {
1113  int lowpass_height = s->plane[plane].band[0][0].height;
1114  int output_stride = s->plane[plane].band[0][0].a_width;
1115  int lowpass_width = s->plane[plane].band[0][0].width;
1116  int highpass_stride = s->plane[plane].band[0][1].stride;
1117  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1118  int16_t *low, *high, *output, *dst;
1119  ptrdiff_t dst_linesize;
1120 
1121  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1122  act_plane = 0;
1123  dst_linesize = pic->linesize[act_plane];
1124  } else {
1125  dst_linesize = pic->linesize[act_plane] / 2;
1126  }
1127 
1128  if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
1129  !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width ||
1130  lowpass_width < 3 || lowpass_height < 3) {
1131  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1132  ret = AVERROR(EINVAL);
1133  goto end;
1134  }
1135 
1136  av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1137 
1138  low = s->plane[plane].subband[0];
1139  high = s->plane[plane].subband[2];
1140  output = s->plane[plane].l_h[0];
1141  dsp->vert_filter(output, output_stride, low, lowpass_width, high, highpass_stride, lowpass_width, lowpass_height);
1142 
1143  low = s->plane[plane].subband[1];
1144  high = s->plane[plane].subband[3];
1145  output = s->plane[plane].l_h[1];
1146  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1147 
1148  low = s->plane[plane].l_h[0];
1149  high = s->plane[plane].l_h[1];
1150  output = s->plane[plane].l_h[7];
1151  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1152  if (s->bpc == 12) {
1153  output = s->plane[plane].l_h[7];
1154  for (int i = 0; i < lowpass_height * 2; i++) {
1155  for (int j = 0; j < lowpass_width * 2; j++)
1156  output[j] *= 4;
1157 
1158  output += output_stride * 2;
1159  }
1160  }
1161 
1162  lowpass_height = s->plane[plane].band[1][1].height;
1163  output_stride = s->plane[plane].band[1][1].a_width;
1164  lowpass_width = s->plane[plane].band[1][1].width;
1165  highpass_stride = s->plane[plane].band[1][1].stride;
1166 
1167  if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
1168  !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width ||
1169  lowpass_width < 3 || lowpass_height < 3) {
1170  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1171  ret = AVERROR(EINVAL);
1172  goto end;
1173  }
1174 
1175  av_log(avctx, AV_LOG_DEBUG, "Level 2 lowpass plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1176 
1177  low = s->plane[plane].l_h[7];
1178  high = s->plane[plane].subband[5];
1179  output = s->plane[plane].l_h[3];
1180  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1181 
1182  low = s->plane[plane].subband[4];
1183  high = s->plane[plane].subband[6];
1184  output = s->plane[plane].l_h[4];
1185  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1186 
1187  low = s->plane[plane].l_h[3];
1188  high = s->plane[plane].l_h[4];
1189  output = s->plane[plane].l_h[7];
1190  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1191 
1192  output = s->plane[plane].l_h[7];
1193  for (int i = 0; i < lowpass_height * 2; i++) {
1194  for (int j = 0; j < lowpass_width * 2; j++)
1195  output[j] *= 4;
1196  output += output_stride * 2;
1197  }
1198 
1199  low = s->plane[plane].subband[7];
1200  high = s->plane[plane].subband[9];
1201  output = s->plane[plane].l_h[3];
1202  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1203 
1204  low = s->plane[plane].subband[8];
1205  high = s->plane[plane].subband[10];
1206  output = s->plane[plane].l_h[4];
1207  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1208 
1209  low = s->plane[plane].l_h[3];
1210  high = s->plane[plane].l_h[4];
1211  output = s->plane[plane].l_h[9];
1212  dsp->horiz_filter(output, output_stride, low, output_stride, high, output_stride, lowpass_width, lowpass_height * 2);
1213 
1214  lowpass_height = s->plane[plane].band[4][1].height;
1215  output_stride = s->plane[plane].band[4][1].a_width;
1216  lowpass_width = s->plane[plane].band[4][1].width;
1217  highpass_stride = s->plane[plane].band[4][1].stride;
1218  av_log(avctx, AV_LOG_DEBUG, "temporal level %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
1219 
1220  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1221  !highpass_stride || s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width ||
1222  lowpass_width < 3 || lowpass_height < 3) {
1223  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1224  ret = AVERROR(EINVAL);
1225  goto end;
1226  }
1227 
1228  low = s->plane[plane].l_h[7];
1229  high = s->plane[plane].l_h[9];
1230  output = s->plane[plane].l_h[7];
1231  for (int i = 0; i < lowpass_height; i++) {
1232  inverse_temporal_filter(low, high, lowpass_width);
1233  low += output_stride;
1234  high += output_stride;
1235  }
1236  if (s->progressive) {
1237  low = s->plane[plane].l_h[7];
1238  high = s->plane[plane].subband[15];
1239  output = s->plane[plane].l_h[6];
1240  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1241 
1242  low = s->plane[plane].subband[14];
1243  high = s->plane[plane].subband[16];
1244  output = s->plane[plane].l_h[7];
1245  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1246 
1247  low = s->plane[plane].l_h[9];
1248  high = s->plane[plane].subband[12];
1249  output = s->plane[plane].l_h[8];
1250  dsp->vert_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1251 
1252  low = s->plane[plane].subband[11];
1253  high = s->plane[plane].subband[13];
1254  output = s->plane[plane].l_h[9];
1255  dsp->vert_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1256 
1257  if (s->sample_type == 1)
1258  continue;
1259 
1260  dst = (int16_t *)pic->data[act_plane];
1261  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1262  if (plane & 1)
1263  dst++;
1264  if (plane > 1)
1265  dst += pic->linesize[act_plane] >> 1;
1266  }
1267 
1268  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1269  (lowpass_height * 2 > avctx->coded_height / 2 ||
1270  lowpass_width * 2 > avctx->coded_width / 2 )
1271  ) {
1273  goto end;
1274  }
1275 
1276  low = s->plane[plane].l_h[6];
1277  high = s->plane[plane].l_h[7];
1278  for (int i = 0; i < s->plane[act_plane].height; i++) {
1279  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1280  low += output_stride;
1281  high += output_stride;
1282  dst += dst_linesize;
1283  }
1284  } else {
1286  low = s->plane[plane].l_h[7];
1287  high = s->plane[plane].subband[14];
1288  output = s->plane[plane].l_h[6];
1289  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1290 
1291  low = s->plane[plane].subband[15];
1292  high = s->plane[plane].subband[16];
1293  output = s->plane[plane].l_h[7];
1294  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1295 
1296  low = s->plane[plane].l_h[9];
1297  high = s->plane[plane].subband[11];
1298  output = s->plane[plane].l_h[8];
1299  dsp->horiz_filter(output, output_stride, low, output_stride, high, highpass_stride, lowpass_width, lowpass_height);
1300 
1301  low = s->plane[plane].subband[12];
1302  high = s->plane[plane].subband[13];
1303  output = s->plane[plane].l_h[9];
1304  dsp->horiz_filter(output, output_stride, low, highpass_stride, high, highpass_stride, lowpass_width, lowpass_height);
1305 
1306  if (s->sample_type == 1)
1307  continue;
1308 
1309  dst = (int16_t *)pic->data[act_plane];
1310  low = s->plane[plane].l_h[6];
1311  high = s->plane[plane].l_h[7];
1312  for (int i = 0; i < s->plane[act_plane].height / 2; i++) {
1313  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1314  low += output_stride * 2;
1315  high += output_stride * 2;
1316  dst += pic->linesize[act_plane];
1317  }
1318  }
1319  }
1320  }
1321 
1322  if (s->transform_type == 2 && s->sample_type == 1) {
1323  int16_t *low, *high, *dst;
1324  int output_stride, lowpass_height, lowpass_width;
1325  ptrdiff_t dst_linesize;
1326 
1327  for (int plane = 0; plane < s->planes; plane++) {
1328  int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
1329 
1330  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1331  act_plane = 0;
1332  dst_linesize = pic->linesize[act_plane];
1333  } else {
1334  dst_linesize = pic->linesize[act_plane] / 2;
1335  }
1336 
1337  lowpass_height = s->plane[plane].band[4][1].height;
1338  output_stride = s->plane[plane].band[4][1].a_width;
1339  lowpass_width = s->plane[plane].band[4][1].width;
1340 
1341  if (lowpass_height > s->plane[plane].band[4][1].a_height || lowpass_width > s->plane[plane].band[4][1].a_width ||
1342  s->plane[plane].band[4][1].width > s->plane[plane].band[4][1].a_width ||
1343  lowpass_width < 3 || lowpass_height < 3) {
1344  av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
1345  ret = AVERROR(EINVAL);
1346  goto end;
1347  }
1348 
1349  if (s->progressive) {
1350  dst = (int16_t *)pic->data[act_plane];
1351  low = s->plane[plane].l_h[8];
1352  high = s->plane[plane].l_h[9];
1353 
1354  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
1355  if (plane & 1)
1356  dst++;
1357  if (plane > 1)
1358  dst += pic->linesize[act_plane] >> 1;
1359  }
1360 
1361  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
1362  (lowpass_height * 2 > avctx->coded_height / 2 ||
1363  lowpass_width * 2 > avctx->coded_width / 2 )
1364  ) {
1366  goto end;
1367  }
1368 
1369  for (int i = 0; i < s->plane[act_plane].height; i++) {
1370  dsp->horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
1371  low += output_stride;
1372  high += output_stride;
1373  dst += dst_linesize;
1374  }
1375  } else {
1376  dst = (int16_t *)pic->data[act_plane];
1377  low = s->plane[plane].l_h[8];
1378  high = s->plane[plane].l_h[9];
1379  for (int i = 0; i < s->plane[act_plane].height / 2; i++) {
1380  interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
1381  low += output_stride * 2;
1382  high += output_stride * 2;
1383  dst += pic->linesize[act_plane];
1384  }
1385  }
1386  }
1387  }
1388 
1389  if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
1390  process_bayer(pic, s->bpc);
1391 end:
1392  if (ret < 0)
1393  return ret;
1394 
1395  *got_frame = 1;
1396  return avpkt->size;
1397 }
1398 
1400 {
1401  CFHDContext *s = avctx->priv_data;
1402 
1403  free_buffers(s);
1404 
1405  return 0;
1406 }
1407 
#if HAVE_THREADS
/**
 * Carry inter-frame wavelet state from one frame-thread worker to the next.
 *
 * Only needed for 3D-transform streams (transform_type != 0), where the
 * temporal filter of the next frame reuses the previous frame's
 * coefficients. Returns 0 on success or a negative AVERROR code if buffer
 * allocation fails.
 */
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    CFHDContext *psrc = src->priv_data;
    CFHDContext *pdst = dst->priv_data;
    int ret;

    /* Nothing to propagate for self-updates or intra-only transforms. */
    if (dst == src || psrc->transform_type == 0)
        return 0;

    /* If the destination's buffers no longer match the source geometry or
     * format, drop them so they get reallocated below. */
    if (pdst->plane[0].idwt_size != psrc->plane[0].idwt_size ||
        pdst->a_format != psrc->a_format ||
        pdst->a_width != psrc->a_width ||
        pdst->a_height != psrc->a_height ||
        pdst->a_transform_type != psrc->a_transform_type)
        free_buffers(pdst);

    pdst->a_format         = psrc->a_format;
    pdst->a_width          = psrc->a_width;
    pdst->a_height         = psrc->a_height;
    pdst->a_transform_type = psrc->a_transform_type;
    pdst->transform_type   = psrc->transform_type;
    pdst->progressive      = psrc->progressive;
    pdst->planes           = psrc->planes;

    if (!pdst->plane[0].idwt_buf) {
        /* alloc_buffers() reads the coded_* fields, so seed them from the
         * negotiated (a_*) values before allocating. */
        pdst->coded_width    = pdst->a_width;
        pdst->coded_height   = pdst->a_height;
        pdst->coded_format   = pdst->a_format;
        pdst->transform_type = pdst->a_transform_type;
        ret = alloc_buffers(dst);
        if (ret < 0)
            return ret;
    }

    /* Copy band descriptors and wavelet coefficients for every plane. */
    for (int plane = 0; plane < pdst->planes; plane++) {
        memcpy(pdst->plane[plane].band, psrc->plane[plane].band, sizeof(pdst->plane[plane].band));
        memcpy(pdst->plane[plane].idwt_buf, psrc->plane[plane].idwt_buf,
               pdst->plane[plane].idwt_size * sizeof(int16_t));
    }

    return 0;
}
#endif
1452 
1454  .p.name = "cfhd",
1455  CODEC_LONG_NAME("GoPro CineForm HD"),
1456  .p.type = AVMEDIA_TYPE_VIDEO,
1457  .p.id = AV_CODEC_ID_CFHD,
1458  .priv_data_size = sizeof(CFHDContext),
1459  .init = cfhd_init,
1460  .close = cfhd_close,
1463  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1464  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1465 };
ChannelNumber
@ ChannelNumber
Definition: cfhd.h:76
ChannelCount
@ ChannelCount
Definition: cfhd.h:40
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
BandSecondPass
@ BandSecondPass
Definition: cfhd.h:86
level
uint8_t level
Definition: svq3.c:208
Precision
@ Precision
Definition: cfhd.h:79
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
Peak::level
int level
Definition: cfhd.h:133
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter documentation: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. "Format" means, for each input and each output, the list of supported formats: for video that means pixel format; for audio that means channel layout and sample format. These are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires its input and output to have the same format amongst a supported set, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
BandHeader
@ BandHeader
Definition: cfhd.h:74
PrescaleTable
@ PrescaleTable
Definition: cfhd.h:87
GetByteContext
Definition: bytestream.h:33
G2
#define G2(m)
Definition: itx_1d.c:64
CFHDContext::progressive
int progressive
Definition: cfhd.h:155
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
BandHeight
@ BandHeight
Definition: cfhd.h:69
ff_cfhd_decoder
const FFCodec ff_cfhd_decoder
Definition: cfhd.c:1453
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:226
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:250
SampleType
int32_t SampleType
Definition: ac3enc.h:65
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:421
pixdesc.h
AVFrame::width
int width
Definition: frame.h:493
CFHDDSPContext
Definition: cfhddsp.h:25
CFHDDSPContext::horiz_filter_clip
void(* horiz_filter_clip)(int16_t *output, const int16_t *low, const int16_t *high, int width, int bpc)
Definition: cfhddsp.h:36
internal.h
even
Tag MUST be even
Definition: snow.txt:206
AVPacket::data
uint8_t * data
Definition: packet.h:552
CFHDContext::a_format
int a_format
Definition: cfhd.h:159
b
#define b
Definition: input.c:42
HighpassWidth
@ HighpassWidth
Definition: cfhd.h:61
data
const char data[16]
Definition: mxf.c:149
R
#define R
Definition: huffyuv.h:44
high
int high
Definition: dovi_rpuenc.c:39
FFCodec
Definition: codec_internal.h:127
ALPHA_COMPAND_DC_OFFSET
#define ALPHA_COMPAND_DC_OFFSET
Definition: cfhd.c:42
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:209
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:665
LowpassWidth
@ LowpassWidth
Definition: cfhd.h:52
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
cfhd_init
static av_cold int cfhd_init(AVCodecContext *avctx)
Definition: cfhd.c:45
difference_coding
static void difference_coding(int16_t *band, int width, int height)
Definition: cfhd.c:119
thread.h
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:442
ff_cfhd_init_vlcs
int ff_cfhd_init_vlcs(CFHDContext *s)
Definition: cfhddata.c:181
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:135
AVCodecInternal::is_copy
int is_copy
When using frame-threaded decoding, this field is set for the first worker thread (e....
Definition: internal.h:54
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3481
VersionMajor
@ VersionMajor
Definition: cfhd.h:34
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
init_peak_table_defaults
static void init_peak_table_defaults(CFHDContext *s)
Definition: cfhd.c:82
cfhd.h
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
GetBitContext
Definition: get_bits.h:109
DWT_LEVELS
#define DWT_LEVELS
Definition: cfhd.h:104
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3469
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
LowpassPrecision
@ LowpassPrecision
Definition: cfhd.h:56
Quantization
@ Quantization
Definition: cfhd.h:72
av_image_check_size2
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:289
BandEncoding
@ BandEncoding
Definition: cfhd.h:71
SubbandNumber
@ SubbandNumber
Definition: cfhd.h:67
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
peak_table
static void peak_table(int16_t *band, Peak *peak, int length)
Definition: cfhd.c:129
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
Plane::idwt_size
int idwt_size
Definition: cfhd.h:123
process_alpha
static void process_alpha(int16_t *alpha, int width)
Definition: cfhd.c:136
AV_CODEC_ID_CFHD
@ AV_CODEC_ID_CFHD
Definition: codec_id.h:271
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:185
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:341
intreadwrite.h
Version
@ Version
Definition: cfhd.h:85
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:563
ALPHA_COMPAND_GAIN
#define ALPHA_COMPAND_GAIN
Definition: cfhd.c:43
TransformType
TransformType
Definition: webp.c:113
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
PeakOffsetHigh
@ PeakOffsetHigh
Definition: cfhd.h:84
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1048
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
VersionMinor
@ VersionMinor
Definition: cfhd.h:35
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
B
#define B
Definition: huffyuv.h:42
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
decode.h
get_bits.h
DisplayHeight
@ DisplayHeight
Definition: cfhd.h:89
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1782
process_bayer
static void process_bayer(AVFrame *frame, int bpc)
Definition: cfhd.c:149
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
FrameType
FrameType
G723.1 frame types.
Definition: g723_1.h:63
if
if(ret)
Definition: filter_design.txt:179
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
FrameIndex
@ FrameIndex
Definition: cfhd.h:49
InputFormat
@ InputFormat
Definition: cfhd.h:80
dequant_and_decompand
static int dequant_and_decompand(CFHDContext *s, int level, int quantisation, int codebook)
Definition: cfhd.c:111
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
CFHDContext::planes
int planes
Definition: cfhd.h:146
CFHDDSPContext::vert_filter
void(* vert_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:31
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
CFHDDSPContext::horiz_filter
void(* horiz_filter)(int16_t *output, ptrdiff_t out_stride, const int16_t *low, ptrdiff_t low_stride, const int16_t *high, ptrdiff_t high_stride, int width, int height)
Definition: cfhddsp.h:26
Peak
Definition: cfhd.h:132
abs
#define abs(x)
Definition: cuda_runtime.h:35
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:335
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:540
VLC_BITS
#define VLC_BITS
Definition: cfhd.h:94
HighpassHeight
@ HighpassHeight
Definition: cfhd.h:62
free_buffers
static void free_buffers(CFHDContext *s)
Definition: cfhd.c:215
SUBBAND_COUNT
#define SUBBAND_COUNT
Definition: cfhd.h:95
alloc_buffers
static int alloc_buffers(AVCodecContext *avctx)
Definition: cfhd.c:240
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
Peak::base
GetByteContext base
Definition: cfhd.h:135
AVPacket::size
int size
Definition: packet.h:553
VersionRevision
@ VersionRevision
Definition: cfhd.h:36
height
#define height
Definition: dsp.h:89
codec_internal.h
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
BitstreamMarker
@ BitstreamMarker
Definition: cfhd.h:33
cfhd_decode
static int cfhd_decode(AVCodecContext *avctx, AVFrame *pic, int *got_frame, AVPacket *avpkt)
Definition: cfhd.c:371
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:174
CFHDContext::a_width
int a_width
Definition: cfhd.h:157
CFHDContext::coded_height
int coded_height
Definition: cfhd.h:152
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
VersionEdit
@ VersionEdit
Definition: cfhd.h:37
CFHDContext::coded_format
enum AVPixelFormat coded_format
Definition: cfhd.h:154
CFHDContext::transform_type
int transform_type
Definition: cfhd.h:150
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
SubbandCount
@ SubbandCount
Definition: cfhd.h:42
CFHDContext::plane
Plane plane[4]
Definition: cfhd.h:177
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:559
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
CFHDContext
Definition: cfhd.h:138
common.h
SubbandBand
@ SubbandBand
Definition: cfhd.h:70
SubBand::read_ok
int8_t read_ok
Definition: cfhd.h:113
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
SUBBAND_COUNT_3D
#define SUBBAND_COUNT_3D
Definition: cfhd.h:96
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:644
ImageWidth
@ ImageWidth
Definition: cfhd.h:47
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:600
tag
uint32_t tag
Definition: movenc.c:1934
ret
ret
Definition: filter_design.txt:187
CFHDContext::coded_width
int coded_width
Definition: cfhd.h:151
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
inverse_temporal_filter
static void inverse_temporal_filter(int16_t *low, int16_t *high, int width)
Definition: cfhd.c:204
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
PeakLevel
@ PeakLevel
Definition: cfhd.h:82
BandCodingFlags
@ BandCodingFlags
Definition: cfhd.h:81
U
#define U(x)
Definition: vpx_arith.h:37
planes
static const struct @512 planes[]
interlaced_vertical_filter
static void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high, int width, int linesize, int plane)
Definition: cfhd.c:193
EncodedFormat
@ EncodedFormat
Definition: cfhd.h:88
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:493
init_plane_defaults
static void init_plane_defaults(CFHDContext *s)
Definition: cfhd.c:75
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
CFHDContext::a_transform_type
int a_transform_type
Definition: cfhd.h:160
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. Use ff_thread_get_buffer()(or ff_progress_frame_get_buffer() in case you have inter-frame dependencies and use the ProgressFrame API) to allocate frame buffers. Call ff_progress_frame_report() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
ff_cfhddsp_init
av_cold void ff_cfhddsp_init(CFHDDSPContext *c, int depth, int bayer)
Definition: cfhddsp.c:105
ImageHeight
@ ImageHeight
Definition: cfhd.h:48
Plane
Definition: cfhd.h:116
BandWidth
@ BandWidth
Definition: cfhd.h:68
factor
static const int factor[16]
Definition: vf_pp7.c:80
SampleFlags
@ SampleFlags
Definition: cfhd.h:77
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:607
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
Plane::idwt_buf
int16_t * idwt_buf
Definition: cfhd.h:121
DWT_LEVELS_3D
#define DWT_LEVELS_3D
Definition: cfhd.h:105
mem.h
Plane::band
SubBand band[DWT_LEVELS_3D][4]
Definition: cfhd.h:129
LowpassHeight
@ LowpassHeight
Definition: cfhd.h:53
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AVPacket
This structure stores compressed data.
Definition: packet.h:529
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:466
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
width
#define width
Definition: dsp.h:89
AV_PIX_FMT_BAYER_RGGB16
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:572
SampleIndexTable
@ SampleIndexTable
Definition: cfhd.h:32
cfhd_close
static av_cold int cfhd_close(AVCodecContext *avctx)
Definition: cfhd.c:1399
src
#define src
Definition: vp8dsp.c:248
init_frame_defaults
static void init_frame_defaults(CFHDContext *s)
Definition: cfhd.c:89
channel
channel
Definition: ebur128.h:39
PeakOffsetLow
@ PeakOffsetLow
Definition: cfhd.h:83
codebook
static const unsigned codebook[256][2]
Definition: cfhdenc.c:41
CFHDContext::a_height
int a_height
Definition: cfhd.h:158