FFmpeg
mjpegdec.c
Go to the documentation of this file.
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/attributes.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/mem.h"
39 #include "libavutil/opt.h"
40 #include "avcodec.h"
41 #include "blockdsp.h"
42 #include "codec_internal.h"
43 #include "copy_block.h"
44 #include "decode.h"
45 #include "exif.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "jpegtables.h"
51 #include "mjpeg.h"
52 #include "mjpegdec.h"
53 #include "jpeglsdec.h"
54 #include "profiles.h"
55 #include "put_bits.h"
56 
57 
/* Build the baseline-JPEG default Huffman decoders (Annex K tables).
 * NOTE(review): the function signature and several ht[] initializer rows
 * were lost in extraction — presumably the standard luma/chroma DC and AC
 * entries; confirm against the upstream file before relying on the table
 * contents shown here. */
59 {
60  static const struct {
61  int class;
62  int index;
63  const uint8_t *bits;
64  const uint8_t *values;
65  int length;
66  } ht[] = {
68  ff_mjpeg_val_dc, 12 },
70  ff_mjpeg_val_dc, 12 },
79  };
80  int i, ret;
81 
82  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
     /* drop any previously built VLC before rebuilding from the defaults */
83  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
84  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
85  ht[i].bits, ht[i].values,
86  ht[i].class == 1, s->avctx);
87  if (ret < 0)
88  return ret;
89 
     /* classes 0/1 (DC/AC) also keep a raw copy, e.g. for re-emission */
90  if (ht[i].class < 2) {
91  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
92  ht[i].bits + 1, 16);
93  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
94  ht[i].values, ht[i].length);
95  }
96  }
97 
98  return 0;
99 }
100 
101 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
102 {
103  if (len > 12 && buf[12] == 1) /* 1 - NTSC */
104  s->interlace_polarity = 1;
105  if (len > 12 && buf[12] == 2) /* 2 - PAL */
106  s->interlace_polarity = 0;
107  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
108  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 12 ? buf[12] : -1);
109 }
110 
111 static void init_idct(AVCodecContext *avctx)
112 {
113  MJpegDecodeContext *s = avctx->priv_data;
114 
115  ff_idctdsp_init(&s->idsp, avctx);
116  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
117  s->idsp.idct_permutation);
118 }
119 
/* Decoder init: allocate the frame, build default Huffman tables, and apply
 * container-specific quirks (AVID, SMV, AMV, QuickTime field order).
 * NOTE(review): the signature line and one statement (doxygen line 140)
 * were dropped in extraction — confirm against upstream. */
121 {
122  MJpegDecodeContext *s = avctx->priv_data;
123  int ret;
124 
     /* a subclassing decoder (e.g. MJPEG-B) may have set picture_ptr already */
125  if (!s->picture_ptr) {
126  s->picture = av_frame_alloc();
127  if (!s->picture)
128  return AVERROR(ENOMEM);
129  s->picture_ptr = s->picture;
130  }
131 
132  s->avctx = avctx;
133  ff_blockdsp_init(&s->bdsp);
134  init_idct(avctx);
135  s->buffer_size = 0;
136  s->buffer = NULL;
137  s->first_picture = 1;
138  s->got_picture = 0;
139  s->orig_height = avctx->coded_height;
141  avctx->colorspace = AVCOL_SPC_BT470BG;
142  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
143 
144  if ((ret = init_default_huffman_tables(s)) < 0)
145  return ret;
146 
     /* optional external Huffman tables supplied via extradata; fall back
      * to the defaults if they fail to parse */
147  if (s->extern_huff && avctx->extradata) {
148  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
149  bytestream2_init(&s->gB, avctx->extradata, avctx->extradata_size);
150  if (ff_mjpeg_decode_dht(s)) {
151  av_log(avctx, AV_LOG_ERROR,
152  "error using external huffman table, switching back to internal\n");
153  if ((ret = init_default_huffman_tables(s)) < 0)
154  return ret;
155  }
156  }
157  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
158  s->interlace_polarity = 1; /* bottom field first */
159  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
160  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
161  if (avctx->codec_tag == AV_RL32("MJPG"))
162  s->interlace_polarity = 1;
163  }
164 
     /* SMV: one JPEG carries several stacked frames; count comes from extradata */
165  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
166  if (avctx->extradata_size >= 4)
167  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
168 
169  if (s->smv_frames_per_jpeg <= 0) {
170  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
171  return AVERROR_INVALIDDATA;
172  }
173 
174  s->smv_frame = av_frame_alloc();
175  if (!s->smv_frame)
176  return AVERROR(ENOMEM);
177  } else if (avctx->extradata_size > 8
178  && AV_RL32(avctx->extradata) == 0x2C
179  && AV_RL32(avctx->extradata+4) == 0x18) {
     /* AVID extradata signature (0x2C, 0x18) */
180  parse_avid(s, avctx->extradata, avctx->extradata_size);
181  }
182 
     /* AMV stores rows bottom-up */
183  if (avctx->codec->id == AV_CODEC_ID_AMV)
184  s->flipped = 1;
185 
186  return 0;
187 }
188 
189 
190 static int mjpeg_parse_len(MJpegDecodeContext *s, int *plen, const char *name)
191 {
192  int len = bytestream2_get_be16u(&s->gB);
193  if (len < 2 || bytestream2_get_bytes_left(&s->gB) < (len - 2)) {
194  av_log(s->avctx, AV_LOG_ERROR, "%s: invalid len %d\n", name, len);
195  return AVERROR_INVALIDDATA;
196  }
197  *plen = len - 2;
198  return 0;
199 }
200 
201 /* quantize tables */
/* Parse a DQT marker segment: one or more quantization tables, each a
 * precision/index byte followed by 64 values (8- or 16-bit).
 * NOTE(review): the function signature line was dropped in extraction. */
203 {
204  int len, index, i;
205 
206  int ret = mjpeg_parse_len(s, &len, "dqt");
207  if (ret < 0)
208  return ret;
209 
     /* each table needs at least 1 header byte + 64 8-bit entries */
210  while (len >= 65) {
211  uint8_t b = bytestream2_get_byteu(&s->gB);
212  int pr = b >> 4;  /* precision: 0 = 8-bit, 1 = 16-bit entries */
213  if (pr > 1) {
214  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
215  return AVERROR_INVALIDDATA;
216  }
217  if (len < (1 + 64 * (1+pr)))
218  return AVERROR_INVALIDDATA;
219  index = b & 0x0F;
220  if (index >= 4)
221  return AVERROR_INVALIDDATA;
222  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
223  /* read quant table */
224  for (i = 0; i < 64; i++) {
225  s->quant_matrixes[index][i] = pr ? bytestream2_get_be16u(&s->gB) : bytestream2_get_byteu(&s->gB);
     /* a zero quantizer is invalid; only fatal with AV_EF_EXPLODE */
226  if (s->quant_matrixes[index][i] == 0) {
227  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
228  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
229  if (s->avctx->err_recognition & AV_EF_EXPLODE)
230  return AVERROR_INVALIDDATA;
231  }
232  }
233 
234  // XXX FIXME fine-tune, and perhaps add dc too
     /* rough quality estimate from two low-frequency AC quantizers */
235  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
236  s->quant_matrixes[index][8]) >> 1;
237  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
238  index, s->qscale[index]);
239  len -= 1 + 64 * (1+pr);
240  }
241  return 0;
242 }
243 
244 /* decode huffman tables and build VLC decoders */
/* Parse a DHT marker segment. Each table: class/index byte, 16 code-length
 * counts, then the symbol values. Replaces any previously built VLC.
 * NOTE(review): the function signature line was dropped in extraction. */
246 {
247  int len, index, i, class, n, v;
248  uint8_t bits_table[17];
249  uint8_t val_table[256];
250  int ret = 0;
251 
252  ret = mjpeg_parse_len(s, &len, "dht");
253  if (ret < 0)
254  return ret;
255 
256  while (len > 0) {
     /* minimum per table: 1 class/index byte + 16 length counts */
257  if (len < 17)
258  return AVERROR_INVALIDDATA;
259  uint8_t b = bytestream2_get_byteu(&s->gB);
260  class = b >> 4;  /* 0 = DC table, 1 = AC table */
261  if (class >= 2)
262  return AVERROR_INVALIDDATA;
263  index = b & 0x0F;
264  if (index >= 4)
265  return AVERROR_INVALIDDATA;
266  n = 0;
     /* 1-based: bits_table[k] = number of codes of length k bits */
267  for (i = 1; i <= 16; i++) {
268  bits_table[i] = bytestream2_get_byteu(&s->gB);
269  n += bits_table[i];
270  }
271  len -= 17;
272  if (len < n || n > 256)
273  return AVERROR_INVALIDDATA;
274 
275  for (i = 0; i < n; i++) {
276  v = bytestream2_get_byteu(&s->gB);
277  val_table[i] = v;
278  }
279  len -= n;
280 
281  /* build VLC and flush previous vlc if present */
282  ff_vlc_free(&s->vlcs[class][index]);
283  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
284  class, index, n);
285  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
286  val_table, class > 0, s->avctx)) < 0)
287  return ret;
288 
     /* AC tables also get a non-AC variant in slot 2 — presumably used by
      * the progressive decode paths (they read s->vlcs[2]); confirm */
289  if (class > 0) {
290  ff_vlc_free(&s->vlcs[2][index]);
291  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
292  val_table, 0, s->avctx)) < 0)
293  return ret;
294  }
295 
     /* keep raw table data, e.g. for hwaccels/re-emission */
296  for (i = 0; i < 16; i++)
297  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
298  for (i = 0; i < 256; i++)
299  s->raw_huffman_values[class][index][i] = val_table[i];
300  }
301  return 0;
302 }
303 
305 {
306  int len, nb_components, i, width, height, bits, ret, size_change;
307  unsigned pix_fmt_id;
308  int h_count[MAX_COMPONENTS] = { 0 };
309  int v_count[MAX_COMPONENTS] = { 0 };
310 
311  s->cur_scan = 0;
312  memset(s->upscale_h, 0, sizeof(s->upscale_h));
313  memset(s->upscale_v, 0, sizeof(s->upscale_v));
314 
315  ret = mjpeg_parse_len(s, &len, "sof");
316  if (ret < 0)
317  return ret;
318  if (len < 6)
319  return AVERROR_INVALIDDATA;
320  bits = bytestream2_get_byteu(&s->gB);
321 
322  if (bits > 16 || bits < 1) {
323  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
324  return AVERROR_INVALIDDATA;
325  }
326 
327  if (s->avctx->bits_per_raw_sample != bits) {
328  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
329  s->avctx->bits_per_raw_sample = bits;
330  init_idct(s->avctx);
331  }
332  if (s->pegasus_rct)
333  bits = 9;
334  if (bits == 9 && !s->pegasus_rct)
335  s->rct = 1; // FIXME ugly
336 
337  if(s->lossless && s->avctx->lowres){
338  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
339  return AVERROR(ENOSYS);
340  }
341 
342  height = bytestream2_get_be16u(&s->gB);
343  width = bytestream2_get_be16u(&s->gB);
344 
345  // HACK for odd_height.mov
346  if (s->interlaced && s->width == width && s->height == height + 1)
347  height= s->height;
348 
349  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
350  if (av_image_check_size(width, height, 0, s->avctx) < 0)
351  return AVERROR_INVALIDDATA;
352 
353  if (!s->progressive && !s->ls) {
354  // A valid frame requires at least 1 bit for DC + 1 bit for AC for each 8x8 block.
355  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
356  return AVERROR_INVALIDDATA;
357  }
358 
359  nb_components = bytestream2_get_byteu(&s->gB);
360  if (nb_components <= 0 ||
361  nb_components > MAX_COMPONENTS)
362  return AVERROR_INVALIDDATA;
363  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
364  if (nb_components != s->nb_components) {
365  av_log(s->avctx, AV_LOG_ERROR,
366  "nb_components changing in interlaced picture\n");
367  return AVERROR_INVALIDDATA;
368  }
369  }
370  if (s->ls && !(bits <= 8 || nb_components == 1)) {
372  "JPEG-LS that is not <= 8 "
373  "bits/component or 16-bit gray");
374  return AVERROR_PATCHWELCOME;
375  }
376  len -= 6;
377  if (len != 3 * nb_components) {
378  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
379  return AVERROR_INVALIDDATA;
380  }
381 
382  s->nb_components = nb_components;
383  s->h_max = 1;
384  s->v_max = 1;
385  for (i = 0; i < nb_components; i++) {
386  /* component id */
387  s->component_id[i] = bytestream2_get_byteu(&s->gB);
388  uint8_t b = bytestream2_get_byteu(&s->gB);
389  h_count[i] = b >> 4;
390  v_count[i] = b & 0x0F;
391  /* compute hmax and vmax (only used in interleaved case) */
392  if (h_count[i] > s->h_max)
393  s->h_max = h_count[i];
394  if (v_count[i] > s->v_max)
395  s->v_max = v_count[i];
396  s->quant_index[i] = bytestream2_get_byteu(&s->gB);
397  if (s->quant_index[i] >= 4) {
398  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
399  return AVERROR_INVALIDDATA;
400  }
401  if (!h_count[i] || !v_count[i]) {
402  av_log(s->avctx, AV_LOG_ERROR,
403  "Invalid sampling factor in component %d %d:%d\n",
404  i, h_count[i], v_count[i]);
405  return AVERROR_INVALIDDATA;
406  }
407 
408  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
409  i, h_count[i], v_count[i],
410  s->component_id[i], s->quant_index[i]);
411  }
412  if ( nb_components == 4
413  && s->component_id[0] == 'C'
414  && s->component_id[1] == 'M'
415  && s->component_id[2] == 'Y'
416  && s->component_id[3] == 'K')
417  s->adobe_transform = 0;
418 
419  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
420  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
421  return AVERROR_PATCHWELCOME;
422  }
423 
424  if (s->bayer) {
425  if (nb_components == 2) {
426  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
427  width stored in their SOF3 markers is the width of each one. We only output
428  a single component, therefore we need to adjust the output image width. We
429  handle the deinterleaving (but not the debayering) in this file. */
430  width *= 2;
431  }
432  /* They can also contain 1 component, which is double the width and half the height
433  of the final image (rows are interleaved). We don't handle the decoding in this
434  file, but leave that to the TIFF/DNG decoder. */
435  }
436 
437  /* if different size, realloc/alloc picture */
438  if (width != s->width || height != s->height || bits != s->bits ||
439  memcmp(s->h_count, h_count, sizeof(h_count)) ||
440  memcmp(s->v_count, v_count, sizeof(v_count))) {
441  size_change = 1;
442 
443  s->width = width;
444  s->height = height;
445  s->bits = bits;
446  memcpy(s->h_count, h_count, sizeof(h_count));
447  memcpy(s->v_count, v_count, sizeof(v_count));
448  s->interlaced = 0;
449  s->got_picture = 0;
450 
451  /* test interlaced mode */
452  if (s->first_picture &&
453  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
454  s->orig_height != 0 &&
455  s->height < ((s->orig_height * 3) / 4)) {
456  s->interlaced = 1;
457  s->bottom_field = s->interlace_polarity;
458  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
459  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
460  height *= 2;
461  }
462 
463  ret = ff_set_dimensions(s->avctx, width, height);
464  if (ret < 0)
465  return ret;
466 
467  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
468  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
469  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
470  s->orig_height < height)
471  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
472 
473  s->first_picture = 0;
474  } else {
475  size_change = 0;
476  }
477 
478  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
479  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
480  if (s->avctx->height <= 0)
481  return AVERROR_INVALIDDATA;
482  }
483  if (s->bayer && s->progressive) {
484  avpriv_request_sample(s->avctx, "progressively coded bayer picture");
485  return AVERROR_INVALIDDATA;
486  }
487 
488  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
489  if (s->progressive) {
490  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
491  return AVERROR_INVALIDDATA;
492  }
493  } else {
494  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
495  s->rgb = 1;
496  else if (!s->lossless)
497  s->rgb = 0;
498  /* XXX: not complete test ! */
499  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
500  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
501  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
502  (s->h_count[3] << 4) | s->v_count[3];
503  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
504  /* NOTE we do not allocate pictures large enough for the possible
505  * padding of h/v_count being 4 */
506  if (!(pix_fmt_id & 0xD0D0D0D0))
507  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
508  if (!(pix_fmt_id & 0x0D0D0D0D))
509  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
510 
511  for (i = 0; i < 8; i++) {
512  int j = 6 + (i&1) - (i&6);
513  int is = (pix_fmt_id >> (4*i)) & 0xF;
514  int js = (pix_fmt_id >> (4*j)) & 0xF;
515 
516  if (is == 1 && js != 2 && (i < 2 || i > 5))
517  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
518  if (is == 1 && js != 2 && (i < 2 || i > 5))
519  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
520 
521  if (is == 1 && js == 2) {
522  if (i & 1) s->upscale_h[j/2] = 1;
523  else s->upscale_v[j/2] = 1;
524  }
525  }
526 
527  if (s->bayer) {
528  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
529  goto unk_pixfmt;
530  }
531 
532  switch (pix_fmt_id) {
533  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
534  if (!s->bayer)
535  goto unk_pixfmt;
536  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
537  break;
538  case 0x11111100:
539  if (s->rgb)
540  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
541  else {
542  if ( s->adobe_transform == 0
543  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
544  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
545  } else {
546  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
547  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
548  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
549  }
550  }
551  av_assert0(s->nb_components == 3);
552  break;
553  case 0x11111111:
554  if (s->rgb)
555  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
556  else {
557  if (s->adobe_transform == 0 && s->bits <= 8) {
558  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
559  } else {
560  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
561  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
562  }
563  }
564  av_assert0(s->nb_components == 4);
565  break;
566  case 0x11412100:
567  if (s->bits > 8)
568  goto unk_pixfmt;
569  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
570  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
571  s->upscale_h[0] = 4;
572  s->upscale_h[1] = 0;
573  s->upscale_h[2] = 1;
574  } else {
575  goto unk_pixfmt;
576  }
577  break;
578  case 0x22111122:
579  case 0x22111111:
580  if (s->adobe_transform == 0 && s->bits <= 8) {
581  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
582  s->upscale_v[1] = s->upscale_v[2] = 1;
583  s->upscale_h[1] = s->upscale_h[2] = 1;
584  } else if (s->adobe_transform == 2 && s->bits <= 8) {
585  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
586  s->upscale_v[1] = s->upscale_v[2] = 1;
587  s->upscale_h[1] = s->upscale_h[2] = 1;
588  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
589  } else {
590  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
591  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
592  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
593  }
594  av_assert0(s->nb_components == 4);
595  break;
596  case 0x12121100:
597  case 0x22122100:
598  case 0x21211100:
599  case 0x21112100:
600  case 0x22211200:
601  case 0x22221100:
602  case 0x22112200:
603  case 0x11222200:
604  if (s->bits > 8)
605  goto unk_pixfmt;
606  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
607  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
608  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
609  } else {
610  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
611  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
612  }
613  break;
614  case 0x11000000:
615  case 0x13000000:
616  case 0x14000000:
617  case 0x31000000:
618  case 0x33000000:
619  case 0x34000000:
620  case 0x41000000:
621  case 0x43000000:
622  case 0x44000000:
623  if(s->bits <= 8)
624  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
625  else
626  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
627  break;
628  case 0x12111100:
629  case 0x14121200:
630  case 0x14111100:
631  case 0x22211100:
632  case 0x22112100:
633  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
634  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
635  else
636  goto unk_pixfmt;
637  s->upscale_v[1] = s->upscale_v[2] = 1;
638  } else {
639  if (pix_fmt_id == 0x14111100)
640  s->upscale_v[1] = s->upscale_v[2] = 1;
641  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
642  else
643  goto unk_pixfmt;
644  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
645  }
646  break;
647  case 0x21111100:
648  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
649  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
650  else
651  goto unk_pixfmt;
652  s->upscale_h[1] = s->upscale_h[2] = 1;
653  } else {
654  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
655  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
656  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
657  }
658  break;
659  case 0x11311100:
660  if (s->bits > 8)
661  goto unk_pixfmt;
662  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
663  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
664  else
665  goto unk_pixfmt;
666  s->upscale_h[0] = s->upscale_h[2] = 2;
667  break;
668  case 0x31111100:
669  if (s->bits > 8)
670  goto unk_pixfmt;
671  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
672  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
673  s->upscale_h[1] = s->upscale_h[2] = 2;
674  break;
675  case 0x22121100:
676  case 0x22111200:
677  case 0x41211100:
678  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
679  else
680  goto unk_pixfmt;
681  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
682  break;
683  case 0x22111100:
684  case 0x23111100:
685  case 0x42111100:
686  case 0x24111100:
687  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
688  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
689  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
690  if (pix_fmt_id == 0x42111100) {
691  if (s->bits > 8)
692  goto unk_pixfmt;
693  s->upscale_h[1] = s->upscale_h[2] = 1;
694  } else if (pix_fmt_id == 0x24111100) {
695  if (s->bits > 8)
696  goto unk_pixfmt;
697  s->upscale_v[1] = s->upscale_v[2] = 1;
698  } else if (pix_fmt_id == 0x23111100) {
699  if (s->bits > 8)
700  goto unk_pixfmt;
701  s->upscale_v[1] = s->upscale_v[2] = 2;
702  }
703  break;
704  case 0x41111100:
705  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
706  else
707  goto unk_pixfmt;
708  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
709  break;
710  default:
711  unk_pixfmt:
712  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
713  memset(s->upscale_h, 0, sizeof(s->upscale_h));
714  memset(s->upscale_v, 0, sizeof(s->upscale_v));
715  return AVERROR_PATCHWELCOME;
716  }
717  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
718  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
719  return AVERROR_PATCHWELCOME;
720  }
721  if (s->ls) {
722  memset(s->upscale_h, 0, sizeof(s->upscale_h));
723  memset(s->upscale_v, 0, sizeof(s->upscale_v));
724  if (s->nb_components == 3) {
725  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
726  } else if (s->nb_components != 1) {
727  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
728  return AVERROR_PATCHWELCOME;
729  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
730  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
731  else if (s->bits <= 8)
732  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
733  else
734  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
735  }
736 
737  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
738  if (!s->pix_desc) {
739  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
740  return AVERROR_BUG;
741  }
742 
743  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
744  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
745  } else {
746  enum AVPixelFormat pix_fmts[] = {
747 #if CONFIG_MJPEG_NVDEC_HWACCEL
749 #endif
750 #if CONFIG_MJPEG_VAAPI_HWACCEL
752 #endif
753  s->avctx->pix_fmt,
755  };
756  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
757  if (s->hwaccel_pix_fmt < 0)
758  return AVERROR(EINVAL);
759 
760  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
761  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
762  }
763 
764  if (s->avctx->skip_frame == AVDISCARD_ALL) {
765  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
766  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
767  s->got_picture = 1;
768  return 0;
769  }
770 
771  av_frame_unref(s->picture_ptr);
772  ret = ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF);
773  if (ret < 0)
774  return ret;
775  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
776  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
777  s->got_picture = 1;
778 
779  // Lets clear the palette to avoid leaving uninitialized values in it
780  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
781  memset(s->picture_ptr->data[1], 0, 1024);
782 
783  for (i = 0; i < 4; i++)
784  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
785 
786  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
787  s->width, s->height, s->linesize[0], s->linesize[1],
788  s->interlaced, s->avctx->height);
789 
790  }
791 
792  if ((s->rgb && !s->lossless && !s->ls) ||
793  (!s->rgb && s->ls && s->nb_components > 1) ||
794  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
795  ) {
796  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
797  return AVERROR_PATCHWELCOME;
798  }
799 
800  /* totally blank picture as progressive JPEG will only add details to it */
801  if (s->progressive) {
802  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
803  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
804  for (i = 0; i < s->nb_components; i++) {
805  int size = bw * bh * s->h_count[i] * s->v_count[i];
806  av_freep(&s->blocks[i]);
807  av_freep(&s->last_nnz[i]);
808  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
809  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
810  if (!s->blocks[i] || !s->last_nnz[i])
811  return AVERROR(ENOMEM);
812  s->block_stride[i] = bw * s->h_count[i];
813  }
814  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
815  }
816 
817  if (s->avctx->hwaccel) {
818  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
819  s->hwaccel_picture_private =
820  av_mallocz(hwaccel->frame_priv_data_size);
821  if (!s->hwaccel_picture_private)
822  return AVERROR(ENOMEM);
823 
824  ret = hwaccel->start_frame(s->avctx, NULL, s->raw_image_buffer,
825  s->raw_image_buffer_size);
826  if (ret < 0)
827  return ret;
828  }
829 
830  return 0;
831 }
832 
833 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
834 {
835  int code;
836  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
837  if (code < 0 || code > 16) {
838  av_log(s->avctx, AV_LOG_ERROR,
839  "mjpeg_decode_dc: bad vlc: %d\n", dc_index);
840  return AVERROR_INVALIDDATA;
841  }
842 
843  *val = code ? get_xbits(&s->gb, code) : 0;
844  return 0;
845 }
846 
847 /* decode block and dequantize */
/* Sequential (baseline) 8x8 block decode: DC via prediction, then
 * run/size-coded AC coefficients, dequantized in place. Left as-is:
 * the logic depends on the exact bit-reader cache discipline. */
848 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
849  int dc_index, int ac_index, uint16_t *quant_matrix)
850 {
851  int code, i, j, level, val;
852 
853  /* DC coef */
854  int ret = mjpeg_decode_dc(s, dc_index, &val);
855  if (ret < 0)
856  return ret;
857 
     /* DC prediction: add the previous block's DC of this component;
      * unsigned multiply avoids signed-overflow UB on crafted input */
858  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
859  s->last_dc[component] = val;
860  block[0] = av_clip_int16(val);
861  /* AC coefs */
862  i = 0;
863  {OPEN_READER(re, &s->gb);
864  do {
865  UPDATE_CACHE(re, &s->gb);
866  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
867 
     /* high nibble = run of zeros to skip, low nibble = magnitude bits */
868  i += ((unsigned)code) >> 4;
869  code &= 0xf;
870  if (code) {
871  // GET_VLC updates the cache if parsing reaches the second stage.
872  // So we have at least MIN_CACHE_BITS - 9 > 15 bits left here
873  // and don't need to refill the cache.
874  {
     /* branchless sign-magnitude decode of the extra bits */
875  int cache = GET_CACHE(re, &s->gb);
876  int sign = (~cache) >> 31;
877  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
878  }
879 
880  LAST_SKIP_BITS(re, &s->gb, code);
881 
882  if (i > 63) {
883  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
884  return AVERROR_INVALIDDATA;
885  }
     /* store in IDCT scan order, dequantized */
886  j = s->permutated_scantable[i];
887  block[j] = level * quant_matrix[i];
888  }
889  } while (i < 63);
890  CLOSE_READER(re, &s->gb);}
891 
892  return 0;
893 }
894 
/* Progressive JPEG: first (non-refinement) DC scan for one block.
 * Clears the block, decodes the DC difference, applies prediction and
 * the Al point-transform shift.
 * NOTE(review): the function name/signature line was dropped in extraction.
 * NOTE(review): `val` is declared unsigned but its address is passed to
 * mjpeg_decode_dc, whose parameter is `int *` — incompatible pointer types;
 * looks like an extraction or refactor artifact, confirm against upstream. */
896  int component, int dc_index,
897  uint16_t *quant_matrix, int Al)
898 {
899  unsigned val;
900  s->bdsp.clear_block(block);
901  int ret = mjpeg_decode_dc(s, dc_index, &val);
902  if (ret < 0)
903  return ret;
904 
     /* dequantize with the successive-approximation shift, then predict */
905  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
906  s->last_dc[component] = val;
907  block[0] = val;
908  return 0;
909 }
910 
911 /* decode block and dequantize - progressive JPEG version */
/* First AC scan of a spectral band [ss, se] for one block. Maintains the
 * end-of-band run counter *EOBRUN across blocks and tracks the highest
 * nonzero index in *last_nnz for the later refinement pass.
 * NOTE(review): the function name/signature line was dropped in extraction. */
913  uint8_t *last_nnz, int ac_index,
914  uint16_t *quant_matrix,
915  int ss, int se, int Al, int *EOBRUN)
916 {
917  int code, i, j, val, run;
918  unsigned level;
919 
     /* this block lies inside a pending end-of-band run: all zero */
920  if (*EOBRUN) {
921  (*EOBRUN)--;
922  return 0;
923  }
924 
925  {
926  OPEN_READER(re, &s->gb);
927  for (i = ss; ; i++) {
928  UPDATE_CACHE(re, &s->gb);
929  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
930 
     /* high nibble = zero run, low nibble = magnitude bit count */
931  run = ((unsigned) code) >> 4;
932  code &= 0xF;
933  if (code) {
934  i += run;
935 
936  {
     /* branchless sign-magnitude decode of the extra bits */
937  int cache = GET_CACHE(re, &s->gb);
938  int sign = (~cache) >> 31;
939  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
940  }
941 
942  LAST_SKIP_BITS(re, &s->gb, code);
943 
944  if (i >= se) {
945  if (i == se) {
946  j = s->permutated_scantable[se];
947  block[j] = level * (quant_matrix[se] << Al);
948  break;
949  }
950  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
951  return AVERROR_INVALIDDATA;
952  }
953  j = s->permutated_scantable[i];
954  block[j] = level * (quant_matrix[i] << Al);
955  } else {
956  if (run == 0xF) {// ZRL - skip 15 coefficients
957  i += 15;
958  if (i >= se) {
959  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
960  return AVERROR_INVALIDDATA;
961  }
962  } else {
     /* EOBn: end-of-band run length = 2^run + extra bits */
963  val = (1 << run);
964  if (run) {
965  // Given that GET_VLC reloads internally, we always
966  // have at least 16 bits in the cache here.
967  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
968  LAST_SKIP_BITS(re, &s->gb, run);
969  }
     /* this block counts as the first of the run */
970  *EOBRUN = val - 1;
971  break;
972  }
973  }
974  }
975  CLOSE_READER(re, &s->gb);
976  }
977 
978  if (i > *last_nnz)
979  *last_nnz = i;
980 
981  return 0;
982 }
983 
/* Refinement helpers for the progressive AC refinement pass below.
 * Both macros deliberately use names from the caller's scope: the bit
 * reader `re`/`s->gb`, loop index `i`, and `last`, `se`, `run`, `sign`,
 * `block`, `quant_matrix`, `Al` — only usable inside that function. */

/* Read one correction bit for an already-nonzero coefficient block[j]
 * and nudge its magnitude away from zero by (quant << Al). */
984 #define REFINE_BIT(j) { \
985  UPDATE_CACHE(re, &s->gb); \
986  sign = block[j] >> 15; \
987  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
988  ((quant_matrix[i] ^ sign) - sign) << Al; \
989  LAST_SKIP_BITS(re, &s->gb, 1); \
990 }
991 
/* Advance `i` over `run` zero-history coefficients, refining any
 * already-nonzero ones passed on the way; fails if the band overflows. */
992 #define ZERO_RUN \
993 for (; ; i++) { \
994  if (i > last) { \
995  i += run; \
996  if (i > se) { \
997  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
998  return -1; \
999  } \
1000  break; \
1001  } \
1002  j = s->permutated_scantable[i]; \
1003  if (block[j]) \
1004  REFINE_BIT(j) \
1005  else if (run-- == 0) \
1006  break; \
1007 }
1008 
1009 /* decode block and dequantize - progressive JPEG refinement pass */
/* Successive-approximation AC refinement of band [ss, se]: adds one bit of
 * precision to known coefficients and inserts newly nonzero ones.
 * Uses the REFINE_BIT/ZERO_RUN macros defined above.
 * NOTE(review): the function name/signature line was dropped in extraction. */
1011  uint8_t *last_nnz,
1012  int ac_index, uint16_t *quant_matrix,
1013  int ss, int se, int Al, int *EOBRUN)
1014 {
1015  int code, i = ss, j, sign, val, run;
1016  int last = FFMIN(se, *last_nnz);
1017 
1018  OPEN_READER(re, &s->gb);
     /* inside a pending end-of-band run: only refinement bits follow below */
1019  if (*EOBRUN) {
1020  (*EOBRUN)--;
1021  } else {
1022  for (; ; i++) {
1023  UPDATE_CACHE(re, &s->gb);
1024  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
1025 
1026  if (code & 0xF) {
     /* a new coefficient: read its sign bit, skip `run` zeros first */
1027  run = ((unsigned) code) >> 4;
1028  val = SHOW_UBITS(re, &s->gb, 1);
1029  LAST_SKIP_BITS(re, &s->gb, 1);
1030  ZERO_RUN;
1031  j = s->permutated_scantable[i];
1032  val--;
     /* val is now 0 or -1: conditional negate of (quant << Al) */
1033  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1034  if (i == se) {
1035  if (i > *last_nnz)
1036  *last_nnz = i;
1037  CLOSE_READER(re, &s->gb);
1038  return 0;
1039  }
1040  } else {
1041  run = ((unsigned) code) >> 4;
1042  if (run == 0xF) {
     /* ZRL: sixteen zero-history positions */
1043  ZERO_RUN;
1044  } else {
     /* EOBn: run length = 2^n + extra bits */
1045  val = run;
1046  run = (1 << run);
1047  if (val) {
1048  // Given that GET_VLC reloads internally, we always
1049  // have at least 16 bits in the cache here.
1050  run += SHOW_UBITS(re, &s->gb, val);
1051  LAST_SKIP_BITS(re, &s->gb, val);
1052  }
1053  *EOBRUN = run - 1;
1054  break;
1055  }
1056  }
1057  }
1058 
1059  if (i > *last_nnz)
1060  *last_nnz = i;
1061  }
1062 
     /* refine the remaining already-nonzero coefficients up to `last` */
1063  for (; i <= last; i++) {
1064  j = s->permutated_scantable[i];
1065  if (block[j])
1066  REFINE_BIT(j)
1067  }
1068  CLOSE_READER(re, &s->gb);
1069 
1070  return 0;
1071 }
1072 #undef REFINE_BIT
1073 #undef ZERO_RUN
1074 
1075 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1076 {
1077  int i;
1078  int reset = 0;
1079 
1080  if (s->restart_interval) {
1081  s->restart_count--;
1082  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1083  align_get_bits(&s->gb);
1084  for (i = 0; i < nb_components; i++) /* reset dc */
1085  s->last_dc[i] = (4 << s->bits);
1086  }
1087 
1088  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1089  /* skip RSTn */
1090  if (s->restart_count == 0) {
1091  if( show_bits(&s->gb, i) == (1 << i) - 1
1092  || show_bits(&s->gb, i) == 0xFF) {
1093  int pos = get_bits_count(&s->gb);
1094  align_get_bits(&s->gb);
1095  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1096  skip_bits(&s->gb, 8);
1097  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1098  for (i = 0; i < nb_components; i++) /* reset dc */
1099  s->last_dc[i] = (4 << s->bits);
1100  reset = 1;
1101  } else
1102  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1103  }
1104  }
1105  }
1106  return reset;
1107 }
1108 
1109 /* Handles 1 to 4 components */
/* Handles 1 to 4 components */
/*
 * Decode one lossless-JPEG scan with pixel-interleaved components
 * (RGB or Bayer) into s->picture_ptr->data[0].  Each sample is
 * predicted from its left/top/top-left neighbours (PREDICT), decoded
 * row by row into a scratch row buffer, then the finished row is
 * converted to the output layout (direct, RCT, Pegasus RCT or Bayer
 * passthrough).  Returns 0 on success or a negative AVERROR code.
 */
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
{
    int i, mb_x, mb_y;
    unsigned width;
    uint16_t (*buffer)[4];
    int left[4], top[4], topleft[4];
    const int linesize = s->linesize[0];
    const int mask = ((1 << s->bits) - 1) << point_transform;
    int resync_mb_y = 0;
    int resync_mb_x = 0;
    int vpred[6];
    int ret;

    /* this path only handles interleaved 1x1-sampled lossless layouts */
    if (!s->bayer && s->nb_components < 3)
        return AVERROR_INVALIDDATA;
    if (s->bayer && s->nb_components > 2)
        return AVERROR_INVALIDDATA;
    if (s->nb_components <= 0 || s->nb_components > 4)
        return AVERROR_INVALIDDATA;
    if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
        return AVERROR_INVALIDDATA;
    if (s->bayer) {
        if (s->rct || s->pegasus_rct)
            return AVERROR_INVALIDDATA;
    }


    s->restart_count = s->restart_interval;

    /* no restart interval configured: make the counter never expire */
    if (s->restart_interval == 0)
        s->restart_interval = INT_MAX;

    if (s->bayer)
        width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
    else
        width = s->mb_width;

    /* scratch buffer holding one decoded row of up to 4 components */
    av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
    if (!s->ljpeg_buffer)
        return AVERROR(ENOMEM);

    buffer = s->ljpeg_buffer;

    /* initial predictor: midpoint of the sample range */
    for (i = 0; i < 4; i++)
        buffer[0][i] = 1 << (s->bits - 1);

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);

        if (s->interlaced && s->bottom_field)
            ptr += linesize >> 1;

        /* seed neighbour predictors from the first sample of the
         * previous row still held in the scratch buffer */
        for (i = 0; i < 4; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];

        if ((mb_y * s->width) % s->restart_interval == 0) {
            for (i = 0; i < 6; i++)
                vpred[i] = 1 << (s->bits-1);
        }

        for (mb_x = 0; mb_x < width; mb_x++) {
            int modified_predictor = predictor;

            if (get_bits_left(&s->gb) < 1) {
                av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
                return AVERROR_INVALIDDATA;
            }

            if (s->restart_interval && !s->restart_count){
                /* resync point: restart predictors from the midpoint */
                s->restart_count = s->restart_interval;
                resync_mb_x = mb_x;
                resync_mb_y = mb_y;
                for(i=0; i<4; i++)
                    top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
            }
            /* force predictor 1 (left) where the top neighbours are not
             * yet decoded (first column, resync row and its successor) */
            if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
                modified_predictor = 1;

            for (i=0;i<nb_components;i++) {
                int pred, dc;

                topleft[i] = top[i];
                top[i] = buffer[mb_x][i];

                ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
                if (ret < 0)
                    return ret;

                if (!s->bayer || mb_x) {
                    pred = left[i];
                } else { /* This path runs only for the first line in bayer images */
                    vpred[i] += dc;
                    pred = vpred[i] - dc;
                }

                PREDICT(pred, topleft[i], top[i], pred, modified_predictor);

                left[i] = buffer[mb_x][i] =
                    mask & (pred + (unsigned)(dc * (1 << point_transform)));
            }

            if (s->restart_interval && !--s->restart_count) {
                align_get_bits(&s->gb);
                skip_bits(&s->gb, 16); /* skip RSTn */
            }
        }
        /* write the finished row out in the frame's pixel layout */
        if (s->rct && s->nb_components == 4) {
            /* 4-component reversible colour transform */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 0] = buffer[mb_x][3];
            }
        } else if (s->nb_components == 4) {
            for(i=0; i<nb_components; i++) {
                int c= s->comp_index[i];
                if (s->bits <= 8) {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ptr[4*mb_x+3-c] = buffer[mb_x][i];
                    }
                } else if(s->bits == 9) {
                    return AVERROR_PATCHWELCOME;
                } else {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
                    }
                }
            }
        } else if (s->rct) {
            /* 3-component reversible colour transform */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            }
        } else if (s->pegasus_rct) {
            /* Pegasus variant: no 0x200 bias in the chroma average */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            }
        } else if (s->bayer) {
            if (s->bits <= 8)
                return AVERROR_PATCHWELCOME;
            if (nb_components == 1) {
                /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
                for (mb_x = 0; mb_x < width; mb_x++)
                    ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
            } else if (nb_components == 2) {
                for (mb_x = 0; mb_x < width; mb_x++) {
                    ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
                    ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
                }
            }
        } else {
            for(i=0; i<nb_components; i++) {
                int c= s->comp_index[i];
                if (s->bits <= 8) {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ptr[3*mb_x+2-c] = buffer[mb_x][i];
                    }
                } else if(s->bits == 9) {
                    return AVERROR_PATCHWELCOME;
                } else {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
                    }
                }
            }
        }
    }
    return 0;
}
1282 
/*
 * Decode one lossless-JPEG scan with planar (YUV-style) components into
 * s->picture_ptr.  Samples are predicted in-place from already-decoded
 * neighbours in the destination plane; edge blocks (first column, the
 * resync row and interlaced frames) take a slower path with explicit
 * top/left fallbacks.  Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): the first signature line was lost in the extraction of
 * this listing and has been restored from context -- verify against the
 * repository.
 */
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
                                 int point_transform, int nb_components)
{
    int i, mb_x, mb_y, mask;
    int bits= (s->bits+7)&~7;   /* container depth rounded up to 8 or 16 */
    int resync_mb_y = 0;
    int resync_mb_x = 0;
    int ret;

    /* shift samples up so they are MSB-aligned in the container */
    point_transform += bits - s->bits;
    mask = ((1 << s->bits) - 1) << point_transform;

    av_assert0(nb_components>=1 && nb_components<=4);

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            if (get_bits_left(&s->gb) < 1) {
                av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
                return AVERROR_INVALIDDATA;
            }
            if (s->restart_interval && !s->restart_count){
                s->restart_count = s->restart_interval;
                resync_mb_x = mb_x;
                resync_mb_y = mb_y;
            }

            /* slow path for blocks whose top/left neighbours may be
             * missing (first column, resync row and its successor,
             * interlaced content) */
            if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
                int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
                int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
                for (i = 0; i < nb_components; i++) {
                    uint8_t *ptr;
                    uint16_t *ptr16;
                    int n, h, v, x, y, c, j, linesize;
                    n = s->nb_blocks[i];
                    c = s->comp_index[i];
                    h = s->h_scount[i];
                    v = s->v_scount[i];
                    x = 0;
                    y = 0;
                    linesize= s->linesize[c];

                    if(bits>8) linesize /= 2;   /* linesize in samples */

                    for(j=0; j<n; j++) {
                        int pred, dc;

                        ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
                        if (ret < 0)
                            return ret;

                        if ( h * mb_x + x >= s->width
                          || v * mb_y + y >= s->height) {
                            // Nothing to do
                        } else if (bits<=8) {
                            ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                            if(y==0 && toprow){
                                if(x==0 && leftcol){
                                    /* no neighbours decoded yet: midpoint */
                                    pred= 1 << (bits - 1);
                                }else{
                                    pred= ptr[-1];
                                }
                            }else{
                                if(x==0 && leftcol){
                                    pred= ptr[-linesize];
                                }else{
                                    PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
                                }
                            }

                            if (s->interlaced && s->bottom_field)
                                ptr += linesize >> 1;
                            pred &= mask;
                            *ptr= pred + ((unsigned)dc << point_transform);
                        }else{
                            ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
                            if(y==0 && toprow){
                                if(x==0 && leftcol){
                                    pred= 1 << (bits - 1);
                                }else{
                                    pred= ptr16[-1];
                                }
                            }else{
                                if(x==0 && leftcol){
                                    pred= ptr16[-linesize];
                                }else{
                                    PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
                                }
                            }

                            if (s->interlaced && s->bottom_field)
                                ptr16 += linesize >> 1;
                            pred &= mask;
                            *ptr16= pred + ((unsigned)dc << point_transform);
                        }
                        if (++x == h) {
                            x = 0;
                            y++;
                        }
                    }
                }
            } else {
                /* fast path: all three neighbours are always available */
                for (i = 0; i < nb_components; i++) {
                    uint8_t *ptr;
                    uint16_t *ptr16;
                    int n, h, v, x, y, c, j, linesize, dc;
                    n = s->nb_blocks[i];
                    c = s->comp_index[i];
                    h = s->h_scount[i];
                    v = s->v_scount[i];
                    x = 0;
                    y = 0;
                    linesize = s->linesize[c];

                    if(bits>8) linesize /= 2;

                    for (j = 0; j < n; j++) {
                        int pred;

                        ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
                        if (ret < 0)
                            return ret;

                        if ( h * mb_x + x >= s->width
                          || v * mb_y + y >= s->height) {
                            // Nothing to do
                        } else if (bits<=8) {
                            ptr = s->picture_ptr->data[c] +
                                  (linesize * (v * mb_y + y)) +
                                  (h * mb_x + x); //FIXME optimize this crap
                            PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);

                            pred &= mask;
                            *ptr = pred + ((unsigned)dc << point_transform);
                        }else{
                            ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
                            PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);

                            pred &= mask;
                            *ptr16= pred + ((unsigned)dc << point_transform);
                        }

                        if (++x == h) {
                            x = 0;
                            y++;
                        }
                    }
                }
            }
            if (s->restart_interval && !--s->restart_count) {
                align_get_bits(&s->gb);
                skip_bits(&s->gb, 16); /* skip RSTn */
            }
        }
    }
    return 0;
}
1439 
/* Copy one block from the reference frame into the current picture;
 * the copied block shrinks from 8x8 down to a single sample as lowres
 * increases (0..3).
 * NOTE(review): the first signature line was lost in the extraction of
 * this listing and has been restored from context -- verify against the
 * repository. */
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
                                              uint8_t *dst, const uint8_t *src,
                                              int linesize, int lowres)
{
    switch (lowres) {
    case 0: s->copy_block(dst, src, linesize, 8);
        break;
    case 1: copy_block4(dst, src, linesize, linesize, 4);
        break;
    case 2: copy_block2(dst, src, linesize, linesize, 2);
        break;
    case 3: *dst = *src;
        break;
    }
}
1455 
1456 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1457 {
1458  int block_x, block_y;
1459  int size = 8 >> s->avctx->lowres;
1460  if (s->bits > 8) {
1461  for (block_y=0; block_y<size; block_y++)
1462  for (block_x=0; block_x<size; block_x++)
1463  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1464  } else {
1465  for (block_y=0; block_y<size; block_y++)
1466  for (block_x=0; block_x<size; block_x++)
1467  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1468  }
1469 }
1470 
/*
 * Decode one sequential (or progressive-DC) scan: iterate over all MBs,
 * entropy-decode each 8x8 block and either IDCT it straight into the
 * frame (non-progressive) or accumulate its coefficients in s->blocks
 * (progressive).  When mb_bitmask is given, MBs with a 0 bit are copied
 * unchanged from the reference frame instead of being decoded.
 * Returns 0 on success or a negative AVERROR code.
 */
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
                             int Al, const uint8_t *mb_bitmask,
                             int mb_bitmask_size,
                             const AVFrame *reference)
{
    int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
    uint8_t *data[MAX_COMPONENTS];
    const uint8_t *reference_data[MAX_COMPONENTS];
    int linesize[MAX_COMPONENTS];
    GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
    int bytes_per_pixel = 1 + (s->bits > 8);

    if (mb_bitmask) {
        /* one bit per MB, rounded up to whole bytes */
        if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
            av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
            return AVERROR_INVALIDDATA;
        }
        init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
    }

    s->restart_count = 0;

    av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
                                     &chroma_v_shift);
    chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
    chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);

    for (i = 0; i < nb_components; i++) {
        int c = s->comp_index[i];
        data[c] = s->picture_ptr->data[c];
        reference_data[c] = reference ? reference->data[c] : NULL;
        linesize[c] = s->linesize[c];
        s->coefs_finished[c] |= 1;   /* mark the DC coefficient as coded */
    }

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);

            if (s->restart_interval && !s->restart_count)
                s->restart_count = s->restart_interval;

            if (get_bits_left(&s->gb) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
                       -get_bits_left(&s->gb));
                return AVERROR_INVALIDDATA;
            }
            for (i = 0; i < nb_components; i++) {
                uint8_t *ptr;
                int n, h, v, x, y, c, j;
                int block_offset;
                n = s->nb_blocks[i];
                c = s->comp_index[i];
                h = s->h_scount[i];
                v = s->v_scount[i];
                x = 0;
                y = 0;
                for (j = 0; j < n; j++) {
                    block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
                                     (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);

                    if (s->interlaced && s->bottom_field)
                        block_offset += linesize[c] >> 1;
                    /* only write blocks inside the visible picture area */
                    if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
                      && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
                        ptr = data[c] + block_offset;
                    } else
                        ptr = NULL;
                    if (!s->progressive) {
                        if (copy_mb) {
                            if (ptr)
                                mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
                                                 linesize[c], s->avctx->lowres);

                        } else {
                            s->bdsp.clear_block(s->block);
                            if (decode_block(s, s->block, i,
                                             s->dc_index[i], s->ac_index[i],
                                             s->quant_matrixes[s->quant_sindex[i]]) < 0) {
                                av_log(s->avctx, AV_LOG_ERROR,
                                       "error y=%d x=%d\n", mb_y, mb_x);
                                return AVERROR_INVALIDDATA;
                            }
                            if (ptr && linesize[c]) {
                                s->idsp.idct_put(ptr, linesize[c], s->block);
                                if (s->bits & 7)
                                    shift_output(s, ptr, linesize[c]);
                            }
                        }
                    } else {
                        /* progressive: accumulate coefficients; the IDCT
                         * runs once all scans have been decoded */
                        int block_idx = s->block_stride[c] * (v * mb_y + y) +
                                        (h * mb_x + x);
                        int16_t *block = s->blocks[c][block_idx];
                        if (Ah)
                            block[0] += get_bits1(&s->gb) *
                                        s->quant_matrixes[s->quant_sindex[i]][0] << Al;
                        else if (decode_dc_progressive(s, block, i, s->dc_index[i],
                                                       s->quant_matrixes[s->quant_sindex[i]],
                                                       Al) < 0) {
                            av_log(s->avctx, AV_LOG_ERROR,
                                   "error y=%d x=%d\n", mb_y, mb_x);
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
                    ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
                            mb_x, mb_y, x, y, c, s->bottom_field,
                            (v * mb_y + y) * 8, (h * mb_x + x) * 8);
                    if (++x == h) {
                        x = 0;
                        y++;
                    }
                }
            }

            handle_rstn(s, nb_components);
        }
    }
    return 0;
}
1591 
/*
 * Decode one progressive AC scan (a single component, coefficients
 * ss..se): first pass (Ah == 0) uses decode_block_progressive, later
 * refinement passes use decode_block_refinement.
 *
 * NOTE(review): the first signature line and the AVERROR_INVALIDDATA
 * assignment below were lost in the extraction of this listing and have
 * been restored from context -- verify against the repository.
 */
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
                                            int se, int Ah, int Al)
{
    int mb_x, mb_y;
    int EOBRUN = 0;
    int c = s->comp_index[0];
    uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];

    av_assert0(ss>=0 && Ah>=0 && Al>=0);
    if (se < ss || se > 63) {
        av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
        return AVERROR_INVALIDDATA;
    }

    // s->coefs_finished is a bitmask for coefficients coded
    // ss and se are parameters telling start and end coefficients
    s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);

    s->restart_count = 0;

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        int block_idx = mb_y * s->block_stride[c];
        int16_t (*block)[64] = &s->blocks[c][block_idx];
        uint8_t *last_nnz = &s->last_nnz[c][block_idx];
        for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
            int ret;
            if (s->restart_interval && !s->restart_count)
                s->restart_count = s->restart_interval;

            if (Ah)
                ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
                                              quant_matrix, ss, se, Al, &EOBRUN);
            else
                ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
                                               quant_matrix, ss, se, Al, &EOBRUN);

            /* treat an overread as a decode error even if the block
             * decoder itself returned success */
            if (ret >= 0 && get_bits_left(&s->gb) < 0)
                ret = AVERROR_INVALIDDATA;
            if (ret < 0) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "error y=%d x=%d\n", mb_y, mb_x);
                return AVERROR_INVALIDDATA;
            }

            /* a restart marker terminates any pending end-of-band run */
            if (handle_rstn(s, 0))
                EOBRUN = 0;
        }
    }
    return 0;
}
1642 
/*
 * Final stage of progressive decoding: run the IDCT over all
 * accumulated coefficient blocks of every component and write the
 * result into the picture.
 *
 * NOTE(review): the signature line was lost in the extraction of this
 * listing and has been restored from context -- verify against the
 * repository.
 */
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
{
    int mb_x, mb_y;
    int c;
    const int bytes_per_pixel = 1 + (s->bits > 8);
    const int block_size = s->lossless ? 1 : 8;

    for (c = 0; c < s->nb_components; c++) {
        uint8_t *data = s->picture_ptr->data[c];
        int linesize = s->linesize[c];
        int h = s->h_max / s->h_count[c];
        int v = s->v_max / s->v_count[c];
        int mb_width = (s->width + h * block_size - 1) / (h * block_size);
        int mb_height = (s->height + v * block_size - 1) / (v * block_size);

        /* every coefficient bit should have been coded by some scan */
        if (~s->coefs_finished[c])
            av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);

        if (s->interlaced && s->bottom_field)
            data += linesize >> 1;

        for (mb_y = 0; mb_y < mb_height; mb_y++) {
            uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
            int block_idx = mb_y * s->block_stride[c];
            int16_t (*block)[64] = &s->blocks[c][block_idx];
            for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
                s->idsp.idct_put(ptr, linesize, *block);
                if (s->bits & 7)
                    shift_output(s, ptr, linesize);
                ptr += bytes_per_pixel*8 >> s->avctx->lowres;
            }
        }
    }
}
1677 
/*
 * Parse an SOS (start of scan) header and decode the scan it announces,
 * dispatching to the hwaccel, JPEG-LS, lossless (RGB/Bayer or YUV),
 * progressive AC or sequential decode paths.
 *
 * NOTE(review): four statement lines in this function (the
 * avpriv_report_missing_feature call and the three dispatch-call first
 * lines) were lost in the extraction of this listing and have been
 * restored from context -- verify against the repository.
 */
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
                        int mb_bitmask_size, const AVFrame *reference)
{
    int len, nb_components, i, h, v, predictor, point_transform;
    int index, id, ret;
    const int block_size = s->lossless ? 1 : 8;
    int ilv, prev_shift;

    if (!s->got_picture) {
        av_log(s->avctx, AV_LOG_WARNING,
               "Can not process SOS before SOF, skipping\n");
        return AVERROR_INVALIDDATA;
    }

    ret = mjpeg_parse_len(s, &len, "sos");
    if (ret < 0)
        return ret;
    if (len < 1)
        return AVERROR_INVALIDDATA;
    nb_components = bytestream2_get_byteu(&s->gB);
    if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
        avpriv_report_missing_feature(s->avctx,
                                      "decode_sos: nb_components (%d)",
                                      nb_components);
        return AVERROR_PATCHWELCOME;
    }
    /* Ns byte + 2 bytes per component + Ss/Se/AhAl (len excludes the
     * 2-byte marker length itself) */
    if (len != 4 + 2 * nb_components) {
        av_log(s->avctx, AV_LOG_ERROR, "decode_sos: len(%d) mismatch %d components\n", len, nb_components);
        return AVERROR_INVALIDDATA;
    }
    for (i = 0; i < nb_components; i++) {
        id = bytestream2_get_byteu(&s->gB);
        av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
        /* find component index */
        for (index = 0; index < s->nb_components; index++)
            if (id == s->component_id[index])
                break;
        if (index == s->nb_components) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "decode_sos: index(%d) out of components\n", index);
            return AVERROR_INVALIDDATA;
        }
        /* Metasoft MJPEG codec has Cb and Cr swapped */
        if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
            && nb_components == 3 && s->nb_components == 3 && i)
            index = 3 - i;

        s->quant_sindex[i] = s->quant_index[index];
        s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
        s->h_scount[i] = s->h_count[index];
        s->v_scount[i] = s->v_count[index];

        s->comp_index[i] = index;

        /* Td (DC table) in the high nibble, Ta (AC table) in the low */
        uint8_t b = bytestream2_get_byteu(&s->gB);
        s->dc_index[i] = b >> 4;
        s->ac_index[i] = b & 0x0F;

        if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
            s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
            goto out_of_range;
        if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
            goto out_of_range;
    }

    predictor = bytestream2_get_byteu(&s->gB); /* JPEG Ss / lossless JPEG predictor / JPEG-LS NEAR */
    ilv = bytestream2_get_byteu(&s->gB);       /* JPEG Se / JPEG-LS ILV */
    if(s->avctx->codec_tag != AV_RL32("CJPG")){
        uint8_t b = bytestream2_get_byteu(&s->gB);
        prev_shift      = b >> 4;   /* Ah */
        point_transform = b & 0x0F; /* Al */
    }else
        prev_shift = point_transform = 0;

    if (nb_components > 1) {
        /* interleaved stream */
        s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
        s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
    } else if (!s->ls) { /* skip this for JPEG-LS */
        h = s->h_max / s->h_scount[0];
        v = s->v_max / s->v_scount[0];
        s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
        s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
        s->nb_blocks[0] = 1;
        s->h_scount[0] = 1;
        s->v_scount[0] = 1;
    }

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
               s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
               predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
               s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);


    /* mjpeg-b can have padding bytes between sos and image data, skip them */
    if (s->mjpb_skiptosod)
        bytestream2_skip(&s->gB, s->mjpb_skiptosod);

    /* set up the bit reader over the remaining (entropy-coded) bytes */
    ret = init_get_bits8(&s->gb, s->gB.buffer, bytestream2_get_bytes_left(&s->gB));
    if (ret < 0)
        return ret;

next_field:
    for (i = 0; i < nb_components; i++)
        s->last_dc[i] = (4 << s->bits);

    if (s->avctx->hwaccel) {
        /* hand the raw (still escaped) scan bytes to the hwaccel */
        int bytes_to_start = bytestream2_tell(&s->gB);
        av_assert0(bytes_to_start >= 0 &&
                   s->raw_scan_buffer_size >= bytes_to_start);

        ret = FF_HW_CALL(s->avctx, decode_slice,
                         s->raw_scan_buffer + bytes_to_start,
                         s->raw_scan_buffer_size - bytes_to_start);
        if (ret < 0)
            return ret;

    } else if (s->lossless) {
        av_assert0(s->picture_ptr == s->picture);
        if (CONFIG_JPEGLS_DECODER && s->ls) {
//            for () {
//            reset_ls_coding_parameters(s, 0);

            if ((ret = ff_jpegls_decode_picture(s, predictor,
                                                point_transform, ilv)) < 0)
                return ret;
        } else {
            if (s->rgb || s->bayer) {
                if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
                    return ret;
            } else {
                if ((ret = ljpeg_decode_yuv_scan(s, predictor,
                                                 point_transform,
                                                 nb_components)) < 0)
                    return ret;
            }
        }
    } else {
        if (s->progressive && predictor) {
            /* progressive AC scan: Ss is in `predictor', Se in `ilv' */
            av_assert0(s->picture_ptr == s->picture);
            if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
                                                        ilv, prev_shift,
                                                        point_transform)) < 0)
                return ret;
        } else {
            if ((ret = mjpeg_decode_scan(s, nb_components,
                                         prev_shift, point_transform,
                                         mb_bitmask, mb_bitmask_size, reference)) < 0)
                return ret;
        }
    }

    /* AVRn interlaced files carry a second field after an RST1 marker */
    if (s->interlaced &&
        get_bits_left(&s->gb) > 32 &&
        show_bits(&s->gb, 8) == 0xFF) {
        GetBitContext bak = s->gb;
        align_get_bits(&bak);
        if (show_bits(&bak, 16) == 0xFFD1) {
            av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
            s->gb = bak;
            skip_bits(&s->gb, 16);
            s->bottom_field ^= 1;

            goto next_field;
        }
    }

    /* Add the amount of bits read from the unescaped image data buffer
     * into the GetByteContext. */
    bytestream2_skipu(&s->gB, (get_bits_count(&s->gb) + 7) / 8);

    return 0;
 out_of_range:
    av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
    return AVERROR_INVALIDDATA;
}
1855 
/*
 * Parse a DRI (define restart interval) segment: a 2-byte length that
 * must be 4 followed by the 2-byte restart interval.
 *
 * NOTE(review): the signature line was lost in the extraction of this
 * listing and has been restored from context -- verify the exact
 * name/linkage against the repository.
 */
int ff_mjpeg_decode_dri(MJpegDecodeContext *s)
{
    if (bytestream2_get_be16u(&s->gB) != 4)
        return AVERROR_INVALIDDATA;
    s->restart_interval = bytestream2_get_be16u(&s->gB);
    s->restart_count = 0;
    av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
           s->restart_interval);

    return 0;
}
1867 
1869 {
1870  int len, id, i;
1871 
1872  int ret = mjpeg_parse_len(s, &len, "app");
1873  if (ret < 0)
1874  return AVERROR_INVALIDDATA;
1875 
1876  if (len < 4) {
1877  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1878  return AVERROR_INVALIDDATA;
1879  av_log(s->avctx, AV_LOG_VERBOSE, "skipping APPx stub (len=%" PRId32 ")\n", len);
1880  goto out;
1881  }
1882 
1883  id = bytestream2_get_be32u(&s->gB);
1884  len -= 4;
1885 
1886  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1887  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1888  av_fourcc2str(av_bswap32(id)), id, len);
1889 
1890  /* This fourcc is used by non-avid files too, it holds some
1891  information, but it's always present in AVID-created files. */
1892  if (id == AV_RB32("AVI1")) {
1893  /* structure:
1894  4bytes AVI1
1895  1bytes polarity
1896  1bytes always zero
1897  4bytes field_size
1898  4bytes field_size_less_padding
1899  */
1900  if (len < 1)
1901  goto out;
1902  i = bytestream2_get_byteu(&s->gB); len--;
1903  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1904  goto out;
1905  }
1906 
1907  if (id == AV_RB32("JFIF")) {
1908  int t_w, t_h, v1, v2;
1909  if (len < 8)
1910  goto out;
1911  bytestream2_skipu(&s->gB, 1); /* the trailing zero-byte */
1912  v1 = bytestream2_get_byteu(&s->gB);
1913  v2 = bytestream2_get_byteu(&s->gB);
1914  bytestream2_skipu(&s->gB, 1);
1915 
1916  s->avctx->sample_aspect_ratio.num = bytestream2_get_be16u(&s->gB);
1917  s->avctx->sample_aspect_ratio.den = bytestream2_get_be16u(&s->gB);
1918  if ( s->avctx->sample_aspect_ratio.num <= 0
1919  || s->avctx->sample_aspect_ratio.den <= 0) {
1920  s->avctx->sample_aspect_ratio.num = 0;
1921  s->avctx->sample_aspect_ratio.den = 1;
1922  }
1923 
1924  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1925  av_log(s->avctx, AV_LOG_INFO,
1926  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1927  v1, v2,
1928  s->avctx->sample_aspect_ratio.num,
1929  s->avctx->sample_aspect_ratio.den);
1930 
1931  len -= 8;
1932  if (len >= 2) {
1933  t_w = bytestream2_get_byteu(&s->gB);
1934  t_h = bytestream2_get_byteu(&s->gB);
1935  if (t_w && t_h) {
1936  /* skip thumbnail */
1937  if (len -10 - (t_w * t_h * 3) > 0)
1938  len -= t_w * t_h * 3;
1939  }
1940  len -= 2;
1941  }
1942  goto out;
1943  }
1944 
1945  if ( id == AV_RB32("Adob")
1946  && len >= 8
1947  && bytestream2_peek_byteu(&s->gB) == 'e'
1948  && bytestream2_peek_be32u(&s->gB) != AV_RB32("e_CM")) {
1949  bytestream2_skipu(&s->gB, 1); /* 'e' */
1950  bytestream2_skipu(&s->gB, 2); /* version */
1951  bytestream2_skipu(&s->gB, 2); /* flags0 */
1952  bytestream2_skipu(&s->gB, 2); /* flags1 */
1953  s->adobe_transform = bytestream2_get_byteu(&s->gB);
1954  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1955  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1956  len -= 8;
1957  goto out;
1958  }
1959 
1960  if (id == AV_RB32("LJIF")) {
1961  int rgb = s->rgb;
1962  int pegasus_rct = s->pegasus_rct;
1963  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1964  av_log(s->avctx, AV_LOG_INFO,
1965  "Pegasus lossless jpeg header found\n");
1966  if (len < 9)
1967  goto out;
1968  bytestream2_skipu(&s->gB, 2); /* version ? */
1969  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1970  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1971  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1972  switch (i=bytestream2_get_byteu(&s->gB)) {
1973  case 1:
1974  rgb = 1;
1975  pegasus_rct = 0;
1976  break;
1977  case 2:
1978  rgb = 1;
1979  pegasus_rct = 1;
1980  break;
1981  default:
1982  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1983  }
1984 
1985  len -= 9;
1986  if (s->bayer)
1987  goto out;
1988  if (s->got_picture)
1989  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1990  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1991  goto out;
1992  }
1993 
1994  s->rgb = rgb;
1995  s->pegasus_rct = pegasus_rct;
1996 
1997  goto out;
1998  }
1999  if (id == AV_RL32("colr") && len > 0) {
2000  s->colr = bytestream2_get_byteu(&s->gB);
2001  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2002  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
2003  len --;
2004  goto out;
2005  }
2006  if (id == AV_RL32("xfrm") && len > 0) {
2007  s->xfrm = bytestream2_get_byteu(&s->gB);
2008  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2009  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
2010  len --;
2011  goto out;
2012  }
2013 
2014  /* JPS extension by VRex */
2015  if (start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
2016  int flags, layout, type;
2017  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2018  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
2019 
2020  bytestream2_skipu(&s->gB, 4); len -= 4; /* JPS_ */
2021  bytestream2_skipu(&s->gB, 2); len -= 2; /* block length */
2022  bytestream2_skipu(&s->gB, 1); /* reserved */
2023  flags = bytestream2_get_byteu(&s->gB);
2024  layout = bytestream2_get_byteu(&s->gB);
2025  type = bytestream2_get_byteu(&s->gB);
2026  len -= 4;
2027 
2028  av_freep(&s->stereo3d);
2029  s->stereo3d = av_stereo3d_alloc();
2030  if (!s->stereo3d) {
2031  goto out;
2032  }
2033  if (type == 0) {
2034  s->stereo3d->type = AV_STEREO3D_2D;
2035  } else if (type == 1) {
2036  switch (layout) {
2037  case 0x01:
2038  s->stereo3d->type = AV_STEREO3D_LINES;
2039  break;
2040  case 0x02:
2041  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2042  break;
2043  case 0x03:
2044  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2045  break;
2046  }
2047  if (!(flags & 0x04)) {
2048  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2049  }
2050  }
2051  goto out;
2052  }
2053 
2054  /* EXIF metadata */
2055  if (start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2056  int ret;
2057 
2058  bytestream2_skipu(&s->gB, 2); // skip padding
2059  len -= 2;
2060 
2061  ret = av_exif_parse_buffer(s->avctx, s->gB.buffer, len, &s->exif_metadata, AV_EXIF_TIFF_HEADER);
2062  if (ret < 0) {
2063  av_log(s->avctx, AV_LOG_WARNING, "unable to parse EXIF buffer\n");
2064  goto out;
2065  }
2066 
2067  bytestream2_skipu(&s->gB, ret);
2068  len -= ret;
2069 
2070  goto out;
2071  }
2072 
2073  /* Apple MJPEG-A */
2074  if ((start_code == APP1) && (len > (0x28 - 8))) {
2075  id = bytestream2_get_be32u(&s->gB);
2076  len -= 4;
2077  /* Apple MJPEG-A */
2078  if (id == AV_RB32("mjpg")) {
2079  /* structure:
2080  4bytes field size
2081  4bytes pad field size
2082  4bytes next off
2083  4bytes quant off
2084  4bytes huff off
2085  4bytes image off
2086  4bytes scan off
2087  4bytes data off
2088  */
2089  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2090  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2091  }
2092  }
2093 
2094  if (start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2095  int id2;
2096  unsigned seqno;
2097  unsigned nummarkers;
2098 
2099  id = bytestream2_get_be32u(&s->gB);
2100  id2 = bytestream2_get_be24u(&s->gB);
2101  len -= 7;
2102  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2103  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2104  goto out;
2105  }
2106 
2107  bytestream2_skipu(&s->gB, 1);
2108  seqno = bytestream2_get_byteu(&s->gB);
2109  len -= 2;
2110  if (seqno == 0) {
2111  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2112  goto out;
2113  }
2114 
2115  nummarkers = bytestream2_get_byteu(&s->gB);
2116  len -= 1;
2117  if (nummarkers == 0) {
2118  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2119  goto out;
2120  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2121  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2122  goto out;
2123  } else if (seqno > nummarkers) {
2124  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2125  goto out;
2126  }
2127 
2128  /* Allocate if this is the first APP2 we've seen. */
2129  if (s->iccnum == 0) {
2130  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2131  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2132  return AVERROR(ENOMEM);
2133  }
2134  s->iccnum = nummarkers;
2135  }
2136 
2137  if (s->iccentries[seqno - 1].data) {
2138  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2139  goto out;
2140  }
2141 
2142  s->iccentries[seqno - 1].length = len;
2143  s->iccentries[seqno - 1].data = av_malloc(len);
2144  if (!s->iccentries[seqno - 1].data) {
2145  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2146  return AVERROR(ENOMEM);
2147  }
2148 
2149  bytestream2_get_bufferu(&s->gB, s->iccentries[seqno - 1].data, len);
2150  len = 0;
2151  s->iccread++;
2152 
2153  if (s->iccread > s->iccnum)
2154  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2155  }
2156 
2157 out:
2158  /* slow but needed for extreme adobe jpegs */
2159  if (len < 0)
2160  av_log(s->avctx, AV_LOG_ERROR,
2161  "mjpeg: error, decode_app parser read over the end\n");
2162  if (len > 0)
2163  bytestream2_skipu(&s->gB, len);
2164 
2165  return 0;
2166 }
2167 
2169 {
 /* Parse a COM (comment) marker segment: read the comment as a NUL-terminated
  * string, optionally log it, and recognise a few well-known "magic" comments
  * that toggle decoder quirks (AVID workaround, ITU601 colourspace, flipped
  * output, MULTISCOPE aspect). Returns 0 or a negative AVERROR code. */
2170  int len;
2171  int ret = mjpeg_parse_len(s, &len, "com");
2172  if (ret < 0)
2173  return ret;
2174  if (!len)
2175  return 0;
2176 
2177  int i;
2178  char *cbuf = av_malloc(len + 1);
2179  if (!cbuf)
2180  return AVERROR(ENOMEM);
2181 
 /* Copy the comment bytes and terminate; a trailing newline is dropped. */
2182  for (i = 0; i < len; i++)
2183  cbuf[i] = bytestream2_get_byteu(&s->gB);
2184  if (cbuf[i - 1] == '\n')
2185  cbuf[i - 1] = 0;
2186  else
2187  cbuf[i] = 0;
2188 
2189  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2190  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2191 
2192  /* buggy avid, it puts EOI only at every 10th frame */
2193  if (!strncmp(cbuf, "AVID", 4)) {
2194  parse_avid(s, cbuf, len);
2195  } else if (!strcmp(cbuf, "CS=ITU601"))
2196  s->cs_itu601 = 1;
 /* Encoders known to store the image bottom-up: mark for flipping. */
2197  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2198  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2199  s->flipped = 1;
2200  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2201  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2202  s->multiscope = 2;
2203  }
2204 
2205  av_free(cbuf);
2206 
2207  return 0;
2208 }
2209 
2210 /* return the 8 bit start code value and update the search
2211  state. Return -1 if no start code found */
2212 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2213 {
2214  const uint8_t *buf_ptr;
2215  int val;
2216  int skipped = 0;
2217 
2218  buf_ptr = *pbuf_ptr;
2219  while ((buf_ptr = memchr(buf_ptr, 0xff, buf_end - buf_ptr))) {
2220  buf_ptr++;
2221  while (buf_ptr < buf_end) {
2222  val = *buf_ptr++;
2223  if (val != 0xff) {
2224  if ((val >= SOF0) && (val <= COM))
2225  goto found;
2226  break;
2227  }
2228  }
2229  skipped++;
2230  }
2231  buf_ptr = buf_end;
2232  val = -1;
2233 found:
2234  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2235  *pbuf_ptr = buf_ptr;
2236  return val;
2237 }
2238 
2240  const uint8_t **buf_ptr, const uint8_t *buf_end,
2241  const uint8_t **unescaped_buf_ptr,
2242  int *unescaped_buf_size)
2243 {
 /* Locate the next marker in [*buf_ptr, buf_end) and expose its payload via
  * *unescaped_buf_ptr / *unescaped_buf_size. For an SOS segment the
  * entropy-coded data is copied into s->buffer with the escaping undone
  * (0xFF00 byte stuffing for baseline JPEG, 7-bit escapes for JPEG-LS);
  * every other marker is passed through untouched. Returns the marker code
  * from find_marker(), or a negative AVERROR code. */
2244  int start_code;
2245  start_code = find_marker(buf_ptr, buf_end);
2246 
2247  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2248  if (!s->buffer)
2249  return AVERROR(ENOMEM);
2250 
2251  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2252  if (start_code == SOS && !s->ls) {
2253  const uint8_t *src = *buf_ptr;
2254  const uint8_t *ptr = src;
2255  uint8_t *dst = s->buffer;
2256 
2257  #define copy_data_segment(skip) do { \
2258  ptrdiff_t length = (ptr - src) - (skip); \
2259  if (length > 0) { \
2260  memcpy(dst, src, length); \
2261  dst += length; \
2262  src = ptr; \
2263  } \
2264  } while (0)
2265 
 /* THP streams carry no byte stuffing: copy the data verbatim. */
2266  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2267  ptr = buf_end;
2268  copy_data_segment(0);
2269  } else {
2270  while (ptr < buf_end) {
2271  uint8_t x = *(ptr++);
2272 
2273  if (x == 0xff) {
2274  ptrdiff_t skip = 0;
2275  while (ptr < buf_end && x == 0xff) {
2276  x = *(ptr++);
2277  skip++;
2278  }
2279 
2280  /* 0xFF, 0xFF, ... */
2281  if (skip > 1) {
2283 
2284  /* decrement src as it is equal to ptr after the
2285  * copy_data_segment macro and we might want to
2286  * copy the current value of x later on */
2287  src--;
2288  }
2289 
 /* Keep restart markers inline; strip 0xFF00 stuffing and stop
  * the scan at any other (non-zero) marker code. */
2290  if (x < RST0 || x > RST7) {
2291  copy_data_segment(1);
2292  if (x)
2293  break;
2294  }
2295  }
2296  }
2297  if (src < ptr)
2298  copy_data_segment(0);
2299  }
2300  #undef copy_data_segment
2301 
2302  *unescaped_buf_ptr = s->buffer;
2303  *unescaped_buf_size = dst - s->buffer;
2304  memset(s->buffer + *unescaped_buf_size, 0,
2306 
2307  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %td bytes\n",
2308  (buf_end - *buf_ptr) - (dst - s->buffer));
 /* JPEG-LS: escapes work at the bit level — an 0xFF is followed by a
  * byte whose top bit must be clear, contributing only 7 payload bits. */
2309  } else if (start_code == SOS && s->ls) {
2310  const uint8_t *src = *buf_ptr;
2311  uint8_t *dst = s->buffer;
2312  int bit_count = 0;
2313  int t = 0, b = 0;
2314  PutBitContext pb;
2315 
2316  /* find marker */
2317  while (src + t < buf_end) {
2318  uint8_t x = src[t++];
2319  if (x == 0xff) {
2320  while ((src + t < buf_end) && x == 0xff)
2321  x = src[t++];
2322  if (x & 0x80) {
2323  t -= FFMIN(2, t);
2324  break;
2325  }
2326  }
2327  }
2328  bit_count = t * 8;
2329  init_put_bits(&pb, dst, t);
2330 
2331  /* unescape bitstream */
2332  while (b < t) {
2333  uint8_t x = src[b++];
2334  put_bits(&pb, 8, x);
2335  if (x == 0xFF && b < t) {
2336  x = src[b++];
2337  if (x & 0x80) {
2338  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2339  x &= 0x7f;
2340  }
2341  put_bits(&pb, 7, x);
2342  bit_count--;
2343  }
2344  }
2345  flush_put_bits(&pb);
2346 
2347  *unescaped_buf_ptr = dst;
2348  *unescaped_buf_size = (bit_count + 7) >> 3;
2349  memset(s->buffer + *unescaped_buf_size, 0,
2351  } else {
 /* Non-SOS segments need no unescaping: hand out the raw bytes. */
2352  *unescaped_buf_ptr = *buf_ptr;
2353  *unescaped_buf_size = buf_end - *buf_ptr;
2354  }
2355 
2356  return start_code;
2357 }
2358 
2360 {
 /* Free every accumulated ICC profile chunk and reset the bookkeeping so
  * a fresh set of APP2 ICC markers can be collected. */
2361  int i;
2362 
2363  if (s->iccentries) {
2364  for (i = 0; i < s->iccnum; i++)
2365  av_freep(&s->iccentries[i].data);
2366  av_freep(&s->iccentries);
2367  }
2368 
2369  s->iccread = 0;
2370  s->iccnum = 0;
2371 }
2372 
2374  int *got_frame, const AVPacket *avpkt,
2375  const uint8_t *buf, const int buf_size)
2376 {
 /* Core MJPEG frame parser: walk the packet marker by marker, dispatch each
  * segment (DHT/DQT tables, SOFn, SOS scans, APPn, COM, ...) and, once an
  * EOI is reached (or emulated), output the decoded picture after
  * post-processing (chroma upsampling, vertical flip, plane reordering,
  * Adobe colour transforms, stereo/ICC/EXIF side data). Returns the number
  * of bytes consumed from buf, or a negative AVERROR code. */
2377  MJpegDecodeContext *s = avctx->priv_data;
2378  const uint8_t *buf_end, *buf_ptr;
2379  const uint8_t *unescaped_buf_ptr;
2380  int hshift, vshift;
2381  int unescaped_buf_size;
2382  int start_code;
2383  int index;
2384  int ret = 0;
2385  int is16bit;
2386 
2387  s->force_pal8 = 0;
2388 
2389  s->buf_size = buf_size;
2390 
 /* Drop per-frame state left over from the previous packet. */
2391  av_exif_free(&s->exif_metadata);
2392  av_freep(&s->stereo3d);
2393  s->adobe_transform = -1;
2394 
2395  if (s->iccnum != 0)
2397 
 /* Reparse entry point: the LSE handler jumps back here when it returns 1
  * (PAL8 switch requested via s->force_pal8 above). */
2398 redo_for_pal8:
2399  buf_ptr = buf;
2400  buf_end = buf + buf_size;
2401  while (buf_ptr < buf_end) {
2402  /* find start next marker */
2403  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2404  &unescaped_buf_ptr,
2405  &unescaped_buf_size);
2406  /* EOF */
2407  if (start_code < 0) {
2408  break;
2409  } else if (unescaped_buf_size > INT_MAX / 8) {
2410  av_log(avctx, AV_LOG_ERROR,
2411  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2412  start_code, unescaped_buf_size, buf_size);
2413  return AVERROR_INVALIDDATA;
2414  }
2415  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n",
2416  start_code, buf_end - buf_ptr);
2417 
2418  bytestream2_init(&s->gB, unescaped_buf_ptr, unescaped_buf_size);
2419 
2420  if (avctx->debug & FF_DEBUG_STARTCODE)
2421  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2422 
2423  /* process markers */
2424  if (start_code >= RST0 && start_code <= RST7) {
2425  av_log(avctx, AV_LOG_DEBUG,
2426  "restart marker: %d\n", start_code & 0x0f);
2427  /* APP fields */
2428  } else if (start_code >= APP0 && start_code <= APP15) {
2429  if ((ret = mjpeg_decode_app(s, start_code)) < 0)
2430  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2431  av_err2str(ret));
2432  /* Comment */
2433  } else if (start_code == COM) {
2434  ret = mjpeg_decode_com(s);
2435  if (ret < 0)
2436  return ret;
2437  } else if (start_code == DQT) {
2439  if (ret < 0)
2440  return ret;
2441  }
2442 
2443  ret = -1;
2444 
2445  if (!CONFIG_JPEGLS_DECODER &&
2446  (start_code == SOF55 || start_code == LSE)) {
2447  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2448  return AVERROR(ENOSYS);
2449  }
2450 
 /* When all frames are discarded, only SOF headers are still parsed
  * (so stream parameters stay up to date); everything else is skipped. */
2451  if (avctx->skip_frame == AVDISCARD_ALL) {
2452  switch(start_code) {
2453  case SOF0:
2454  case SOF1:
2455  case SOF2:
2456  case SOF3:
2457  case SOF55:
2458  break;
2459  default:
2460  goto skip;
2461  }
2462  }
2463 
2464  switch (start_code) {
2465  case SOI:
2466  s->restart_interval = 0;
2467  s->restart_count = 0;
2468  s->raw_image_buffer = buf_ptr;
2469  s->raw_image_buffer_size = buf_end - buf_ptr;
2470  /* nothing to do on SOI */
2471  break;
2472  case DHT:
2473  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2474  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2475  goto fail;
2476  }
2477  break;
2478  case SOF0:
2479  case SOF1:
2480  if (start_code == SOF0)
2482  else
2484  s->lossless = 0;
2485  s->ls = 0;
2486  s->progressive = 0;
2487  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2488  goto fail;
2489  break;
2490  case SOF2:
2492  s->lossless = 0;
2493  s->ls = 0;
2494  s->progressive = 1;
2495  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2496  goto fail;
2497  break;
2498  case SOF3:
2500 #if FF_API_CODEC_PROPS
2504 #endif
2505  s->lossless = 1;
2506  s->ls = 0;
2507  s->progressive = 0;
2508  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2509  goto fail;
2510  break;
2511  case SOF55:
2513 #if FF_API_CODEC_PROPS
2517 #endif
2518  s->lossless = 1;
2519  s->ls = 1;
2520  s->progressive = 0;
2521  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2522  goto fail;
2523  break;
2524  case LSE:
2525  if (!CONFIG_JPEGLS_DECODER ||
2526  (ret = ff_jpegls_decode_lse(s)) < 0)
2527  goto fail;
 /* ret == 1 means the LSE handler wants a reparse (PAL8 output). */
2528  if (ret == 1)
2529  goto redo_for_pal8;
2530  break;
 /* End of image: finalize and hand the picture to the caller. */
2531  case EOI:
2532 eoi_parser:
2533  if (!avctx->hwaccel &&
2534  s->progressive && s->cur_scan && s->got_picture)
2536  s->cur_scan = 0;
2537  if (!s->got_picture) {
2538  av_log(avctx, AV_LOG_WARNING,
2539  "Found EOI before any SOF, ignoring\n");
2540  break;
2541  }
2542  if (s->interlaced) {
2543  s->bottom_field ^= 1;
2544  /* if not bottom field, do not output image yet */
2545  if (s->bottom_field == !s->interlace_polarity)
2546  break;
2547  }
2548  if (avctx->hwaccel) {
2549  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
2550  if (ret < 0)
2551  return ret;
2552 
2553  av_freep(&s->hwaccel_picture_private);
2554  }
2555  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2556  return ret;
2557  if (s->lossless)
2558  frame->flags |= AV_FRAME_FLAG_LOSSLESS;
2559  *got_frame = 1;
2560  s->got_picture = 0;
2561 
2562  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2563  int qp = FFMAX3(s->qscale[0],
2564  s->qscale[1],
2565  s->qscale[2]);
2566 
2567  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2568  }
2569 
2570  goto the_end;
2571  case SOS:
2572  s->raw_scan_buffer = buf_ptr;
2573  s->raw_scan_buffer_size = buf_end - buf_ptr;
2574 
2575  s->cur_scan++;
2576 
 /* Scan decode errors are fatal only with AV_EF_EXPLODE set. */
2577  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2578  (avctx->err_recognition & AV_EF_EXPLODE))
2579  goto fail;
2580  break;
2581  case DRI:
2582  if ((ret = mjpeg_decode_dri(s)) < 0)
2583  return ret;
2584  break;
2585  case SOF5:
2586  case SOF6:
2587  case SOF7:
2588  case SOF9:
2589  case SOF10:
2590  case SOF11:
2591  case SOF13:
2592  case SOF14:
2593  case SOF15:
2594  case JPG:
2595  av_log(avctx, AV_LOG_ERROR,
2596  "mjpeg: unsupported coding type (%x)\n", start_code);
2597  break;
2598  }
2599 
2600  if (avctx->skip_frame == AVDISCARD_ALL) {
2601  switch(start_code) {
2602  case SOF0:
2603  case SOF1:
2604  case SOF2:
2605  case SOF3:
2606  case SOF55:
2607  s->got_picture = 0;
2608  goto the_end_no_picture;
2609  }
2610  }
2611 
2612 skip:
2613  /* eof process start code */
2614  buf_ptr += bytestream2_tell(&s->gB);
2615  av_log(avctx, AV_LOG_DEBUG,
2616  "marker parser used %d bytes\n",
2617  bytestream2_tell(&s->gB));
2618  }
 /* Some encoders omit EOI; if a scan was decoded, pretend we saw one. */
2619  if (s->got_picture && s->cur_scan) {
2620  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2621  goto eoi_parser;
2622  }
2623  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2624  return AVERROR_INVALIDDATA;
2625 fail:
2626  s->got_picture = 0;
2627  return ret;
 /* Post-decode fixups applied to the finished picture. */
2628 the_end:
2629 
2630  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
2631 
 /* Horizontally upsample (in place) planes coded at reduced width
  * (upscale_h[p] = 1, 2 or 4 → ×2, ×3, ×4 widening). */
2632  if (AV_RB32(s->upscale_h)) {
2633  int p;
2635  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2636  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2637  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2638  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2639  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2640  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2641  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2642  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2643  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2644  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2645  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2646  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2647  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2648  );
2649  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2650  if (ret)
2651  return ret;
2652 
2653  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2654  for (p = 0; p<s->nb_components; p++) {
2655  uint8_t *line = s->picture_ptr->data[p];
2656  int w = s->width;
2657  int h = s->height;
2658  if (!s->upscale_h[p])
2659  continue;
2660  if (p==1 || p==2) {
2661  w = AV_CEIL_RSHIFT(w, hshift);
2662  h = AV_CEIL_RSHIFT(h, vshift);
2663  }
2664  if (s->upscale_v[p] == 1)
2665  h = (h+1)>>1;
2666  av_assert0(w > 0);
 /* Expand each row right-to-left so the narrow source data
  * is never overwritten before it is read. */
2667  for (int i = 0; i < h; i++) {
2668  if (s->upscale_h[p] == 1) {
2669  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2670  else line[w - 1] = line[(w - 1) / 2];
2671  for (index = w - 2; index > 0; index--) {
2672  if (is16bit)
2673  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2674  else
2675  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2676  }
2677  } else if (s->upscale_h[p] == 2) {
2678  if (is16bit) {
2679  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2680  if (w > 1)
2681  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2682  } else {
2683  line[w - 1] = line[(w - 1) / 3];
2684  if (w > 1)
2685  line[w - 2] = line[w - 1];
2686  }
2687  for (index = w - 3; index > 0; index--) {
2688  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2689  }
2690  } else if (s->upscale_h[p] == 4){
2691  if (is16bit) {
2692  uint16_t *line16 = (uint16_t *) line;
2693  line16[w - 1] = line16[(w - 1) >> 2];
2694  if (w > 1)
2695  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2696  if (w > 2)
2697  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2698  } else {
2699  line[w - 1] = line[(w - 1) >> 2];
2700  if (w > 1)
2701  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2702  if (w > 2)
2703  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2704  }
2705  for (index = w - 4; index > 0; index--)
2706  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2707  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2708  }
2709  line += s->linesize[p];
2710  }
2711  }
2712  }
 /* Vertically upsample (bottom-up, in place) planes coded at reduced
  * height, interpolating between neighbouring source rows. */
2713  if (AV_RB32(s->upscale_v)) {
2714  int p;
2716  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2717  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2718  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2719  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2720  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2721  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2722  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2723  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2724  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2725  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2726  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2727  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2728  );
2729  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2730  if (ret)
2731  return ret;
2732 
2733  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2734  for (p = 0; p < s->nb_components; p++) {
2735  uint8_t *dst;
2736  int w = s->width;
2737  int h = s->height;
2738  if (!s->upscale_v[p])
2739  continue;
2740  if (p==1 || p==2) {
2741  w = AV_CEIL_RSHIFT(w, hshift);
2742  h = AV_CEIL_RSHIFT(h, vshift);
2743  }
2744  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2745  for (int i = h - 1; i; i--) {
2746  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2747  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2748  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2749  memcpy(dst, src1, w);
2750  } else {
2751  for (index = 0; index < w; index++)
2752  dst[index] = (src1[index] + src2[index]) >> 1;
2753  }
2754  dst -= s->linesize[p];
2755  }
2756  }
2757  }
 /* Bottom-up streams (see mjpeg_decode_com): output by pointing each plane
  * at its last row and negating the stride. */
2758  if (s->flipped && !s->rgb) {
2759  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2760  if (ret)
2761  return ret;
2762 
2763  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2764  for (index=0; index<s->nb_components; index++) {
2765  int h = frame->height;
2766  if (index && index < 3)
2767  h = AV_CEIL_RSHIFT(h, vshift);
2768  if (frame->data[index]) {
2769  frame->data[index] += (h - 1) * frame->linesize[index];
2770  frame->linesize[index] *= -1;
2771  }
2772  }
2773  }
2774 
 /* Rotate the three planes: data[] becomes (old 1, old 2, old 0). */
2775  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2776  av_assert0(s->nb_components == 3);
2777  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2778  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2779  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2780  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2781  }
2782 
 /* Adobe transform 0 + GBRAP: scale each channel by its alpha value
  * (x*k*257 >> 16 ≈ x*k/255), rotate the channels, then force alpha
  * fully opaque. */
2783  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2784  int w = s->picture_ptr->width;
2785  int h = s->picture_ptr->height;
2786  av_assert0(s->nb_components == 4);
2787  for (int i = 0; i < h; i++) {
2788  int j;
2789  uint8_t *dst[4];
2790  for (index=0; index<4; index++) {
2791  dst[index] = s->picture_ptr->data[index]
2792  + s->picture_ptr->linesize[index]*i;
2793  }
2794  for (j=0; j<w; j++) {
2795  int k = dst[3][j];
2796  int r = dst[0][j] * k;
2797  int g = dst[1][j] * k;
2798  int b = dst[2][j] * k;
2799  dst[0][j] = g*257 >> 16;
2800  dst[1][j] = b*257 >> 16;
2801  dst[2][j] = r*257 >> 16;
2802  }
2803  memset(dst[3], 255, w);
2804  }
2805  }
 /* Adobe transform 2 + YUVA444P: invert/re-center the components, scale
  * by alpha, then force alpha fully opaque. */
2806  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2807  int w = s->picture_ptr->width;
2808  int h = s->picture_ptr->height;
2809  av_assert0(s->nb_components == 4);
2810  for (int i = 0; i < h; i++) {
2811  int j;
2812  uint8_t *dst[4];
2813  for (index=0; index<4; index++) {
2814  dst[index] = s->picture_ptr->data[index]
2815  + s->picture_ptr->linesize[index]*i;
2816  }
2817  for (j=0; j<w; j++) {
2818  int k = dst[3][j];
2819  int r = (255 - dst[0][j]) * k;
2820  int g = (128 - dst[1][j]) * k;
2821  int b = (128 - dst[2][j]) * k;
2822  dst[0][j] = r*257 >> 16;
2823  dst[1][j] = (g*257 >> 16) + 128;
2824  dst[2][j] = (b*257 >> 16) + 128;
2825  }
2826  memset(dst[3], 255, w);
2827  }
2828  }
2829 
 /* Export stereo 3D layout collected from the _JPS APP3 marker. */
2830  if (s->stereo3d) {
2832  if (stereo) {
2833  stereo->type = s->stereo3d->type;
2834  stereo->flags = s->stereo3d->flags;
2835  }
2836  av_freep(&s->stereo3d);
2837  }
2838 
 /* Once all announced APP2 ICC chunks have been read, concatenate them
  * into a single ICC_PROFILE side-data blob. */
2839  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2840  AVFrameSideData *sd;
2841  size_t offset = 0;
2842  int total_size = 0;
2843 
2844  /* Sum size of all parts. */
2845  for (int i = 0; i < s->iccnum; i++)
2846  total_size += s->iccentries[i].length;
2847 
2848  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
2849  if (ret < 0) {
2850  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2851  return ret;
2852  }
2853 
2854  if (sd) {
2855  /* Reassemble the parts, which are now in-order. */
2856  for (int i = 0; i < s->iccnum; i++) {
2857  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2858  offset += s->iccentries[i].length;
2859  }
2860  }
2861  }
2862 
2863  if (s->exif_metadata.entries) {
2864  ret = ff_decode_exif_attach_ifd(avctx, frame, &s->exif_metadata);
2865  av_exif_free(&s->exif_metadata);
2866  if (ret < 0)
2867  av_log(avctx, AV_LOG_WARNING, "couldn't attach EXIF metadata\n");
2868  }
2869 
 /* AVID (AVRn/AVDJ) streams may code more rows than displayed: restore the
  * coded height and crop the surplus from the top. */
2870  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2871  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2872  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2873  avctx->coded_height > s->orig_height) {
2874  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2875  frame->crop_top = frame->height - avctx->height;
2876  }
2877 
2878 the_end_no_picture:
2879  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %td bytes\n",
2880  buf_end - buf_ptr);
2881  return buf_ptr - buf;
2882 }
2883 
2884 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2885  AVPacket *avpkt)
2886 {
2887  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2888  avpkt, avpkt->data, avpkt->size);
2889 }
2890 
2891 
2892 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2893  * even without having called ff_mjpeg_decode_init(). */
2895 {
 /* Release everything owned by the decoder context: frames, scratch
  * buffers, VLC tables, per-component block storage and side-data state.
  * Per the note above, mxpeg may call this on a blank context, so all of
  * the frees below must tolerate NULL/zeroed fields. */
2896  MJpegDecodeContext *s = avctx->priv_data;
2897  int i, j;
2898 
2899  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2900  av_log(avctx, AV_LOG_INFO, "Single field\n");
2901  }
2902 
2903  av_frame_free(&s->picture);
2904  s->picture_ptr = NULL;
2905 
2906  av_frame_free(&s->smv_frame);
2907 
2908  av_freep(&s->buffer);
2909  av_freep(&s->stereo3d);
2910  av_freep(&s->ljpeg_buffer);
2911  s->ljpeg_buffer_size = 0;
2912 
 /* 3 table classes x 4 table slots of Huffman VLCs. */
2913  for (i = 0; i < 3; i++) {
2914  for (j = 0; j < 4; j++)
2915  ff_vlc_free(&s->vlcs[i][j]);
2916  }
2917  for (i = 0; i < MAX_COMPONENTS; i++) {
2918  av_freep(&s->blocks[i]);
2919  av_freep(&s->last_nnz[i]);
2920  }
2921  av_exif_free(&s->exif_metadata);
2922 
2924 
2925  av_freep(&s->hwaccel_picture_private);
2926  av_freep(&s->jls_state);
2927 
2928  return 0;
2929 }
2930 
2932 {
 /* Flush callback: drop any partially decoded picture and pending SMV
  * sub-frame state (e.g. after a seek). */
2933  MJpegDecodeContext *s = avctx->priv_data;
2934  s->got_picture = 0;
2935 
2936  s->smv_next_frame = 0;
2937  av_frame_unref(s->smv_frame);
2938 }
2939 
2940 #if CONFIG_MJPEG_DECODER
2941 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2942 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
 /* Decoder private options (AVClass): currently only the legacy
  * external-Huffman-table switch. */
2943 static const AVOption options[] = {
2944  { "extern_huff", "Use external huffman table.",
2945  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2946  { NULL },
2947 };
2948 
 /* AVClass wiring the options table above into the MJPEG decoder. */
2949 static const AVClass mjpegdec_class = {
2950  .class_name = "MJPEG decoder",
2951  .item_name = av_default_item_name,
2952  .option = options,
2953  .version = LIBAVUTIL_VERSION_INT,
2954 };
2955 
 /* Codec registration for the plain MJPEG decoder, including the optional
  * NVDEC/VAAPI hardware acceleration configs. */
2956 const FFCodec ff_mjpeg_decoder = {
2957  .p.name = "mjpeg",
2958  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
2959  .p.type = AVMEDIA_TYPE_VIDEO,
2960  .p.id = AV_CODEC_ID_MJPEG,
2961  .priv_data_size = sizeof(MJpegDecodeContext),
2965  .flush = decode_flush,
2966  .p.capabilities = AV_CODEC_CAP_DR1,
2967  .p.max_lowres = 3,
2968  .p.priv_class = &mjpegdec_class,
2969  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2970  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
2973  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2974 #if CONFIG_MJPEG_NVDEC_HWACCEL
2975  HWACCEL_NVDEC(mjpeg),
2976 #endif
2977 #if CONFIG_MJPEG_VAAPI_HWACCEL
2978  HWACCEL_VAAPI(mjpeg),
2979 #endif
2980  NULL
2981  },
2982 };
2983 #endif
2984 #if CONFIG_THP_DECODER
 /* Codec registration for Nintendo GameCube THP video, which reuses the
  * MJPEG decode path (THP has no byte stuffing — see ff_mjpeg_find_marker). */
2985 const FFCodec ff_thp_decoder = {
2986  .p.name = "thp",
2987  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
2988  .p.type = AVMEDIA_TYPE_VIDEO,
2989  .p.id = AV_CODEC_ID_THP,
2990  .priv_data_size = sizeof(MJpegDecodeContext),
2994  .flush = decode_flush,
2995  .p.capabilities = AV_CODEC_CAP_DR1,
2996  .p.max_lowres = 3,
2997  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2998 };
2999 #endif
3000 
3001 #if CONFIG_SMVJPEG_DECODER
3002 // SMV JPEG just stacks several output frames into one JPEG picture
3003 // we handle that by setting up the cropping parameters appropriately
3004 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
3005 {
3006  MJpegDecodeContext *s = avctx->priv_data;
3007 
3008  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
3009 
3010  frame->width = avctx->coded_width;
3011  frame->height = avctx->coded_height;
3012  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3013  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3014 
3015  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3016  s->smv_frame->pts += s->smv_frame->duration;
3017  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3018 
3019  if (s->smv_next_frame == 0)
3020  av_frame_unref(s->smv_frame);
3021 }
3022 
 /* receive_frame callback for SMV JPEG: decode a whole stacked JPEG into
  * s->smv_frame once, then hand out one cropped sub-frame per call until
  * every slot has been emitted. */
3023 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3024 {
3025  MJpegDecodeContext *s = avctx->priv_data;
3026  AVPacket *const pkt = avctx->internal->in_pkt;
3027  int got_frame = 0;
3028  int ret;
3029 
 /* Sub-frames remain from the previously decoded JPEG: no new packet. */
3030  if (s->smv_next_frame > 0)
3031  goto return_frame;
3032 
3033  ret = ff_decode_get_packet(avctx, pkt);
3034  if (ret < 0)
3035  return ret;
3036 
3037  av_frame_unref(s->smv_frame);
3038 
3039  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3040  s->smv_frame->pkt_dts = pkt->dts;
3042  if (ret < 0)
3043  return ret;
3044 
3045  if (!got_frame)
3046  return AVERROR(EAGAIN);
3047 
3048  // packet duration covers all the frames in the packet
3049  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3050 
3051 return_frame:
3052  av_assert0(s->smv_frame->buf[0]);
3053  ret = av_frame_ref(frame, s->smv_frame);
3054  if (ret < 0)
3055  return ret;
3056 
3057  smv_process_frame(avctx, frame);
3058  return 0;
3059 }
3060 
 /* Codec registration for SMV JPEG; uses the receive_frame API above and
  * exports cropping so callers see one sub-frame at a time. */
3061 const FFCodec ff_smvjpeg_decoder = {
3062  .p.name = "smvjpeg",
3063  CODEC_LONG_NAME("SMV JPEG"),
3064  .p.type = AVMEDIA_TYPE_VIDEO,
3065  .p.id = AV_CODEC_ID_SMVJPEG,
3066  .priv_data_size = sizeof(MJpegDecodeContext),
3069  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3070  .flush = decode_flush,
3071  .p.capabilities = AV_CODEC_CAP_DR1,
3072  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3074 };
3075 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
flags
const SwsFlags flags[]
Definition: swscale.c:61
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1413
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:280
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:249
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:208
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:360
opt.h
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:881
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1208
out
static FILE * out
Definition: movenc.c:55
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1440
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
APP1
@ APP1
Definition: mjpeg.h:80
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:992
SOF0
@ SOF0
Definition: mjpeg.h:39
src1
const pixel * src1
Definition: h264pred_template.c:420
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1406
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:573
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:260
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:254
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:111
mask
int mask
Definition: mediacodecdec_common.c:154
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:595
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:588
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:690
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:149
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:213
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:517
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1383
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:251
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:383
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:245
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1283
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1456
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:120
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1670
fail
#define fail()
Definition: checkasm.h:218
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:597
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:109
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2373
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2168
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:58
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:658
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:191
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:895
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:551
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1646
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:173
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:212
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1075
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:189
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:552
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:355
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:550
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2359
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2894
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
SOF55
@ SOF55
JPEG-LS.
Definition: mjpeg.h:103
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:212
mjpeg_parse_len
static int mjpeg_parse_len(MJpegDecodeContext *s, int *plen, const char *name)
Definition: mjpegdec.c:190
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:561
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:529
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2430
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
av_clip_int16
#define av_clip_int16
Definition: common.h:115
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:530
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1643
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:174
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:202
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
Definition: mjpegdec.c:833
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
options
Definition: swscale.c:43
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:235
MJpegDecodeContext
Definition: mjpegdec.h:56
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1471
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:1010
lowres
static int lowres
Definition: ffplay.c:332
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1592
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:651
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s, int start_code)
Definition: mjpegdec.c:1868
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1705
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1729
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1110
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:589
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2884
av_bswap32
#define av_bswap32
Definition: bswap.h:47
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:912
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1678
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:177
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2127
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
SOF15
@ SOF15
Definition: mjpeg.h:54
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:294
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2212
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:194
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:180
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2334
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:848
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:522
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:176
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:373
VD
#define VD
Definition: amfdec.c:664
src2
const pixel * src2
Definition: h264pred_template.c:421
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:215
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1856
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:63
decode_flush
static av_cold void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2931
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1390
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:684
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1886
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:984
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:35
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
av_malloc
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:98
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:560
hwaccel
static const char * hwaccel
Definition: ffplay.c:356
pos
unsigned int pos
Definition: spdifenc.c:414
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1387
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:549
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2239
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
AVCodecContext
main external API structure.
Definition: avcodec.h:439
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:355
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:247
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:799
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1626
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:264
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1382
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:304
APP0
@ APP0
Definition: mjpeg.h:79
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
SOI
@ SOI
Definition: mjpeg.h:70
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
SOF1
@ SOF1
Definition: mjpeg.h:40
w
uint8_t w
Definition: llvidencdsp.c:39
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:464
ff_mjpeg_bits_dc_luminance
const FF_VISIBILITY_PUSH_HIDDEN uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1645
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:47
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:175
AV_RB24
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:383
src
#define src
Definition: vp8dsp.c:248
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347