FFmpeg
mjpegdec.c
Go to the documentation of this file.
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/attributes.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/mem.h"
39 #include "libavutil/opt.h"
40 #include "avcodec.h"
41 #include "blockdsp.h"
42 #include "codec_internal.h"
43 #include "copy_block.h"
44 #include "decode.h"
45 #include "exif.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "jpegtables.h"
51 #include "mjpeg.h"
52 #include "mjpegdec.h"
53 #include "jpeglsdec.h"
54 #include "profiles.h"
55 #include "put_bits.h"
56 
57 
59 {
60  static const struct {
61  int class;
62  int index;
63  const uint8_t *bits;
64  const uint8_t *values;
65  int length;
66  } ht[] = {
68  ff_mjpeg_val_dc, 12 },
70  ff_mjpeg_val_dc, 12 },
79  };
80  int i, ret;
81 
82  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
83  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
84  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
85  ht[i].bits, ht[i].values,
86  ht[i].class == 1, s->avctx);
87  if (ret < 0)
88  return ret;
89 
90  if (ht[i].class < 2) {
91  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
92  ht[i].bits + 1, 16);
93  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
94  ht[i].values, ht[i].length);
95  }
96  }
97 
98  return 0;
99 }
100 
101 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
102 {
103  if (len > 12 && buf[12] == 1) /* 1 - NTSC */
104  s->interlace_polarity = 1;
105  if (len > 12 && buf[12] == 2) /* 2 - PAL */
106  s->interlace_polarity = 0;
107  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
108  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 12 ? buf[12] : -1);
109 }
110 
111 static void init_idct(AVCodecContext *avctx)
112 {
113  MJpegDecodeContext *s = avctx->priv_data;
114 
115  ff_idctdsp_init(&s->idsp, avctx);
116  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
117  s->idsp.idct_permutation);
118 }
119 
121 {
122  MJpegDecodeContext *s = avctx->priv_data;
123  int ret;
124 
125  if (!s->picture_ptr) {
126  s->picture = av_frame_alloc();
127  if (!s->picture)
128  return AVERROR(ENOMEM);
129  s->picture_ptr = s->picture;
130  }
131 
132  s->avctx = avctx;
133  ff_blockdsp_init(&s->bdsp);
134  init_idct(avctx);
135  s->buffer_size = 0;
136  s->buffer = NULL;
137  s->start_code = -1;
138  s->first_picture = 1;
139  s->got_picture = 0;
140  s->orig_height = avctx->coded_height;
142  avctx->colorspace = AVCOL_SPC_BT470BG;
143  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
144 
145  if ((ret = init_default_huffman_tables(s)) < 0)
146  return ret;
147 
148  if (s->extern_huff) {
149  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
150  bytestream2_init(&s->gB, avctx->extradata, avctx->extradata_size);
151  if (ff_mjpeg_decode_dht(s)) {
152  av_log(avctx, AV_LOG_ERROR,
153  "error using external huffman table, switching back to internal\n");
154  if ((ret = init_default_huffman_tables(s)) < 0)
155  return ret;
156  }
157  }
158  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
159  s->interlace_polarity = 1; /* bottom field first */
160  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
161  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
162  if (avctx->codec_tag == AV_RL32("MJPG"))
163  s->interlace_polarity = 1;
164  }
165 
166  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
167  if (avctx->extradata_size >= 4)
168  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
169 
170  if (s->smv_frames_per_jpeg <= 0) {
171  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
172  return AVERROR_INVALIDDATA;
173  }
174 
175  s->smv_frame = av_frame_alloc();
176  if (!s->smv_frame)
177  return AVERROR(ENOMEM);
178  } else if (avctx->extradata_size > 8
179  && AV_RL32(avctx->extradata) == 0x2C
180  && AV_RL32(avctx->extradata+4) == 0x18) {
181  parse_avid(s, avctx->extradata, avctx->extradata_size);
182  }
183 
184  if (avctx->codec->id == AV_CODEC_ID_AMV)
185  s->flipped = 1;
186 
187  return 0;
188 }
189 
190 
191 static int mjpeg_parse_len(MJpegDecodeContext *s, int *plen, const char *name)
192 {
193  int len = bytestream2_get_be16u(&s->gB);
194  if (len < 2 || bytestream2_get_bytes_left(&s->gB) < (len - 2)) {
195  av_log(s->avctx, AV_LOG_ERROR, "%s: invalid len %d\n", name, len);
196  return AVERROR_INVALIDDATA;
197  }
198  *plen = len - 2;
199  return 0;
200 }
201 
202 /* quantize tables */
204 {
205  int len, index, i;
206 
207  int ret = mjpeg_parse_len(s, &len, "dqt");
208  if (ret < 0)
209  return ret;
210 
211  while (len >= 65) {
212  uint8_t b = bytestream2_get_byteu(&s->gB);
213  int pr = b >> 4;
214  if (pr > 1) {
215  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
216  return AVERROR_INVALIDDATA;
217  }
218  if (len < (1 + 64 * (1+pr)))
219  return AVERROR_INVALIDDATA;
220  index = b & 0x0F;
221  if (index >= 4)
222  return AVERROR_INVALIDDATA;
223  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
224  /* read quant table */
225  for (i = 0; i < 64; i++) {
226  s->quant_matrixes[index][i] = pr ? bytestream2_get_be16u(&s->gB) : bytestream2_get_byteu(&s->gB);
227  if (s->quant_matrixes[index][i] == 0) {
228  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
229  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
230  if (s->avctx->err_recognition & AV_EF_EXPLODE)
231  return AVERROR_INVALIDDATA;
232  }
233  }
234 
235  // XXX FIXME fine-tune, and perhaps add dc too
236  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
237  s->quant_matrixes[index][8]) >> 1;
238  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
239  index, s->qscale[index]);
240  len -= 1 + 64 * (1+pr);
241  }
242  return 0;
243 }
244 
245 /* decode huffman tables and build VLC decoders */
247 {
248  int len, index, i, class, n, v;
249  uint8_t bits_table[17];
250  uint8_t val_table[256];
251  int ret = 0;
252 
253  ret = mjpeg_parse_len(s, &len, "dht");
254  if (ret < 0)
255  return ret;
256 
257  while (len > 0) {
258  if (len < 17)
259  return AVERROR_INVALIDDATA;
260  uint8_t b = bytestream2_get_byteu(&s->gB);
261  class = b >> 4;
262  if (class >= 2)
263  return AVERROR_INVALIDDATA;
264  index = b & 0x0F;
265  if (index >= 4)
266  return AVERROR_INVALIDDATA;
267  n = 0;
268  for (i = 1; i <= 16; i++) {
269  bits_table[i] = bytestream2_get_byteu(&s->gB);
270  n += bits_table[i];
271  }
272  len -= 17;
273  if (len < n || n > 256)
274  return AVERROR_INVALIDDATA;
275 
276  for (i = 0; i < n; i++) {
277  v = bytestream2_get_byteu(&s->gB);
278  val_table[i] = v;
279  }
280  len -= n;
281 
282  /* build VLC and flush previous vlc if present */
283  ff_vlc_free(&s->vlcs[class][index]);
284  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
285  class, index, n);
286  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
287  val_table, class > 0, s->avctx)) < 0)
288  return ret;
289 
290  if (class > 0) {
291  ff_vlc_free(&s->vlcs[2][index]);
292  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
293  val_table, 0, s->avctx)) < 0)
294  return ret;
295  }
296 
297  for (i = 0; i < 16; i++)
298  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
299  for (i = 0; i < 256; i++)
300  s->raw_huffman_values[class][index][i] = val_table[i];
301  }
302  return 0;
303 }
304 
306 {
307  int len, nb_components, i, width, height, bits, ret, size_change;
308  unsigned pix_fmt_id;
309  int h_count[MAX_COMPONENTS] = { 0 };
310  int v_count[MAX_COMPONENTS] = { 0 };
311 
312  s->cur_scan = 0;
313  memset(s->upscale_h, 0, sizeof(s->upscale_h));
314  memset(s->upscale_v, 0, sizeof(s->upscale_v));
315 
316  ret = mjpeg_parse_len(s, &len, "sof");
317  if (ret < 0)
318  return ret;
319  if (len < 6)
320  return AVERROR_INVALIDDATA;
321  bits = bytestream2_get_byteu(&s->gB);
322 
323  if (bits > 16 || bits < 1) {
324  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
325  return AVERROR_INVALIDDATA;
326  }
327 
328  if (s->avctx->bits_per_raw_sample != bits) {
329  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
330  s->avctx->bits_per_raw_sample = bits;
331  init_idct(s->avctx);
332  }
333  if (s->pegasus_rct)
334  bits = 9;
335  if (bits == 9 && !s->pegasus_rct)
336  s->rct = 1; // FIXME ugly
337 
338  if(s->lossless && s->avctx->lowres){
339  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
340  return AVERROR(ENOSYS);
341  }
342 
343  height = bytestream2_get_be16u(&s->gB);
344  width = bytestream2_get_be16u(&s->gB);
345 
346  // HACK for odd_height.mov
347  if (s->interlaced && s->width == width && s->height == height + 1)
348  height= s->height;
349 
350  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
351  if (av_image_check_size(width, height, 0, s->avctx) < 0)
352  return AVERROR_INVALIDDATA;
353 
354  if (!s->progressive && !s->ls) {
355  // A valid frame requires at least 1 bit for DC + 1 bit for AC for each 8x8 block.
356  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
357  return AVERROR_INVALIDDATA;
358  }
359 
360  nb_components = bytestream2_get_byteu(&s->gB);
361  if (nb_components <= 0 ||
362  nb_components > MAX_COMPONENTS)
363  return AVERROR_INVALIDDATA;
364  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
365  if (nb_components != s->nb_components) {
366  av_log(s->avctx, AV_LOG_ERROR,
367  "nb_components changing in interlaced picture\n");
368  return AVERROR_INVALIDDATA;
369  }
370  }
371  if (s->ls && !(bits <= 8 || nb_components == 1)) {
373  "JPEG-LS that is not <= 8 "
374  "bits/component or 16-bit gray");
375  return AVERROR_PATCHWELCOME;
376  }
377  len -= 6;
378  if (len != 3 * nb_components) {
379  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
380  return AVERROR_INVALIDDATA;
381  }
382 
383  s->nb_components = nb_components;
384  s->h_max = 1;
385  s->v_max = 1;
386  for (i = 0; i < nb_components; i++) {
387  /* component id */
388  s->component_id[i] = bytestream2_get_byteu(&s->gB);
389  uint8_t b = bytestream2_get_byteu(&s->gB);
390  h_count[i] = b >> 4;
391  v_count[i] = b & 0x0F;
392  /* compute hmax and vmax (only used in interleaved case) */
393  if (h_count[i] > s->h_max)
394  s->h_max = h_count[i];
395  if (v_count[i] > s->v_max)
396  s->v_max = v_count[i];
397  s->quant_index[i] = bytestream2_get_byteu(&s->gB);
398  if (s->quant_index[i] >= 4) {
399  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
400  return AVERROR_INVALIDDATA;
401  }
402  if (!h_count[i] || !v_count[i]) {
403  av_log(s->avctx, AV_LOG_ERROR,
404  "Invalid sampling factor in component %d %d:%d\n",
405  i, h_count[i], v_count[i]);
406  return AVERROR_INVALIDDATA;
407  }
408 
409  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
410  i, h_count[i], v_count[i],
411  s->component_id[i], s->quant_index[i]);
412  }
413  if ( nb_components == 4
414  && s->component_id[0] == 'C'
415  && s->component_id[1] == 'M'
416  && s->component_id[2] == 'Y'
417  && s->component_id[3] == 'K')
418  s->adobe_transform = 0;
419 
420  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
421  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
422  return AVERROR_PATCHWELCOME;
423  }
424 
425  if (s->bayer) {
426  if (nb_components == 2) {
427  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
428  width stored in their SOF3 markers is the width of each one. We only output
429  a single component, therefore we need to adjust the output image width. We
430  handle the deinterleaving (but not the debayering) in this file. */
431  width *= 2;
432  }
433  /* They can also contain 1 component, which is double the width and half the height
434  of the final image (rows are interleaved). We don't handle the decoding in this
435  file, but leave that to the TIFF/DNG decoder. */
436  }
437 
438  /* if different size, realloc/alloc picture */
439  if (width != s->width || height != s->height || bits != s->bits ||
440  memcmp(s->h_count, h_count, sizeof(h_count)) ||
441  memcmp(s->v_count, v_count, sizeof(v_count))) {
442  size_change = 1;
443 
444  s->width = width;
445  s->height = height;
446  s->bits = bits;
447  memcpy(s->h_count, h_count, sizeof(h_count));
448  memcpy(s->v_count, v_count, sizeof(v_count));
449  s->interlaced = 0;
450  s->got_picture = 0;
451 
452  /* test interlaced mode */
453  if (s->first_picture &&
454  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
455  s->orig_height != 0 &&
456  s->height < ((s->orig_height * 3) / 4)) {
457  s->interlaced = 1;
458  s->bottom_field = s->interlace_polarity;
459  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
460  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
461  height *= 2;
462  }
463 
464  ret = ff_set_dimensions(s->avctx, width, height);
465  if (ret < 0)
466  return ret;
467 
468  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
469  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
470  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
471  s->orig_height < height)
472  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
473 
474  s->first_picture = 0;
475  } else {
476  size_change = 0;
477  }
478 
479  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
480  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
481  if (s->avctx->height <= 0)
482  return AVERROR_INVALIDDATA;
483  }
484  if (s->bayer && s->progressive) {
485  avpriv_request_sample(s->avctx, "progressively coded bayer picture");
486  return AVERROR_INVALIDDATA;
487  }
488 
489  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
490  if (s->progressive) {
491  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
492  return AVERROR_INVALIDDATA;
493  }
494  } else {
495  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
496  s->rgb = 1;
497  else if (!s->lossless)
498  s->rgb = 0;
499  /* XXX: not complete test ! */
500  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
501  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
502  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
503  (s->h_count[3] << 4) | s->v_count[3];
504  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
505  /* NOTE we do not allocate pictures large enough for the possible
506  * padding of h/v_count being 4 */
507  if (!(pix_fmt_id & 0xD0D0D0D0))
508  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
509  if (!(pix_fmt_id & 0x0D0D0D0D))
510  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
511 
512  for (i = 0; i < 8; i++) {
513  int j = 6 + (i&1) - (i&6);
514  int is = (pix_fmt_id >> (4*i)) & 0xF;
515  int js = (pix_fmt_id >> (4*j)) & 0xF;
516 
517  if (is == 1 && js != 2 && (i < 2 || i > 5))
518  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
519  if (is == 1 && js != 2 && (i < 2 || i > 5))
520  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
521 
522  if (is == 1 && js == 2) {
523  if (i & 1) s->upscale_h[j/2] = 1;
524  else s->upscale_v[j/2] = 1;
525  }
526  }
527 
528  if (s->bayer) {
529  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
530  goto unk_pixfmt;
531  }
532 
533  switch (pix_fmt_id) {
534  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
535  if (!s->bayer)
536  goto unk_pixfmt;
537  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
538  break;
539  case 0x11111100:
540  if (s->rgb)
541  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
542  else {
543  if ( s->adobe_transform == 0
544  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
545  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
546  } else {
547  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
548  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
549  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
550  }
551  }
552  av_assert0(s->nb_components == 3);
553  break;
554  case 0x11111111:
555  if (s->rgb)
556  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
557  else {
558  if (s->adobe_transform == 0 && s->bits <= 8) {
559  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
560  } else {
561  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
562  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
563  }
564  }
565  av_assert0(s->nb_components == 4);
566  break;
567  case 0x11412100:
568  if (s->bits > 8)
569  goto unk_pixfmt;
570  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
571  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
572  s->upscale_h[0] = 4;
573  s->upscale_h[1] = 0;
574  s->upscale_h[2] = 1;
575  } else {
576  goto unk_pixfmt;
577  }
578  break;
579  case 0x22111122:
580  case 0x22111111:
581  if (s->adobe_transform == 0 && s->bits <= 8) {
582  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
583  s->upscale_v[1] = s->upscale_v[2] = 1;
584  s->upscale_h[1] = s->upscale_h[2] = 1;
585  } else if (s->adobe_transform == 2 && s->bits <= 8) {
586  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
587  s->upscale_v[1] = s->upscale_v[2] = 1;
588  s->upscale_h[1] = s->upscale_h[2] = 1;
589  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
590  } else {
591  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
592  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
593  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
594  }
595  av_assert0(s->nb_components == 4);
596  break;
597  case 0x12121100:
598  case 0x22122100:
599  case 0x21211100:
600  case 0x21112100:
601  case 0x22211200:
602  case 0x22221100:
603  case 0x22112200:
604  case 0x11222200:
605  if (s->bits > 8)
606  goto unk_pixfmt;
607  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
608  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
609  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
610  } else {
611  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
612  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
613  }
614  break;
615  case 0x11000000:
616  case 0x13000000:
617  case 0x14000000:
618  case 0x31000000:
619  case 0x33000000:
620  case 0x34000000:
621  case 0x41000000:
622  case 0x43000000:
623  case 0x44000000:
624  if(s->bits <= 8)
625  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
626  else
627  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
628  break;
629  case 0x12111100:
630  case 0x14121200:
631  case 0x14111100:
632  case 0x22211100:
633  case 0x22112100:
634  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
635  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
636  else
637  goto unk_pixfmt;
638  s->upscale_v[1] = s->upscale_v[2] = 1;
639  } else {
640  if (pix_fmt_id == 0x14111100)
641  s->upscale_v[1] = s->upscale_v[2] = 1;
642  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
643  else
644  goto unk_pixfmt;
645  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
646  }
647  break;
648  case 0x21111100:
649  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
650  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
651  else
652  goto unk_pixfmt;
653  s->upscale_h[1] = s->upscale_h[2] = 1;
654  } else {
655  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
656  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
657  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
658  }
659  break;
660  case 0x11311100:
661  if (s->bits > 8)
662  goto unk_pixfmt;
663  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
664  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
665  else
666  goto unk_pixfmt;
667  s->upscale_h[0] = s->upscale_h[2] = 2;
668  break;
669  case 0x31111100:
670  if (s->bits > 8)
671  goto unk_pixfmt;
672  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
673  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
674  s->upscale_h[1] = s->upscale_h[2] = 2;
675  break;
676  case 0x22121100:
677  case 0x22111200:
678  case 0x41211100:
679  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
680  else
681  goto unk_pixfmt;
682  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
683  break;
684  case 0x22111100:
685  case 0x23111100:
686  case 0x42111100:
687  case 0x24111100:
688  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
689  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
690  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
691  if (pix_fmt_id == 0x42111100) {
692  if (s->bits > 8)
693  goto unk_pixfmt;
694  s->upscale_h[1] = s->upscale_h[2] = 1;
695  } else if (pix_fmt_id == 0x24111100) {
696  if (s->bits > 8)
697  goto unk_pixfmt;
698  s->upscale_v[1] = s->upscale_v[2] = 1;
699  } else if (pix_fmt_id == 0x23111100) {
700  if (s->bits > 8)
701  goto unk_pixfmt;
702  s->upscale_v[1] = s->upscale_v[2] = 2;
703  }
704  break;
705  case 0x41111100:
706  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
707  else
708  goto unk_pixfmt;
709  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
710  break;
711  default:
712  unk_pixfmt:
713  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
714  memset(s->upscale_h, 0, sizeof(s->upscale_h));
715  memset(s->upscale_v, 0, sizeof(s->upscale_v));
716  return AVERROR_PATCHWELCOME;
717  }
718  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
719  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
720  return AVERROR_PATCHWELCOME;
721  }
722  if (s->ls) {
723  memset(s->upscale_h, 0, sizeof(s->upscale_h));
724  memset(s->upscale_v, 0, sizeof(s->upscale_v));
725  if (s->nb_components == 3) {
726  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
727  } else if (s->nb_components != 1) {
728  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
729  return AVERROR_PATCHWELCOME;
730  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
731  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
732  else if (s->bits <= 8)
733  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
734  else
735  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
736  }
737 
738  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
739  if (!s->pix_desc) {
740  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
741  return AVERROR_BUG;
742  }
743 
744  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
745  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
746  } else {
747  enum AVPixelFormat pix_fmts[] = {
748 #if CONFIG_MJPEG_NVDEC_HWACCEL
750 #endif
751 #if CONFIG_MJPEG_VAAPI_HWACCEL
753 #endif
754  s->avctx->pix_fmt,
756  };
757  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
758  if (s->hwaccel_pix_fmt < 0)
759  return AVERROR(EINVAL);
760 
761  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
762  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
763  }
764 
765  if (s->avctx->skip_frame == AVDISCARD_ALL) {
766  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
767  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
768  s->got_picture = 1;
769  return 0;
770  }
771 
772  av_frame_unref(s->picture_ptr);
773  ret = ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF);
774  if (ret < 0)
775  return ret;
776  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
777  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
778  s->got_picture = 1;
779 
780  // Lets clear the palette to avoid leaving uninitialized values in it
781  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
782  memset(s->picture_ptr->data[1], 0, 1024);
783 
784  for (i = 0; i < 4; i++)
785  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
786 
787  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
788  s->width, s->height, s->linesize[0], s->linesize[1],
789  s->interlaced, s->avctx->height);
790 
791  }
792 
793  if ((s->rgb && !s->lossless && !s->ls) ||
794  (!s->rgb && s->ls && s->nb_components > 1) ||
795  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
796  ) {
797  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
798  return AVERROR_PATCHWELCOME;
799  }
800 
801  /* totally blank picture as progressive JPEG will only add details to it */
802  if (s->progressive) {
803  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
804  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
805  for (i = 0; i < s->nb_components; i++) {
806  int size = bw * bh * s->h_count[i] * s->v_count[i];
807  av_freep(&s->blocks[i]);
808  av_freep(&s->last_nnz[i]);
809  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
810  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
811  if (!s->blocks[i] || !s->last_nnz[i])
812  return AVERROR(ENOMEM);
813  s->block_stride[i] = bw * s->h_count[i];
814  }
815  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
816  }
817 
818  if (s->avctx->hwaccel) {
819  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
820  s->hwaccel_picture_private =
821  av_mallocz(hwaccel->frame_priv_data_size);
822  if (!s->hwaccel_picture_private)
823  return AVERROR(ENOMEM);
824 
825  ret = hwaccel->start_frame(s->avctx, NULL, s->raw_image_buffer,
826  s->raw_image_buffer_size);
827  if (ret < 0)
828  return ret;
829  }
830 
831  return 0;
832 }
833 
834 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
835 {
836  int code;
837  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
838  if (code < 0 || code > 16) {
839  av_log(s->avctx, AV_LOG_ERROR,
840  "mjpeg_decode_dc: bad vlc: %d\n", dc_index);
841  return AVERROR_INVALIDDATA;
842  }
843 
844  *val = code ? get_xbits(&s->gb, code) : 0;
845  return 0;
846 }
847 
848 /* decode block and dequantize */
849 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
850  int dc_index, int ac_index, uint16_t *quant_matrix)
851 {
852  int code, i, j, level, val;
853 
854  /* DC coef */
855  int ret = mjpeg_decode_dc(s, dc_index, &val);
856  if (ret < 0)
857  return ret;
858 
859  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
860  s->last_dc[component] = val;
861  block[0] = av_clip_int16(val);
862  /* AC coefs */
863  i = 0;
864  {OPEN_READER(re, &s->gb);
865  do {
866  UPDATE_CACHE(re, &s->gb);
867  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
868 
869  i += ((unsigned)code) >> 4;
870  code &= 0xf;
871  if (code) {
872  // GET_VLC updates the cache if parsing reaches the second stage.
873  // So we have at least MIN_CACHE_BITS - 9 > 15 bits left here
874  // and don't need to refill the cache.
875  {
876  int cache = GET_CACHE(re, &s->gb);
877  int sign = (~cache) >> 31;
878  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
879  }
880 
881  LAST_SKIP_BITS(re, &s->gb, code);
882 
883  if (i > 63) {
884  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
885  return AVERROR_INVALIDDATA;
886  }
887  j = s->permutated_scantable[i];
888  block[j] = level * quant_matrix[i];
889  }
890  } while (i < 63);
891  CLOSE_READER(re, &s->gb);}
892 
893  return 0;
894 }
895 
897  int component, int dc_index,
898  uint16_t *quant_matrix, int Al)
899 {
900  unsigned val;
901  s->bdsp.clear_block(block);
902  int ret = mjpeg_decode_dc(s, dc_index, &val);
903  if (ret < 0)
904  return ret;
905 
906  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
907  s->last_dc[component] = val;
908  block[0] = val;
909  return 0;
910 }
911 
912 /* decode block and dequantize - progressive JPEG version */
914  uint8_t *last_nnz, int ac_index,
915  uint16_t *quant_matrix,
916  int ss, int se, int Al, int *EOBRUN)
917 {
918  int code, i, j, val, run;
919  unsigned level;
920 
921  if (*EOBRUN) {
922  (*EOBRUN)--;
923  return 0;
924  }
925 
926  {
927  OPEN_READER(re, &s->gb);
928  for (i = ss; ; i++) {
929  UPDATE_CACHE(re, &s->gb);
930  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
931 
932  run = ((unsigned) code) >> 4;
933  code &= 0xF;
934  if (code) {
935  i += run;
936 
937  {
938  int cache = GET_CACHE(re, &s->gb);
939  int sign = (~cache) >> 31;
940  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
941  }
942 
943  LAST_SKIP_BITS(re, &s->gb, code);
944 
945  if (i >= se) {
946  if (i == se) {
947  j = s->permutated_scantable[se];
948  block[j] = level * (quant_matrix[se] << Al);
949  break;
950  }
951  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
952  return AVERROR_INVALIDDATA;
953  }
954  j = s->permutated_scantable[i];
955  block[j] = level * (quant_matrix[i] << Al);
956  } else {
957  if (run == 0xF) {// ZRL - skip 15 coefficients
958  i += 15;
959  if (i >= se) {
960  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
961  return AVERROR_INVALIDDATA;
962  }
963  } else {
964  val = (1 << run);
965  if (run) {
966  // Given that GET_VLC reloads internally, we always
967  // have at least 16 bits in the cache here.
968  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
969  LAST_SKIP_BITS(re, &s->gb, run);
970  }
971  *EOBRUN = val - 1;
972  break;
973  }
974  }
975  }
976  CLOSE_READER(re, &s->gb);
977  }
978 
979  if (i > *last_nnz)
980  *last_nnz = i;
981 
982  return 0;
983 }
984 
985 #define REFINE_BIT(j) { \
986  UPDATE_CACHE(re, &s->gb); \
987  sign = block[j] >> 15; \
988  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
989  ((quant_matrix[i] ^ sign) - sign) << Al; \
990  LAST_SKIP_BITS(re, &s->gb, 1); \
991 }
992 
993 #define ZERO_RUN \
994 for (; ; i++) { \
995  if (i > last) { \
996  i += run; \
997  if (i > se) { \
998  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
999  return -1; \
1000  } \
1001  break; \
1002  } \
1003  j = s->permutated_scantable[i]; \
1004  if (block[j]) \
1005  REFINE_BIT(j) \
1006  else if (run-- == 0) \
1007  break; \
1008 }
1009 
1010 /* decode block and dequantize - progressive JPEG refinement pass */
1012  uint8_t *last_nnz,
1013  int ac_index, uint16_t *quant_matrix,
1014  int ss, int se, int Al, int *EOBRUN)
1015 {
1016  int code, i = ss, j, sign, val, run;
1017  int last = FFMIN(se, *last_nnz);
1018 
1019  OPEN_READER(re, &s->gb);
1020  if (*EOBRUN) {
1021  (*EOBRUN)--;
1022  } else {
1023  for (; ; i++) {
1024  UPDATE_CACHE(re, &s->gb);
1025  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
1026 
1027  if (code & 0xF) {
1028  run = ((unsigned) code) >> 4;
1029  val = SHOW_UBITS(re, &s->gb, 1);
1030  LAST_SKIP_BITS(re, &s->gb, 1);
1031  ZERO_RUN;
1032  j = s->permutated_scantable[i];
1033  val--;
1034  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1035  if (i == se) {
1036  if (i > *last_nnz)
1037  *last_nnz = i;
1038  CLOSE_READER(re, &s->gb);
1039  return 0;
1040  }
1041  } else {
1042  run = ((unsigned) code) >> 4;
1043  if (run == 0xF) {
1044  ZERO_RUN;
1045  } else {
1046  val = run;
1047  run = (1 << run);
1048  if (val) {
1049  // Given that GET_VLC reloads internally, we always
1050  // have at least 16 bits in the cache here.
1051  run += SHOW_UBITS(re, &s->gb, val);
1052  LAST_SKIP_BITS(re, &s->gb, val);
1053  }
1054  *EOBRUN = run - 1;
1055  break;
1056  }
1057  }
1058  }
1059 
1060  if (i > *last_nnz)
1061  *last_nnz = i;
1062  }
1063 
1064  for (; i <= last; i++) {
1065  j = s->permutated_scantable[i];
1066  if (block[j])
1067  REFINE_BIT(j)
1068  }
1069  CLOSE_READER(re, &s->gb);
1070 
1071  return 0;
1072 }
1073 #undef REFINE_BIT
1074 #undef ZERO_RUN
1075 
/* Check for and consume a restart marker (RSTn) at a restart-interval
 * boundary; on success the DC predictors are reset.
 * Returns 1 if a marker was consumed and predictors reset, 0 otherwise. */
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
{
    int i;
    int reset = 0;

    if (s->restart_interval) {
        s->restart_count--;
        /* THP carries no RSTn marker at restart boundaries: just realign
         * the reader and reset the DC predictors. */
        if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
            align_get_bits(&s->gb);
            for (i = 0; i < nb_components; i++) /* reset dc */
                s->last_dc[i] = (4 << s->bits);
        }

        /* Bits up to and including the next byte boundary (8..15). */
        i = 8 + ((-get_bits_count(&s->gb)) & 7);
        /* skip RSTn */
        if (s->restart_count == 0) {
            /* A marker is preceded by 1-fill padding bits / 0xFF bytes. */
            if( show_bits(&s->gb, i) == (1 << i) - 1
                || show_bits(&s->gb, i) == 0xFF) {
                int pos = get_bits_count(&s->gb);
                align_get_bits(&s->gb);
                while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
                    skip_bits(&s->gb, 8);
                /* 0xD0..0xD7 are the RST0..RST7 marker codes. */
                if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
                    for (i = 0; i < nb_components; i++) /* reset dc */
                        s->last_dc[i] = (4 << s->bits);
                    reset = 1;
                } else
                    /* No marker after all: rewind to where we started. */
                    skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
            }
        }
    }
    return reset;
}
1109 
/* Handles 1 to 4 components */
/* Decode one lossless-JPEG scan for RGB(A) or bayer data: predict each
 * sample from its left/top/topleft neighbours (per the selected predictor),
 * add the decoded DC difference, then convert the line buffer into the
 * output picture according to the colour transform in use (RCT,
 * pegasus RCT, bayer passthrough, or plain component reorder).
 * Returns 0 on success, a negative AVERROR code on failure. */
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
{
    int i, mb_x, mb_y;
    unsigned width;
    uint16_t (*buffer)[4];
    int left[4], top[4], topleft[4];
    const int linesize = s->linesize[0];
    const int mask = ((1 << s->bits) - 1) << point_transform;
    int resync_mb_y = 0;
    int resync_mb_x = 0;
    int vpred[6];
    int ret;

    if (!s->bayer && s->nb_components < 3)
        return AVERROR_INVALIDDATA;
    if (s->bayer && s->nb_components > 2)
        return AVERROR_INVALIDDATA;
    if (s->nb_components <= 0 || s->nb_components > 4)
        return AVERROR_INVALIDDATA;
    if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
        return AVERROR_INVALIDDATA;
    if (s->bayer) {
        if (s->rct || s->pegasus_rct)
            return AVERROR_INVALIDDATA;
    }


    s->restart_count = s->restart_interval;

    /* INT_MAX stands in for "no restart markers" so the modulo below works. */
    if (s->restart_interval == 0)
        s->restart_interval = INT_MAX;

    if (s->bayer)
        width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
    else
        width = s->mb_width;

    av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
    if (!s->ljpeg_buffer)
        return AVERROR(ENOMEM);

    buffer = s->ljpeg_buffer;

    /* Seed the predictors with the mid-range value. */
    for (i = 0; i < 4; i++)
        buffer[0][i] = 1 << (s->bits - 1);

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);

        if (s->interlaced && s->bottom_field)
            ptr += linesize >> 1;

        /* buffer[0] still holds the previous row's first column: this is
         * the top/topleft context for the new row. */
        for (i = 0; i < 4; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];

        if ((mb_y * s->width) % s->restart_interval == 0) {
            for (i = 0; i < 6; i++)
                vpred[i] = 1 << (s->bits-1);
        }

        for (mb_x = 0; mb_x < width; mb_x++) {
            int modified_predictor = predictor;

            if (get_bits_left(&s->gb) < 1) {
                av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
                return AVERROR_INVALIDDATA;
            }

            /* Restart boundary: reset prediction context. */
            if (s->restart_interval && !s->restart_count){
                s->restart_count = s->restart_interval;
                resync_mb_x = mb_x;
                resync_mb_y = mb_y;
                for(i=0; i<4; i++)
                    top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
            }
            /* First row after a resync has no usable top context: force
             * predictor 1 (left). */
            if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
                modified_predictor = 1;

            for (i=0;i<nb_components;i++) {
                int pred, dc;

                topleft[i] = top[i];
                top[i] = buffer[mb_x][i];

                ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
                if (ret < 0)
                    return ret;

                if (!s->bayer || mb_x) {
                    pred = left[i];
                } else { /* This path runs only for the first line in bayer images */
                    vpred[i] += dc;
                    pred = vpred[i] - dc;
                }

                PREDICT(pred, topleft[i], top[i], pred, modified_predictor);

                left[i] = buffer[mb_x][i] =
                    mask & (pred + (unsigned)(dc * (1 << point_transform)));
            }

            if (s->restart_interval && !--s->restart_count) {
                align_get_bits(&s->gb);
                skip_bits(&s->gb, 16); /* skip RSTn */
            }
        }
        /* Write the decoded line into the frame, undoing the colour
         * transform where one is in effect. */
        if (s->rct && s->nb_components == 4) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 0] = buffer[mb_x][3];
            }
        } else if (s->nb_components == 4) {
            for(i=0; i<nb_components; i++) {
                int c= s->comp_index[i];
                if (s->bits <= 8) {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ptr[4*mb_x+3-c] = buffer[mb_x][i];
                    }
                } else if(s->bits == 9) {
                    return AVERROR_PATCHWELCOME;
                } else {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
                    }
                }
            }
        } else if (s->rct) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            }
        } else if (s->pegasus_rct) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            }
        } else if (s->bayer) {
            if (s->bits <= 8)
                return AVERROR_PATCHWELCOME;
            if (nb_components == 1) {
                /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
                for (mb_x = 0; mb_x < width; mb_x++)
                    ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
            } else if (nb_components == 2) {
                for (mb_x = 0; mb_x < width; mb_x++) {
                    ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
                    ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
                }
            }
        } else {
            for(i=0; i<nb_components; i++) {
                int c= s->comp_index[i];
                if (s->bits <= 8) {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ptr[3*mb_x+2-c] = buffer[mb_x][i];
                    }
                } else if(s->bits == 9) {
                    return AVERROR_PATCHWELCOME;
                } else {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
                    }
                }
            }
        }
    }
    return 0;
}
1283 
1285  int point_transform, int nb_components)
1286 {
1287  int i, mb_x, mb_y, mask;
1288  int bits= (s->bits+7)&~7;
1289  int resync_mb_y = 0;
1290  int resync_mb_x = 0;
1291  int ret;
1292 
1293  point_transform += bits - s->bits;
1294  mask = ((1 << s->bits) - 1) << point_transform;
1295 
1296  av_assert0(nb_components>=1 && nb_components<=4);
1297 
1298  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1299  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1300  if (get_bits_left(&s->gb) < 1) {
1301  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1302  return AVERROR_INVALIDDATA;
1303  }
1304  if (s->restart_interval && !s->restart_count){
1305  s->restart_count = s->restart_interval;
1306  resync_mb_x = mb_x;
1307  resync_mb_y = mb_y;
1308  }
1309 
1310  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1311  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1312  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1313  for (i = 0; i < nb_components; i++) {
1314  uint8_t *ptr;
1315  uint16_t *ptr16;
1316  int n, h, v, x, y, c, j, linesize;
1317  n = s->nb_blocks[i];
1318  c = s->comp_index[i];
1319  h = s->h_scount[i];
1320  v = s->v_scount[i];
1321  x = 0;
1322  y = 0;
1323  linesize= s->linesize[c];
1324 
1325  if(bits>8) linesize /= 2;
1326 
1327  for(j=0; j<n; j++) {
1328  int pred, dc;
1329 
1330  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1331  if (ret < 0)
1332  return ret;
1333 
1334  if ( h * mb_x + x >= s->width
1335  || v * mb_y + y >= s->height) {
1336  // Nothing to do
1337  } else if (bits<=8) {
1338  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1339  if(y==0 && toprow){
1340  if(x==0 && leftcol){
1341  pred= 1 << (bits - 1);
1342  }else{
1343  pred= ptr[-1];
1344  }
1345  }else{
1346  if(x==0 && leftcol){
1347  pred= ptr[-linesize];
1348  }else{
1349  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1350  }
1351  }
1352 
1353  if (s->interlaced && s->bottom_field)
1354  ptr += linesize >> 1;
1355  pred &= mask;
1356  *ptr= pred + ((unsigned)dc << point_transform);
1357  }else{
1358  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1359  if(y==0 && toprow){
1360  if(x==0 && leftcol){
1361  pred= 1 << (bits - 1);
1362  }else{
1363  pred= ptr16[-1];
1364  }
1365  }else{
1366  if(x==0 && leftcol){
1367  pred= ptr16[-linesize];
1368  }else{
1369  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1370  }
1371  }
1372 
1373  if (s->interlaced && s->bottom_field)
1374  ptr16 += linesize >> 1;
1375  pred &= mask;
1376  *ptr16= pred + ((unsigned)dc << point_transform);
1377  }
1378  if (++x == h) {
1379  x = 0;
1380  y++;
1381  }
1382  }
1383  }
1384  } else {
1385  for (i = 0; i < nb_components; i++) {
1386  uint8_t *ptr;
1387  uint16_t *ptr16;
1388  int n, h, v, x, y, c, j, linesize, dc;
1389  n = s->nb_blocks[i];
1390  c = s->comp_index[i];
1391  h = s->h_scount[i];
1392  v = s->v_scount[i];
1393  x = 0;
1394  y = 0;
1395  linesize = s->linesize[c];
1396 
1397  if(bits>8) linesize /= 2;
1398 
1399  for (j = 0; j < n; j++) {
1400  int pred;
1401 
1402  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1403  if (ret < 0)
1404  return ret;
1405 
1406  if ( h * mb_x + x >= s->width
1407  || v * mb_y + y >= s->height) {
1408  // Nothing to do
1409  } else if (bits<=8) {
1410  ptr = s->picture_ptr->data[c] +
1411  (linesize * (v * mb_y + y)) +
1412  (h * mb_x + x); //FIXME optimize this crap
1413  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1414 
1415  pred &= mask;
1416  *ptr = pred + ((unsigned)dc << point_transform);
1417  }else{
1418  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1419  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1420 
1421  pred &= mask;
1422  *ptr16= pred + ((unsigned)dc << point_transform);
1423  }
1424 
1425  if (++x == h) {
1426  x = 0;
1427  y++;
1428  }
1429  }
1430  }
1431  }
1432  if (s->restart_interval && !--s->restart_count) {
1433  align_get_bits(&s->gb);
1434  skip_bits(&s->gb, 16); /* skip RSTn */
1435  }
1436  }
1437  }
1438  return 0;
1439 }
1440 
1442  uint8_t *dst, const uint8_t *src,
1443  int linesize, int lowres)
1444 {
1445  switch (lowres) {
1446  case 0: s->copy_block(dst, src, linesize, 8);
1447  break;
1448  case 1: copy_block4(dst, src, linesize, linesize, 4);
1449  break;
1450  case 2: copy_block2(dst, src, linesize, linesize, 2);
1451  break;
1452  case 3: *dst = *src;
1453  break;
1454  }
1455 }
1456 
1457 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1458 {
1459  int block_x, block_y;
1460  int size = 8 >> s->avctx->lowres;
1461  if (s->bits > 8) {
1462  for (block_y=0; block_y<size; block_y++)
1463  for (block_x=0; block_x<size; block_x++)
1464  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1465  } else {
1466  for (block_y=0; block_y<size; block_y++)
1467  for (block_x=0; block_x<size; block_x++)
1468  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1469  }
1470 }
1471 
/* Decode one baseline (or DC-progressive) scan: iterate over all MBs,
 * decode/IDCT each component block (or accumulate DC coefficients for
 * progressive streams), optionally copying unchanged MBs from a reference
 * frame when an mb_bitmask is supplied (AVID/interframe use).
 * Returns 0 on success, a negative AVERROR code on failure. */
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
                             int Al, const uint8_t *mb_bitmask,
                             int mb_bitmask_size,
                             const AVFrame *reference)
{
    int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
    uint8_t *data[MAX_COMPONENTS];
    const uint8_t *reference_data[MAX_COMPONENTS];
    int linesize[MAX_COMPONENTS];
    GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
    int bytes_per_pixel = 1 + (s->bits > 8);

    if (mb_bitmask) {
        if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
            av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
            return AVERROR_INVALIDDATA;
        }
        init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
    }

    s->restart_count = 0;

    av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
                                     &chroma_v_shift);
    chroma_width  = AV_CEIL_RSHIFT(s->width,  chroma_h_shift);
    chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);

    for (i = 0; i < nb_components; i++) {
        int c = s->comp_index[i];
        data[c] = s->picture_ptr->data[c];
        reference_data[c] = reference ? reference->data[c] : NULL;
        linesize[c] = s->linesize[c];
        s->coefs_finished[c] |= 1;
    }

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            /* Bitmask bit cleared => this MB is unchanged vs. reference. */
            const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);

            if (s->restart_interval && !s->restart_count)
                s->restart_count = s->restart_interval;

            if (get_bits_left(&s->gb) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
                       -get_bits_left(&s->gb));
                return AVERROR_INVALIDDATA;
            }
            for (i = 0; i < nb_components; i++) {
                uint8_t *ptr;
                int n, h, v, x, y, c, j;
                int block_offset;
                n = s->nb_blocks[i];
                c = s->comp_index[i];
                h = s->h_scount[i];
                v = s->v_scount[i];
                x = 0;
                y = 0;
                for (j = 0; j < n; j++) {
                    block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
                                     (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);

                    if (s->interlaced && s->bottom_field)
                        block_offset += linesize[c] >> 1;
                    /* Blocks entirely outside the visible area are decoded
                     * (to keep the bitstream in sync) but not stored. */
                    if (   8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width  : s->width)
                        && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
                        ptr = data[c] + block_offset;
                    } else
                        ptr = NULL;
                    if (!s->progressive) {
                        if (copy_mb) {
                            if (ptr)
                                mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
                                                 linesize[c], s->avctx->lowres);

                        } else {
                            s->bdsp.clear_block(s->block);
                            if (decode_block(s, s->block, i,
                                             s->dc_index[i], s->ac_index[i],
                                             s->quant_matrixes[s->quant_sindex[i]]) < 0) {
                                av_log(s->avctx, AV_LOG_ERROR,
                                       "error y=%d x=%d\n", mb_y, mb_x);
                                return AVERROR_INVALIDDATA;
                            }
                            if (ptr && linesize[c]) {
                                s->idsp.idct_put(ptr, linesize[c], s->block);
                                if (s->bits & 7)
                                    shift_output(s, ptr, linesize[c]);
                            }
                        }
                    } else {
                        /* Progressive: accumulate coefficients, IDCT later
                         * (see mjpeg_idct_scan_progressive_ac). */
                        int block_idx = s->block_stride[c] * (v * mb_y + y) +
                                        (h * mb_x + x);
                        int16_t *block = s->blocks[c][block_idx];
                        if (Ah)
                            block[0] += get_bits1(&s->gb) *
                                        s->quant_matrixes[s->quant_sindex[i]][0] << Al;
                        else if (decode_dc_progressive(s, block, i, s->dc_index[i],
                                                       s->quant_matrixes[s->quant_sindex[i]],
                                                       Al) < 0) {
                            av_log(s->avctx, AV_LOG_ERROR,
                                   "error y=%d x=%d\n", mb_y, mb_x);
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
                    ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
                            mb_x, mb_y, x, y, c, s->bottom_field,
                            (v * mb_y + y) * 8, (h * mb_x + x) * 8);
                    if (++x == h) {
                        x = 0;
                        y++;
                    }
                }
            }

            handle_rstn(s, nb_components);
        }
    }
    return 0;
}
1592 
1594  int se, int Ah, int Al)
1595 {
1596  int mb_x, mb_y;
1597  int EOBRUN = 0;
1598  int c = s->comp_index[0];
1599  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1600 
1601  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1602  if (se < ss || se > 63) {
1603  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1604  return AVERROR_INVALIDDATA;
1605  }
1606 
1607  // s->coefs_finished is a bitmask for coefficients coded
1608  // ss and se are parameters telling start and end coefficients
1609  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1610 
1611  s->restart_count = 0;
1612 
1613  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1614  int block_idx = mb_y * s->block_stride[c];
1615  int16_t (*block)[64] = &s->blocks[c][block_idx];
1616  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1617  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1618  int ret;
1619  if (s->restart_interval && !s->restart_count)
1620  s->restart_count = s->restart_interval;
1621 
1622  if (Ah)
1623  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1624  quant_matrix, ss, se, Al, &EOBRUN);
1625  else
1626  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1627  quant_matrix, ss, se, Al, &EOBRUN);
1628 
1629  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1631  if (ret < 0) {
1632  av_log(s->avctx, AV_LOG_ERROR,
1633  "error y=%d x=%d\n", mb_y, mb_x);
1634  return AVERROR_INVALIDDATA;
1635  }
1636 
1637  if (handle_rstn(s, 0))
1638  EOBRUN = 0;
1639  }
1640  }
1641  return 0;
1642 }
1643 
1645 {
1646  int mb_x, mb_y;
1647  int c;
1648  const int bytes_per_pixel = 1 + (s->bits > 8);
1649  const int block_size = s->lossless ? 1 : 8;
1650 
1651  for (c = 0; c < s->nb_components; c++) {
1652  uint8_t *data = s->picture_ptr->data[c];
1653  int linesize = s->linesize[c];
1654  int h = s->h_max / s->h_count[c];
1655  int v = s->v_max / s->v_count[c];
1656  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1657  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1658 
1659  if (~s->coefs_finished[c])
1660  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1661 
1662  if (s->interlaced && s->bottom_field)
1663  data += linesize >> 1;
1664 
1665  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1666  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1667  int block_idx = mb_y * s->block_stride[c];
1668  int16_t (*block)[64] = &s->blocks[c][block_idx];
1669  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1670  s->idsp.idct_put(ptr, linesize, *block);
1671  if (s->bits & 7)
1672  shift_output(s, ptr, linesize);
1673  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1674  }
1675  }
1676  }
1677 }
1678 
1679 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1680  int mb_bitmask_size, const AVFrame *reference)
1681 {
1682  int len, nb_components, i, h, v, predictor, point_transform;
1683  int index, id, ret;
1684  const int block_size = s->lossless ? 1 : 8;
1685  int ilv, prev_shift;
1686 
1687  if (!s->got_picture) {
1688  av_log(s->avctx, AV_LOG_WARNING,
1689  "Can not process SOS before SOF, skipping\n");
1690  return AVERROR_INVALIDDATA;
1691  }
1692 
1693  ret = mjpeg_parse_len(s, &len, "sos");
1694  if (ret < 0)
1695  return ret;
1696  if (len < 1)
1697  return AVERROR_INVALIDDATA;
1698  nb_components = bytestream2_get_byteu(&s->gB);
1699  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1701  "decode_sos: nb_components (%d)",
1702  nb_components);
1703  return AVERROR_PATCHWELCOME;
1704  }
1705  if (len != 4 + 2 * nb_components) {
1706  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: len(%d) mismatch %d components\n", len, nb_components);
1707  return AVERROR_INVALIDDATA;
1708  }
1709  for (i = 0; i < nb_components; i++) {
1710  id = bytestream2_get_byteu(&s->gB);
1711  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1712  /* find component index */
1713  for (index = 0; index < s->nb_components; index++)
1714  if (id == s->component_id[index])
1715  break;
1716  if (index == s->nb_components) {
1717  av_log(s->avctx, AV_LOG_ERROR,
1718  "decode_sos: index(%d) out of components\n", index);
1719  return AVERROR_INVALIDDATA;
1720  }
1721  /* Metasoft MJPEG codec has Cb and Cr swapped */
1722  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1723  && nb_components == 3 && s->nb_components == 3 && i)
1724  index = 3 - i;
1725 
1726  s->quant_sindex[i] = s->quant_index[index];
1727  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1728  s->h_scount[i] = s->h_count[index];
1729  s->v_scount[i] = s->v_count[index];
1730 
1731  s->comp_index[i] = index;
1732 
1733  uint8_t b = bytestream2_get_byteu(&s->gB);
1734  s->dc_index[i] = b >> 4;
1735  s->ac_index[i] = b & 0x0F;
1736 
1737  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1738  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1739  goto out_of_range;
1740  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1741  goto out_of_range;
1742  }
1743 
1744  predictor = bytestream2_get_byteu(&s->gB); /* JPEG Ss / lossless JPEG predictor / JPEG-LS NEAR */
1745  ilv = bytestream2_get_byteu(&s->gB); /* JPEG Se / JPEG-LS ILV */
1746  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1747  uint8_t b = bytestream2_get_byteu(&s->gB);
1748  prev_shift = b >> 4; /* Ah */
1749  point_transform = b & 0x0F; /* Al */
1750  }else
1751  prev_shift = point_transform = 0;
1752 
1753  if (nb_components > 1) {
1754  /* interleaved stream */
1755  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1756  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1757  } else if (!s->ls) { /* skip this for JPEG-LS */
1758  h = s->h_max / s->h_scount[0];
1759  v = s->v_max / s->v_scount[0];
1760  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1761  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1762  s->nb_blocks[0] = 1;
1763  s->h_scount[0] = 1;
1764  s->v_scount[0] = 1;
1765  }
1766 
1767  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1768  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1769  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1770  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1771  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1772 
1773 
1774  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1775  if (s->mjpb_skiptosod)
1776  bytestream2_skip(&s->gB, s->mjpb_skiptosod);
1777 
1778  ret = init_get_bits8(&s->gb, s->gB.buffer, bytestream2_get_bytes_left(&s->gB));
1779  if (ret < 0)
1780  return ret;
1781 
1782 next_field:
1783  for (i = 0; i < nb_components; i++)
1784  s->last_dc[i] = (4 << s->bits);
1785 
1786  if (s->avctx->hwaccel) {
1787  int bytes_to_start = bytestream2_tell(&s->gB);
1788  av_assert0(bytes_to_start >= 0 &&
1789  s->raw_scan_buffer_size >= bytes_to_start);
1790 
1791  ret = FF_HW_CALL(s->avctx, decode_slice,
1792  s->raw_scan_buffer + bytes_to_start,
1793  s->raw_scan_buffer_size - bytes_to_start);
1794  if (ret < 0)
1795  return ret;
1796 
1797  } else if (s->lossless) {
1798  av_assert0(s->picture_ptr == s->picture);
1799  if (CONFIG_JPEGLS_DECODER && s->ls) {
1800 // for () {
1801 // reset_ls_coding_parameters(s, 0);
1802 
1804  point_transform, ilv)) < 0)
1805  return ret;
1806  } else {
1807  if (s->rgb || s->bayer) {
1808  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1809  return ret;
1810  } else {
1812  point_transform,
1813  nb_components)) < 0)
1814  return ret;
1815  }
1816  }
1817  } else {
1818  if (s->progressive && predictor) {
1819  av_assert0(s->picture_ptr == s->picture);
1821  ilv, prev_shift,
1822  point_transform)) < 0)
1823  return ret;
1824  } else {
1825  if ((ret = mjpeg_decode_scan(s, nb_components,
1826  prev_shift, point_transform,
1827  mb_bitmask, mb_bitmask_size, reference)) < 0)
1828  return ret;
1829  }
1830  }
1831 
1832  if (s->interlaced &&
1833  get_bits_left(&s->gb) > 32 &&
1834  show_bits(&s->gb, 8) == 0xFF) {
1835  GetBitContext bak = s->gb;
1836  align_get_bits(&bak);
1837  if (show_bits(&bak, 16) == 0xFFD1) {
1838  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1839  s->gb = bak;
1840  skip_bits(&s->gb, 16);
1841  s->bottom_field ^= 1;
1842 
1843  goto next_field;
1844  }
1845  }
1846 
1847  /* Add the amount of bits read from the unescaped image data buffer
1848  * into the GetByteContext. */
1849  bytestream2_skipu(&s->gB, (get_bits_count(&s->gb) + 7) / 8);
1850 
1851  return 0;
1852  out_of_range:
1853  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1854  return AVERROR_INVALIDDATA;
1855 }
1856 
1858 {
1859  if (bytestream2_get_be16u(&s->gB) != 4)
1860  return AVERROR_INVALIDDATA;
1861  s->restart_interval = bytestream2_get_be16u(&s->gB);
1862  s->restart_count = 0;
1863  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1864  s->restart_interval);
1865 
1866  return 0;
1867 }
1868 
1870 {
1871  int len, id, i;
1872 
1873  int ret = mjpeg_parse_len(s, &len, "app");
1874  if (ret < 0)
1875  return AVERROR_INVALIDDATA;
1876 
1877  if (len < 4) {
1878  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1879  return AVERROR_INVALIDDATA;
1880  av_log(s->avctx, AV_LOG_VERBOSE, "skipping APPx stub (len=%" PRId32 ")\n", len);
1881  goto out;
1882  }
1883 
1884  id = bytestream2_get_be32u(&s->gB);
1885  len -= 4;
1886 
1887  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1888  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1889  av_fourcc2str(av_bswap32(id)), id, len);
1890 
1891  /* This fourcc is used by non-avid files too, it holds some
1892  information, but it's always present in AVID-created files. */
1893  if (id == AV_RB32("AVI1")) {
1894  /* structure:
1895  4bytes AVI1
1896  1bytes polarity
1897  1bytes always zero
1898  4bytes field_size
1899  4bytes field_size_less_padding
1900  */
1901  if (len < 1)
1902  goto out;
1903  i = bytestream2_get_byteu(&s->gB); len--;
1904  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1905  goto out;
1906  }
1907 
1908  if (id == AV_RB32("JFIF")) {
1909  int t_w, t_h, v1, v2;
1910  if (len < 8)
1911  goto out;
1912  bytestream2_skipu(&s->gB, 1); /* the trailing zero-byte */
1913  v1 = bytestream2_get_byteu(&s->gB);
1914  v2 = bytestream2_get_byteu(&s->gB);
1915  bytestream2_skipu(&s->gB, 1);
1916 
1917  s->avctx->sample_aspect_ratio.num = bytestream2_get_be16u(&s->gB);
1918  s->avctx->sample_aspect_ratio.den = bytestream2_get_be16u(&s->gB);
1919  if ( s->avctx->sample_aspect_ratio.num <= 0
1920  || s->avctx->sample_aspect_ratio.den <= 0) {
1921  s->avctx->sample_aspect_ratio.num = 0;
1922  s->avctx->sample_aspect_ratio.den = 1;
1923  }
1924 
1925  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1926  av_log(s->avctx, AV_LOG_INFO,
1927  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1928  v1, v2,
1929  s->avctx->sample_aspect_ratio.num,
1930  s->avctx->sample_aspect_ratio.den);
1931 
1932  len -= 8;
1933  if (len >= 2) {
1934  t_w = bytestream2_get_byteu(&s->gB);
1935  t_h = bytestream2_get_byteu(&s->gB);
1936  if (t_w && t_h) {
1937  /* skip thumbnail */
1938  if (len -10 - (t_w * t_h * 3) > 0)
1939  len -= t_w * t_h * 3;
1940  }
1941  len -= 2;
1942  }
1943  goto out;
1944  }
1945 
1946  if ( id == AV_RB32("Adob")
1947  && len >= 8
1948  && bytestream2_peek_byteu(&s->gB) == 'e'
1949  && bytestream2_peek_be32u(&s->gB) != AV_RB32("e_CM")) {
1950  bytestream2_skipu(&s->gB, 1); /* 'e' */
1951  bytestream2_skipu(&s->gB, 2); /* version */
1952  bytestream2_skipu(&s->gB, 2); /* flags0 */
1953  bytestream2_skipu(&s->gB, 2); /* flags1 */
1954  s->adobe_transform = bytestream2_get_byteu(&s->gB);
1955  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1956  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1957  len -= 8;
1958  goto out;
1959  }
1960 
1961  if (id == AV_RB32("LJIF")) {
1962  int rgb = s->rgb;
1963  int pegasus_rct = s->pegasus_rct;
1964  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1965  av_log(s->avctx, AV_LOG_INFO,
1966  "Pegasus lossless jpeg header found\n");
1967  if (len < 9)
1968  goto out;
1969  bytestream2_skipu(&s->gB, 2); /* version ? */
1970  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1971  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1972  bytestream2_skipu(&s->gB, 2); /* unknown always 0? */
1973  switch (i=bytestream2_get_byteu(&s->gB)) {
1974  case 1:
1975  rgb = 1;
1976  pegasus_rct = 0;
1977  break;
1978  case 2:
1979  rgb = 1;
1980  pegasus_rct = 1;
1981  break;
1982  default:
1983  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1984  }
1985 
1986  len -= 9;
1987  if (s->bayer)
1988  goto out;
1989  if (s->got_picture)
1990  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1991  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1992  goto out;
1993  }
1994 
1995  s->rgb = rgb;
1996  s->pegasus_rct = pegasus_rct;
1997 
1998  goto out;
1999  }
2000  if (id == AV_RL32("colr") && len > 0) {
2001  s->colr = bytestream2_get_byteu(&s->gB);
2002  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2003  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
2004  len --;
2005  goto out;
2006  }
2007  if (id == AV_RL32("xfrm") && len > 0) {
2008  s->xfrm = bytestream2_get_byteu(&s->gB);
2009  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2010  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
2011  len --;
2012  goto out;
2013  }
2014 
2015  /* JPS extension by VRex */
2016  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
2017  int flags, layout, type;
2018  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2019  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
2020 
2021  bytestream2_skipu(&s->gB, 4); len -= 4; /* JPS_ */
2022  bytestream2_skipu(&s->gB, 2); len -= 2; /* block length */
2023  bytestream2_skipu(&s->gB, 1); /* reserved */
2024  flags = bytestream2_get_byteu(&s->gB);
2025  layout = bytestream2_get_byteu(&s->gB);
2026  type = bytestream2_get_byteu(&s->gB);
2027  len -= 4;
2028 
2029  av_freep(&s->stereo3d);
2030  s->stereo3d = av_stereo3d_alloc();
2031  if (!s->stereo3d) {
2032  goto out;
2033  }
2034  if (type == 0) {
2035  s->stereo3d->type = AV_STEREO3D_2D;
2036  } else if (type == 1) {
2037  switch (layout) {
2038  case 0x01:
2039  s->stereo3d->type = AV_STEREO3D_LINES;
2040  break;
2041  case 0x02:
2042  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2043  break;
2044  case 0x03:
2045  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2046  break;
2047  }
2048  if (!(flags & 0x04)) {
2049  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2050  }
2051  }
2052  goto out;
2053  }
2054 
2055  /* EXIF metadata */
2056  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2057  int ret;
2058 
2059  bytestream2_skipu(&s->gB, 2); // skip padding
2060  len -= 2;
2061 
2062  ret = av_exif_parse_buffer(s->avctx, s->gB.buffer, len, &s->exif_metadata, AV_EXIF_TIFF_HEADER);
2063  if (ret < 0) {
2064  av_log(s->avctx, AV_LOG_WARNING, "unable to parse EXIF buffer\n");
2065  goto out;
2066  }
2067 
2068  bytestream2_skipu(&s->gB, ret);
2069  len -= ret;
2070 
2071  goto out;
2072  }
2073 
2074  /* Apple MJPEG-A */
2075  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2076  id = bytestream2_get_be32u(&s->gB);
2077  len -= 4;
2078  /* Apple MJPEG-A */
2079  if (id == AV_RB32("mjpg")) {
2080  /* structure:
2081  4bytes field size
2082  4bytes pad field size
2083  4bytes next off
2084  4bytes quant off
2085  4bytes huff off
2086  4bytes image off
2087  4bytes scan off
2088  4bytes data off
2089  */
2090  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2091  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2092  }
2093  }
2094 
2095  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2096  int id2;
2097  unsigned seqno;
2098  unsigned nummarkers;
2099 
2100  id = bytestream2_get_be32u(&s->gB);
2101  id2 = bytestream2_get_be24u(&s->gB);
2102  len -= 7;
2103  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2104  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2105  goto out;
2106  }
2107 
2108  bytestream2_skipu(&s->gB, 1);
2109  seqno = bytestream2_get_byteu(&s->gB);
2110  len -= 2;
2111  if (seqno == 0) {
2112  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2113  goto out;
2114  }
2115 
2116  nummarkers = bytestream2_get_byteu(&s->gB);
2117  len -= 1;
2118  if (nummarkers == 0) {
2119  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2120  goto out;
2121  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2122  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2123  goto out;
2124  } else if (seqno > nummarkers) {
2125  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2126  goto out;
2127  }
2128 
2129  /* Allocate if this is the first APP2 we've seen. */
2130  if (s->iccnum == 0) {
2131  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2132  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2133  return AVERROR(ENOMEM);
2134  }
2135  s->iccnum = nummarkers;
2136  }
2137 
2138  if (s->iccentries[seqno - 1].data) {
2139  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2140  goto out;
2141  }
2142 
2143  s->iccentries[seqno - 1].length = len;
2144  s->iccentries[seqno - 1].data = av_malloc(len);
2145  if (!s->iccentries[seqno - 1].data) {
2146  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2147  return AVERROR(ENOMEM);
2148  }
2149 
2150  bytestream2_get_bufferu(&s->gB, s->iccentries[seqno - 1].data, len);
2151  len = 0;
2152  s->iccread++;
2153 
2154  if (s->iccread > s->iccnum)
2155  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2156  }
2157 
2158 out:
2159  /* slow but needed for extreme adobe jpegs */
2160  if (len < 0)
2161  av_log(s->avctx, AV_LOG_ERROR,
2162  "mjpeg: error, decode_app parser read over the end\n");
2163  if (len > 0)
2164  bytestream2_skipu(&s->gB, len);
2165 
2166  return 0;
2167 }
2168 
/* Parse a COM (comment) marker segment.
 * Reads the length-prefixed comment text into a heap buffer, NUL-terminates
 * it (replacing a trailing '\n'), optionally logs it, and reacts to known
 * vendor strings: "AVID" (buggy EOI placement workaround), "CS=ITU601"
 * (colorspace hint), Intel JPEG Library / Metasoft (vertically flipped
 * image), "MULTISCOPE II" (forces 1:2 sample aspect ratio).
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): the function signature line is missing from this rendering
 * of the file — presumably static int mjpeg_decode_com(MJpegDecodeContext *s);
 * confirm against the original source. */
 2170 {
 2171  int len;
 2172  int ret = mjpeg_parse_len(s, &len, "com");
 2173  if (ret < 0)
 2174  return ret;
 2175  if (!len)
 2176  return 0;
 2177 
 2178  int i;
 2179  char *cbuf = av_malloc(len + 1);
 2180  if (!cbuf)
 2181  return AVERROR(ENOMEM);
 2182 
 2183  for (i = 0; i < len; i++)
 2184  cbuf[i] = bytestream2_get_byteu(&s->gB);
/* len > 0 is guaranteed above, so cbuf[i - 1] is in bounds here;
 * a trailing newline is dropped so the string comparisons below match */
 2185  if (cbuf[i - 1] == '\n')
 2186  cbuf[i - 1] = 0;
 2187  else
 2188  cbuf[i] = 0;
 2189 
 2190  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
 2191  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
 2192 
 2193  /* buggy avid, it puts EOI only at every 10th frame */
 2194  if (!strncmp(cbuf, "AVID", 4)) {
 2195  parse_avid(s, cbuf, len);
 2196  } else if (!strcmp(cbuf, "CS=ITU601"))
 2197  s->cs_itu601 = 1;
 2198  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
 2199  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
 2200  s->flipped = 1;
 2201  else if (!strcmp(cbuf, "MULTISCOPE II")) {
 2202  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
 2203  s->multiscope = 2;
 2204  }
 2205 
 2206  av_free(cbuf);
 2207 
 2208  return 0;
 2209 }
2210 
2211 /* return the 8 bit start code value and update the search
2212  state. Return -1 if no start code found */
2213 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2214 {
2215  const uint8_t *buf_ptr;
2216  int val;
2217  int skipped = 0;
2218 
2219  buf_ptr = *pbuf_ptr;
2220  while ((buf_ptr = memchr(buf_ptr, 0xff, buf_end - buf_ptr))) {
2221  buf_ptr++;
2222  while (buf_ptr < buf_end) {
2223  val = *buf_ptr++;
2224  if (val != 0xff) {
2225  if ((val >= SOF0) && (val <= COM))
2226  goto found;
2227  break;
2228  }
2229  }
2230  skipped++;
2231  }
2232  buf_ptr = buf_end;
2233  val = -1;
2234 found:
2235  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2236  *pbuf_ptr = buf_ptr;
2237  return val;
2238 }
2239 
/* Locate the next marker with find_marker() and prepare its payload for
 * parsing: for a baseline SOS segment the byte-stuffed 0x00 after each 0xFF
 * is removed into s->buffer; for JPEG-LS SOS the 0xFF/0x7F bit-stuffing is
 * undone with a PutBitContext; all other segments are passed through
 * unmodified. Returns the start code, or a negative AVERROR on allocation
 * failure / no marker found.
 * NOTE(review): the first signature line and a few statement lines (doxygen
 * lines 2240, 2283, 2306, 2351) are missing from this rendering — confirm
 * against the original source before relying on the exact control flow. */
 2241  const uint8_t **buf_ptr, const uint8_t *buf_end,
 2242  const uint8_t **unescaped_buf_ptr,
 2243  int *unescaped_buf_size)
 2244 {
 2245  int start_code;
 2246  start_code = find_marker(buf_ptr, buf_end);
 2247 
/* scratch buffer is padded so the bit reader can safely over-read */
 2248  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
 2249  if (!s->buffer)
 2250  return AVERROR(ENOMEM);
 2251 
 2252  /* unescape buffer of SOS, use special treatment for JPEG-LS */
 2253  if (start_code == SOS && !s->ls) {
 2254  const uint8_t *src = *buf_ptr;
 2255  const uint8_t *ptr = src;
 2256  uint8_t *dst = s->buffer;
 2257 
 2258  #define copy_data_segment(skip) do { \
 2259  ptrdiff_t length = (ptr - src) - (skip); \
 2260  if (length > 0) { \
 2261  memcpy(dst, src, length); \
 2262  dst += length; \
 2263  src = ptr; \
 2264  } \
 2265  } while (0)
 2266 
 2267  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
/* THP streams are not byte-stuffed: copy the payload verbatim */
 2268  ptr = buf_end;
 2269  copy_data_segment(0);
 2270  } else {
 2271  while (ptr < buf_end) {
 2272  uint8_t x = *(ptr++);
 2273 
 2274  if (x == 0xff) {
 2275  ptrdiff_t skip = 0;
 2276  while (ptr < buf_end && x == 0xff) {
 2277  x = *(ptr++);
 2278  skip++;
 2279  }
 2280 
 2281  /* 0xFF, 0xFF, ... */
 2282  if (skip > 1) {
 2284 
 2285  /* decrement src as it is equal to ptr after the
 2286  * copy_data_segment macro and we might want to
 2287  * copy the current value of x later on */
 2288  src--;
 2289  }
 2290 
 2291  if (x < RST0 || x > RST7) {
 2292  copy_data_segment(1);
 2293  if (x)
 2294  break;
 2295  }
 2296  }
 2297  }
 2298  if (src < ptr)
 2299  copy_data_segment(0);
 2300  }
 2301  #undef copy_data_segment
 2302 
 2303  *unescaped_buf_ptr = s->buffer;
 2304  *unescaped_buf_size = dst - s->buffer;
 2305  memset(s->buffer + *unescaped_buf_size, 0,
 2307 
 2308  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %td bytes\n",
 2309  (buf_end - *buf_ptr) - (dst - s->buffer));
 2310  } else if (start_code == SOS && s->ls) {
 2311  const uint8_t *src = *buf_ptr;
 2312  uint8_t *dst = s->buffer;
 2313  int bit_count = 0;
 2314  int t = 0, b = 0;
 2315  PutBitContext pb;
 2316 
 2317  /* find marker */
 2318  while (src + t < buf_end) {
 2319  uint8_t x = src[t++];
 2320  if (x == 0xff) {
 2321  while ((src + t < buf_end) && x == 0xff)
 2322  x = src[t++];
 2323  if (x & 0x80) {
 2324  t -= FFMIN(2, t);
 2325  break;
 2326  }
 2327  }
 2328  }
 2329  bit_count = t * 8;
 2330  init_put_bits(&pb, dst, t);
 2331 
/* JPEG-LS stuffs a 0 bit after every 0xFF: emit only 7 bits for the
 * byte that follows a 0xFF and shrink the total bit count accordingly */
 2332  /* unescape bitstream */
 2333  while (b < t) {
 2334  uint8_t x = src[b++];
 2335  put_bits(&pb, 8, x);
 2336  if (x == 0xFF && b < t) {
 2337  x = src[b++];
 2338  if (x & 0x80) {
 2339  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
 2340  x &= 0x7f;
 2341  }
 2342  put_bits(&pb, 7, x);
 2343  bit_count--;
 2344  }
 2345  }
 2346  flush_put_bits(&pb);
 2347 
 2348  *unescaped_buf_ptr = dst;
 2349  *unescaped_buf_size = (bit_count + 7) >> 3;
 2350  memset(s->buffer + *unescaped_buf_size, 0,
 2352  } else {
/* non-SOS segments need no unescaping: return the raw input range */
 2353  *unescaped_buf_ptr = *buf_ptr;
 2354  *unescaped_buf_size = buf_end - *buf_ptr;
 2355  }
 2356 
 2357  return start_code;
 2358 }
2359 
/* Free all buffered ICC profile chunks and reset the APP2 reassembly state
 * (s->iccread / s->iccnum), so a new multi-marker ICC profile can be
 * collected for the next picture.
 * NOTE(review): the function signature line is missing from this rendering —
 * presumably static void reset_icc_profile(MJpegDecodeContext *s). */
 2361 {
 2362  int i;
 2363 
 2364  if (s->iccentries) {
 2365  for (i = 0; i < s->iccnum; i++)
 2366  av_freep(&s->iccentries[i].data);
 2367  av_freep(&s->iccentries);
 2368  }
 2369 
 2370  s->iccread = 0;
 2371  s->iccnum = 0;
 2372 }
2373 
/* Decode one MJPEG picture from a raw buffer: walks the marker stream
 * (SOI/DHT/SOFx/SOS/EOI/...), dispatches each segment to its parser, and on
 * EOI references the decoded picture into *frame and applies post-processing
 * (chroma upscaling, flipping, plane swaps, Adobe transforms, stereo/ICC/EXIF
 * side data). Returns the number of bytes consumed or a negative AVERROR.
 * NOTE(review): several lines are missing from this rendering (the function
 * signature at doxygen line 2374, the reset_icc_profile()/skip_bits-style
 * calls at 2397/2440/2483/2485/2493/2501..2518/2537, the av_assert0 lines at
 * 2636/2717, and the stereo3d side-data allocation at 2833) — confirm the
 * exact statements against the original source. */
 2375  int *got_frame, const AVPacket *avpkt,
 2376  const uint8_t *buf, const int buf_size)
 2377 {
 2378  MJpegDecodeContext *s = avctx->priv_data;
 2379  const uint8_t *buf_end, *buf_ptr;
 2380  const uint8_t *unescaped_buf_ptr;
 2381  int hshift, vshift;
 2382  int unescaped_buf_size;
 2383  int start_code;
 2384  int index;
 2385  int ret = 0;
 2386  int is16bit;
 2387 
 2388  s->force_pal8 = 0;
 2389 
 2390  s->buf_size = buf_size;
 2391 
/* drop any per-frame state left over from the previous packet */
 2392  av_exif_free(&s->exif_metadata);
 2393  av_freep(&s->stereo3d);
 2394  s->adobe_transform = -1;
 2395 
 2396  if (s->iccnum != 0)
 2398 
 2399 redo_for_pal8:
 2400  buf_ptr = buf;
 2401  buf_end = buf + buf_size;
/* main marker loop: find the next marker, unescape its payload, parse it */
 2402  while (buf_ptr < buf_end) {
 2403  /* find start next marker */
 2404  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
 2405  &unescaped_buf_ptr,
 2406  &unescaped_buf_size);
 2407  /* EOF */
 2408  if (start_code < 0) {
 2409  break;
 2410  } else if (unescaped_buf_size > INT_MAX / 8) {
 2411  av_log(avctx, AV_LOG_ERROR,
 2412  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
 2413  start_code, unescaped_buf_size, buf_size);
 2414  return AVERROR_INVALIDDATA;
 2415  }
 2416  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n",
 2417  start_code, buf_end - buf_ptr);
 2418 
 2419  bytestream2_init(&s->gB, unescaped_buf_ptr, unescaped_buf_size);
 2420 
 2421  s->start_code = start_code;
 2422  if (avctx->debug & FF_DEBUG_STARTCODE)
 2423  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
 2424 
 2425  /* process markers */
 2426  if (start_code >= RST0 && start_code <= RST7) {
 2427  av_log(avctx, AV_LOG_DEBUG,
 2428  "restart marker: %d\n", start_code & 0x0f);
 2429  /* APP fields */
 2430  } else if (start_code >= APP0 && start_code <= APP15) {
 2431  if ((ret = mjpeg_decode_app(s)) < 0)
 2432  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
 2433  av_err2str(ret));
 2434  /* Comment */
 2435  } else if (start_code == COM) {
 2436  ret = mjpeg_decode_com(s);
 2437  if (ret < 0)
 2438  return ret;
 2439  } else if (start_code == DQT) {
 2441  if (ret < 0)
 2442  return ret;
 2443  }
 2444 
 2445  ret = -1;
 2446 
 2447  if (!CONFIG_JPEGLS_DECODER &&
 2448  (start_code == SOF55 || start_code == LSE)) {
 2449  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
 2450  return AVERROR(ENOSYS);
 2451  }
 2452 
/* when all frames are discarded, only SOF headers are still parsed */
 2453  if (avctx->skip_frame == AVDISCARD_ALL) {
 2454  switch(start_code) {
 2455  case SOF0:
 2456  case SOF1:
 2457  case SOF2:
 2458  case SOF3:
 2459  case SOF55:
 2460  break;
 2461  default:
 2462  goto skip;
 2463  }
 2464  }
 2465 
 2466  switch (start_code) {
 2467  case SOI:
 2468  s->restart_interval = 0;
 2469  s->restart_count = 0;
 2470  s->raw_image_buffer = buf_ptr;
 2471  s->raw_image_buffer_size = buf_end - buf_ptr;
 2472  /* nothing to do on SOI */
 2473  break;
 2474  case DHT:
 2475  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
 2476  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
 2477  goto fail;
 2478  }
 2479  break;
 2480  case SOF0:
 2481  case SOF1:
 2482  if (start_code == SOF0)
 2484  else
 2486  s->lossless = 0;
 2487  s->ls = 0;
 2488  s->progressive = 0;
 2489  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
 2490  goto fail;
 2491  break;
 2492  case SOF2:
 2494  s->lossless = 0;
 2495  s->ls = 0;
 2496  s->progressive = 1;
 2497  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
 2498  goto fail;
 2499  break;
 2500  case SOF3:
 2502 #if FF_API_CODEC_PROPS
 2506 #endif
 2507  s->lossless = 1;
 2508  s->ls = 0;
 2509  s->progressive = 0;
 2510  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
 2511  goto fail;
 2512  break;
 2513  case SOF55:
 2515 #if FF_API_CODEC_PROPS
 2519 #endif
 2520  s->lossless = 1;
 2521  s->ls = 1;
 2522  s->progressive = 0;
 2523  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
 2524  goto fail;
 2525  break;
 2526  case LSE:
 2527  if (!CONFIG_JPEGLS_DECODER ||
 2528  (ret = ff_jpegls_decode_lse(s)) < 0)
 2529  goto fail;
 2530  if (ret == 1)
 2531  goto redo_for_pal8;
 2532  break;
 2533  case EOI:
 2534 eoi_parser:
 2535  if (!avctx->hwaccel &&
 2536  s->progressive && s->cur_scan && s->got_picture)
 2538  s->cur_scan = 0;
 2539  if (!s->got_picture) {
 2540  av_log(avctx, AV_LOG_WARNING,
 2541  "Found EOI before any SOF, ignoring\n");
 2542  break;
 2543  }
 2544  if (s->interlaced) {
 2545  s->bottom_field ^= 1;
 2546  /* if not bottom field, do not output image yet */
 2547  if (s->bottom_field == !s->interlace_polarity)
 2548  break;
 2549  }
 2550  if (avctx->hwaccel) {
 2551  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
 2552  if (ret < 0)
 2553  return ret;
 2554 
 2555  av_freep(&s->hwaccel_picture_private);
 2556  }
 2557  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
 2558  return ret;
 2559  if (s->lossless)
 2560  frame->flags |= AV_FRAME_FLAG_LOSSLESS;
 2561  *got_frame = 1;
 2562  s->got_picture = 0;
 2563 
 2564  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
 2565  int qp = FFMAX3(s->qscale[0],
 2566  s->qscale[1],
 2567  s->qscale[2]);
 2568 
 2569  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
 2570  }
 2571 
 2572  goto the_end;
 2573  case SOS:
 2574  s->raw_scan_buffer = buf_ptr;
 2575  s->raw_scan_buffer_size = buf_end - buf_ptr;
 2576 
 2577  s->cur_scan++;
 2578 
 2579  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
 2580  (avctx->err_recognition & AV_EF_EXPLODE))
 2581  goto fail;
 2582  break;
 2583  case DRI:
 2584  if ((ret = mjpeg_decode_dri(s)) < 0)
 2585  return ret;
 2586  break;
 2587  case SOF5:
 2588  case SOF6:
 2589  case SOF7:
 2590  case SOF9:
 2591  case SOF10:
 2592  case SOF11:
 2593  case SOF13:
 2594  case SOF14:
 2595  case SOF15:
 2596  case JPG:
 2597  av_log(avctx, AV_LOG_ERROR,
 2598  "mjpeg: unsupported coding type (%x)\n", start_code);
 2599  break;
 2600  }
 2601 
 2602  if (avctx->skip_frame == AVDISCARD_ALL) {
 2603  switch(start_code) {
 2604  case SOF0:
 2605  case SOF1:
 2606  case SOF2:
 2607  case SOF3:
 2608  case SOF55:
 2609  s->got_picture = 0;
 2610  goto the_end_no_picture;
 2611  }
 2612  }
 2613 
 2614 skip:
 2615  /* eof process start code */
 2616  buf_ptr += bytestream2_tell(&s->gB);
 2617  av_log(avctx, AV_LOG_DEBUG,
 2618  "marker parser used %d bytes\n",
 2619  bytestream2_tell(&s->gB));
 2620  }
 2621  if (s->got_picture && s->cur_scan) {
 2622  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
 2623  goto eoi_parser;
 2624  }
 2625  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
 2626  return AVERROR_INVALIDDATA;
 2627 fail:
 2628  s->got_picture = 0;
 2629  return ret;
 2630 the_end:
 2631 
 2632  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
 2633 
/* horizontal chroma upscaling for subsampled sources decoded into a less
 * subsampled pixel format (factors 1, 2 and 4 are handled in place) */
 2634  if (AV_RB32(s->upscale_h)) {
 2635  int p;
 2637  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
 2638  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
 2639  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
 2640  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
 2641  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
 2642  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
 2643  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
 2644  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
 2645  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
 2646  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
 2647  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
 2648  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
 2649  avctx->pix_fmt == AV_PIX_FMT_GBRAP
 2650  );
 2651  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
 2652  if (ret)
 2653  return ret;
 2654 
 2655  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
 2656  for (p = 0; p<s->nb_components; p++) {
 2657  uint8_t *line = s->picture_ptr->data[p];
 2658  int w = s->width;
 2659  int h = s->height;
 2660  if (!s->upscale_h[p])
 2661  continue;
 2662  if (p==1 || p==2) {
 2663  w = AV_CEIL_RSHIFT(w, hshift);
 2664  h = AV_CEIL_RSHIFT(h, vshift);
 2665  }
 2666  if (s->upscale_v[p] == 1)
 2667  h = (h+1)>>1;
 2668  av_assert0(w > 0);
 2669  for (int i = 0; i < h; i++) {
/* expansion runs right-to-left so the source samples in the left half
 * of the line are not overwritten before they are read */
 2670  if (s->upscale_h[p] == 1) {
 2671  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
 2672  else line[w - 1] = line[(w - 1) / 2];
 2673  for (index = w - 2; index > 0; index--) {
 2674  if (is16bit)
 2675  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
 2676  else
 2677  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
 2678  }
 2679  } else if (s->upscale_h[p] == 2) {
 2680  if (is16bit) {
 2681  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
 2682  if (w > 1)
 2683  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
 2684  } else {
 2685  line[w - 1] = line[(w - 1) / 3];
 2686  if (w > 1)
 2687  line[w - 2] = line[w - 1];
 2688  }
 2689  for (index = w - 3; index > 0; index--) {
 2690  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
 2691  }
 2692  } else if (s->upscale_h[p] == 4){
 2693  if (is16bit) {
 2694  uint16_t *line16 = (uint16_t *) line;
 2695  line16[w - 1] = line16[(w - 1) >> 2];
 2696  if (w > 1)
 2697  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
 2698  if (w > 2)
 2699  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
 2700  } else {
 2701  line[w - 1] = line[(w - 1) >> 2];
 2702  if (w > 1)
 2703  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
 2704  if (w > 2)
 2705  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
 2706  }
 2707  for (index = w - 4; index > 0; index--)
 2708  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
 2709  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
 2710  }
 2711  line += s->linesize[p];
 2712  }
 2713  }
 2714  }
/* vertical chroma upscaling, interpolating between neighbouring rows and
 * walking bottom-up for the same in-place reason as above */
 2715  if (AV_RB32(s->upscale_v)) {
 2716  int p;
 2718  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
 2719  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
 2720  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
 2721  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
 2722  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
 2723  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
 2724  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
 2725  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
 2726  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
 2727  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
 2728  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
 2729  avctx->pix_fmt == AV_PIX_FMT_GBRAP
 2730  );
 2731  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
 2732  if (ret)
 2733  return ret;
 2734 
 2735  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
 2736  for (p = 0; p < s->nb_components; p++) {
 2737  uint8_t *dst;
 2738  int w = s->width;
 2739  int h = s->height;
 2740  if (!s->upscale_v[p])
 2741  continue;
 2742  if (p==1 || p==2) {
 2743  w = AV_CEIL_RSHIFT(w, hshift);
 2744  h = AV_CEIL_RSHIFT(h, vshift);
 2745  }
 2746  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
 2747  for (int i = h - 1; i; i--) {
 2748  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
 2749  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
 2750  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
 2751  memcpy(dst, src1, w);
 2752  } else {
 2753  for (index = 0; index < w; index++)
 2754  dst[index] = (src1[index] + src2[index]) >> 1;
 2755  }
 2756  dst -= s->linesize[p];
 2757  }
 2758  }
 2759  }
/* vertical flip requested by a COM vendor tag: negate the linesizes and
 * point each plane at its last row */
 2760  if (s->flipped && !s->rgb) {
 2761  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
 2762  if (ret)
 2763  return ret;
 2764 
 2765  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
 2766  for (index=0; index<s->nb_components; index++) {
 2767  int h = frame->height;
 2768  if (index && index < 3)
 2769  h = AV_CEIL_RSHIFT(h, vshift);
 2770  if (frame->data[index]) {
 2771  frame->data[index] += (h - 1) * frame->linesize[index];
 2772  frame->linesize[index] *= -1;
 2773  }
 2774  }
 2775  }
 2776 
/* reorder R,G,B planes into the G,B,R layout expected for GBRP */
 2777  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
 2778  av_assert0(s->nb_components == 3);
 2779  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
 2780  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
 2781  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
 2782  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
 2783  }
 2784 
/* Adobe APP14 transform 0 on a 4-component image: undo inverted CMYK into
 * premultiplied RGB and force the alpha plane opaque */
 2785  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
 2786  int w = s->picture_ptr->width;
 2787  int h = s->picture_ptr->height;
 2788  av_assert0(s->nb_components == 4);
 2789  for (int i = 0; i < h; i++) {
 2790  int j;
 2791  uint8_t *dst[4];
 2792  for (index=0; index<4; index++) {
 2793  dst[index] = s->picture_ptr->data[index]
 2794  + s->picture_ptr->linesize[index]*i;
 2795  }
 2796  for (j=0; j<w; j++) {
 2797  int k = dst[3][j];
 2798  int r = dst[0][j] * k;
 2799  int g = dst[1][j] * k;
 2800  int b = dst[2][j] * k;
 2801  dst[0][j] = g*257 >> 16;
 2802  dst[1][j] = b*257 >> 16;
 2803  dst[2][j] = r*257 >> 16;
 2804  }
 2805  memset(dst[3], 255, w);
 2806  }
 2807  }
 2808  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
 2809  int w = s->picture_ptr->width;
 2810  int h = s->picture_ptr->height;
 2811  av_assert0(s->nb_components == 4);
 2812  for (int i = 0; i < h; i++) {
 2813  int j;
 2814  uint8_t *dst[4];
 2815  for (index=0; index<4; index++) {
 2816  dst[index] = s->picture_ptr->data[index]
 2817  + s->picture_ptr->linesize[index]*i;
 2818  }
 2819  for (j=0; j<w; j++) {
 2820  int k = dst[3][j];
 2821  int r = (255 - dst[0][j]) * k;
 2822  int g = (128 - dst[1][j]) * k;
 2823  int b = (128 - dst[2][j]) * k;
 2824  dst[0][j] = r*257 >> 16;
 2825  dst[1][j] = (g*257 >> 16) + 128;
 2826  dst[2][j] = (b*257 >> 16) + 128;
 2827  }
 2828  memset(dst[3], 255, w);
 2829  }
 2830  }
 2831 
/* export stereo 3D info parsed from the _JPS APP3 extension */
 2832  if (s->stereo3d) {
 2834  if (stereo) {
 2835  stereo->type = s->stereo3d->type;
 2836  stereo->flags = s->stereo3d->flags;
 2837  }
 2838  av_freep(&s->stereo3d);
 2839  }
 2840 
/* once every APP2 ICC chunk has arrived, reassemble them in sequence order
 * into a single ICC profile side-data blob */
 2841  if (s->iccnum != 0 && s->iccnum == s->iccread) {
 2842  AVFrameSideData *sd;
 2843  size_t offset = 0;
 2844  int total_size = 0;
 2845 
 2846  /* Sum size of all parts. */
 2847  for (int i = 0; i < s->iccnum; i++)
 2848  total_size += s->iccentries[i].length;
 2849 
 2850  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
 2851  if (ret < 0) {
 2852  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
 2853  return ret;
 2854  }
 2855 
 2856  if (sd) {
 2857  /* Reassemble the parts, which are now in-order. */
 2858  for (int i = 0; i < s->iccnum; i++) {
 2859  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
 2860  offset += s->iccentries[i].length;
 2861  }
 2862  }
 2863  }
 2864 
 2865  if (s->exif_metadata.entries) {
 2866  ret = ff_decode_exif_attach_ifd(avctx, frame, &s->exif_metadata);
 2867  av_exif_free(&s->exif_metadata);
 2868  if (ret < 0)
 2869  av_log(avctx, AV_LOG_WARNING, "couldn't attach EXIF metadata\n");
 2870  }
 2871 
/* AVID (AVRn/AVDJ) streams may code more rows than displayed: report the
 * full coded height and crop the excess at the top */
 2872  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
 2873  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
 2874  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
 2875  avctx->coded_height > s->orig_height) {
 2876  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
 2877  frame->crop_top = frame->height - avctx->height;
 2878  }
 2879 
 2880 the_end_no_picture:
 2881  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %td bytes\n",
 2882  buf_end - buf_ptr);
 2883  return buf_ptr - buf;
 2884 }
2885 
2886 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2887  AVPacket *avpkt)
2888 {
2889  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2890  avpkt, avpkt->data, avpkt->size);
2891 }
2892 
2893 
 2894 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
 2895  * even without having called ff_mjpeg_decode_init(). */
/* Decoder close: release every allocation owned by the context — picture
 * frames, scratch/unescape buffers, per-component VLC tables, block and
 * last_nnz arrays, EXIF data, hwaccel state and JPEG-LS state.
 * NOTE(review): the signature line is missing from this rendering (likely
 * av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)), and so is the
 * statement at doxygen line 2925 (presumably reset_icc_profile(s);) —
 * confirm against the original source. */
 2897 {
 2898  MJpegDecodeContext *s = avctx->priv_data;
 2899  int i, j;
 2900 
 2901  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
 2902  av_log(avctx, AV_LOG_INFO, "Single field\n");
 2903  }
 2904 
 2905  av_frame_free(&s->picture);
 2906  s->picture_ptr = NULL;
 2907 
 2908  av_frame_free(&s->smv_frame);
 2909 
 2910  av_freep(&s->buffer);
 2911  av_freep(&s->stereo3d);
 2912  av_freep(&s->ljpeg_buffer);
 2913  s->ljpeg_buffer_size = 0;
 2914 
 2915  for (i = 0; i < 3; i++) {
 2916  for (j = 0; j < 4; j++)
 2917  ff_vlc_free(&s->vlcs[i][j]);
 2918  }
 2919  for (i = 0; i < MAX_COMPONENTS; i++) {
 2920  av_freep(&s->blocks[i]);
 2921  av_freep(&s->last_nnz[i]);
 2922  }
 2923  av_exif_free(&s->exif_metadata);
 2924 
 2926 
 2927  av_freep(&s->hwaccel_picture_private);
 2928  av_freep(&s->jls_state);
 2929 
 2930  return 0;
 2931 }
2932 
/* Flush callback: drop any half-decoded picture and reset the SMV
 * sub-frame cursor and cached frame so decoding can restart after a seek.
 * NOTE(review): the signature line is missing from this rendering —
 * presumably static void decode_flush(AVCodecContext *avctx). */
 2934 {
 2935  MJpegDecodeContext *s = avctx->priv_data;
 2936  s->got_picture = 0;
 2937 
 2938  s->smv_next_frame = 0;
 2939  av_frame_unref(s->smv_frame);
 2940 }
2941 
 2942 #if CONFIG_MJPEG_DECODER
 2943 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
 2944 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
/* Decoder private options: only "extern_huff" (off by default), which lets
 * the caller supply Huffman tables in extradata. */
 2945 static const AVOption options[] = {
 2946  { "extern_huff", "Use external huffman table.",
 2947  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
 2948  { NULL },
 2949 };
 2950 
 2951 static const AVClass mjpegdec_class = {
 2952  .class_name = "MJPEG decoder",
 2953  .item_name = av_default_item_name,
 2954  .option = options,
 2955  .version = LIBAVUTIL_VERSION_INT,
 2956 };
 2957 
/* Codec registration for the software MJPEG decoder, with optional NVDEC
 * and VAAPI hardware acceleration configs.
 * NOTE(review): the init/close/decode callback lines (doxygen 2964-2966,
 * 2973-2974) are missing from this rendering — confirm against the source. */
 2958 const FFCodec ff_mjpeg_decoder = {
 2959  .p.name = "mjpeg",
 2960  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
 2961  .p.type = AVMEDIA_TYPE_VIDEO,
 2962  .p.id = AV_CODEC_ID_MJPEG,
 2963  .priv_data_size = sizeof(MJpegDecodeContext),
 2967  .flush = decode_flush,
 2968  .p.capabilities = AV_CODEC_CAP_DR1,
 2969  .p.max_lowres = 3,
 2970  .p.priv_class = &mjpegdec_class,
 2971  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
 2972  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
 2975  .hw_configs = (const AVCodecHWConfigInternal *const []) {
 2976 #if CONFIG_MJPEG_NVDEC_HWACCEL
 2977  HWACCEL_NVDEC(mjpeg),
 2978 #endif
 2979 #if CONFIG_MJPEG_VAAPI_HWACCEL
 2980  HWACCEL_VAAPI(mjpeg),
 2981 #endif
 2982  NULL
 2983  },
 2984 };
 2985 #endif
 2986 #if CONFIG_THP_DECODER
/* Codec registration for Nintendo Gamecube THP video, which reuses the
 * MJPEG decode context and flush callback (THP scans are not byte-stuffed;
 * see the special case in the SOS unescaping path).
 * NOTE(review): the init/close/decode callback lines (doxygen 2993-2995)
 * are missing from this rendering — confirm against the source. */
 2987 const FFCodec ff_thp_decoder = {
 2988  .p.name = "thp",
 2989  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
 2990  .p.type = AVMEDIA_TYPE_VIDEO,
 2991  .p.id = AV_CODEC_ID_THP,
 2992  .priv_data_size = sizeof(MJpegDecodeContext),
 2996  .flush = decode_flush,
 2997  .p.capabilities = AV_CODEC_CAP_DR1,
 2998  .p.max_lowres = 3,
 2999  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
 3000 };
 3001 #endif
3002 
3003 #if CONFIG_SMVJPEG_DECODER
3004 // SMV JPEG just stacks several output frames into one JPEG picture
3005 // we handle that by setting up the cropping parameters appropriately
3006 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
3007 {
3008  MJpegDecodeContext *s = avctx->priv_data;
3009 
3010  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
3011 
3012  frame->width = avctx->coded_width;
3013  frame->height = avctx->coded_height;
3014  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3015  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3016 
3017  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3018  s->smv_frame->pts += s->smv_frame->duration;
3019  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3020 
3021  if (s->smv_next_frame == 0)
3022  av_frame_unref(s->smv_frame);
3023 }
3024 
/* receive_frame callback for SMV JPEG: decodes a whole stacked JPEG into
 * s->smv_frame on the first call, then serves the remaining sub-frames from
 * the cached picture via smv_process_frame() without pulling a new packet.
 * Returns 0, AVERROR(EAGAIN) when more input is needed, or another negative
 * AVERROR code.
 * NOTE(review): one statement (doxygen line 3043, presumably
 * av_packet_unref(pkt);) is missing from this rendering — confirm against
 * the original source. */
 3025 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
 3026 {
 3027  MJpegDecodeContext *s = avctx->priv_data;
 3028  AVPacket *const pkt = avctx->internal->in_pkt;
 3029  int got_frame = 0;
 3030  int ret;
 3031 
/* sub-frames of the previous JPEG are still pending: skip decoding */
 3032  if (s->smv_next_frame > 0)
 3033  goto return_frame;
 3034 
 3035  ret = ff_decode_get_packet(avctx, pkt);
 3036  if (ret < 0)
 3037  return ret;
 3038 
 3039  av_frame_unref(s->smv_frame);
 3040 
 3041  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
 3042  s->smv_frame->pkt_dts = pkt->dts;
 3044  if (ret < 0)
 3045  return ret;
 3046 
 3047  if (!got_frame)
 3048  return AVERROR(EAGAIN);
 3049 
 3050  // packet duration covers all the frames in the packet
 3051  s->smv_frame->duration /= s->smv_frames_per_jpeg;
 3052 
 3053 return_frame:
 3054  av_assert0(s->smv_frame->buf[0]);
 3055  ret = av_frame_ref(frame, s->smv_frame);
 3056  if (ret < 0)
 3057  return ret;
 3058 
 3059  smv_process_frame(avctx, frame);
 3060  return 0;
 3061 }
3062 
/* Codec registration for SMV JPEG, using the receive_frame API so one input
 * packet can yield several cropped output frames.
 * NOTE(review): the init/close lines (doxygen 3069-3070) and the second
 * caps_internal flag line (doxygen 3075) are missing from this rendering —
 * confirm against the original source. */
 3063 const FFCodec ff_smvjpeg_decoder = {
 3064  .p.name = "smvjpeg",
 3065  CODEC_LONG_NAME("SMV JPEG"),
 3066  .p.type = AVMEDIA_TYPE_VIDEO,
 3067  .p.id = AV_CODEC_ID_SMVJPEG,
 3068  .priv_data_size = sizeof(MJpegDecodeContext),
 3071  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
 3072  .flush = decode_flush,
 3073  .p.capabilities = AV_CODEC_CAP_DR1,
 3074  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
 3076 };
3077 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
flags
const SwsFlags flags[]
Definition: swscale.c:61
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1413
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:280
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:249
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:208
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:360
opt.h
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:873
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1208
out
FILE * out
Definition: movenc.c:55
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1441
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
APP1
@ APP1
Definition: mjpeg.h:80
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:993
SOF0
@ SOF0
Definition: mjpeg.h:39
src1
const pixel * src1
Definition: h264pred_template.c:420
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1406
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:573
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:260
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:254
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:111
mask
int mask
Definition: mediacodecdec_common.c:154
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:595
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:588
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:690
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:149
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:213
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:517
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1383
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:251
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:383
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:246
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1284
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1457
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:120
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1670
fail
#define fail()
Definition: checkasm.h:216
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:597
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:109
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2374
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2169
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:58
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:650
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:191
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:896
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:551
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1646
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:173
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:212
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1076
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:189
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:552
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:355
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:550
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2360
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2896
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
SOF55
@ SOF55
JPEG-LS.
Definition: mjpeg.h:103
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:212
mjpeg_parse_len
static int mjpeg_parse_len(MJpegDecodeContext *s, int *plen, const char *name)
Definition: mjpegdec.c:191
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:561
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:529
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2430
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
av_clip_int16
#define av_clip_int16
Definition: common.h:115
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:530
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1644
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:174
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:203
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
Definition: mjpegdec.c:834
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
options
Definition: swscale.c:43
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:235
MJpegDecodeContext
Definition: mjpegdec.h:56
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1472
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:1011
lowres
static int lowres
Definition: ffplay.c:330
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1593
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:651
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1705
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1729
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1111
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:589
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2886
av_bswap32
#define av_bswap32
Definition: bswap.h:47
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:913
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1679
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:177
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2127
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
SOF15
@ SOF15
Definition: mjpeg.h:54
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:294
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2213
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:194
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:180
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2334
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:849
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:522
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:176
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:373
VD
#define VD
Definition: amfdec.c:664
src2
const pixel * src2
Definition: h264pred_template.c:421
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:215
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1857
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:63
decode_flush
static av_cold void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2933
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1390
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:684
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1886
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:985
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:35
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
av_malloc
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:98
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:560
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
pos
unsigned int pos
Definition: spdifenc.c:414
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1387
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:549
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2240
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
AVCodecContext
main external API structure.
Definition: avcodec.h:439
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:355
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:247
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:799
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1626
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:264
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1382
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:305
APP0
@ APP0
Definition: mjpeg.h:79
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1869
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
SOF1
@ SOF1
Definition: mjpeg.h:40
w
uint8_t w
Definition: llvidencdsp.c:39
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:464
ff_mjpeg_bits_dc_luminance
const FF_VISIBILITY_PUSH_HIDDEN uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1645
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:47
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:175
AV_RB24
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:383
src
#define src
Definition: vp8dsp.c:248
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347