/*
 * NotchLC decoder
 * Copyright (c) 2020 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITSTREAM_READER_LE
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
#include "internal.h"
#include "lzf.h"
#include "thread.h"

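/*
 * Each packet starts with a 16-byte header (as read in decode_frame() below):
 * a 32-bit 'NLC1' tag, the uncompressed payload size, the compressed payload
 * size and a format field selecting the container compression (0: LZF,
 * 1: the LZ4-style scheme implemented in lz4_decompress(), 2: apparently no
 * container compression). The decompressed payload is parsed by
 * decode_blocks().
 */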
typedef struct NotchLCContext {
    unsigned compressed_size;
    unsigned format;

    uint8_t *uncompressed_buffer;
    unsigned uncompressed_size;

    uint8_t *lzf_buffer;
    int64_t lzf_size;

    unsigned texture_size_x;
    unsigned texture_size_y;
    unsigned y_data_row_offsets;
    unsigned uv_offset_data_offset;
    unsigned y_control_data_offset;
    unsigned a_control_word_offset;
    unsigned y_data_offset;
    unsigned uv_data_offset;
    unsigned y_data_size;
    unsigned a_data_offset;
    unsigned uv_count_offset;
    unsigned a_count_size;
    unsigned data_end;

    GetByteContext gb;
    PutByteContext pb;
} NotchLCContext;

static av_cold int decode_init(AVCodecContext *avctx)
{
    avctx->pix_fmt = AV_PIX_FMT_YUVA444P12;
    avctx->color_range = AVCOL_RANGE_JPEG;
    avctx->colorspace = AVCOL_SPC_RGB;
    avctx->color_primaries = AVCOL_PRI_BT709;
    avctx->color_trc = AVCOL_TRC_IEC61966_2_1;

    return 0;
}

#define HISTORY_SIZE (64 * 1024)

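/*
 * Decompress an LZ4-style token stream into *pb. Each token packs a literal
 * count in the high nibble and a match length minus 4 in the low nibble;
 * values of 15 are extended by additional length bytes. Output is staged
 * through a 64 KiB history buffer so 16-bit back-reference deltas can be
 * resolved, flushing each completed window to the PutByteContext. Returns
 * the number of bytes written, or 0 if a zero delta terminates the stream.
 */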
static int lz4_decompress(AVCodecContext *avctx,
                          GetByteContext *gb,
                          PutByteContext *pb)
{
    unsigned reference_pos, match_length, delta, pos = 0;
    uint8_t history[64 * 1024];

    while (bytestream2_get_bytes_left(gb) > 0) {
        uint8_t token = bytestream2_get_byte(gb);
        unsigned num_literals = token >> 4;

        if (num_literals == 15) {
            unsigned char current;
            do {
                current = bytestream2_get_byte(gb);
                num_literals += current;
            } while (current == 255);
        }

        if (pos + num_literals < HISTORY_SIZE) {
            bytestream2_get_buffer(gb, history + pos, num_literals);
            pos += num_literals;
        } else {
            while (num_literals-- > 0) {
                history[pos++] = bytestream2_get_byte(gb);
                if (pos == HISTORY_SIZE) {
                    bytestream2_put_buffer(pb, history, HISTORY_SIZE);
                    pos = 0;
                }
            }
        }

        if (bytestream2_get_bytes_left(gb) <= 0)
            break;

        delta = bytestream2_get_le16(gb);
        if (delta == 0)
            return 0;
        match_length = 4 + (token & 0x0F);
        if (match_length == 4 + 0x0F) {
            uint8_t current;

            do {
                current = bytestream2_get_byte(gb);
                match_length += current;
            } while (current == 255);
        }
        reference_pos = (pos >= delta) ? (pos - delta) : (HISTORY_SIZE + pos - delta);
        if (pos + match_length < HISTORY_SIZE && reference_pos + match_length < HISTORY_SIZE) {
            if (pos >= reference_pos + match_length || reference_pos >= pos + match_length) {
                memcpy(history + pos, history + reference_pos, match_length);
                pos += match_length;
            } else {
                while (match_length-- > 0)
                    history[pos++] = history[reference_pos++];
            }
        } else {
            while (match_length-- > 0) {
                history[pos++] = history[reference_pos++];
                if (pos == HISTORY_SIZE) {
                    bytestream2_put_buffer(pb, history, HISTORY_SIZE);
                    pos = 0;
                }
                reference_pos %= HISTORY_SIZE;
            }
        }
    }

    bytestream2_put_buffer(pb, history, pos);

    return bytestream2_tell_p(pb);
}

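/*
 * Parse the uncompressed payload and reconstruct one YUVA444P12 frame.
 * The payload starts with the texture dimensions, followed by a table of
 * section offsets/sizes stored in 32-bit words (hence the "* 4" scaling
 * below): UV offset data, Y control words, alpha control words, UV data,
 * the Y data size, the alpha data offset and count, and the end of the
 * data. The luma, alpha and chroma planes are then decoded in turn from
 * those sections.
 */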
static int decode_blocks(AVCodecContext *avctx, AVFrame *p, ThreadFrame *frame,
                         unsigned uncompressed_size)
{
    NotchLCContext *s = avctx->priv_data;
    GetByteContext rgb, dgb, *gb = &s->gb;
    GetBitContext bit;
    int ylinesize, ulinesize, vlinesize, alinesize;
    uint16_t *dsty, *dstu, *dstv, *dsta;
    int ret;

    s->texture_size_x = bytestream2_get_le32(gb);
    s->texture_size_y = bytestream2_get_le32(gb);

    ret = ff_set_dimensions(avctx, s->texture_size_x, s->texture_size_y);
    if (ret < 0)
        return ret;

    s->uv_offset_data_offset = bytestream2_get_le32(gb);
    if (s->uv_offset_data_offset >= UINT_MAX / 4)
        return AVERROR_INVALIDDATA;
    s->uv_offset_data_offset *= 4;
    if (s->uv_offset_data_offset >= uncompressed_size)
        return AVERROR_INVALIDDATA;

    s->y_control_data_offset = bytestream2_get_le32(gb);
    if (s->y_control_data_offset >= UINT_MAX / 4)
        return AVERROR_INVALIDDATA;
    s->y_control_data_offset *= 4;
    if (s->y_control_data_offset >= uncompressed_size)
        return AVERROR_INVALIDDATA;

    s->a_control_word_offset = bytestream2_get_le32(gb);
    if (s->a_control_word_offset >= UINT_MAX / 4)
        return AVERROR_INVALIDDATA;
    s->a_control_word_offset *= 4;
    if (s->a_control_word_offset >= uncompressed_size)
        return AVERROR_INVALIDDATA;

    s->uv_data_offset = bytestream2_get_le32(gb);
    if (s->uv_data_offset >= UINT_MAX / 4)
        return AVERROR_INVALIDDATA;
    s->uv_data_offset *= 4;
    if (s->uv_data_offset >= uncompressed_size)
        return AVERROR_INVALIDDATA;

    s->y_data_size = bytestream2_get_le32(gb);
    if (s->y_data_size >= UINT_MAX / 4)
        return AVERROR_INVALIDDATA;

    s->a_data_offset = bytestream2_get_le32(gb);
    if (s->a_data_offset >= UINT_MAX / 4)
        return AVERROR_INVALIDDATA;
    s->a_data_offset *= 4;
    if (s->a_data_offset >= uncompressed_size)
        return AVERROR_INVALIDDATA;

    s->a_count_size = bytestream2_get_le32(gb);
    if (s->a_count_size >= UINT_MAX / 4)
        return AVERROR_INVALIDDATA;
    s->a_count_size *= 4;
    if (s->a_count_size >= uncompressed_size)
        return AVERROR_INVALIDDATA;

    s->data_end = bytestream2_get_le32(gb);
    if (s->data_end > uncompressed_size)
        return AVERROR_INVALIDDATA;

    s->y_data_row_offsets = bytestream2_tell(gb);
    if (s->data_end <= s->y_data_size)
        return AVERROR_INVALIDDATA;
    s->y_data_offset = s->data_end - s->y_data_size;
    if (s->y_data_offset <= s->a_data_offset)
        return AVERROR_INVALIDDATA;
    s->uv_count_offset = s->y_data_offset - s->a_data_offset;

    if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    rgb = *gb;
    dgb = *gb;
    bytestream2_seek(&rgb, s->y_data_row_offsets, SEEK_SET);
    bytestream2_seek(gb, s->y_control_data_offset, SEEK_SET);

    if (bytestream2_get_bytes_left(gb) < (avctx->height + 3) / 4 * ((avctx->width + 3) / 4) * 4)
        return AVERROR_INVALIDDATA;

    dsty = (uint16_t *)p->data[0];
    dsta = (uint16_t *)p->data[3];
    ylinesize = p->linesize[0] / 2;
    alinesize = p->linesize[3] / 2;

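    /*
     * Luma: every 4x4 block is described by a 32-bit control word holding a
     * 12-bit y_min, a 12-bit y_max and four 2-bit codes selecting how many
     * bits (1..4) are read per sample on each row; samples interpolate
     * between y_min and y_max.
     */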
    for (int y = 0; y < avctx->height; y += 4) {
        const unsigned row_offset = bytestream2_get_le32(&rgb);

        bytestream2_seek(&dgb, s->y_data_offset + row_offset, SEEK_SET);

        init_get_bits8(&bit, dgb.buffer, bytestream2_get_bytes_left(&dgb));
        for (int x = 0; x < avctx->width; x += 4) {
            unsigned item = bytestream2_get_le32(gb);
            unsigned y_min = item & 4095;
            unsigned y_max = (item >> 12) & 4095;
            unsigned y_diff = y_max - y_min;
            unsigned control[4];

            control[0] = (item >> 24) & 3;
            control[1] = (item >> 26) & 3;
            control[2] = (item >> 28) & 3;
            control[3] = (item >> 30) & 3;

            for (int i = 0; i < 4; i++) {
                const int nb_bits = control[i] + 1;
                const int div = (1 << nb_bits) - 1;
                const int add = div - 1;

                dsty[x + i * ylinesize + 0] = av_clip_uintp2(y_min + ((y_diff * get_bits(&bit, nb_bits) + add) / div), 12);
                dsty[x + i * ylinesize + 1] = av_clip_uintp2(y_min + ((y_diff * get_bits(&bit, nb_bits) + add) / div), 12);
                dsty[x + i * ylinesize + 2] = av_clip_uintp2(y_min + ((y_diff * get_bits(&bit, nb_bits) + add) / div), 12);
                dsty[x + i * ylinesize + 3] = av_clip_uintp2(y_min + ((y_diff * get_bits(&bit, nb_bits) + add) / div), 12);
            }
        }

        dsty += 4 * ylinesize;
    }

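    /*
     * Alpha: when uv_count_offset equals a_control_word_offset there is no
     * alpha payload and the plane is filled with full opacity. Otherwise
     * each 16x16 macroblock has a 32-bit word of 2-bit modes for its sixteen
     * 4x4 sub-blocks plus an offset to a 64-bit word carrying two 8-bit
     * endpoints and sixteen 3-bit codes: mode 0 is transparent, mode 1 is
     * opaque and mode 2 derives the value from the endpoints and the code.
     */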
    rgb = *gb;
    dgb = *gb;
    bytestream2_seek(gb, s->a_control_word_offset, SEEK_SET);
    if (s->uv_count_offset == s->a_control_word_offset) {
        for (int y = 0; y < avctx->height; y++) {
            for (int x = 0; x < avctx->width; x++)
                dsta[x] = 4095;
            dsta += alinesize;
        }
    } else {
        if (bytestream2_get_bytes_left(gb) < (avctx->height + 15) / 16 * ((avctx->width + 15) / 16) * 8)
            return AVERROR_INVALIDDATA;

        for (int y = 0; y < avctx->height; y += 16) {
            for (int x = 0; x < avctx->width; x += 16) {
                unsigned m = bytestream2_get_le32(gb);
                unsigned offset = bytestream2_get_le32(gb);
                unsigned alpha0, alpha1;
                uint64_t control;

                if (offset >= UINT_MAX / 4)
                    return AVERROR_INVALIDDATA;
                offset = offset * 4 + s->uv_data_offset + s->a_data_offset;
                if (offset >= s->data_end)
                    return AVERROR_INVALIDDATA;

                bytestream2_seek(&dgb, offset, SEEK_SET);
                control = bytestream2_get_le64(&dgb);
                alpha0 = control & 0xFF;
                alpha1 = (control >> 8) & 0xFF;
                control = control >> 16;

                for (int by = 0; by < 4; by++) {
                    for (int bx = 0; bx < 4; bx++) {
                        switch (m & 3) {
                        case 0:
                            for (int i = 0; i < 4; i++) {
                                for (int j = 0; j < 4; j++) {
                                    dsta[x + (i + by * 4) * alinesize + bx * 4 + j] = 0;
                                }
                            }
                            break;
                        case 1:
                            for (int i = 0; i < 4; i++) {
                                for (int j = 0; j < 4; j++) {
                                    dsta[x + (i + by * 4) * alinesize + bx * 4 + j] = 4095;
                                }
                            }
                            break;
                        case 2:
                            for (int i = 0; i < 4; i++) {
                                for (int j = 0; j < 4; j++) {
                                    dsta[x + (i + by * 4) * alinesize + bx * 4 + j] = (alpha0 + (alpha1 - alpha0) * (control & 7)) << 4;
                                }
                            }
                            break;
                        default:
                            return AVERROR_INVALIDDATA;
                        }

                        control >>= 3;
                        m >>= 2;
                    }
                }
            }

            dsta += 16 * alinesize;
        }
    }

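    /*
     * Chroma: U and V are coded per 16x16 macroblock. A per-block offset
     * table (read through rgb) locates the block data, which begins with two
     * 16-bit flags: if both are zero the block interpolates one endpoint pair
     * at 4x4 granularity; otherwise each 8x8 quadrant uses either 2x2
     * (is8x8 bit set) or 4x4 (escape) interpolation with its own endpoint
     * pair and 2-bit weights.
     */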
    bytestream2_seek(&rgb, s->uv_offset_data_offset, SEEK_SET);

    dstu = (uint16_t *)p->data[1];
    dstv = (uint16_t *)p->data[2];
    ulinesize = p->linesize[1] / 2;
    vlinesize = p->linesize[2] / 2;

    for (int y = 0; y < avctx->height; y += 16) {
        for (int x = 0; x < avctx->width; x += 16) {
            unsigned offset = bytestream2_get_le32(&rgb) * 4;
            int u[16][16] = { 0 }, v[16][16] = { 0 };
            int u0, v0, u1, v1, udif, vdif;
            unsigned escape, is8x8, loc;

            bytestream2_seek(&dgb, s->uv_data_offset + offset, SEEK_SET);

            is8x8 = bytestream2_get_le16(&dgb);
            escape = bytestream2_get_le16(&dgb);

            if (escape == 0 && is8x8 == 0) {
                u0 = bytestream2_get_byte(&dgb);
                v0 = bytestream2_get_byte(&dgb);
                u1 = bytestream2_get_byte(&dgb);
                v1 = bytestream2_get_byte(&dgb);
                loc = bytestream2_get_le32(&dgb);
                u0 = (u0 << 4) | (u0 & 0xF);
                v0 = (v0 << 4) | (v0 & 0xF);
                u1 = (u1 << 4) | (u1 & 0xF);
                v1 = (v1 << 4) | (v1 & 0xF);
                udif = u1 - u0;
                vdif = v1 - v0;

                for (int i = 0; i < 16; i += 4) {
                    for (int j = 0; j < 16; j += 4) {
                        for (int ii = 0; ii < 4; ii++) {
                            for (int jj = 0; jj < 4; jj++) {
                                u[i + ii][j + jj] = u0 + ((udif * (int)(loc & 3) + 2) / 3);
                                v[i + ii][j + jj] = v0 + ((vdif * (int)(loc & 3) + 2) / 3);
                            }
                        }

                        loc >>= 2;
                    }
                }
            } else {
                for (int i = 0; i < 16; i += 8) {
                    for (int j = 0; j < 16; j += 8) {
                        if (is8x8 & 1) {
                            u0 = bytestream2_get_byte(&dgb);
                            v0 = bytestream2_get_byte(&dgb);
                            u1 = bytestream2_get_byte(&dgb);
                            v1 = bytestream2_get_byte(&dgb);
                            loc = bytestream2_get_le32(&dgb);
                            u0 = (u0 << 4) | (u0 & 0xF);
                            v0 = (v0 << 4) | (v0 & 0xF);
                            u1 = (u1 << 4) | (u1 & 0xF);
                            v1 = (v1 << 4) | (v1 & 0xF);
                            udif = u1 - u0;
                            vdif = v1 - v0;

                            for (int ii = 0; ii < 8; ii += 2) {
                                for (int jj = 0; jj < 8; jj += 2) {
                                    for (int iii = 0; iii < 2; iii++) {
                                        for (int jjj = 0; jjj < 2; jjj++) {
                                            u[i + ii + iii][j + jj + jjj] = u0 + ((udif * (int)(loc & 3) + 2) / 3);
                                            v[i + ii + iii][j + jj + jjj] = v0 + ((vdif * (int)(loc & 3) + 2) / 3);
                                        }
                                    }

                                    loc >>= 2;
                                }
                            }
                        } else if (escape) {
                            for (int ii = 0; ii < 8; ii += 4) {
                                for (int jj = 0; jj < 8; jj += 4) {
                                    u0 = bytestream2_get_byte(&dgb);
                                    v0 = bytestream2_get_byte(&dgb);
                                    u1 = bytestream2_get_byte(&dgb);
                                    v1 = bytestream2_get_byte(&dgb);
                                    loc = bytestream2_get_le32(&dgb);
                                    u0 = (u0 << 4) | (u0 & 0xF);
                                    v0 = (v0 << 4) | (v0 & 0xF);
                                    u1 = (u1 << 4) | (u1 & 0xF);
                                    v1 = (v1 << 4) | (v1 & 0xF);
                                    udif = u1 - u0;
                                    vdif = v1 - v0;

                                    for (int iii = 0; iii < 4; iii++) {
                                        for (int jjj = 0; jjj < 4; jjj++) {
                                            u[i + ii + iii][j + jj + jjj] = u0 + ((udif * (int)(loc & 3) + 2) / 3);
                                            v[i + ii + iii][j + jj + jjj] = v0 + ((vdif * (int)(loc & 3) + 2) / 3);

                                            loc >>= 2;
                                        }
                                    }
                                }
                            }
                        }

                        is8x8 >>= 1;
                    }
                }
            }

            for (int i = 0; i < 16; i++) {
                for (int j = 0; j < 16; j++) {
                    dstu[x + i * ulinesize + j] = u[i][j];
                    dstv[x + i * vlinesize + j] = v[i][j];
                }
            }
        }

        dstu += 16 * ulinesize;
        dstv += 16 * vlinesize;
    }

    return 0;
}

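/*
 * Top-level packet decoder: validate the NLC1 header, undo the container
 * compression (format 0 goes through ff_lzf_uncompress(), format 1 through
 * lz4_decompress() above, format 2 appears to be stored uncompressed and is
 * read in place), then hand the payload to decode_blocks().
 */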
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    NotchLCContext *s = avctx->priv_data;
    ThreadFrame frame = { .f = data };
    GetByteContext *gb = &s->gb;
    PutByteContext *pb = &s->pb;
    unsigned uncompressed_size;
    AVFrame *p = data;
    int ret;

    if (avpkt->size <= 40)
        return AVERROR_INVALIDDATA;

    bytestream2_init(gb, avpkt->data, avpkt->size);

    if (bytestream2_get_le32(gb) != MKBETAG('N','L','C','1'))
        return AVERROR_INVALIDDATA;

    uncompressed_size = bytestream2_get_le32(gb);
    s->compressed_size = bytestream2_get_le32(gb);
    s->format = bytestream2_get_le32(gb);

    if (s->format > 2)
        return AVERROR_PATCHWELCOME;

    if (s->format == 0) {
        ret = ff_lzf_uncompress(gb, &s->lzf_buffer, &s->lzf_size);
        if (ret < 0)
            return ret;

        if (uncompressed_size > s->lzf_size)
            return AVERROR_INVALIDDATA;

        bytestream2_init(gb, s->lzf_buffer, uncompressed_size);
    } else if (s->format == 1) {
        if (bytestream2_get_bytes_left(gb) < uncompressed_size / 255)
            return AVERROR_INVALIDDATA;

        av_fast_padded_malloc(&s->uncompressed_buffer, &s->uncompressed_size,
                              uncompressed_size);
        if (!s->uncompressed_buffer)
            return AVERROR(ENOMEM);

        bytestream2_init_writer(pb, s->uncompressed_buffer, s->uncompressed_size);

        ret = lz4_decompress(avctx, gb, pb);
        if (ret != uncompressed_size)
            return AVERROR_INVALIDDATA;

        bytestream2_init(gb, s->uncompressed_buffer, uncompressed_size);
    }

    ret = decode_blocks(avctx, p, &frame, uncompressed_size);
    if (ret < 0)
        return ret;

    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    *got_frame = 1;

    return avpkt->size;
}

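/* Release the scratch buffers owned by the decoder context. */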
static av_cold int decode_end(AVCodecContext *avctx)
{
    NotchLCContext *s = avctx->priv_data;

    av_freep(&s->uncompressed_buffer);
    s->uncompressed_size = 0;
    av_freep(&s->lzf_buffer);
    s->lzf_size = 0;

    return 0;
}

AVCodec ff_notchlc_decoder = {
    .name           = "notchlc",
    .long_name      = NULL_IF_CONFIG_SMALL("NotchLC"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_NOTCHLC,
    .priv_data_size = sizeof(NotchLCContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};