FFmpeg
adpcmenc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 #include "config_components.h"
26 
27 #include "libavutil/mem.h"
28 #include "libavutil/opt.h"
29 
30 #include "avcodec.h"
31 #include "put_bits.h"
32 #include "bytestream.h"
33 #include "adpcm.h"
34 #include "adpcm_data.h"
35 #include "codec_internal.h"
36 #include "encode.h"
37 
38 /**
39  * @file
40  * ADPCM encoders
41  * See ADPCM decoder reference documents for codec information.
42  */
43 
/*
 * Compile-time switch-case dispatch helpers.
 *
 * CASE(codec, ...) expands to a `case AV_CODEC_ID_<codec>:` arm containing
 * the given statements when CONFIG_<codec>_ENCODER is 1, and to nothing at
 * all when it is 0.  The CASE_3/CASE_2 indirection forces the config macro
 * to be evaluated to its 0/1 value before it is token-pasted onto CASE_,
 * selecting either CASE_0 (drop everything) or CASE_1 (emit the case).
 */
#define CASE_0(codec_id, ...)
#define CASE_1(codec_id, ...) \
    case codec_id: \
    { __VA_ARGS__ } \
    break;
#define CASE_2(enabled, codec_id, ...) \
    CASE_ ## enabled(codec_id, __VA_ARGS__)
#define CASE_3(config, codec_id, ...) \
    CASE_2(config, codec_id, __VA_ARGS__)
#define CASE(codec, ...) \
    CASE_3(CONFIG_ ## codec ## _ENCODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)
55 
/* One step of a candidate encoding path in the trellis search. */
typedef struct TrellisPath {
    int nibble; /* 4-bit ADPCM code chosen at this step */
    int prev;   /* index of the previous TrellisPath entry, for backtracking */
} TrellisPath;
60 
/* Decoder state reached by one surviving path of the trellis search. */
typedef struct TrellisNode {
    uint32_t ssd; /* accumulated sum of squared decoding errors on this path */
    int path;     /* index into the TrellisPath array for this node's history */
    int sample1;  /* most recently decoded sample */
    int sample2;  /* previously decoded sample (used by the MS predictor) */
    int step;     /* codec-specific step state (step index, step size or idelta) */
} TrellisNode;
68 
69 typedef struct ADPCMEncodeContext {
70  AVClass *class;
72 
77  uint8_t *trellis_hash;
79 
80 #define FREEZE_INTERVAL 128
81 
83 {
84  ADPCMEncodeContext *s = avctx->priv_data;
85  int channels = avctx->ch_layout.nb_channels;
86 
87  /*
88  * AMV's block size has to match that of the corresponding video
89  * stream. Relax the POT requirement.
90  */
91  if (avctx->codec->id != AV_CODEC_ID_ADPCM_IMA_AMV &&
92  (s->block_size & (s->block_size - 1))) {
93  av_log(avctx, AV_LOG_ERROR, "block size must be power of 2\n");
94  return AVERROR(EINVAL);
95  }
96 
97  if (avctx->trellis) {
98  int frontier, max_paths;
99 
100  if ((unsigned)avctx->trellis > 16U) {
101  av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
102  return AVERROR(EINVAL);
103  }
104 
105  if (avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_SSI ||
106  avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_APM ||
107  avctx->codec->id == AV_CODEC_ID_ADPCM_ARGO ||
108  avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_WS) {
109  /*
110  * The current trellis implementation doesn't work for extended
111  * runs of samples without periodic resets. Disallow it.
112  */
113  av_log(avctx, AV_LOG_ERROR, "trellis not supported\n");
114  return AVERROR_PATCHWELCOME;
115  }
116 
117  frontier = 1 << avctx->trellis;
118  max_paths = frontier * FREEZE_INTERVAL;
119  if (!FF_ALLOC_TYPED_ARRAY(s->paths, max_paths) ||
120  !FF_ALLOC_TYPED_ARRAY(s->node_buf, 2 * frontier) ||
121  !FF_ALLOC_TYPED_ARRAY(s->nodep_buf, 2 * frontier) ||
122  !FF_ALLOC_TYPED_ARRAY(s->trellis_hash, 65536))
123  return AVERROR(ENOMEM);
124  }
125 
127 
128  switch (avctx->codec->id) {
129  CASE(ADPCM_IMA_WAV,
130  /* each 16 bits sample gives one nibble
131  and we have 4 bytes per channel overhead */
132  avctx->frame_size = (s->block_size - 4 * channels) * 8 /
133  (4 * channels) + 1;
134  /* seems frame_size isn't taken into account...
135  have to buffer the samples :-( */
136  avctx->block_align = s->block_size;
137  avctx->bits_per_coded_sample = 4;
138  ) /* End of CASE */
139  CASE(ADPCM_IMA_QT,
140  avctx->frame_size = 64;
141  avctx->block_align = 34 * channels;
142  ) /* End of CASE */
143  CASE(ADPCM_MS,
144  uint8_t *extradata;
145  /* each 16 bits sample gives one nibble
146  and we have 7 bytes per channel overhead */
147  avctx->frame_size = (s->block_size - 7 * channels) * 2 / channels + 2;
148  avctx->bits_per_coded_sample = 4;
149  avctx->block_align = s->block_size;
150  if (!(avctx->extradata = av_malloc(32 + AV_INPUT_BUFFER_PADDING_SIZE)))
151  return AVERROR(ENOMEM);
152  avctx->extradata_size = 32;
153  extradata = avctx->extradata;
154  bytestream_put_le16(&extradata, avctx->frame_size);
155  bytestream_put_le16(&extradata, 7); /* wNumCoef */
156  for (int i = 0; i < 7; i++) {
157  bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
158  bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
159  }
160  ) /* End of CASE */
161  CASE(ADPCM_YAMAHA,
162  avctx->frame_size = s->block_size * 2 / channels;
163  avctx->block_align = s->block_size;
164  ) /* End of CASE */
165  CASE(ADPCM_SWF,
166  avctx->frame_size = 4096; /* Hardcoded according to the SWF spec. */
167  avctx->block_align = (2 + channels * (22 + 4 * (avctx->frame_size - 1)) + 7) / 8;
168  ) /* End of CASE */
171  avctx->frame_size = s->block_size * 2 / channels;
172  avctx->block_align = s->block_size;
173  break;
174  CASE(ADPCM_IMA_AMV,
175  avctx->frame_size = s->block_size;
176  avctx->block_align = 8 + (FFALIGN(avctx->frame_size, 2) / 2);
177  ) /* End of CASE */
178  CASE(ADPCM_IMA_APM,
179  avctx->frame_size = s->block_size * 2 / channels;
180  avctx->block_align = s->block_size;
181 
183  return AVERROR(ENOMEM);
184  avctx->extradata_size = 28;
185  ) /* End of CASE */
186  CASE(ADPCM_ARGO,
187  avctx->frame_size = 32;
188  avctx->block_align = 17 * channels;
189  ) /* End of CASE */
190  CASE(ADPCM_IMA_WS,
191  /* each 16 bits sample gives one nibble */
192  avctx->frame_size = s->block_size * 2 / channels;
193  avctx->block_align = s->block_size;
194  ) /* End of CASE */
195  default:
196  av_unreachable("there is a case for every codec using adpcm_encode_init()");
197  }
198 
199  return 0;
200 }
201 
203 {
204  ADPCMEncodeContext *s = avctx->priv_data;
205  av_freep(&s->paths);
206  av_freep(&s->node_buf);
207  av_freep(&s->nodep_buf);
208  av_freep(&s->trellis_hash);
209 
210  return 0;
211 }
212 
213 
215  int16_t sample)
216 {
217  int delta = sample - c->prev_sample;
218  int nibble = FFMIN(7, abs(delta) * 4 /
219  ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
220  c->prev_sample += ((ff_adpcm_step_table[c->step_index] *
221  ff_adpcm_yamaha_difflookup[nibble]) / 8);
222  c->prev_sample = av_clip_int16(c->prev_sample);
223  c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
224  return nibble;
225 }
226 
228 {
229  const int delta = sample - c->prev_sample;
230  const int step = ff_adpcm_step_table[c->step_index];
231  const int sign = (delta < 0) * 8;
232 
233  int nibble = FFMIN(abs(delta) * 4 / step, 7);
234  int diff = (step * nibble) >> 2;
235  if (sign)
236  diff = -diff;
237 
238  nibble = sign | nibble;
239 
240  c->prev_sample += diff;
241  c->prev_sample = av_clip_int16(c->prev_sample);
242  c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
243  return nibble;
244 }
245 
247  int16_t sample)
248 {
249  int delta = sample - c->prev_sample;
250  int diff, step = ff_adpcm_step_table[c->step_index];
251  int nibble = 8*(delta < 0);
252 
253  delta= abs(delta);
254  diff = delta + (step >> 3);
255 
256  if (delta >= step) {
257  nibble |= 4;
258  delta -= step;
259  }
260  step >>= 1;
261  if (delta >= step) {
262  nibble |= 2;
263  delta -= step;
264  }
265  step >>= 1;
266  if (delta >= step) {
267  nibble |= 1;
268  delta -= step;
269  }
270  diff -= delta;
271 
272  if (nibble & 8)
273  c->prev_sample -= diff;
274  else
275  c->prev_sample += diff;
276 
277  c->prev_sample = av_clip_int16(c->prev_sample);
278  c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
279 
280  return nibble;
281 }
282 
284  int16_t sample)
285 {
286  int predictor, nibble, bias;
287 
288  predictor = (((c->sample1) * (c->coeff1)) +
289  (( c->sample2) * (c->coeff2))) / 64;
290 
291  nibble = sample - predictor;
292  if (nibble >= 0)
293  bias = c->idelta / 2;
294  else
295  bias = -c->idelta / 2;
296 
297  nibble = (nibble + bias) / c->idelta;
298  nibble = av_clip_intp2(nibble, 3) & 0x0F;
299 
300  predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
301 
302  c->sample2 = c->sample1;
303  c->sample1 = av_clip_int16(predictor);
304 
305  c->idelta = (ff_adpcm_AdaptationTable[nibble] * c->idelta) >> 8;
306  if (c->idelta < 16)
307  c->idelta = 16;
308 
309  return nibble;
310 }
311 
313  int16_t sample)
314 {
315  int nibble, delta;
316 
317  if (!c->step) {
318  c->predictor = 0;
319  c->step = 127;
320  }
321 
322  delta = sample - c->predictor;
323 
324  nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;
325 
326  c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
327  c->predictor = av_clip_int16(c->predictor);
328  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
329  c->step = av_clip(c->step, 127, 24576);
330 
331  return nibble;
332 }
333 
335  const int16_t *samples, uint8_t *dst,
336  ADPCMChannelStatus *c, int n, int stride)
337 {
338  //FIXME 6% faster if frontier is a compile-time constant
339  ADPCMEncodeContext *s = avctx->priv_data;
340  const int frontier = 1 << avctx->trellis;
341  const int version = avctx->codec->id;
342  TrellisPath *paths = s->paths, *p;
343  TrellisNode *node_buf = s->node_buf;
344  TrellisNode **nodep_buf = s->nodep_buf;
345  TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
346  TrellisNode **nodes_next = nodep_buf + frontier;
347  int pathn = 0, froze = -1, i, j, k, generation = 0;
348  uint8_t *hash = s->trellis_hash;
349  memset(hash, 0xff, 65536 * sizeof(*hash));
350 
351  memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
352  nodes[0] = node_buf + frontier;
353  nodes[0]->ssd = 0;
354  nodes[0]->path = 0;
355  nodes[0]->step = c->step_index;
356  nodes[0]->sample1 = c->sample1;
357  nodes[0]->sample2 = c->sample2;
362  nodes[0]->sample1 = c->prev_sample;
364  nodes[0]->step = c->idelta;
366  if (c->step == 0) {
367  nodes[0]->step = 127;
368  nodes[0]->sample1 = 0;
369  } else {
370  nodes[0]->step = c->step;
371  nodes[0]->sample1 = c->predictor;
372  }
373  }
374 
375  for (i = 0; i < n; i++) {
376  TrellisNode *t = node_buf + frontier*(i&1);
377  TrellisNode **u;
378  int sample = samples[i * stride];
379  int heap_pos = 0;
380  memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
381  for (j = 0; j < frontier && nodes[j]; j++) {
382  // higher j have higher ssd already, so they're likely
383  // to yield a suboptimal next sample too
384  const int range = (j < frontier / 2) ? 1 : 0;
385  const int step = nodes[j]->step;
386  int nidx;
387  if (version == AV_CODEC_ID_ADPCM_MS) {
388  const int predictor = ((nodes[j]->sample1 * c->coeff1) +
389  (nodes[j]->sample2 * c->coeff2)) / 64;
390  const int div = (sample - predictor) / step;
391  const int nmin = av_clip(div-range, -8, 6);
392  const int nmax = av_clip(div+range, -7, 7);
393  for (nidx = nmin; nidx <= nmax; nidx++) {
394  const int nibble = nidx & 0xf;
395  int dec_sample = predictor + nidx * step;
396 #define STORE_NODE(NAME, STEP_INDEX)\
397  int d;\
398  uint32_t ssd;\
399  int pos;\
400  TrellisNode *u;\
401  uint8_t *h;\
402  dec_sample = av_clip_int16(dec_sample);\
403  d = sample - dec_sample;\
404  ssd = nodes[j]->ssd + d*(unsigned)d;\
405  /* Check for wraparound, skip such samples completely. \
406  * Note, changing ssd to a 64 bit variable would be \
407  * simpler, avoiding this check, but it's slower on \
408  * x86 32 bit at the moment. */\
409  if (ssd < nodes[j]->ssd)\
410  goto next_##NAME;\
411  /* Collapse any two states with the same previous sample value. \
412  * One could also distinguish states by step and by 2nd to last
413  * sample, but the effects of that are negligible.
414  * Since nodes in the previous generation are iterated
415  * through a heap, they're roughly ordered from better to
416  * worse, but not strictly ordered. Therefore, an earlier
417  * node with the same sample value is better in most cases
418  * (and thus the current is skipped), but not strictly
419  * in all cases. Only skipping samples where ssd >=
420  * ssd of the earlier node with the same sample gives
421  * slightly worse quality, though, for some reason. */ \
422  h = &hash[(uint16_t) dec_sample];\
423  if (*h == generation)\
424  goto next_##NAME;\
425  if (heap_pos < frontier) {\
426  pos = heap_pos++;\
427  } else {\
428  /* Try to replace one of the leaf nodes with the new \
429  * one, but try a different slot each time. */\
430  pos = (frontier >> 1) +\
431  (heap_pos & ((frontier >> 1) - 1));\
432  if (ssd > nodes_next[pos]->ssd)\
433  goto next_##NAME;\
434  heap_pos++;\
435  }\
436  *h = generation;\
437  u = nodes_next[pos];\
438  if (!u) {\
439  av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
440  u = t++;\
441  nodes_next[pos] = u;\
442  u->path = pathn++;\
443  }\
444  u->ssd = ssd;\
445  u->step = STEP_INDEX;\
446  u->sample2 = nodes[j]->sample1;\
447  u->sample1 = dec_sample;\
448  paths[u->path].nibble = nibble;\
449  paths[u->path].prev = nodes[j]->path;\
450  /* Sift the newly inserted node up in the heap to \
451  * restore the heap property. */\
452  while (pos > 0) {\
453  int parent = (pos - 1) >> 1;\
454  if (nodes_next[parent]->ssd <= ssd)\
455  break;\
456  FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
457  pos = parent;\
458  }\
459  next_##NAME:;
460  STORE_NODE(ms, FFMAX(16,
461  (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
462  }
463  } else if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
467 #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
468  const int predictor = nodes[j]->sample1;\
469  const int div = (sample - predictor) * 4 / STEP_TABLE;\
470  int nmin = av_clip(div - range, -7, 6);\
471  int nmax = av_clip(div + range, -6, 7);\
472  if (nmin <= 0)\
473  nmin--; /* distinguish -0 from +0 */\
474  if (nmax < 0)\
475  nmax--;\
476  for (nidx = nmin; nidx <= nmax; nidx++) {\
477  const int nibble = nidx < 0 ? 7 - nidx : nidx;\
478  int dec_sample = predictor +\
479  (STEP_TABLE *\
480  ff_adpcm_yamaha_difflookup[nibble]) / 8;\
481  STORE_NODE(NAME, STEP_INDEX);\
482  }
484  av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
485  } else { //AV_CODEC_ID_ADPCM_YAMAHA
486  LOOP_NODES(yamaha, step,
487  av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
488  127, 24576));
489 #undef LOOP_NODES
490 #undef STORE_NODE
491  }
492  }
493 
494  u = nodes;
495  nodes = nodes_next;
496  nodes_next = u;
497 
498  generation++;
499  if (generation == 255) {
500  memset(hash, 0xff, 65536 * sizeof(*hash));
501  generation = 0;
502  }
503 
504  // prevent overflow
505  if (nodes[0]->ssd > (1 << 28)) {
506  for (j = 1; j < frontier && nodes[j]; j++)
507  nodes[j]->ssd -= nodes[0]->ssd;
508  nodes[0]->ssd = 0;
509  }
510 
511  // merge old paths to save memory
512  if (i == froze + FREEZE_INTERVAL) {
513  p = &paths[nodes[0]->path];
514  for (k = i; k > froze; k--) {
515  dst[k] = p->nibble;
516  p = &paths[p->prev];
517  }
518  froze = i;
519  pathn = 0;
520  // other nodes might use paths that don't coincide with the frozen one.
521  // checking which nodes do so is too slow, so just kill them all.
522  // this also slightly improves quality, but I don't know why.
523  memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
524  }
525  }
526 
527  p = &paths[nodes[0]->path];
528  for (i = n - 1; i > froze; i--) {
529  dst[i] = p->nibble;
530  p = &paths[p->prev];
531  }
532 
533  c->predictor = nodes[0]->sample1;
534  c->sample1 = nodes[0]->sample1;
535  c->sample2 = nodes[0]->sample2;
536  c->step_index = nodes[0]->step;
537  c->step = nodes[0]->step;
538  c->idelta = nodes[0]->step;
539 }
540 
541 #if CONFIG_ADPCM_ARGO_ENCODER
542 static inline int adpcm_argo_compress_nibble(const ADPCMChannelStatus *cs, int16_t s,
543  int shift, int flag)
544 {
545  int nibble;
546 
547  if (flag)
548  nibble = 4 * s - 8 * cs->sample1 + 4 * cs->sample2;
549  else
550  nibble = 4 * s - 4 * cs->sample1;
551 
552  return (nibble >> shift) & 0x0F;
553 }
554 
555 static int64_t adpcm_argo_compress_block(ADPCMChannelStatus *cs, PutBitContext *pb,
556  const int16_t *samples, int nsamples,
557  int shift, int flag)
558 {
559  int64_t error = 0;
560 
561  if (pb) {
562  put_bits(pb, 4, shift - 2);
563  put_bits(pb, 1, 0);
564  put_bits(pb, 1, !!flag);
565  put_bits(pb, 2, 0);
566  }
567 
568  for (int n = 0; n < nsamples; n++) {
569  /* Compress the nibble, then expand it to see how much precision we've lost. */
570  int nibble = adpcm_argo_compress_nibble(cs, samples[n], shift, flag);
571  int16_t sample = ff_adpcm_argo_expand_nibble(cs, nibble, shift, flag);
572 
573  error += abs(samples[n] - sample);
574 
575  if (pb)
576  put_bits(pb, 4, nibble);
577  }
578 
579  return error;
580 }
581 #endif
582 
583 static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
584  const AVFrame *frame, int *got_packet_ptr)
585 {
586  int st, pkt_size, ret;
587  const int16_t *samples;
588  const int16_t *const *samples_p;
589  uint8_t *dst;
590  ADPCMEncodeContext *c = avctx->priv_data;
591  int channels = avctx->ch_layout.nb_channels;
592 
593  samples = (const int16_t *)frame->data[0];
594  samples_p = (const int16_t *const *)frame->extended_data;
595  st = channels == 2;
596 
597  if (avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_SSI ||
601  pkt_size = (frame->nb_samples * channels + 1) / 2;
602  else
603  pkt_size = avctx->block_align;
604  if ((ret = ff_get_encode_buffer(avctx, avpkt, pkt_size, 0)) < 0)
605  return ret;
606  dst = avpkt->data;
607 
608  switch(avctx->codec->id) {
609  CASE(ADPCM_IMA_WAV,
610  int blocks = (frame->nb_samples - 1) / 8;
611 
612  for (int ch = 0; ch < channels; ch++) {
613  ADPCMChannelStatus *status = &c->status[ch];
614  status->prev_sample = samples_p[ch][0];
615  /* status->step_index = 0;
616  XXX: not sure how to init the state machine */
617  bytestream_put_le16(&dst, status->prev_sample);
618  *dst++ = status->step_index;
619  *dst++ = 0; /* unknown */
620  }
621 
622  /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
623  if (avctx->trellis > 0) {
624  uint8_t *buf;
625  if (!FF_ALLOC_TYPED_ARRAY(buf, channels * blocks * 8))
626  return AVERROR(ENOMEM);
627  for (int ch = 0; ch < channels; ch++) {
628  adpcm_compress_trellis(avctx, &samples_p[ch][1],
629  buf + ch * blocks * 8, &c->status[ch],
630  blocks * 8, 1);
631  }
632  for (int i = 0; i < blocks; i++) {
633  for (int ch = 0; ch < channels; ch++) {
634  uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
635  for (int j = 0; j < 8; j += 2)
636  *dst++ = buf1[j] | (buf1[j + 1] << 4);
637  }
638  }
639  av_free(buf);
640  } else {
641  for (int i = 0; i < blocks; i++) {
642  for (int ch = 0; ch < channels; ch++) {
643  ADPCMChannelStatus *status = &c->status[ch];
644  const int16_t *smp = &samples_p[ch][1 + i * 8];
645  for (int j = 0; j < 8; j += 2) {
646  uint8_t v = adpcm_ima_compress_sample(status, smp[j ]);
647  v |= adpcm_ima_compress_sample(status, smp[j + 1]) << 4;
648  *dst++ = v;
649  }
650  }
651  }
652  }
653  ) /* End of CASE */
654  CASE(ADPCM_IMA_QT,
655  PutBitContext pb;
656  init_put_bits(&pb, dst, pkt_size);
657 
658  for (int ch = 0; ch < channels; ch++) {
659  ADPCMChannelStatus *status = &c->status[ch];
660  put_bits(&pb, 9, (status->prev_sample & 0xFFFF) >> 7);
661  put_bits(&pb, 7, status->step_index);
662  if (avctx->trellis > 0) {
663  uint8_t buf[64];
664  adpcm_compress_trellis(avctx, &samples_p[ch][0], buf, status,
665  64, 1);
666  for (int i = 0; i < 64; i++)
667  put_bits(&pb, 4, buf[i ^ 1]);
668  status->prev_sample = status->predictor;
669  } else {
670  for (int i = 0; i < 64; i += 2) {
671  int t1, t2;
672  t1 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i ]);
673  t2 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i + 1]);
674  put_bits(&pb, 4, t2);
675  put_bits(&pb, 4, t1);
676  }
677  }
678  }
679 
680  flush_put_bits(&pb);
681  ) /* End of CASE */
682  CASE(ADPCM_IMA_SSI,
683  PutBitContext pb;
684  init_put_bits(&pb, dst, pkt_size);
685 
686  av_assert0(avctx->trellis == 0);
687 
688  for (int i = 0; i < frame->nb_samples; i++) {
689  for (int ch = 0; ch < channels; ch++) {
690  put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
691  }
692  }
693 
694  flush_put_bits(&pb);
695  ) /* End of CASE */
696  CASE(ADPCM_IMA_ALP,
697  PutBitContext pb;
698  init_put_bits(&pb, dst, pkt_size);
699 
700  av_assert0(avctx->trellis == 0);
701 
702  for (int n = frame->nb_samples / 2; n > 0; n--) {
703  for (int ch = 0; ch < channels; ch++) {
704  put_bits(&pb, 4, adpcm_ima_alp_compress_sample(c->status + ch, *samples++));
705  put_bits(&pb, 4, adpcm_ima_alp_compress_sample(c->status + ch, samples[st]));
706  }
707  samples += channels;
708  }
709 
710  flush_put_bits(&pb);
711  ) /* End of CASE */
712  CASE(ADPCM_SWF,
713  const int n = frame->nb_samples - 1;
714  PutBitContext pb;
715  init_put_bits(&pb, dst, pkt_size);
716 
717  /* NB: This is safe as we don't have AV_CODEC_CAP_SMALL_LAST_FRAME. */
718  av_assert0(n == 4095);
719 
720  // store AdpcmCodeSize
721  put_bits(&pb, 2, 2); // set 4-bit flash adpcm format
722 
723  // init the encoder state
724  for (int i = 0; i < channels; i++) {
725  // clip step so it fits 6 bits
726  c->status[i].step_index = av_clip_uintp2(c->status[i].step_index, 6);
727  put_sbits(&pb, 16, samples[i]);
728  put_bits(&pb, 6, c->status[i].step_index);
729  c->status[i].prev_sample = samples[i];
730  }
731 
732  if (avctx->trellis > 0) {
733  uint8_t buf[8190 /* = 2 * n */];
735  &c->status[0], n, channels);
736  if (channels == 2)
738  buf + n, &c->status[1], n,
739  channels);
740  for (int i = 0; i < n; i++) {
741  put_bits(&pb, 4, buf[i]);
742  if (channels == 2)
743  put_bits(&pb, 4, buf[n + i]);
744  }
745  } else {
746  for (int i = 1; i < frame->nb_samples; i++) {
747  put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
748  samples[channels * i]));
749  if (channels == 2)
750  put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
751  samples[2 * i + 1]));
752  }
753  }
754  flush_put_bits(&pb);
755  ) /* End of CASE */
756  CASE(ADPCM_MS,
757  for (int i = 0; i < channels; i++) {
758  int predictor = 0;
759  *dst++ = predictor;
760  c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
761  c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
762  }
763  for (int i = 0; i < channels; i++) {
764  if (c->status[i].idelta < 16)
765  c->status[i].idelta = 16;
766  bytestream_put_le16(&dst, c->status[i].idelta);
767  }
768  for (int i = 0; i < channels; i++)
769  c->status[i].sample2= *samples++;
770  for (int i = 0; i < channels; i++) {
771  c->status[i].sample1 = *samples++;
772  bytestream_put_le16(&dst, c->status[i].sample1);
773  }
774  for (int i = 0; i < channels; i++)
775  bytestream_put_le16(&dst, c->status[i].sample2);
776 
777  if (avctx->trellis > 0) {
778  const int n = avctx->block_align - 7 * channels;
779  uint8_t *buf = av_malloc(2 * n);
780  if (!buf)
781  return AVERROR(ENOMEM);
782  if (channels == 1) {
783  adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
784  channels);
785  for (int i = 0; i < n; i += 2)
786  *dst++ = (buf[i] << 4) | buf[i + 1];
787  } else {
788  adpcm_compress_trellis(avctx, samples, buf,
789  &c->status[0], n, channels);
790  adpcm_compress_trellis(avctx, samples + 1, buf + n,
791  &c->status[1], n, channels);
792  for (int i = 0; i < n; i++)
793  *dst++ = (buf[i] << 4) | buf[n + i];
794  }
795  av_free(buf);
796  } else {
797  for (int i = 7 * channels; i < avctx->block_align; i++) {
798  int nibble;
799  nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
800  nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
801  *dst++ = nibble;
802  }
803  }
804  ) /* End of CASE */
805  CASE(ADPCM_YAMAHA,
806  int n = frame->nb_samples / 2;
807  if (avctx->trellis > 0) {
808  uint8_t *buf = av_malloc(2 * n * 2);
809  if (!buf)
810  return AVERROR(ENOMEM);
811  n *= 2;
812  if (channels == 1) {
813  adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
814  channels);
815  for (int i = 0; i < n; i += 2)
816  *dst++ = buf[i] | (buf[i + 1] << 4);
817  } else {
818  adpcm_compress_trellis(avctx, samples, buf,
819  &c->status[0], n, channels);
820  adpcm_compress_trellis(avctx, samples + 1, buf + n,
821  &c->status[1], n, channels);
822  for (int i = 0; i < n; i++)
823  *dst++ = buf[i] | (buf[n + i] << 4);
824  }
825  av_free(buf);
826  } else
827  for (n *= channels; n > 0; n--) {
828  int nibble;
829  nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
830  nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
831  *dst++ = nibble;
832  }
833  ) /* End of CASE */
834  CASE(ADPCM_IMA_APM,
835  PutBitContext pb;
836  init_put_bits(&pb, dst, pkt_size);
837 
838  av_assert0(avctx->trellis == 0);
839 
840  for (int n = frame->nb_samples / 2; n > 0; n--) {
841  for (int ch = 0; ch < channels; ch++) {
842  put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
843  put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, samples[st]));
844  }
845  samples += channels;
846  }
847 
848  flush_put_bits(&pb);
849  ) /* End of CASE */
850  CASE(ADPCM_IMA_AMV,
851  av_assert0(channels == 1);
852 
853  c->status[0].prev_sample = *samples;
854  bytestream_put_le16(&dst, c->status[0].prev_sample);
855  bytestream_put_byte(&dst, c->status[0].step_index);
856  bytestream_put_byte(&dst, 0);
857  bytestream_put_le32(&dst, avctx->frame_size);
858 
859  if (avctx->trellis > 0) {
860  const int n = frame->nb_samples >> 1;
861  uint8_t *buf = av_malloc(2 * n);
862 
863  if (!buf)
864  return AVERROR(ENOMEM);
865 
866  adpcm_compress_trellis(avctx, samples, buf, &c->status[0], 2 * n, channels);
867  for (int i = 0; i < n; i++)
868  bytestream_put_byte(&dst, (buf[2 * i] << 4) | buf[2 * i + 1]);
869 
870  samples += 2 * n;
871  av_free(buf);
872  } else for (int n = frame->nb_samples >> 1; n > 0; n--) {
873  int nibble;
874  nibble = adpcm_ima_compress_sample(&c->status[0], *samples++) << 4;
875  nibble |= adpcm_ima_compress_sample(&c->status[0], *samples++) & 0x0F;
876  bytestream_put_byte(&dst, nibble);
877  }
878 
879  if (avctx->frame_size & 1) {
880  int nibble = adpcm_ima_compress_sample(&c->status[0], *samples++) << 4;
881  bytestream_put_byte(&dst, nibble);
882  }
883  ) /* End of CASE */
884  CASE(ADPCM_ARGO,
885  PutBitContext pb;
886  init_put_bits(&pb, dst, pkt_size);
887 
888  av_assert0(frame->nb_samples == 32);
889 
890  for (int ch = 0; ch < channels; ch++) {
891  int64_t error = INT64_MAX, tmperr = INT64_MAX;
892  int shift = 2, flag = 0;
893  int saved1 = c->status[ch].sample1;
894  int saved2 = c->status[ch].sample2;
895 
896  /* Find the optimal coefficients, bail early if we find a perfect result. */
897  for (int s = 2; s < 18 && tmperr != 0; s++) {
898  for (int f = 0; f < 2 && tmperr != 0; f++) {
899  c->status[ch].sample1 = saved1;
900  c->status[ch].sample2 = saved2;
901  tmperr = adpcm_argo_compress_block(c->status + ch, NULL, samples_p[ch],
902  frame->nb_samples, s, f);
903  if (tmperr < error) {
904  shift = s;
905  flag = f;
906  error = tmperr;
907  }
908  }
909  }
910 
911  /* Now actually do the encode. */
912  c->status[ch].sample1 = saved1;
913  c->status[ch].sample2 = saved2;
914  adpcm_argo_compress_block(c->status + ch, &pb, samples_p[ch],
915  frame->nb_samples, shift, flag);
916  }
917 
918  flush_put_bits(&pb);
919  ) /* End of CASE */
920  CASE(ADPCM_IMA_WS,
921  PutBitContext pb;
922  init_put_bits(&pb, dst, pkt_size);
923 
924  av_assert0(avctx->trellis == 0);
925  for (int n = frame->nb_samples / 2; n > 0; n--) {
926  /* stereo: 1 byte (2 samples) for left, 1 byte for right */
927  for (int ch = 0; ch < channels; ch++) {
928  int t1, t2;
929  t1 = adpcm_ima_compress_sample(&c->status[ch], *samples++);
930  t2 = adpcm_ima_compress_sample(&c->status[ch], samples[st]);
931  put_bits(&pb, 4, t2);
932  put_bits(&pb, 4, t1);
933  }
934  samples += channels;
935  }
936  flush_put_bits(&pb);
937  ) /* End of CASE */
938  default:
939  return AVERROR(EINVAL);
940  }
941 
942  *got_packet_ptr = 1;
943  return 0;
944 }
945 
946 static const enum AVSampleFormat sample_fmts[] = {
948 };
949 
950 static const enum AVSampleFormat sample_fmts_p[] = {
952 };
953 
954 static const AVChannelLayout ch_layouts_mono_stereo[] = {
957  { 0 },
958 };
959 
960 static const AVOption options[] = {
961  {
962  .name = "block_size",
963  .help = "set the block size",
964  .offset = offsetof(ADPCMEncodeContext, block_size),
965  .type = AV_OPT_TYPE_INT,
966  .default_val = {.i64 = 1024},
967  .min = 32,
968  .max = 8192, /* Is this a reasonable upper limit? */
970  },
971  { NULL }
972 };
973 
/* AVClass exposing the private "block_size" option of the ADPCM encoders. */
static const AVClass adpcm_encoder_class = {
    .class_name = "ADPCM encoder",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
980 
/*
 * FFCodec definition helpers, mirroring the CASE() dispatch macros above:
 * ADPCM_ENCODER(codec, name, ...) expands to a full ff_<name>_encoder
 * definition only when CONFIG_<codec>_ENCODER is enabled; the _0 variant
 * swallows its arguments, the _1 variant emits the FFCodec.  The _3/_2
 * indirection evaluates the config macro before token-pasting.
 */
#define ADPCM_ENCODER_0(id_, name_, sample_fmts_, capabilities_, long_name_, ...)
#define ADPCM_ENCODER_1(id_, name_, sample_fmts_, capabilities_, long_name_, ...) \
const FFCodec ff_ ## name_ ## _encoder = { \
    .p.name = #name_, \
    CODEC_LONG_NAME(long_name_), \
    .p.type = AVMEDIA_TYPE_AUDIO, \
    .p.id = id_, \
    .p.capabilities = capabilities_ | AV_CODEC_CAP_DR1 | \
                      AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, \
    CODEC_SAMPLEFMTS_ARRAY(sample_fmts_), \
    .priv_data_size = sizeof(ADPCMEncodeContext), \
    .init = adpcm_encode_init, \
    FF_CODEC_ENCODE_CB(adpcm_encode_frame), \
    .close = adpcm_encode_close, \
    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, \
    __VA_ARGS__, \
};
#define ADPCM_ENCODER_2(enabled, codec_id, name, sample_fmts, capabilities, long_name, ...) \
    ADPCM_ENCODER_ ## enabled(codec_id, name, sample_fmts, capabilities, long_name, __VA_ARGS__)
#define ADPCM_ENCODER_3(config, codec_id, name, sample_fmts, capabilities, long_name, ...) \
    ADPCM_ENCODER_2(config, codec_id, name, sample_fmts, capabilities, long_name, __VA_ARGS__)
#define ADPCM_ENCODER(codec, name, sample_fmts, capabilities, long_name, ...) \
    ADPCM_ENCODER_3(CONFIG_ ## codec ## _ENCODER, AV_CODEC_ID_ ## codec, \
                    name, sample_fmts, capabilities, long_name, __VA_ARGS__)
1005 
1006 #define MONO_STEREO CODEC_CH_LAYOUTS_ARRAY(ch_layouts_mono_stereo)
1007 #define AVCLASS .p.priv_class = &adpcm_encoder_class
1008 
1009 ADPCM_ENCODER(ADPCM_ARGO, adpcm_argo, sample_fmts_p, 0, "ADPCM Argonaut Games", MONO_STEREO)
1010 ADPCM_ENCODER(ADPCM_IMA_AMV, adpcm_ima_amv, sample_fmts, 0, "ADPCM IMA AMV", CODEC_CH_LAYOUTS(AV_CHANNEL_LAYOUT_MONO), CODEC_SAMPLERATES(22050), AVCLASS)
1011 ADPCM_ENCODER(ADPCM_IMA_APM, adpcm_ima_apm, sample_fmts, AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Ubisoft APM", MONO_STEREO, AVCLASS)
1012 ADPCM_ENCODER(ADPCM_IMA_ALP, adpcm_ima_alp, sample_fmts, AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA High Voltage Software ALP", MONO_STEREO, AVCLASS)
1013 ADPCM_ENCODER(ADPCM_IMA_QT, adpcm_ima_qt, sample_fmts_p, 0, "ADPCM IMA QuickTime", MONO_STEREO)
1014 ADPCM_ENCODER(ADPCM_IMA_SSI, adpcm_ima_ssi, sample_fmts, AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Simon & Schuster Interactive", MONO_STEREO, AVCLASS)
1015 ADPCM_ENCODER(ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, 0, "ADPCM IMA WAV", MONO_STEREO, AVCLASS)
1016 ADPCM_ENCODER(ADPCM_IMA_WS, adpcm_ima_ws, sample_fmts, AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Westwood", MONO_STEREO, AVCLASS)
1017 ADPCM_ENCODER(ADPCM_MS, adpcm_ms, sample_fmts, 0, "ADPCM Microsoft", MONO_STEREO, AVCLASS)
1018 ADPCM_ENCODER(ADPCM_SWF, adpcm_swf, sample_fmts, 0, "ADPCM Shockwave Flash", MONO_STEREO, CODEC_SAMPLERATES(11025, 22050, 44100))
1019 ADPCM_ENCODER(ADPCM_YAMAHA, adpcm_yamaha, sample_fmts, 0, "ADPCM Yamaha", MONO_STEREO, AVCLASS)
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1059
adpcm_yamaha_compress_sample
static uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:312
AV_CODEC_ID_ADPCM_MS
@ AV_CODEC_ID_ADPCM_MS
Definition: codec_id.h:383
AV_CODEC_ID_ADPCM_IMA_QT
@ AV_CODEC_ID_ADPCM_IMA_QT
Definition: codec_id.h:377
av_clip
#define av_clip
Definition: common.h:100
TrellisNode::sample1
int sample1
Definition: adpcmenc.c:64
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
LOOP_NODES
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)
AV_CHANNEL_LAYOUT_STEREO
#define AV_CHANNEL_LAYOUT_STEREO
Definition: channel_layout.h:395
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:931
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
ff_adpcm_AdaptationTable
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:54
adpcm_encoder_class
static const AVClass adpcm_encoder_class
Definition: adpcmenc.c:959
TrellisNode::path
int path
Definition: adpcmenc.c:63
int64_t
long long int64_t
Definition: coverity.c:34
put_sbits
static void put_sbits(PutBitContext *pb, int n, int32_t value)
Definition: put_bits.h:291
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
AVPacket::data
uint8_t * data
Definition: packet.h:588
TrellisNode::sample2
int sample2
Definition: adpcmenc.c:65
AVOption
AVOption.
Definition: opt.h:429
encode.h
TrellisNode::step
int step
Definition: adpcmenc.c:66
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
hash
static uint8_t hash[HASH_SIZE]
Definition: movenc.c:58
ADPCMEncodeContext::nodep_buf
TrellisNode ** nodep_buf
Definition: adpcmenc.c:76
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
STORE_NODE
#define STORE_NODE(NAME, STEP_INDEX)
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1047
AVCLASS
#define AVCLASS
Definition: adpcmenc.c:992
TrellisNode
Definition: adpcmenc.c:61
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:77
ADPCMEncodeContext::status
ADPCMChannelStatus status[6]
Definition: adpcmenc.c:73
ADPCMEncodeContext::paths
TrellisPath * paths
Definition: adpcmenc.c:74
ADPCMEncodeContext::node_buf
TrellisNode * node_buf
Definition: adpcmenc.c:75
AV_OPT_FLAG_AUDIO_PARAM
#define AV_OPT_FLAG_AUDIO_PARAM
Definition: opt.h:357
av_get_bits_per_sample
int av_get_bits_per_sample(enum AVCodecID codec_id)
Return codec bits per sample.
Definition: utils.c:549
TrellisNode::ssd
uint32_t ssd
Definition: adpcmenc.c:62
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
ch_layouts_mono_stereo
static const AVChannelLayout ch_layouts_mono_stereo[]
Definition: adpcmenc.c:939
av_cold
#define av_cold
Definition: attributes.h:106
options
static const AVOption options[]
Definition: adpcmenc.c:945
ADPCMChannelStatus::sample1
int sample1
Definition: adpcm.h:39
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
adpcm_data.h
MONO_STEREO
#define MONO_STEREO
Definition: adpcmenc.c:991
s
#define s(width, name)
Definition: cbs_vp9.c:198
TrellisPath::nibble
int nibble
Definition: adpcmenc.c:57
ADPCMEncodeContext::block_size
int block_size
Definition: adpcmenc.c:71
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
channels
channels
Definition: aptx.h:31
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
PutBitContext
Definition: put_bits.h:50
adpcm_compress_trellis
static void adpcm_compress_trellis(AVCodecContext *avctx, const int16_t *samples, uint8_t *dst, ADPCMChannelStatus *c, int n, int stride)
Definition: adpcmenc.c:334
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
ADPCM_ENCODER
#define ADPCM_ENCODER(codec, name, sample_fmts, capabilities, long_name,...)
Definition: adpcmenc.c:987
if
if(ret)
Definition: filter_design.txt:179
TrellisPath
Definition: aaccoder.c:295
CODEC_CH_LAYOUTS
#define CODEC_CH_LAYOUTS(...)
Definition: codec_internal.h:380
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
av_clip_int16
#define av_clip_int16
Definition: common.h:115
NULL
#define NULL
Definition: coverity.c:32
av_clip_intp2
#define av_clip_intp2
Definition: common.h:121
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_YAMAHA
Definition: codec_id.h:391
AV_CODEC_ID_ADPCM_IMA_WS
@ AV_CODEC_ID_ADPCM_IMA_WS
Definition: codec_id.h:381
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:116
AV_CODEC_ID_ADPCM_ARGO
@ AV_CODEC_ID_ADPCM_ARGO
Definition: codec_id.h:419
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
options
Definition: swscale.c:43
AV_CODEC_ID_ADPCM_IMA_AMV
@ AV_CODEC_ID_ADPCM_IMA_AMV
Definition: codec_id.h:396
abs
#define abs(x)
Definition: cuda_runtime.h:35
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1313
ADPCMChannelStatus::sample2
int sample2
Definition: adpcm.h:40
AV_OPT_FLAG_ENCODING_PARAM
#define AV_OPT_FLAG_ENCODING_PARAM
A generic parameter which can be set by the user for muxing or encoding.
Definition: opt.h:352
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
adpcm_ima_alp_compress_sample
static uint8_t adpcm_ima_alp_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:227
adpcm.h
ff_adpcm_yamaha_difflookup
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:74
f
f
Definition: af_crystalizer.c:122
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
codec_internal.h
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
AV_CODEC_ID_ADPCM_IMA_ALP
@ AV_CODEC_ID_ADPCM_IMA_ALP
Definition: codec_id.h:423
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_adpcm_step_table
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:39
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
sample
#define sample
Definition: flacdsp_template.c:44
AV_CODEC_ID_ADPCM_SWF
@ AV_CODEC_ID_ADPCM_SWF
Definition: codec_id.h:390
range
enum AVColorRange range
Definition: mediacodec_wrapper.c:2594
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
version
version
Definition: libkvazaar.c:313
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
FREEZE_INTERVAL
#define FREEZE_INTERVAL
Definition: adpcmenc.c:80
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:64
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1554
ff_adpcm_AdaptCoeff1
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:60
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
ff_adpcm_AdaptCoeff2
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:65
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:522
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
ADPCMEncodeContext::trellis_hash
uint8_t * trellis_hash
Definition: adpcmenc.c:77
delta
float delta
Definition: vorbis_enc_data.h:430
adpcm_ima_compress_sample
static uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:214
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_CODEC_ID_ADPCM_IMA_APM
@ AV_CODEC_ID_ADPCM_IMA_APM
Definition: codec_id.h:422
TrellisPath::prev
int prev
Definition: aaccoder.c:297
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
ff_adpcm_argo_expand_nibble
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:968
ff_adpcm_index_table
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:30
adpcm_encode_frame
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Definition: adpcmenc.c:568
avcodec.h
ret
ret
Definition: filter_design.txt:187
AVCodecContext::block_align
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
Definition: avcodec.h:1065
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ADPCMEncodeContext
Definition: adpcmenc.c:69
CASE
#define CASE(codec,...)
Definition: adpcmenc.c:53
av_malloc
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:98
sample_fmts_p
static enum AVSampleFormat sample_fmts_p[]
Definition: adpcmenc.c:935
flag
#define flag(name)
Definition: cbs_av1.c:496
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
U
#define U(x)
Definition: vpx_arith.h:37
AVCodecContext
main external API structure.
Definition: avcodec.h:439
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
ima
#define ima
Definition: vf_colormatrix.c:108
ff_get_encode_buffer
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
Definition: encode.c:105
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
ff_adpcm_yamaha_indexscale
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:69
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
mem.h
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
AV_CODEC_ID_ADPCM_IMA_SSI
@ AV_CODEC_ID_ADPCM_IMA_SSI
Definition: codec_id.h:420
adpcm_encode_init
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
Definition: adpcmenc.c:82
AV_CHANNEL_LAYOUT_MONO
#define AV_CHANNEL_LAYOUT_MONO
Definition: channel_layout.h:394
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_CODEC_ID_ADPCM_IMA_WAV
@ AV_CODEC_ID_ADPCM_IMA_WAV
Definition: codec_id.h:378
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
adpcm_ima_qt_compress_sample
static uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:246
bytestream.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
stride
#define stride
Definition: h264pred_template.c:536
AV_CODEC_CAP_SMALL_LAST_FRAME
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
Definition: codec.h:81
put_bits.h
ADPCMChannelStatus
Definition: adpcm.h:31
adpcm_ms_compress_sample
static uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c, int16_t sample)
Definition: adpcmenc.c:283
CODEC_SAMPLERATES
#define CODEC_SAMPLERATES(...)
Definition: codec_internal.h:383
adpcm_encode_close
static av_cold int adpcm_encode_close(AVCodecContext *avctx)
Definition: adpcmenc.c:202