FFmpeg
adpcm.c
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  * Sanyo LD-ADPCM decoder by Peter Ross (pross@xvid.org)
21  *
22  * This file is part of FFmpeg.
23  *
24  * FFmpeg is free software; you can redistribute it and/or
25  * modify it under the terms of the GNU Lesser General Public
26  * License as published by the Free Software Foundation; either
27  * version 2.1 of the License, or (at your option) any later version.
28  *
29  * FFmpeg is distributed in the hope that it will be useful,
30  * but WITHOUT ANY WARRANTY; without even the implied warranty of
31  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
32  * Lesser General Public License for more details.
33  *
34  * You should have received a copy of the GNU Lesser General Public
35  * License along with FFmpeg; if not, write to the Free Software
36  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
37  */
38 
39 #include "config_components.h"
40 
41 #include "avcodec.h"
42 #include "get_bits.h"
43 #include "bytestream.h"
44 #include "adpcm.h"
45 #include "adpcm_data.h"
46 #include "codec_internal.h"
47 #include "decode.h"
48 
49 /**
50  * @file
51  * ADPCM decoders
52  * Features and limitations:
53  *
54  * Reference documents:
55  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
56  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
57  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
58  * http://openquicktime.sourceforge.net/
59  * XAnim sources (xa_codec.c) http://xanim.polter.net/
60  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
61  * SoX source code http://sox.sourceforge.net/
62  *
63  * CD-ROM XA:
64  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
65  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
66  * readstr http://www.geocities.co.jp/Playtown/2004/
67  */
68 
69 #define CASE_0(codec_id, ...)
70 #define CASE_1(codec_id, ...) \
71  case codec_id: \
72  { __VA_ARGS__ } \
73  break;
74 #define CASE_2(enabled, codec_id, ...) \
75  CASE_ ## enabled(codec_id, __VA_ARGS__)
76 #define CASE_3(config, codec_id, ...) \
77  CASE_2(config, codec_id, __VA_ARGS__)
78 #define CASE(codec, ...) \
79  CASE_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, __VA_ARGS__)
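/* For example, with the IMA QT decoder enabled, CONFIG_ADPCM_IMA_QT_DECODER is 1,
 * so CASE(ADPCM_IMA_QT, body) goes through CASE_3 and CASE_2 (the extra level lets
 * the CONFIG_ macro expand to its 0/1 value before token pasting) and becomes
 *     case AV_CODEC_ID_ADPCM_IMA_QT: { body } break;
 * With the decoder disabled it selects CASE_0 and expands to nothing, so the
 * whole case is compiled out of the switch statements below. */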
80 
81 /* These are for CD-ROM XA ADPCM */
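/* The (f0, f1) pairs below are prediction filter coefficients in signed 6-bit
 * fixed point; xa_decode() applies them as (s_1*f0 + s_2*f1 + 32) >> 6, so e.g.
 * { 115, -52 } corresponds to the filter (1.796875, -0.8125). */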
82 static const int8_t xa_adpcm_table[5][2] = {
83  { 0, 0 },
84  { 60, 0 },
85  { 115, -52 },
86  { 98, -55 },
87  { 122, -60 }
88 };
89 
90 static const int16_t afc_coeffs[2][16] = {
91  { 0, 2048, 0, 1024, 4096, 3584, 3072, 4608, 4200, 4800, 5120, 2048, 1024, -1024, -1024, -2048 },
92  { 0, 0, 2048, 1024, -2048, -1536, -1024, -2560, -2248, -2300, -3072, -2048, -1024, 1024, 0, 0 }
93 };
94 
95 static const int16_t ea_adpcm_table[] = {
96  0, 240, 460, 392,
97  0, 0, -208, -220,
98  0, 1, 3, 4,
99  7, 8, 10, 11,
100  0, -1, -3, -4
101 };
102 
103 /*
104  * Dumped from the binaries:
105  * - FantasticJourney.exe - 0x794D2, DGROUP:0x47A4D2
106  * - BigRaceUSA.exe - 0x9B8AA, DGROUP:0x49C4AA
107  * - Timeshock!.exe - 0x8506A, DGROUP:0x485C6A
108  */
109 static const int8_t ima_cunning_index_table[9] = {
110  -1, -1, -1, -1, 1, 2, 3, 4, -1
111 };
112 
113 /*
114  * Dumped from the binaries:
115  * - FantasticJourney.exe - 0x79458, DGROUP:0x47A458
116  * - BigRaceUSA.exe - 0x9B830, DGROUP:0x49C430
117  * - Timeshock!.exe - 0x84FF0, DGROUP:0x485BF0
118  */
119 static const int16_t ima_cunning_step_table[61] = {
120  1, 1, 1, 1, 2, 2, 3, 3, 4, 5,
121  6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
122  32, 40, 48, 56, 64, 80, 96, 112, 128, 160,
123  192, 224, 256, 320, 384, 448, 512, 640, 768, 896,
124  1024, 1280, 1536, 1792, 2048, 2560, 3072, 3584, 4096, 5120,
125  6144, 7168, 8192, 10240, 12288, 14336, 16384, 20480, 24576, 28672, 0
126 };
127 
128 static const int8_t adpcm_index_table2[4] = {
129  -1, 2,
130  -1, 2,
131 };
132 
133 static const int8_t adpcm_index_table3[8] = {
134  -1, -1, 1, 2,
135  -1, -1, 1, 2,
136 };
137 
138 static const int8_t adpcm_index_table5[32] = {
139  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
140  -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16,
141 };
142 
143 static const int8_t * const adpcm_index_tables[4] = {
144  &adpcm_index_table2[0],
145  &adpcm_index_table3[0],
 146  &ff_adpcm_index_table[0],
 147  &adpcm_index_table5[0],
148 };
149 
150 static const int16_t mtaf_stepsize[32][16] = {
151  { 1, 5, 9, 13, 16, 20, 24, 28,
152  -1, -5, -9, -13, -16, -20, -24, -28, },
153  { 2, 6, 11, 15, 20, 24, 29, 33,
154  -2, -6, -11, -15, -20, -24, -29, -33, },
155  { 2, 7, 13, 18, 23, 28, 34, 39,
156  -2, -7, -13, -18, -23, -28, -34, -39, },
157  { 3, 9, 15, 21, 28, 34, 40, 46,
158  -3, -9, -15, -21, -28, -34, -40, -46, },
159  { 3, 11, 18, 26, 33, 41, 48, 56,
160  -3, -11, -18, -26, -33, -41, -48, -56, },
161  { 4, 13, 22, 31, 40, 49, 58, 67,
162  -4, -13, -22, -31, -40, -49, -58, -67, },
163  { 5, 16, 26, 37, 48, 59, 69, 80,
164  -5, -16, -26, -37, -48, -59, -69, -80, },
165  { 6, 19, 31, 44, 57, 70, 82, 95,
166  -6, -19, -31, -44, -57, -70, -82, -95, },
167  { 7, 22, 38, 53, 68, 83, 99, 114,
168  -7, -22, -38, -53, -68, -83, -99, -114, },
169  { 9, 27, 45, 63, 81, 99, 117, 135,
170  -9, -27, -45, -63, -81, -99, -117, -135, },
171  { 10, 32, 53, 75, 96, 118, 139, 161,
172  -10, -32, -53, -75, -96, -118, -139, -161, },
173  { 12, 38, 64, 90, 115, 141, 167, 193,
174  -12, -38, -64, -90, -115, -141, -167, -193, },
175  { 15, 45, 76, 106, 137, 167, 198, 228,
176  -15, -45, -76, -106, -137, -167, -198, -228, },
177  { 18, 54, 91, 127, 164, 200, 237, 273,
178  -18, -54, -91, -127, -164, -200, -237, -273, },
179  { 21, 65, 108, 152, 195, 239, 282, 326,
180  -21, -65, -108, -152, -195, -239, -282, -326, },
181  { 25, 77, 129, 181, 232, 284, 336, 388,
182  -25, -77, -129, -181, -232, -284, -336, -388, },
183  { 30, 92, 153, 215, 276, 338, 399, 461,
184  -30, -92, -153, -215, -276, -338, -399, -461, },
185  { 36, 109, 183, 256, 329, 402, 476, 549,
186  -36, -109, -183, -256, -329, -402, -476, -549, },
187  { 43, 130, 218, 305, 392, 479, 567, 654,
188  -43, -130, -218, -305, -392, -479, -567, -654, },
189  { 52, 156, 260, 364, 468, 572, 676, 780,
190  -52, -156, -260, -364, -468, -572, -676, -780, },
191  { 62, 186, 310, 434, 558, 682, 806, 930,
192  -62, -186, -310, -434, -558, -682, -806, -930, },
193  { 73, 221, 368, 516, 663, 811, 958, 1106,
194  -73, -221, -368, -516, -663, -811, -958, -1106, },
195  { 87, 263, 439, 615, 790, 966, 1142, 1318,
196  -87, -263, -439, -615, -790, -966, -1142, -1318, },
197  { 104, 314, 523, 733, 942, 1152, 1361, 1571,
198  -104, -314, -523, -733, -942, -1152, -1361, -1571, },
199  { 124, 374, 623, 873, 1122, 1372, 1621, 1871,
200  -124, -374, -623, -873, -1122, -1372, -1621, -1871, },
201  { 148, 445, 743, 1040, 1337, 1634, 1932, 2229,
202  -148, -445, -743, -1040, -1337, -1634, -1932, -2229, },
203  { 177, 531, 885, 1239, 1593, 1947, 2301, 2655,
204  -177, -531, -885, -1239, -1593, -1947, -2301, -2655, },
205  { 210, 632, 1053, 1475, 1896, 2318, 2739, 3161,
206  -210, -632, -1053, -1475, -1896, -2318, -2739, -3161, },
207  { 251, 753, 1255, 1757, 2260, 2762, 3264, 3766,
208  -251, -753, -1255, -1757, -2260, -2762, -3264, -3766, },
209  { 299, 897, 1495, 2093, 2692, 3290, 3888, 4486,
210  -299, -897, -1495, -2093, -2692, -3290, -3888, -4486, },
211  { 356, 1068, 1781, 2493, 3206, 3918, 4631, 5343,
212  -356, -1068, -1781, -2493, -3206, -3918, -4631, -5343, },
213  { 424, 1273, 2121, 2970, 3819, 4668, 5516, 6365,
214  -424, -1273, -2121, -2970, -3819, -4668, -5516, -6365, },
215 };
216 
217 static const int16_t oki_step_table[49] = {
218  16, 17, 19, 21, 23, 25, 28, 31, 34, 37,
219  41, 45, 50, 55, 60, 66, 73, 80, 88, 97,
220  107, 118, 130, 143, 157, 173, 190, 209, 230, 253,
221  279, 307, 337, 371, 408, 449, 494, 544, 598, 658,
222  724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552
223 };
224 
 225 // padded to zero where table size is less than 16
226 static const int8_t swf_index_tables[4][16] = {
227  /*2*/ { -1, 2 },
228  /*3*/ { -1, -1, 2, 4 },
229  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
230  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
231 };
232 
233 static const int8_t zork_index_table[8] = {
234  -1, -1, -1, 1, 4, 7, 10, 12,
235 };
236 
237 static const int8_t mtf_index_table[16] = {
238  8, 6, 4, 2, -1, -1, -1, -1,
239  -1, -1, -1, -1, 2, 4, 6, 8,
240 };
241 
242 /* end of tables */
243 
 244 typedef struct ADPCMDecodeContext {
 245  ADPCMChannelStatus status[14];
 246  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
 247  int has_status; /**< Status flag. Reset to 0 after a flush. */
 248 } ADPCMDecodeContext;
 249 
250 static void adpcm_flush(AVCodecContext *avctx);
251 
 252 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
 253 {
254  ADPCMDecodeContext *c = avctx->priv_data;
255  unsigned int min_channels = 1;
256  unsigned int max_channels = 2;
257 
258  adpcm_flush(avctx);
259 
260  switch(avctx->codec->id) {
262  max_channels = 1;
263  break;
265  max_channels = 2;
266  break;
273  max_channels = 6;
274  break;
276  min_channels = 2;
277  max_channels = 8;
278  if (avctx->ch_layout.nb_channels & 1) {
279  avpriv_request_sample(avctx, "channel count %d", avctx->ch_layout.nb_channels);
280  return AVERROR_PATCHWELCOME;
281  }
282  break;
284  min_channels = 2;
285  break;
287  max_channels = 8;
288  if (avctx->ch_layout.nb_channels <= 0 ||
289  avctx->block_align % (16 * avctx->ch_layout.nb_channels))
290  return AVERROR_INVALIDDATA;
291  break;
295  max_channels = 14;
296  break;
297  }
298  if (avctx->ch_layout.nb_channels < min_channels ||
299  avctx->ch_layout.nb_channels > max_channels) {
300  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
301  return AVERROR(EINVAL);
302  }
303 
304  switch(avctx->codec->id) {
306  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
307  return AVERROR_INVALIDDATA;
308  break;
310  if (avctx->bits_per_coded_sample != 4 ||
311  avctx->block_align != 17 * avctx->ch_layout.nb_channels)
312  return AVERROR_INVALIDDATA;
313  break;
315  if (avctx->bits_per_coded_sample < 3 || avctx->bits_per_coded_sample > 5)
316  return AVERROR_INVALIDDATA;
317  break;
319  if (avctx->bits_per_coded_sample != 4)
320  return AVERROR_INVALIDDATA;
321  break;
323  if (avctx->bits_per_coded_sample != 8)
324  return AVERROR_INVALIDDATA;
325  break;
326  default:
327  break;
328  }
329 
330  switch (avctx->codec->id) {
354  break;
356  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
 357  AV_SAMPLE_FMT_S16;
 358  break;
360  avctx->sample_fmt = avctx->ch_layout.nb_channels > 2 ? AV_SAMPLE_FMT_S16P :
 361  AV_SAMPLE_FMT_S16;
 362  break;
363  default:
364  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
365  }
366  return 0;
367 }
368 
369 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
370 {
371  int delta, pred, step, add;
372 
373  pred = c->predictor;
374  delta = nibble & 7;
375  step = c->step;
376  add = (delta * 2 + 1) * step;
377  if (add < 0)
378  add = add + 7;
379 
380  if ((nibble & 8) == 0)
381  pred = av_clip(pred + (add >> 3), -32767, 32767);
382  else
383  pred = av_clip(pred - (add >> 3), -32767, 32767);
384 
385  switch (delta) {
386  case 7:
387  step *= 0x99;
388  break;
389  case 6:
390  c->step = av_clip(c->step * 2, 127, 24576);
391  c->predictor = pred;
392  return pred;
393  case 5:
394  step *= 0x66;
395  break;
396  case 4:
397  step *= 0x4d;
398  break;
399  default:
400  step *= 0x39;
401  break;
402  }
403 
404  if (step < 0)
405  step += 0x3f;
406 
407  c->step = step >> 6;
408  c->step = av_clip(c->step, 127, 24576);
409  c->predictor = pred;
410  return pred;
411 }
412 
413 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
414 {
415  int step_index;
416  int predictor;
417  int sign, delta, diff, step;
418 
419  step = ff_adpcm_step_table[c->step_index];
420  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
421  step_index = av_clip(step_index, 0, 88);
422 
423  sign = nibble & 8;
424  delta = nibble & 7;
 425  /* perform direct multiplication instead of the series of jumps proposed by
 426  * the reference ADPCM implementation, since modern CPUs can do the mults
 427  * quickly enough */
428  diff = ((2 * delta + 1) * step) >> shift;
429  predictor = c->predictor;
430  if (sign) predictor -= diff;
431  else predictor += diff;
432 
433  c->predictor = av_clip_int16(predictor);
434  c->step_index = step_index;
435 
436  return (int16_t)c->predictor;
437 }
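/* Worked example for adpcm_ima_expand_nibble() with shift == 3: at step_index 8
 * the step is 16, so nibble 0x5 (sign clear, delta 5) gives
 * diff = ((2*5 + 1) * 16) >> 3 = 22, matching the reference shift-and-add form
 * (step>>3) + step + (step>>2) = 2 + 16 + 4 = 22; the two forms can differ only
 * in the low bits discarded when step is not a multiple of 8. */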
438 
439 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
440 {
441  int step_index;
442  int predictor;
443  int sign, delta, diff, step;
444 
445  step = ff_adpcm_step_table[c->step_index];
446  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
447  step_index = av_clip(step_index, 0, 88);
448 
449  sign = nibble & 8;
450  delta = nibble & 7;
451  diff = (delta * step) >> shift;
452  predictor = c->predictor;
453  if (sign) predictor -= diff;
454  else predictor += diff;
455 
456  c->predictor = av_clip_int16(predictor);
457  c->step_index = step_index;
458 
459  return (int16_t)c->predictor;
460 }
461 
462 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
463 {
464  int step_index, step, delta, predictor;
465 
466  step = ff_adpcm_step_table[c->step_index];
467 
468  delta = step * (2 * nibble - 15);
469  predictor = c->predictor + delta;
470 
471  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
472  c->predictor = av_clip_int16(predictor >> 4);
473  c->step_index = av_clip(step_index, 0, 88);
474 
475  return (int16_t)c->predictor;
476 }
477 
478 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
479 {
480  int step_index;
481  int predictor;
482  int step;
483 
484  nibble = sign_extend(nibble & 0xF, 4);
485 
486  step = ima_cunning_step_table[c->step_index];
487  step_index = c->step_index + ima_cunning_index_table[abs(nibble)];
488  step_index = av_clip(step_index, 0, 60);
489 
490  predictor = c->predictor + step * nibble;
491 
492  c->predictor = av_clip_int16(predictor);
493  c->step_index = step_index;
494 
495  return c->predictor;
496 }
497 
 498 static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
 499 {
500  int nibble, step_index, predictor, sign, delta, diff, step, shift;
501 
502  shift = bps - 1;
503  nibble = get_bits_le(gb, bps),
504  step = ff_adpcm_step_table[c->step_index];
505  step_index = c->step_index + adpcm_index_tables[bps - 2][nibble];
506  step_index = av_clip(step_index, 0, 88);
507 
508  sign = nibble & (1 << shift);
509  delta = av_zero_extend(nibble, shift);
510  diff = ((2 * delta + 1) * step) >> shift;
511  predictor = c->predictor;
512  if (sign) predictor -= diff;
513  else predictor += diff;
514 
515  c->predictor = av_clip_int16(predictor);
516  c->step_index = step_index;
517 
518  return (int16_t)c->predictor;
519 }
520 
521 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
522 {
523  int step_index;
524  int predictor;
525  int diff, step;
526 
527  step = ff_adpcm_step_table[c->step_index];
528  step_index = c->step_index + ff_adpcm_index_table[nibble];
529  step_index = av_clip(step_index, 0, 88);
530 
531  diff = step >> 3;
532  if (nibble & 4) diff += step;
533  if (nibble & 2) diff += step >> 1;
534  if (nibble & 1) diff += step >> 2;
535 
536  if (nibble & 8)
537  predictor = c->predictor - diff;
538  else
539  predictor = c->predictor + diff;
540 
541  c->predictor = av_clip_int16(predictor);
542  c->step_index = step_index;
543 
544  return c->predictor;
545 }
546 
547 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
548 {
549  int predictor;
550 
551  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
552  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
553 
554  c->sample2 = c->sample1;
555  c->sample1 = av_clip_int16(predictor);
556  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
557  if (c->idelta < 16) c->idelta = 16;
558  if (c->idelta > INT_MAX/768) {
559  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
560  c->idelta = INT_MAX/768;
561  }
562 
563  return c->sample1;
564 }
565 
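/* OKI/Dialogic-style IMA: the predictor is kept as a 12-bit value (hence the
 * av_clip_intp2(predictor, 11) below) and multiplied by 16 on output so it
 * spans the 16-bit sample range. */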
566 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
567 {
568  int step_index, predictor, sign, delta, diff, step;
569 
570  step = oki_step_table[c->step_index];
571  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
572  step_index = av_clip(step_index, 0, 48);
573 
574  sign = nibble & 8;
575  delta = nibble & 7;
576  diff = ((2 * delta + 1) * step) >> 3;
577  predictor = c->predictor;
578  if (sign) predictor -= diff;
579  else predictor += diff;
580 
581  c->predictor = av_clip_intp2(predictor, 11);
582  c->step_index = step_index;
583 
584  return c->predictor * 16;
585 }
586 
587 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
588 {
589  int sign, delta, diff;
590  int new_step;
591 
592  sign = nibble & 8;
593  delta = nibble & 7;
 594  /* perform direct multiplication instead of the series of jumps proposed by
 595  * the reference ADPCM implementation, since modern CPUs can do the mults
 596  * quickly enough */
597  diff = ((2 * delta + 1) * c->step) >> 3;
 598  /* predictor update is not so trivial: the predictor is multiplied by 254/256 before updating */
599  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
600  c->predictor = av_clip_int16(c->predictor);
601  /* calculate new step and clamp it to range 511..32767 */
602  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
603  c->step = av_clip(new_step, 511, 32767);
604 
605  return (int16_t)c->predictor;
606 }
607 
608 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
609 {
610  int sign, delta, diff;
611 
612  sign = nibble & (1<<(size-1));
613  delta = nibble & ((1<<(size-1))-1);
614  diff = delta << (7 + c->step + shift);
615 
616  /* clamp result */
617  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
618 
619  /* calculate new step */
620  if (delta >= (2*size - 3) && c->step < 3)
621  c->step++;
622  else if (delta == 0 && c->step > 0)
623  c->step--;
624 
625  return (int16_t) c->predictor;
626 }
627 
628 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
629 {
630  if(!c->step) {
631  c->predictor = 0;
632  c->step = 127;
633  }
634 
635  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
636  c->predictor = av_clip_int16(c->predictor);
637  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
638  c->step = av_clip(c->step, 127, 24576);
639  return c->predictor;
640 }
641 
642 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
643 {
644  c->predictor += mtaf_stepsize[c->step][nibble];
645  c->predictor = av_clip_int16(c->predictor);
646  c->step += ff_adpcm_index_table[nibble];
647  c->step = av_clip_uintp2(c->step, 5);
648  return c->predictor;
649 }
650 
651 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
652 {
653  int16_t index = c->step_index;
654  uint32_t lookup_sample = ff_adpcm_step_table[index];
655  int32_t sample = 0;
656 
657  if (nibble & 0x40)
658  sample += lookup_sample;
659  if (nibble & 0x20)
660  sample += lookup_sample >> 1;
661  if (nibble & 0x10)
662  sample += lookup_sample >> 2;
663  if (nibble & 0x08)
664  sample += lookup_sample >> 3;
665  if (nibble & 0x04)
666  sample += lookup_sample >> 4;
667  if (nibble & 0x02)
668  sample += lookup_sample >> 5;
669  if (nibble & 0x01)
670  sample += lookup_sample >> 6;
671  if (nibble & 0x80)
672  sample = -sample;
673 
674  sample += c->predictor;
 675  sample = av_clip_int16(sample);
 676 
677  index += zork_index_table[(nibble >> 4) & 7];
678  index = av_clip(index, 0, 88);
679 
680  c->predictor = sample;
681  c->step_index = index;
682 
683  return sample;
684 }
685 
686 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
687  const uint8_t *in, ADPCMChannelStatus *left,
688  ADPCMChannelStatus *right, int channels, int sample_offset)
689 {
690  int i, j;
691  int shift,filter,f0,f1;
692  int s_1,s_2;
693  int d,s,t;
694 
695  out0 += sample_offset;
696  if (channels == 1)
697  out1 = out0 + 28;
698  else
699  out1 += sample_offset;
700 
701  for(i=0;i<4;i++) {
702  shift = 12 - (in[4+i*2] & 15);
703  filter = in[4+i*2] >> 4;
 704  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
 705  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
706  filter=0;
707  }
708  if (shift < 0) {
709  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
710  shift = 0;
711  }
712  f0 = xa_adpcm_table[filter][0];
713  f1 = xa_adpcm_table[filter][1];
714 
715  s_1 = left->sample1;
716  s_2 = left->sample2;
717 
718  for(j=0;j<28;j++) {
719  d = in[16+i+j*4];
720 
721  t = sign_extend(d, 4);
722  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
723  s_2 = s_1;
724  s_1 = av_clip_int16(s);
725  out0[j] = s_1;
726  }
727 
728  if (channels == 2) {
729  left->sample1 = s_1;
730  left->sample2 = s_2;
731  s_1 = right->sample1;
732  s_2 = right->sample2;
733  }
734 
735  shift = 12 - (in[5+i*2] & 15);
736  filter = in[5+i*2] >> 4;
737  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
738  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
739  filter=0;
740  }
741  if (shift < 0) {
742  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
743  shift = 0;
744  }
745 
746  f0 = xa_adpcm_table[filter][0];
747  f1 = xa_adpcm_table[filter][1];
748 
749  for(j=0;j<28;j++) {
750  d = in[16+i+j*4];
751 
752  t = sign_extend(d >> 4, 4);
753  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
754  s_2 = s_1;
755  s_1 = av_clip_int16(s);
756  out1[j] = s_1;
757  }
758 
759  if (channels == 2) {
760  right->sample1 = s_1;
761  right->sample2 = s_2;
762  } else {
763  left->sample1 = s_1;
764  left->sample2 = s_2;
765  }
766 
767  out0 += 28 * (3 - channels);
768  out1 += 28 * (3 - channels);
769  }
770 
771  return 0;
772 }
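/* Each 128-byte XA sound group decoded above consists of a 16-byte header area
 * followed by 112 data bytes; the loop walks 4 header pairs (8 sound units of
 * 28 samples each), i.e. 224 samples per group, which is where the
 * (buf_size / 128) * 224 / ch estimate in get_nb_samples() comes from. */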
773 
774 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
775 {
776  ADPCMDecodeContext *c = avctx->priv_data;
777  GetBitContext gb;
778  const int8_t *table;
779  int channels = avctx->ch_layout.nb_channels;
780  int k0, signmask, nb_bits, count;
781  int size = buf_size*8;
782  int i;
783 
784  init_get_bits(&gb, buf, size);
785 
786  //read bits & initial values
787  nb_bits = get_bits(&gb, 2)+2;
788  table = swf_index_tables[nb_bits-2];
789  k0 = 1 << (nb_bits-2);
790  signmask = 1 << (nb_bits-1);
791 
792  while (get_bits_count(&gb) <= size - 22 * channels) {
793  for (i = 0; i < channels; i++) {
794  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
795  c->status[i].step_index = get_bits(&gb, 6);
796  }
797 
798  for (count = 0; get_bits_count(&gb) <= size - nb_bits * channels && count < 4095; count++) {
799  int i;
800 
801  for (i = 0; i < channels; i++) {
802  // similar to IMA adpcm
803  int delta = get_bits(&gb, nb_bits);
804  int step = ff_adpcm_step_table[c->status[i].step_index];
805  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
806  int k = k0;
807 
808  do {
809  if (delta & k)
810  vpdiff += step;
811  step >>= 1;
812  k >>= 1;
813  } while(k);
814  vpdiff += step;
815 
816  if (delta & signmask)
817  c->status[i].predictor -= vpdiff;
818  else
819  c->status[i].predictor += vpdiff;
820 
821  c->status[i].step_index += table[delta & (~signmask)];
822 
823  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
824  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
825 
826  *samples++ = c->status[i].predictor;
827  }
828  }
829  }
830 }
831 
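/* After the final >> 2, the two prediction modes below reduce to
 * 2*sample1 - sample2 (flag set) or just sample1 (flag clear), plus the scaled
 * nibble, i.e. a second- or first-order fixed predictor chosen by the caller. */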
832 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
833 {
834  int sample = sign_extend(nibble, 4) * (1 << shift);
835 
836  if (flag)
837  sample += (8 * cs->sample1) - (4 * cs->sample2);
838  else
839  sample += 4 * cs->sample1;
840 
841  sample = av_clip_int16(sample >> 2);
842 
843  cs->sample2 = cs->sample1;
844  cs->sample1 = sample;
845 
846  return sample;
847 }
848 
850 {
851  int sign, delta, add;
852 
853  sign = bits & 4;
854  if (sign)
855  delta = 4 - (bits & 3);
856  else
857  delta = bits;
858 
859  switch (delta) {
860  case 0:
861  add = 0;
862  c->step = (3 * c->step) >> 2;
863  break;
864  case 1:
865  add = c->step;
866  c->step = (4 * c->step - (c->step >> 1)) >> 2;
867  break;
868  case 2:
869  add = 2 * c->step;
870  c->step = ((c->step >> 1) + add) >> 1;
871  break;
872  case 3:
873  add = 4 * c->step - (c->step >> 1);
874  c->step = 2 * c->step;
875  break;
876  case 4:
877  add = (11 * c->step) >> 1;
878  c->step = 3 * c->step;
879  break;
880  default:
881  av_unreachable("There are cases for all control paths when bits is 3-bit");
882  }
883 
884  if (sign)
885  add = -add;
886 
887  c->predictor = av_clip_int16(c->predictor + add);
888  c->step = av_clip(c->step, 1, 7281);
889  return c->predictor;
890 }
891 
893 {
894  int sign, delta, add;
895 
896  sign = bits & 8;
897  if (sign)
898  delta = 8 - (bits & 7);
899  else
900  delta = bits;
901 
902  switch (delta) {
903  case 0:
904  add = 0;
905  c->step = (3 * c->step) >> 2;
906  break;
907  case 1:
908  add = c->step;
909  c->step = (3 * c->step) >> 2;
910  break;
911  case 2:
912  add = 2 * c->step;
913  break;
914  case 3:
915  add = 3 * c->step;
916  break;
917  case 4:
918  add = 4 * c->step;
919  break;
920  case 5:
921  add = (11 * c->step) >> 1;
922  c->step += c->step >> 2;
923  break;
924  case 6:
925  add = (15 * c->step) >> 1;
926  c->step = 2 * c->step;
927  break;
928  case 7:
929  if (sign)
930  add = (19 * c->step) >> 1;
931  else
932  add = (21 * c->step) >> 1;
933  c->step = (c->step >> 1) + 2 * c->step;
934  break;
935  case 8:
936  add = (25 * c->step) >> 1;
937  c->step = 5 * c->step;
938  break;
939  default:
940  av_unreachable("There are cases for all control paths when bits is 4-bit");
941  }
942 
943  if (sign)
944  add = -add;
945 
946  c->predictor = av_clip_int16(c->predictor + add);
947  c->step = av_clip(c->step, 1, 2621);
948  return c->predictor;
949 }
950 
952 {
953  int sign, delta, add;
954 
955  sign = bits & 0x10;
956  if (sign)
957  delta = 16 - (bits & 0xF);
958  else
959  delta = bits;
960 
961  add = delta * c->step;
962  switch (delta) {
963  case 0:
964  c->step += (c->step >> 2) - (c->step >> 1);
965  break;
966  case 1:
967  case 2:
968  case 3:
969  c->step += (c->step >> 3) - (c->step >> 2);
970  break;
971  case 4:
972  case 5:
973  c->step += (c->step >> 4) - (c->step >> 3);
974  break;
975  case 6:
976  break;
977  case 7:
978  c->step += c->step >> 3;
979  break;
980  case 8:
981  c->step += c->step >> 2;
982  break;
983  case 9:
984  c->step += c->step >> 1;
985  break;
986  case 10:
987  c->step = 2 * c->step - (c->step >> 3);
988  break;
989  case 11:
990  c->step = 2 * c->step + (c->step >> 3);
991  break;
992  case 12:
993  c->step = 2 * c->step + (c->step >> 1) - (c->step >> 3);
994  break;
995  case 13:
996  c->step = 3 * c->step - (c->step >> 2);
997  break;
998  case 14:
999  c->step *= 3;
1000  break;
1001  case 15:
1002  case 16:
1003  c->step = (7 * c->step) >> 1;
1004  break;
1005  }
1006 
1007  if (sign)
1008  add = -add;
1009 
1010  c->predictor = av_clip_int16(c->predictor + add);
1011  c->step = av_clip(c->step, 1, 1024);
1012  return c->predictor;
1013 }
1014 
1015 /**
1016  * Get the number of samples (per channel) that will be decoded from the packet.
1017  * In one case, this is actually the maximum number of samples possible to
1018  * decode with the given buf_size.
1019  *
1020  * @param[out] coded_samples set to the number of samples as coded in the
1021  * packet, or 0 if the codec does not encode the
1022  * number of samples in each frame.
1023  * @param[out] approx_nb_samples set to non-zero if the number of samples
1024  * returned is an approximation.
1025  */
 1026 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
 1027  int buf_size, int *coded_samples, int *approx_nb_samples)
1028 {
1029  ADPCMDecodeContext *s = avctx->priv_data;
1030  int nb_samples = 0;
1031  int ch = avctx->ch_layout.nb_channels;
1032  int has_coded_samples = 0;
1033  int header_size;
1034 
1035  *coded_samples = 0;
1036  *approx_nb_samples = 0;
1037 
1038  if(ch <= 0)
1039  return 0;
1040 
1041  switch (avctx->codec->id) {
1042  /* constant, only check buf_size */
1044  if (buf_size < 76 * ch)
1045  return 0;
1046  nb_samples = 128;
1047  break;
1049  if (buf_size < 34 * ch)
1050  return 0;
1051  nb_samples = 64;
1052  break;
1053  /* simple 4-bit adpcm */
1054  case AV_CODEC_ID_ADPCM_CT:
1066  nb_samples = buf_size * 2 / ch;
1067  break;
1068  }
1069  if (nb_samples)
1070  return nb_samples;
1071 
1072  /* simple 4-bit adpcm, with header */
1073  header_size = 0;
1074  switch (avctx->codec->id) {
1075  case AV_CODEC_ID_ADPCM_4XM:
1076  case AV_CODEC_ID_ADPCM_AGM:
1080  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
1081  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
1082  }
1083  if (header_size > 0)
1084  return (buf_size - header_size) * 2 / ch;
1085 
1086  /* more complex formats */
1087  switch (avctx->codec->id) {
1089  bytestream2_skip(gb, 4);
1090  has_coded_samples = 1;
1091  *coded_samples = bytestream2_get_le32u(gb);
1092  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
1093  bytestream2_seek(gb, -8, SEEK_CUR);
1094  break;
1095  case AV_CODEC_ID_ADPCM_EA:
1096  /* Stereo is 30 bytes per block */
1097  /* Mono is 15 bytes per block */
1098  has_coded_samples = 1;
1099  *coded_samples = bytestream2_get_le32(gb);
1100  *coded_samples -= *coded_samples % 28;
1101  nb_samples = (buf_size - 12) / (ch == 2 ? 30 : 15) * 28;
1102  break;
1104  has_coded_samples = 1;
1105  *coded_samples = bytestream2_get_le32(gb);
1106  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
1107  break;
1109  nb_samples = (buf_size - ch) / ch * 2;
1110  break;
1114  /* maximum number of samples */
1115  /* has internal offsets and a per-frame switch to signal raw 16-bit */
1116  has_coded_samples = 1;
1117  switch (avctx->codec->id) {
1119  header_size = 4 + 9 * ch;
1120  *coded_samples = bytestream2_get_le32(gb);
1121  break;
1123  header_size = 4 + 5 * ch;
1124  *coded_samples = bytestream2_get_le32(gb);
1125  break;
1127  header_size = 4 + 5 * ch;
1128  *coded_samples = bytestream2_get_be32(gb);
1129  break;
1130  }
1131  *coded_samples -= *coded_samples % 28;
1132  nb_samples = (buf_size - header_size) * 2 / ch;
1133  nb_samples -= nb_samples % 28;
1134  *approx_nb_samples = 1;
1135  break;
1137  if (avctx->block_align > 0)
1138  buf_size = FFMIN(buf_size, avctx->block_align);
1139  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
1140  break;
1142  if (avctx->block_align > 0)
1143  buf_size = FFMIN(buf_size, avctx->block_align);
1144  if (buf_size < 4 * ch)
1145  return AVERROR_INVALIDDATA;
1146  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
1147  break;
1149  if (avctx->block_align > 0)
1150  buf_size = FFMIN(buf_size, avctx->block_align);
1151  nb_samples = (buf_size - 4 * ch) * 2 / ch;
1152  break;
1153  CASE(ADPCM_IMA_WAV,
1154  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1155  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1156  if (avctx->block_align > 0)
1157  buf_size = FFMIN(buf_size, avctx->block_align);
1158  if (buf_size < 4 * ch)
1159  return AVERROR_INVALIDDATA;
1160  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
1161  ) /* End of CASE */
1162  CASE(ADPCM_IMA_XBOX,
1163  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1164  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1165  if (avctx->block_align > 0)
1166  buf_size = FFMIN(buf_size, avctx->block_align);
1167  if (buf_size < 4 * ch)
1168  return AVERROR_INVALIDDATA;
1169  nb_samples = (buf_size - 4 * ch) / (bsize * ch) * bsamples + 1;
1170  ) /* End of CASE */
1171  case AV_CODEC_ID_ADPCM_MS:
1172  if (avctx->block_align > 0)
1173  buf_size = FFMIN(buf_size, avctx->block_align);
1174  nb_samples = (buf_size - 6 * ch) * 2 / ch;
1175  break;
1177  if (avctx->block_align > 0)
1178  buf_size = FFMIN(buf_size, avctx->block_align);
1179  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
1180  break;
1184  {
1185  int samples_per_byte;
1186  switch (avctx->codec->id) {
1187  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
1188  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
1189  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
1190  }
1191  if (!s->status[0].step_index) {
1192  if (buf_size < ch)
1193  return AVERROR_INVALIDDATA;
1194  nb_samples++;
1195  buf_size -= ch;
1196  }
1197  nb_samples += buf_size * samples_per_byte / ch;
1198  break;
1199  }
1200  case AV_CODEC_ID_ADPCM_SWF:
1201  {
1202  int buf_bits = buf_size * 8 - 2;
1203  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
1204  int block_hdr_size = 22 * ch;
1205  int block_size = block_hdr_size + nbits * ch * 4095;
1206  int nblocks = buf_bits / block_size;
1207  int bits_left = buf_bits - nblocks * block_size;
1208  nb_samples = nblocks * 4096;
1209  if (bits_left >= block_hdr_size)
1210  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
1211  break;
1212  }
1213  case AV_CODEC_ID_ADPCM_THP:
1215  if (avctx->extradata) {
1216  nb_samples = buf_size * 14 / (8 * ch);
1217  break;
1218  }
1219  has_coded_samples = 1;
1220  bytestream2_skip(gb, 4); // channel size
1221  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
1222  bytestream2_get_le32(gb) :
1223  bytestream2_get_be32(gb);
1224  buf_size -= 8 + 36 * ch;
1225  buf_size /= ch;
1226  nb_samples = buf_size / 8 * 14;
1227  if (buf_size % 8 > 1)
1228  nb_samples += (buf_size % 8 - 1) * 2;
1229  *approx_nb_samples = 1;
1230  break;
1231  case AV_CODEC_ID_ADPCM_AFC:
1232  nb_samples = buf_size / (9 * ch) * 16;
1233  break;
1234  case AV_CODEC_ID_ADPCM_XA:
1235  nb_samples = (buf_size / 128) * 224 / ch;
1236  break;
1237  case AV_CODEC_ID_ADPCM_XMD:
1238  nb_samples = buf_size / (21 * ch) * 32;
1239  break;
1240  case AV_CODEC_ID_ADPCM_DTK:
1241  case AV_CODEC_ID_ADPCM_PSX:
1242  nb_samples = buf_size / (16 * ch) * 28;
1243  break;
1245  nb_samples = buf_size / avctx->block_align * 32;
1246  break;
1248  nb_samples = buf_size / ch;
1249  break;
1251  if (!avctx->extradata || avctx->extradata_size != 2)
1252  return AVERROR_INVALIDDATA;
1253  nb_samples = AV_RL16(avctx->extradata);
1254  break;
1255  }
1256 
1257  /* validate coded sample count */
1258  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
1259  return AVERROR_INVALIDDATA;
1260 
1261  return nb_samples;
1262 }
1263 
 1264 static int adpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 1265  int *got_frame_ptr, AVPacket *avpkt)
1266 {
1267  const uint8_t *buf = avpkt->data;
1268  int buf_size = avpkt->size;
1269  ADPCMDecodeContext *c = avctx->priv_data;
1270  int channels = avctx->ch_layout.nb_channels;
1271  int16_t *samples;
1272  int16_t **samples_p;
1273  int st; /* stereo */
1274  int nb_samples, coded_samples, approx_nb_samples, ret;
1275  GetByteContext gb;
1276 
1277  bytestream2_init(&gb, buf, buf_size);
1278  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
1279  if (nb_samples <= 0) {
1280  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
1281  return AVERROR_INVALIDDATA;
1282  }
1283 
1284  /* get output buffer */
1285  frame->nb_samples = nb_samples;
1286  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
1287  return ret;
1288  samples = (int16_t *)frame->data[0];
1289  samples_p = (int16_t **)frame->extended_data;
1290 
1291  /* use coded_samples when applicable */
1292  /* it is always <= nb_samples, so the output buffer will be large enough */
1293  if (coded_samples) {
1294  if (!approx_nb_samples && coded_samples != nb_samples)
1295  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
1296  frame->nb_samples = nb_samples = coded_samples;
1297  }
1298 
1299  st = channels == 2 ? 1 : 0;
1300 
1301  switch(avctx->codec->id) {
1302  CASE(ADPCM_IMA_QT,
1303  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
1304  Channel data is interleaved per-chunk. */
1305  for (int channel = 0; channel < channels; channel++) {
1306  ADPCMChannelStatus *cs = &c->status[channel];
1307  int predictor;
1308  int step_index;
1309  /* (pppppp) (piiiiiii) */
1310 
1311  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
1312  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1313  step_index = predictor & 0x7F;
1314  predictor &= ~0x7F;
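 /* e.g. a header word of 0x1234 gives step_index 0x34 = 52 and an initial
  * predictor of 0x1200 = 4608; only the top 9 predictor bits are transmitted. */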
1315 
1316  if (cs->step_index == step_index) {
1317  int diff = predictor - cs->predictor;
1318  if (diff < 0)
1319  diff = - diff;
1320  if (diff > 0x7f)
1321  goto update;
1322  } else {
1323  update:
1324  cs->step_index = step_index;
1325  cs->predictor = predictor;
1326  }
1327 
1328  if (cs->step_index > 88u){
1329  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1330  channel, cs->step_index);
1331  return AVERROR_INVALIDDATA;
1332  }
1333 
1334  samples = samples_p[channel];
1335 
1336  for (int m = 0; m < 64; m += 2) {
1337  int byte = bytestream2_get_byteu(&gb);
1338  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1339  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1340  }
1341  }
1342  ) /* End of CASE */
1343  CASE(ADPCM_IMA_WAV,
1344  for (int i = 0; i < channels; i++) {
1345  ADPCMChannelStatus *cs = &c->status[i];
1346  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1347 
1348  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1349  if (cs->step_index > 88u){
1350  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1351  i, cs->step_index);
1352  return AVERROR_INVALIDDATA;
1353  }
1354  }
1355 
1356  if (avctx->bits_per_coded_sample != 4) {
1357  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1358  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1359  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
1360  GetBitContext g;
1361 
1362  for (int n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1363  for (int i = 0; i < channels; i++) {
1364  ADPCMChannelStatus *cs = &c->status[i];
1365  samples = &samples_p[i][1 + n * samples_per_block];
1366  for (int j = 0; j < block_size; j++) {
1367  temp[j] = buf[4 * channels + block_size * n * channels +
1368  (j % 4) + (j / 4) * (channels * 4) + i * 4];
1369  }
1370  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1371  if (ret < 0)
1372  return ret;
1373  for (int m = 0; m < samples_per_block; m++) {
 1374  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
 1375  avctx->bits_per_coded_sample);
1376  }
1377  }
1378  }
1379  bytestream2_skip(&gb, avctx->block_align - channels * 4);
1380  } else {
1381  for (int n = 0; n < (nb_samples - 1) / 8; n++) {
1382  for (int i = 0; i < channels; i++) {
1383  ADPCMChannelStatus *cs = &c->status[i];
1384  samples = &samples_p[i][1 + n * 8];
1385  for (int m = 0; m < 8; m += 2) {
1386  int v = bytestream2_get_byteu(&gb);
1387  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1388  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1389  }
1390  }
1391  }
1392  }
1393  ) /* End of CASE */
1394  CASE(ADPCM_IMA_XBOX,
1395  for (int i = 0; i < channels; i++) {
1396  ADPCMChannelStatus *cs = &c->status[i];
1397  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1398 
1399  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1400  if (cs->step_index > 88u) {
1401  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1402  i, cs->step_index);
1403  return AVERROR_INVALIDDATA;
1404  }
1405  }
1406 
1407  for (int n = 0; n < (nb_samples-1) / 8; n++) {
1408  for (int i = 0; i < channels; i++) {
1409  ADPCMChannelStatus *cs = &c->status[i];
1410  samples = &samples_p[i][1 + n * 8];
1411  for (int m = 0; m < 8; m += 2) {
1412  int v = bytestream2_get_byteu(&gb);
1413  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1414  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1415  }
1416  }
1417  }
1418  frame->nb_samples--;
1419  ) /* End of CASE */
1420  CASE(ADPCM_4XM,
1421  for (int i = 0; i < channels; i++)
1422  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1423 
1424  for (int i = 0; i < channels; i++) {
1425  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1426  if (c->status[i].step_index > 88u) {
1427  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1428  i, c->status[i].step_index);
1429  return AVERROR_INVALIDDATA;
1430  }
1431  }
1432 
1433  for (int i = 0; i < channels; i++) {
1434  ADPCMChannelStatus *cs = &c->status[i];
1435  samples = (int16_t *)frame->data[i];
1436  for (int n = nb_samples >> 1; n > 0; n--) {
1437  int v = bytestream2_get_byteu(&gb);
1438  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1439  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1440  }
1441  }
1442  ) /* End of CASE */
1443  CASE(ADPCM_AGM,
1444  for (int i = 0; i < channels; i++)
1445  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1446  for (int i = 0; i < channels; i++)
1447  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1448 
1449  for (int n = 0; n < nb_samples >> (1 - st); n++) {
1450  int v = bytestream2_get_byteu(&gb);
1451  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1452  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1453  }
1454  ) /* End of CASE */
1455  CASE(ADPCM_MS,
1456  int block_predictor;
1457 
1458  if (avctx->ch_layout.nb_channels > 2) {
1459  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
1460  samples = samples_p[channel];
1461  block_predictor = bytestream2_get_byteu(&gb);
1462  if (block_predictor > 6) {
1463  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1464  channel, block_predictor);
1465  return AVERROR_INVALIDDATA;
1466  }
1467  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1468  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1469  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1470  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1471  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1472  *samples++ = c->status[channel].sample2;
1473  *samples++ = c->status[channel].sample1;
1474  for (int n = (nb_samples - 2) >> 1; n > 0; n--) {
1475  int byte = bytestream2_get_byteu(&gb);
1476  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1477  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1478  }
1479  }
1480  } else {
1481  block_predictor = bytestream2_get_byteu(&gb);
1482  if (block_predictor > 6) {
1483  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1484  block_predictor);
1485  return AVERROR_INVALIDDATA;
1486  }
1487  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1488  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1489  if (st) {
1490  block_predictor = bytestream2_get_byteu(&gb);
1491  if (block_predictor > 6) {
1492  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1493  block_predictor);
1494  return AVERROR_INVALIDDATA;
1495  }
1496  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1497  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1498  }
1499  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1500  if (st){
1501  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1502  }
1503 
1504  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1505  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1506  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1507  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1508 
1509  *samples++ = c->status[0].sample2;
1510  if (st) *samples++ = c->status[1].sample2;
1511  *samples++ = c->status[0].sample1;
1512  if (st) *samples++ = c->status[1].sample1;
1513  for (int n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1514  int byte = bytestream2_get_byteu(&gb);
1515  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1516  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1517  }
1518  }
1519  ) /* End of CASE */
1520  CASE(ADPCM_MTAF,
1521  for (int channel = 0; channel < channels; channel += 2) {
1522  bytestream2_skipu(&gb, 4);
1523  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1524  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1525  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1526  bytestream2_skipu(&gb, 2);
1527  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1528  bytestream2_skipu(&gb, 2);
1529  for (int n = 0; n < nb_samples; n += 2) {
1530  int v = bytestream2_get_byteu(&gb);
1531  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1532  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1533  }
1534  for (int n = 0; n < nb_samples; n += 2) {
1535  int v = bytestream2_get_byteu(&gb);
1536  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1537  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1538  }
1539  }
1540  ) /* End of CASE */
1541  CASE(ADPCM_IMA_DK4,
1542  for (int channel = 0; channel < channels; channel++) {
1543  ADPCMChannelStatus *cs = &c->status[channel];
1544  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1545  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1546  if (cs->step_index > 88u){
1547  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1548  channel, cs->step_index);
1549  return AVERROR_INVALIDDATA;
1550  }
1551  }
1552  for (int n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1553  int v = bytestream2_get_byteu(&gb);
1554  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1555  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1556  }
1557  ) /* End of CASE */
1558 
1559  /* DK3 ADPCM support macro */
1560 #define DK3_GET_NEXT_NIBBLE() \
1561  if (decode_top_nibble_next) { \
1562  nibble = last_byte >> 4; \
1563  decode_top_nibble_next = 0; \
1564  } else { \
1565  last_byte = bytestream2_get_byteu(&gb); \
1566  nibble = last_byte & 0x0F; \
1567  decode_top_nibble_next = 1; \
1568  }
1569  CASE(ADPCM_IMA_DK3,
1570  int last_byte = 0;
1571  int nibble;
1572  int decode_top_nibble_next = 0;
1573  int diff_channel;
1574  const int16_t *samples_end = samples + channels * nb_samples;
1575 
1576  bytestream2_skipu(&gb, 10);
1577  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1578  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1579  c->status[0].step_index = bytestream2_get_byteu(&gb);
1580  c->status[1].step_index = bytestream2_get_byteu(&gb);
1581  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1582  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1583  c->status[0].step_index, c->status[1].step_index);
1584  return AVERROR_INVALIDDATA;
1585  }
1586  /* sign extend the predictors */
1587  diff_channel = c->status[1].predictor;
1588 
1589  while (samples < samples_end) {
1590 
1591  /* for this algorithm, c->status[0] is the sum channel and
1592  * c->status[1] is the diff channel */
1593 
1594  /* process the first predictor of the sum channel */
 1595  DK3_GET_NEXT_NIBBLE();
 1596  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1597 
1598  /* process the diff channel predictor */
 1599  DK3_GET_NEXT_NIBBLE();
 1600  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1601 
1602  /* process the first pair of stereo PCM samples */
1603  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1604  *samples++ = c->status[0].predictor + c->status[1].predictor;
1605  *samples++ = c->status[0].predictor - c->status[1].predictor;
1606 
1607  /* process the second predictor of the sum channel */
 1608  DK3_GET_NEXT_NIBBLE();
 1609  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1610 
1611  /* process the second pair of stereo PCM samples */
1612  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1613  *samples++ = c->status[0].predictor + c->status[1].predictor;
1614  *samples++ = c->status[0].predictor - c->status[1].predictor;
1615  }
1616 
1617  if ((bytestream2_tell(&gb) & 1))
1618  bytestream2_skip(&gb, 1);
1619  ) /* End of CASE */
1620  CASE(ADPCM_IMA_ISS,
1621  for (int channel = 0; channel < channels; channel++) {
1622  ADPCMChannelStatus *cs = &c->status[channel];
1623  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1624  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1625  if (cs->step_index > 88u){
1626  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1627  channel, cs->step_index);
1628  return AVERROR_INVALIDDATA;
1629  }
1630  }
1631 
1632  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1633  int v1, v2;
1634  int v = bytestream2_get_byteu(&gb);
1635  /* nibbles are swapped for mono */
1636  if (st) {
1637  v1 = v >> 4;
1638  v2 = v & 0x0F;
1639  } else {
1640  v2 = v >> 4;
1641  v1 = v & 0x0F;
1642  }
1643  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1644  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1645  }
1646  ) /* End of CASE */
1647  CASE(ADPCM_IMA_MOFLEX,
1648  for (int channel = 0; channel < channels; channel++) {
1649  ADPCMChannelStatus *cs = &c->status[channel];
1650  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1651  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1652  if (cs->step_index > 88u){
1653  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1654  channel, cs->step_index);
1655  return AVERROR_INVALIDDATA;
1656  }
1657  }
1658 
1659  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1660  for (int channel = 0; channel < channels; channel++) {
1661  samples = samples_p[channel] + 256 * subframe;
1662  for (int n = 0; n < 256; n += 2) {
1663  int v = bytestream2_get_byteu(&gb);
1664  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1665  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1666  }
1667  }
1668  }
1669  ) /* End of CASE */
1670  CASE(ADPCM_IMA_DAT4,
1671  for (int channel = 0; channel < channels; channel++) {
1672  ADPCMChannelStatus *cs = &c->status[channel];
1673  samples = samples_p[channel];
1674  bytestream2_skip(&gb, 4);
1675  for (int n = 0; n < nb_samples; n += 2) {
1676  int v = bytestream2_get_byteu(&gb);
1677  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1678  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1679  }
1680  }
1681  ) /* End of CASE */
1682  CASE(ADPCM_IMA_APC,
1683  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1684  int v = bytestream2_get_byteu(&gb);
1685  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1686  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1687  }
1688  ) /* End of CASE */
1689  CASE(ADPCM_IMA_SSI,
1690  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1691  int v = bytestream2_get_byteu(&gb);
1692  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1693  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1694  }
1695  ) /* End of CASE */
1696  CASE(ADPCM_IMA_APM,
1697  for (int n = nb_samples / 2; n > 0; n--) {
1698  for (int channel = 0; channel < channels; channel++) {
1699  int v = bytestream2_get_byteu(&gb);
1700  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1701  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1702  }
1703  samples += channels;
1704  }
1705  ) /* End of CASE */
1706  CASE(ADPCM_IMA_ALP,
1707  for (int n = nb_samples / 2; n > 0; n--) {
1708  for (int channel = 0; channel < channels; channel++) {
1709  int v = bytestream2_get_byteu(&gb);
1710  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1711  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1712  }
1713  samples += channels;
1714  }
1715  ) /* End of CASE */
1716  CASE(ADPCM_IMA_CUNNING,
1717  for (int channel = 0; channel < channels; channel++) {
1718  int16_t *smp = samples_p[channel];
1719  for (int n = 0; n < nb_samples / 2; n++) {
1720  int v = bytestream2_get_byteu(&gb);
1721  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1722  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1723  }
1724  }
1725  ) /* End of CASE */
1726  CASE(ADPCM_IMA_OKI,
1727  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1728  int v = bytestream2_get_byteu(&gb);
1729  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1730  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1731  }
1732  ) /* End of CASE */
1733  CASE(ADPCM_IMA_RAD,
1734  for (int channel = 0; channel < channels; channel++) {
1735  ADPCMChannelStatus *cs = &c->status[channel];
1736  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1737  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1738  if (cs->step_index > 88u){
1739  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1740  channel, cs->step_index);
1741  return AVERROR_INVALIDDATA;
1742  }
1743  }
1744  for (int n = 0; n < nb_samples / 2; n++) {
1745  int byte[2];
1746 
1747  byte[0] = bytestream2_get_byteu(&gb);
1748  if (st)
1749  byte[1] = bytestream2_get_byteu(&gb);
1750  for (int channel = 0; channel < channels; channel++) {
1751  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1752  }
1753  for (int channel = 0; channel < channels; channel++) {
1754  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1755  }
1756  }
1757  ) /* End of CASE */
1758  CASE(ADPCM_IMA_WS,
1759  if (c->vqa_version == 3) {
1760  for (int channel = 0; channel < channels; channel++) {
1761  int16_t *smp = samples_p[channel];
1762 
1763  for (int n = nb_samples / 2; n > 0; n--) {
1764  int v = bytestream2_get_byteu(&gb);
1765  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1766  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1767  }
1768  }
1769  } else {
1770  for (int n = nb_samples / 2; n > 0; n--) {
1771  for (int channel = 0; channel < channels; channel++) {
1772  int v = bytestream2_get_byteu(&gb);
1773  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1774  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1775  }
1776  samples += channels;
1777  }
1778  }
1779  bytestream2_seek(&gb, 0, SEEK_END);
1780  ) /* End of CASE */
1781  CASE(ADPCM_XMD,
1782  int bytes_remaining, block = 0;
1783  while (bytestream2_get_bytes_left(&gb) >= 21 * channels) {
1784  for (int channel = 0; channel < channels; channel++) {
1785  int16_t *out = samples_p[channel] + block * 32;
1786  int16_t history[2];
1787  uint16_t scale;
1788 
1789  history[1] = sign_extend(bytestream2_get_le16(&gb), 16);
1790  history[0] = sign_extend(bytestream2_get_le16(&gb), 16);
1791  scale = bytestream2_get_le16(&gb);
1792 
1793  out[0] = history[1];
1794  out[1] = history[0];
1795 
1796  for (int n = 0; n < 15; n++) {
1797  unsigned byte = bytestream2_get_byte(&gb);
1798  int32_t nibble[2];
1799 
1800  nibble[0] = sign_extend(byte & 15, 4);
1801  nibble[1] = sign_extend(byte >> 4, 4);
1802 
1803  out[2+n*2] = nibble[0]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
1804  history[1] = history[0];
1805  history[0] = out[2+n*2];
1806 
1807  out[2+n*2+1] = nibble[1]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
1808  history[1] = history[0];
1809  history[0] = out[2+n*2+1];
1810  }
1811  }
1812 
1813  block++;
1814  }
1815  bytes_remaining = bytestream2_get_bytes_left(&gb);
1816  if (bytes_remaining > 0) {
1817  bytestream2_skip(&gb, bytes_remaining);
1818  }
1819  ) /* End of CASE */
1820  CASE(ADPCM_XA,
1821  int16_t *out0 = samples_p[0];
1822  int16_t *out1 = samples_p[1];
1823  int samples_per_block = 28 * (3 - channels) * 4;
1824  int sample_offset = 0;
1825  int bytes_remaining;
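       /* Each 128-byte XA sound group carries 8 sound units of 28 nibbles,
        * i.e. 224 decoded samples: all 224 go to one channel in mono, or 112
        * per channel in stereo, hence 28 * (3 - channels) * 4 above. */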
1826  while (bytestream2_get_bytes_left(&gb) >= 128) {
1827  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1828  &c->status[0], &c->status[1],
1829  channels, sample_offset)) < 0)
1830  return ret;
1831  bytestream2_skipu(&gb, 128);
1832  sample_offset += samples_per_block;
1833  }
1834  /* Less than a full block of data left, e.g. when reading from
1835  * 2324-byte-per-sector XA; the remainder is padding */
1836  bytes_remaining = bytestream2_get_bytes_left(&gb);
1837  if (bytes_remaining > 0) {
1838  bytestream2_skip(&gb, bytes_remaining);
1839  }
1840  ) /* End of CASE */
1841  CASE(ADPCM_IMA_EA_EACS,
1842  for (int i = 0; i <= st; i++) {
1843  c->status[i].step_index = bytestream2_get_le32u(&gb);
1844  if (c->status[i].step_index > 88u) {
1845  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1846  i, c->status[i].step_index);
1847  return AVERROR_INVALIDDATA;
1848  }
1849  }
1850  for (int i = 0; i <= st; i++) {
1851  c->status[i].predictor = bytestream2_get_le32u(&gb);
1852  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1853  return AVERROR_INVALIDDATA;
1854  }
1855 
1856  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1857  int byte = bytestream2_get_byteu(&gb);
1858  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1859  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1860  }
1861  ) /* End of CASE */
1862  CASE(ADPCM_IMA_EA_SEAD,
1863  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1864  int byte = bytestream2_get_byteu(&gb);
1865  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1866  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1867  }
1868  ) /* End of CASE */
1869  CASE(ADPCM_EA,
1870  int previous_left_sample, previous_right_sample;
1871  int current_left_sample, current_right_sample;
1872  int next_left_sample, next_right_sample;
1873  int coeff1l, coeff2l, coeff1r, coeff2r;
1874  int shift_left, shift_right;
1875 
1876  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte (stereo) or 15-byte (mono) pieces,
1877  each coding 28 stereo/mono samples. */
1878 
1879  if (channels != 2 && channels != 1)
1880  return AVERROR_INVALIDDATA;
1881 
1882  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1883  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1884  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1885  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1886 
1887  for (int count1 = 0; count1 < nb_samples / 28; count1++) {
1888  int byte = bytestream2_get_byteu(&gb);
1889  coeff1l = ea_adpcm_table[ byte >> 4 ];
1890  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1891  coeff1r = ea_adpcm_table[ byte & 0x0F];
1892  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1893 
1894  if (channels == 2){
1895  byte = bytestream2_get_byteu(&gb);
1896  shift_left = 20 - (byte >> 4);
1897  shift_right = 20 - (byte & 0x0F);
1898  } else{
1899  /* Mono packs the shift into the coefficient byte's lower nibble instead */
1900  shift_left = 20 - (byte & 0x0F);
1901  }
1902 
1903  for (int count2 = 0; count2 < (channels == 2 ? 28 : 14); count2++) {
1904  byte = bytestream2_get_byteu(&gb);
1905  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1906 
1907  next_left_sample = (next_left_sample +
1908  (current_left_sample * coeff1l) +
1909  (previous_left_sample * coeff2l) + 0x80) >> 8;
1910 
1911  previous_left_sample = current_left_sample;
1912  current_left_sample = av_clip_int16(next_left_sample);
1913  *samples++ = current_left_sample;
1914 
1915  if (channels == 2){
1916  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1917 
1918  next_right_sample = (next_right_sample +
1919  (current_right_sample * coeff1r) +
1920  (previous_right_sample * coeff2r) + 0x80) >> 8;
1921 
1922  previous_right_sample = current_right_sample;
1923  current_right_sample = av_clip_int16(next_right_sample);
1924  *samples++ = current_right_sample;
1925  } else {
1926  next_left_sample = sign_extend(byte, 4) * (1 << shift_left);
1927 
1928  next_left_sample = (next_left_sample +
1929  (current_left_sample * coeff1l) +
1930  (previous_left_sample * coeff2l) + 0x80) >> 8;
1931 
1932  previous_left_sample = current_left_sample;
1933  current_left_sample = av_clip_int16(next_left_sample);
1934 
1935  *samples++ = current_left_sample;
1936  }
1937  }
1938  }
1939  bytestream2_skip(&gb, channels == 2 ? 2 : 3); // Skip terminating NULs
1940  ) /* End of CASE */
1941  CASE(ADPCM_EA_MAXIS_XA,
1942  int coeff[2][2], shift[2];
1943 
1944  for (int channel = 0; channel < channels; channel++) {
1945  int byte = bytestream2_get_byteu(&gb);
1946  for (int i = 0; i < 2; i++)
1947  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1948  shift[channel] = 20 - (byte & 0x0F);
1949  }
1950  for (int count1 = 0; count1 < nb_samples / 2; count1++) {
1951  int byte[2];
1952 
1953  byte[0] = bytestream2_get_byteu(&gb);
1954  if (st) byte[1] = bytestream2_get_byteu(&gb);
1955  for (int i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1956  for (int channel = 0; channel < channels; channel++) {
1957  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1958  sample = (sample +
1959  c->status[channel].sample1 * coeff[channel][0] +
1960  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1961  c->status[channel].sample2 = c->status[channel].sample1;
1962  c->status[channel].sample1 = av_clip_int16(sample);
1963  *samples++ = c->status[channel].sample1;
1964  }
1965  }
1966  }
1967  bytestream2_seek(&gb, 0, SEEK_END);
1968  ) /* End of CASE */
1969 #if CONFIG_ADPCM_EA_R1_DECODER || CONFIG_ADPCM_EA_R2_DECODER || CONFIG_ADPCM_EA_R3_DECODER
1970  case AV_CODEC_ID_ADPCM_EA_R1:
1971  case AV_CODEC_ID_ADPCM_EA_R2:
1972  case AV_CODEC_ID_ADPCM_EA_R3: {
1973  /* channel numbering
1974  2chan: 0=fl, 1=fr
1975  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1976  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1977  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1978  int previous_sample, current_sample, next_sample;
1979  int coeff1, coeff2;
1980  int shift;
1981  uint16_t *samplesC;
1982  int count = 0;
1983  int offsets[6];
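       /* The packet starts with one 32-bit offset per channel (big-endian for
        * R3, little-endian for R1/R2); the stored value is relative, so
        * (channels + 1) * 4 is added to make it an absolute position before
        * each channel's stream is decoded independently below. */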
1984 
1985  for (unsigned channel = 0; channel < channels; channel++)
1986  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1987  bytestream2_get_le32(&gb)) +
1988  (channels + 1) * 4;
1989 
1990  for (unsigned channel = 0; channel < channels; channel++) {
1991  int count1;
1992 
1993  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1994  samplesC = samples_p[channel];
1995 
1996  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1997  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1998  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1999  } else {
2000  current_sample = c->status[channel].predictor;
2001  previous_sample = c->status[channel].prev_sample;
2002  }
2003 
2004  for (count1 = 0; count1 < nb_samples / 28; count1++) {
2005  int byte = bytestream2_get_byte(&gb);
2006  if (byte == 0xEE) { /* only seen in R2 and R3 */
2007  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
2008  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
2009 
2010  for (int count2 = 0; count2 < 28; count2++)
2011  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
2012  } else {
2013  coeff1 = ea_adpcm_table[ byte >> 4 ];
2014  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
2015  shift = 20 - (byte & 0x0F);
2016 
2017  for (int count2 = 0; count2 < 28; count2++) {
2018  if (count2 & 1)
2019  next_sample = (unsigned)sign_extend(byte, 4) << shift;
2020  else {
2021  byte = bytestream2_get_byte(&gb);
2022  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
2023  }
2024 
2025  next_sample += (current_sample * coeff1) +
2026  (previous_sample * coeff2);
2027  next_sample = av_clip_int16(next_sample >> 8);
2028 
2029  previous_sample = current_sample;
2030  current_sample = next_sample;
2031  *samplesC++ = current_sample;
2032  }
2033  }
2034  }
2035  if (!count) {
2036  count = count1;
2037  } else if (count != count1) {
2038  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
2039  count = FFMAX(count, count1);
2040  }
2041 
2042  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
2043  c->status[channel].predictor = current_sample;
2044  c->status[channel].prev_sample = previous_sample;
2045  }
2046  }
2047 
2048  frame->nb_samples = count * 28;
2049  bytestream2_seek(&gb, 0, SEEK_END);
2050  break;
2051  }
2052 #endif /* CONFIG_ADPCM_EA_Rx_DECODER */
2053  CASE(ADPCM_EA_XAS,
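       /* Each channel block holds four interleaved sub-streams of 32 samples.
        * Per sub-stream, two 16-bit words carry the coefficient index and the
        * shift in their low nibbles while their upper 12 bits are the first
        * two output samples; 15 further bytes each supply the two 4-bit
        * residuals for one sample pair. */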
2054  for (int channel=0; channel < channels; channel++) {
2055  int coeff[2][4], shift[4];
2056  int16_t *s = samples_p[channel];
2057  for (int n = 0; n < 4; n++, s += 32) {
2058  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
2059  for (int i = 0; i < 2; i++)
2060  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
2061  s[0] = val & ~0x0F;
2062 
2063  val = sign_extend(bytestream2_get_le16u(&gb), 16);
2064  shift[n] = 20 - (val & 0x0F);
2065  s[1] = val & ~0x0F;
2066  }
2067 
2068  for (int m = 2; m < 32; m += 2) {
2069  s = &samples_p[channel][m];
2070  for (int n = 0; n < 4; n++, s += 32) {
2071  int level, pred;
2072  int byte = bytestream2_get_byteu(&gb);
2073 
2074  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
2075  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
2076  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
2077 
2078  level = sign_extend(byte, 4) * (1 << shift[n]);
2079  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
2080  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
2081  }
2082  }
2083  }
2084  ) /* End of CASE */
2085  CASE(ADPCM_IMA_ACORN,
2086  for (int channel = 0; channel < channels; channel++) {
2087  ADPCMChannelStatus *cs = &c->status[channel];
2088  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
2089  cs->step_index = bytestream2_get_le16u(&gb) & 0xFF;
2090  if (cs->step_index > 88u){
2091  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
2092  channel, cs->step_index);
2093  return AVERROR_INVALIDDATA;
2094  }
2095  }
2096  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2097  int byte = bytestream2_get_byteu(&gb);
2098  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte & 0x0F, 3);
2099  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte >> 4, 3);
2100  }
2101  ) /* End of CASE */
2102  CASE(ADPCM_IMA_AMV,
2103  av_assert0(channels == 1);
2104 
2105  /*
2106  * Header format:
2107  * int16_t predictor;
2108  * uint8_t step_index;
2109  * uint8_t reserved;
2110  * uint32_t frame_size;
2111  *
2112  * Some implementations have step_index as 16-bits, but others
2113  * only use the lower 8 and store garbage in the upper 8.
2114  */
2115  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
2116  c->status[0].step_index = bytestream2_get_byteu(&gb);
2117  bytestream2_skipu(&gb, 5);
2118  if (c->status[0].step_index > 88u) {
2119  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
2120  c->status[0].step_index);
2121  return AVERROR_INVALIDDATA;
2122  }
2123 
2124  for (int n = nb_samples >> 1; n > 0; n--) {
2125  int v = bytestream2_get_byteu(&gb);
2126 
2127  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
2128  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
2129  }
2130 
2131  if (nb_samples & 1) {
2132  int v = bytestream2_get_byteu(&gb);
2133  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
2134 
2135  if (v & 0x0F) {
2136  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
2137  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
2138  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
2139  }
2140  }
2141  ) /* End of CASE */
2142  CASE(ADPCM_IMA_SMJPEG,
2143  for (int i = 0; i < channels; i++) {
2144  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
2145  c->status[i].step_index = bytestream2_get_byteu(&gb);
2146  bytestream2_skipu(&gb, 1);
2147  if (c->status[i].step_index > 88u) {
2148  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
2149  c->status[i].step_index);
2150  return AVERROR_INVALIDDATA;
2151  }
2152  }
2153 
2154  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2155  int v = bytestream2_get_byteu(&gb);
2156 
2157  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
2158  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
2159  }
2160  ) /* End of CASE */
2161  CASE(ADPCM_CT,
2162  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2163  int v = bytestream2_get_byteu(&gb);
2164  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
2165  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
2166  }
2167  ) /* End of CASE */
2168 #if CONFIG_ADPCM_SBPRO_2_DECODER || CONFIG_ADPCM_SBPRO_3_DECODER || \
2169  CONFIG_ADPCM_SBPRO_4_DECODER
2170  case AV_CODEC_ID_ADPCM_SBPRO_4:
2171  case AV_CODEC_ID_ADPCM_SBPRO_3:
2172  case AV_CODEC_ID_ADPCM_SBPRO_2:
2173  if (!c->status[0].step_index) {
2174  /* the first byte is a raw sample */
2175  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
2176  if (st)
2177  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
2178  c->status[0].step_index = 1;
2179  nb_samples--;
2180  }
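       /* Sound Blaster Pro packing: the 4-bit variant stores two samples per
        * byte, the 2.6-bit variant three (3 + 3 + 2 bits) and the 2-bit
        * variant four, which is why the loop bounds below differ per codec. */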
2181  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
2182  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2183  int byte = bytestream2_get_byteu(&gb);
2184  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2185  byte >> 4, 4, 0);
2186  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
2187  byte & 0x0F, 4, 0);
2188  }
2189  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
2190  for (int n = (nb_samples<<st) / 3; n > 0; n--) {
2191  int byte = bytestream2_get_byteu(&gb);
2192  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2193  byte >> 5 , 3, 0);
2194  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2195  (byte >> 2) & 0x07, 3, 0);
2196  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2197  byte & 0x03, 2, 0);
2198  }
2199  } else {
2200  for (int n = nb_samples >> (2 - st); n > 0; n--) {
2201  int byte = bytestream2_get_byteu(&gb);
2202  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2203  byte >> 6 , 2, 2);
2204  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
2205  (byte >> 4) & 0x03, 2, 2);
2206  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
2207  (byte >> 2) & 0x03, 2, 2);
2208  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
2209  byte & 0x03, 2, 2);
2210  }
2211  }
2212  break;
2213 #endif /* CONFIG_ADPCM_SBPRO_x_DECODER */
2214  CASE(ADPCM_SWF,
2215  adpcm_swf_decode(avctx, buf, buf_size, samples);
2216  bytestream2_seek(&gb, 0, SEEK_END);
2217  ) /* End of CASE */
2218  CASE(ADPCM_YAMAHA,
2219  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2220  int v = bytestream2_get_byteu(&gb);
2221  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
2222  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
2223  }
2224  ) /* End of CASE */
2225  CASE(ADPCM_AICA,
2226  for (int channel = 0; channel < channels; channel++) {
2227  samples = samples_p[channel];
2228  for (int n = nb_samples >> 1; n > 0; n--) {
2229  int v = bytestream2_get_byteu(&gb);
2230  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
2231  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
2232  }
2233  }
2234  ) /* End of CASE */
2235  CASE(ADPCM_AFC,
2236  int samples_per_block;
2237  int blocks;
2238 
2239  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
2240  samples_per_block = avctx->extradata[0] / 16;
2241  blocks = nb_samples / avctx->extradata[0];
2242  } else {
2243  samples_per_block = nb_samples / 16;
2244  blocks = 1;
2245  }
2246 
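       /* Each run of 16 samples is preceded by a header byte: the high nibble
        * is a scale exponent (1 << n) and the low nibble selects a predictor
        * coefficient pair from afc_coeffs[]. */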
2247  for (int m = 0; m < blocks; m++) {
2248  for (int channel = 0; channel < channels; channel++) {
2249  int prev1 = c->status[channel].sample1;
2250  int prev2 = c->status[channel].sample2;
2251 
2252  samples = samples_p[channel] + m * 16;
2253  /* Read in every sample for this channel. */
2254  for (int i = 0; i < samples_per_block; i++) {
2255  int byte = bytestream2_get_byteu(&gb);
2256  int scale = 1 << (byte >> 4);
2257  int index = byte & 0xf;
2258  int factor1 = afc_coeffs[0][index];
2259  int factor2 = afc_coeffs[1][index];
2260 
2261  /* Decode 16 samples. */
2262  for (int n = 0; n < 16; n++) {
2263  int32_t sampledat;
2264 
2265  if (n & 1) {
2266  sampledat = sign_extend(byte, 4);
2267  } else {
2268  byte = bytestream2_get_byteu(&gb);
2269  sampledat = sign_extend(byte >> 4, 4);
2270  }
2271 
2272  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
2273  sampledat * scale;
2274  *samples = av_clip_int16(sampledat);
2275  prev2 = prev1;
2276  prev1 = *samples++;
2277  }
2278  }
2279 
2280  c->status[channel].sample1 = prev1;
2281  c->status[channel].sample2 = prev2;
2282  }
2283  }
2284  bytestream2_seek(&gb, 0, SEEK_END);
2285  ) /* End of CASE */
2286 #if CONFIG_ADPCM_THP_DECODER || CONFIG_ADPCM_THP_LE_DECODER
2287  case AV_CODEC_ID_ADPCM_THP:
2288  case AV_CODEC_ID_ADPCM_THP_LE:
2289  {
2290  int table[14][16];
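       /* THP stores 8 predictor coefficient pairs (16 values) per channel,
        * taken from extradata when present or from the packet otherwise.
        * Each header byte then selects a pair with its high nibble and a
        * shift exponent with its low nibble, and is followed by up to 7 bytes
        * holding 14 4-bit residuals. */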
2291 
2292 #define THP_GET16(g) \
2293  sign_extend( \
2294  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
2295  bytestream2_get_le16u(&(g)) : \
2296  bytestream2_get_be16u(&(g)), 16)
2297 
2298  if (avctx->extradata) {
2299  GetByteContext tb;
2300  if (avctx->extradata_size < 32 * channels) {
2301  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
2302  return AVERROR_INVALIDDATA;
2303  }
2304 
2305  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
2306  for (int i = 0; i < channels; i++)
2307  for (int n = 0; n < 16; n++)
2308  table[i][n] = THP_GET16(tb);
2309  } else {
2310  for (int i = 0; i < channels; i++)
2311  for (int n = 0; n < 16; n++)
2312  table[i][n] = THP_GET16(gb);
2313 
2314  if (!c->has_status) {
2315  /* Initialize the previous sample. */
2316  for (int i = 0; i < channels; i++) {
2317  c->status[i].sample1 = THP_GET16(gb);
2318  c->status[i].sample2 = THP_GET16(gb);
2319  }
2320  c->has_status = 1;
2321  } else {
2322  bytestream2_skip(&gb, channels * 4);
2323  }
2324  }
2325 
2326  for (int ch = 0; ch < channels; ch++) {
2327  samples = samples_p[ch];
2328 
2329  /* Read in every sample for this channel. */
2330  for (int i = 0; i < (nb_samples + 13) / 14; i++) {
2331  int byte = bytestream2_get_byteu(&gb);
2332  int index = (byte >> 4) & 7;
2333  unsigned int exp = byte & 0x0F;
2334  int64_t factor1 = table[ch][index * 2];
2335  int64_t factor2 = table[ch][index * 2 + 1];
2336 
2337  /* Decode 14 samples. */
2338  for (int n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
2339  int32_t sampledat;
2340 
2341  if (n & 1) {
2342  sampledat = sign_extend(byte, 4);
2343  } else {
2344  byte = bytestream2_get_byteu(&gb);
2345  sampledat = sign_extend(byte >> 4, 4);
2346  }
2347 
2348  sampledat = ((c->status[ch].sample1 * factor1
2349  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
2350  *samples = av_clip_int16(sampledat);
2351  c->status[ch].sample2 = c->status[ch].sample1;
2352  c->status[ch].sample1 = *samples++;
2353  }
2354  }
2355  }
2356  break;
2357  }
2358 #endif /* CONFIG_ADPCM_THP(_LE)_DECODER */
2359  CASE(ADPCM_DTK,
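       /* DTK interleaves both channels in 32-byte units: four header bytes
        * (one per channel, the rest skipped) each hold a prediction-filter
        * index in the high nibble and a shift in the low nibble, followed by
        * 28 data bytes with the first channel in the low nibbles and the
        * second in the high nibbles. The reader is rewound after the first
        * channel so the second pass can walk the same bytes. The three
        * prediction filters appear to match the first CD-ROM XA filter pairs
        * (0x3c, 0x73/0x34, 0x62/0x37). */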
2360  for (int channel = 0; channel < channels; channel++) {
2361  samples = samples_p[channel];
2362 
2363  /* Read in every sample for this channel. */
2364  for (int i = 0; i < nb_samples / 28; i++) {
2365  int byte, header;
2366  if (channel)
2367  bytestream2_skipu(&gb, 1);
2368  header = bytestream2_get_byteu(&gb);
2369  bytestream2_skipu(&gb, 3 - channel);
2370 
2371  /* Decode 28 samples. */
2372  for (int n = 0; n < 28; n++) {
2373  int32_t sampledat, prev;
2374 
2375  switch (header >> 4) {
2376  case 1:
2377  prev = (c->status[channel].sample1 * 0x3c);
2378  break;
2379  case 2:
2380  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
2381  break;
2382  case 3:
2383  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
2384  break;
2385  default:
2386  prev = 0;
2387  }
2388 
2389  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
2390 
2391  byte = bytestream2_get_byteu(&gb);
2392  if (!channel)
2393  sampledat = sign_extend(byte, 4);
2394  else
2395  sampledat = sign_extend(byte >> 4, 4);
2396 
2397  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
2398  *samples++ = av_clip_int16(sampledat >> 6);
2399  c->status[channel].sample2 = c->status[channel].sample1;
2400  c->status[channel].sample1 = sampledat;
2401  }
2402  }
2403  if (!channel)
2404  bytestream2_seek(&gb, 0, SEEK_SET);
2405  }
2406  ) /* End of CASE */
2407  CASE(ADPCM_PSX,
2408  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * channels); block++) {
2409  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * channels) / (16 * channels);
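       /* Per channel, every 16-byte chunk is a filter/shift byte (filter in
        * the high nibble indexing xa_adpcm_table, shift in the low nibble),
        * a flag byte and 14 data bytes that expand to 28 samples; flag 7
        * leaves that chunk's output at zero. */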
2410  for (int channel = 0; channel < channels; channel++) {
2411  samples = samples_p[channel] + block * nb_samples_per_block;
2412  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2413 
2414  /* Read in every sample for this channel. */
2415  for (int i = 0; i < nb_samples_per_block / 28; i++) {
2416  int filter, shift, flag, byte;
2417 
2418  filter = bytestream2_get_byteu(&gb);
2419  shift = filter & 0xf;
2420  filter = filter >> 4;
2421  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
2422  return AVERROR_INVALIDDATA;
2423  flag = bytestream2_get_byteu(&gb) & 0x7;
2424 
2425  /* Decode 28 samples. */
2426  for (int n = 0; n < 28; n++) {
2427  int sample = 0, scale;
2428 
2429  if (n & 1) {
2430  scale = sign_extend(byte >> 4, 4);
2431  } else {
2432  byte = bytestream2_get_byteu(&gb);
2433  scale = sign_extend(byte, 4);
2434  }
2435 
2436  if (flag < 0x07) {
2437  scale = scale * (1 << 12);
2438  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2439  }
2440  *samples++ = av_clip_int16(sample);
2441  c->status[channel].sample2 = c->status[channel].sample1;
2442  c->status[channel].sample1 = sample;
2443  }
2444  }
2445  }
2446  }
2447  ) /* End of CASE */
2448  CASE(ADPCM_SANYO,
2449  int (*expand)(ADPCMChannelStatus *c, int bits);
2450  GetBitContext g;
2451 
2452  switch(avctx->bits_per_coded_sample) {
2453  case 3: expand = adpcm_sanyo_expand3; break;
2454  case 4: expand = adpcm_sanyo_expand4; break;
2455  case 5: expand = adpcm_sanyo_expand5; break;
2456  }
2457 
2458  for (int ch = 0; ch < channels; ch++) {
2459  c->status[ch].predictor = sign_extend(bytestream2_get_le16(&gb), 16);
2460  c->status[ch].step = sign_extend(bytestream2_get_le16(&gb), 16);
2461  }
2462 
2463  init_get_bits8(&g, gb.buffer + bytestream2_tell(&gb), bytestream2_get_bytes_left(&gb));
2464  for (int i = 0; i < nb_samples; i++)
2465  for (int ch = 0; ch < channels; ch++)
2466  samples_p[ch][i] = expand(&c->status[ch], get_bits_le(&g, avctx->bits_per_coded_sample));
2467 
2468  align_get_bits(&g);
2469  bytestream2_skip(&gb, get_bits_count(&g) / 8);
2470  ) /* End of CASE */
2471  CASE(ADPCM_ARGO,
2472  /*
2473  * The format of each block:
2474  * uint8_t left_control;
2475  * uint4_t left_samples[nb_samples];
2476  * ---- and if stereo ----
2477  * uint8_t right_control;
2478  * uint4_t right_samples[nb_samples];
2479  *
2480  * Format of the control byte:
2481  * MSB [SSSSRDRR] LSB
2482  * S = (Shift Amount - 2)
2483  * D = Decoder flag.
2484  * R = Reserved
2485  *
2486  * Each block relies on the previous two samples of each channel.
2487  * They should be 0 initially.
2488  */
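       /* For example, control = 0x74 gives shift = (0x7) + 2 = 9 with the
        * decoder flag (bit 2) set; the 16 data bytes that follow then expand
        * to 32 samples, two per byte. */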
2489  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2490  for (int channel = 0; channel < avctx->ch_layout.nb_channels; channel++) {
2491  ADPCMChannelStatus *cs = c->status + channel;
2492  int control, shift;
2493 
2494  samples = samples_p[channel] + block * 32;
2495 
2496  /* Get the control byte and decode the samples, 2 at a time. */
2497  control = bytestream2_get_byteu(&gb);
2498  shift = (control >> 4) + 2;
2499 
2500  for (int n = 0; n < 16; n++) {
2501  int sample = bytestream2_get_byteu(&gb);
2502  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2503  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2504  }
2505  }
2506  }
2507  ) /* End of CASE */
2508  CASE(ADPCM_ZORK,
2509  for (int n = 0; n < nb_samples * channels; n++) {
2510  int v = bytestream2_get_byteu(&gb);
2511  *samples++ = adpcm_zork_expand_nibble(&c->status[n % channels], v);
2512  }
2513  ) /* End of CASE */
2514  CASE(ADPCM_IMA_MTF,
2515  for (int n = nb_samples / 2; n > 0; n--) {
2516  for (int channel = 0; channel < channels; channel++) {
2517  int v = bytestream2_get_byteu(&gb);
2518  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2519  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2520  }
2521  samples += channels;
2522  }
2523  ) /* End of CASE */
2524  default:
2525  av_unreachable("There are cases for all codec ids using adpcm_decode_frame");
2526  }
2527 
2528  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2529  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2530  return AVERROR_INVALIDDATA;
2531  }
2532 
2533  *got_frame_ptr = 1;
2534 
2535  if (avpkt->size < bytestream2_tell(&gb)) {
2536  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2537  return avpkt->size;
2538  }
2539 
2540  return bytestream2_tell(&gb);
2541 }
2542 
2543 static void adpcm_flush(AVCodecContext *avctx)
2544 {
2545  ADPCMDecodeContext *c = avctx->priv_data;
2546 
2547  /* Just nuke the entire state and re-init. */
2548  memset(c, 0, sizeof(ADPCMDecodeContext));
2549 
2550  switch(avctx->codec_id) {
2551  case AV_CODEC_ID_ADPCM_CT:
2552  c->status[0].step = c->status[1].step = 511;
2553  break;
2554 
2555  case AV_CODEC_ID_ADPCM_IMA_APC:
2556  if (avctx->extradata && avctx->extradata_size >= 8) {
2557  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
2558  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2559  }
2560  break;
2561 
2562  case AV_CODEC_ID_ADPCM_IMA_APM:
2563  if (avctx->extradata && avctx->extradata_size >= 28) {
2564  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
2565  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
2566  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2567  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
2568  }
2569  break;
2570 
2571  case AV_CODEC_ID_ADPCM_IMA_WS:
2572  if (avctx->extradata && avctx->extradata_size >= 2)
2573  c->vqa_version = AV_RL16(avctx->extradata);
2574  break;
2575  default:
2576  /* Other codecs may want to handle this during decoding. */
2577  c->has_status = 0;
2578  return;
2579  }
2580 
2581  c->has_status = 1;
2582 }
2583 
2584 
2585 static const enum AVSampleFormat sample_fmts_s16[] = { AV_SAMPLE_FMT_S16,
2586  AV_SAMPLE_FMT_NONE };
2587 static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
2588  AV_SAMPLE_FMT_NONE };
2589 static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
2590  AV_SAMPLE_FMT_S16P,
2591  AV_SAMPLE_FMT_NONE };
2592 
2593 #define ADPCM_DECODER_0(id_, sample_fmts_, name_, long_name_)
2594 #define ADPCM_DECODER_1(id_, sample_fmts_, name_, long_name_) \
2595 const FFCodec ff_ ## name_ ## _decoder = { \
2596  .p.name = #name_, \
2597  CODEC_LONG_NAME(long_name_), \
2598  .p.type = AVMEDIA_TYPE_AUDIO, \
2599  .p.id = id_, \
2600  .p.capabilities = AV_CODEC_CAP_DR1, \
2601  CODEC_SAMPLEFMTS_ARRAY(sample_fmts_), \
2602  .priv_data_size = sizeof(ADPCMDecodeContext), \
2603  .init = adpcm_decode_init, \
2604  FF_CODEC_DECODE_CB(adpcm_decode_frame), \
2605  .flush = adpcm_flush, \
2606 };
2607 #define ADPCM_DECODER_2(enabled, codec_id, name, sample_fmts, long_name) \
2608  ADPCM_DECODER_ ## enabled(codec_id, name, sample_fmts, long_name)
2609 #define ADPCM_DECODER_3(config, codec_id, name, sample_fmts, long_name) \
2610  ADPCM_DECODER_2(config, codec_id, name, sample_fmts, long_name)
2611 #define ADPCM_DECODER(codec, name, sample_fmts, long_name) \
2612  ADPCM_DECODER_3(CONFIG_ ## codec ## _DECODER, AV_CODEC_ID_ ## codec, \
2613  name, sample_fmts, long_name)
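 /* For example, ADPCM_DECODER(ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie")
  * below expands to the FFCodec ff_adpcm_4xm_decoder when CONFIG_ADPCM_4XM_DECODER
  * is enabled, and to nothing when it is disabled. */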
2614 
2615 /* Note: Do not forget to add new entries to the Makefile as well. */
2616 ADPCM_DECODER(ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie")
2617 ADPCM_DECODER(ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC")
2618 ADPCM_DECODER(ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie")
2619 ADPCM_DECODER(ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA")
2620 ADPCM_DECODER(ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games")
2621 ADPCM_DECODER(ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology")
2622 ADPCM_DECODER(ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK")
2623 ADPCM_DECODER(ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts")
2624 ADPCM_DECODER(ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA")
2625 ADPCM_DECODER(ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1")
2626 ADPCM_DECODER(ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2")
2627 ADPCM_DECODER(ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3")
2628 ADPCM_DECODER(ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS")
2629 ADPCM_DECODER(ADPCM_IMA_ACORN, sample_fmts_s16, adpcm_ima_acorn, "ADPCM IMA Acorn Replay")
2630 ADPCM_DECODER(ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV")
2631 ADPCM_DECODER(ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC")
2632 ADPCM_DECODER(ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM")
2633 ADPCM_DECODER(ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments")
2634 ADPCM_DECODER(ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4")
2635 ADPCM_DECODER(ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3")
2636 ADPCM_DECODER(ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4")
2637 ADPCM_DECODER(ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS")
2638 ADPCM_DECODER(ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD")
2639 ADPCM_DECODER(ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS")
2640 ADPCM_DECODER(ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX")
2641 ADPCM_DECODER(ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework")
2642 ADPCM_DECODER(ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI")
2643 ADPCM_DECODER(ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime")
2644 ADPCM_DECODER(ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical")
2645 ADPCM_DECODER(ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive")
2646 ADPCM_DECODER(ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG")
2647 ADPCM_DECODER(ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP")
2648 ADPCM_DECODER(ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV")
2649 ADPCM_DECODER(ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood")
2650 ADPCM_DECODER(ADPCM_IMA_XBOX, sample_fmts_s16p, adpcm_ima_xbox, "ADPCM IMA Xbox")
2651 ADPCM_DECODER(ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft")
2652 ADPCM_DECODER(ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF")
2653 ADPCM_DECODER(ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation")
2654 ADPCM_DECODER(ADPCM_SANYO, sample_fmts_s16p, adpcm_sanyo, "ADPCM Sanyo")
2655 ADPCM_DECODER(ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit")
2656 ADPCM_DECODER(ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit")
2657 ADPCM_DECODER(ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit")
2658 ADPCM_DECODER(ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash")
2659 ADPCM_DECODER(ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)")
2660 ADPCM_DECODER(ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP")
2661 ADPCM_DECODER(ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA")
2662 ADPCM_DECODER(ADPCM_XMD, sample_fmts_s16p, adpcm_xmd, "ADPCM Konami XMD")
2663 ADPCM_DECODER(ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha")
2664 ADPCM_DECODER(ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork")
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_CODEC_ID_ADPCM_MS
@ AV_CODEC_ID_ADPCM_MS
Definition: codec_id.h:381
adpcm_index_table5
static const int8_t adpcm_index_table5[32]
Definition: adpcm.c:138
DK3_GET_NEXT_NIBBLE
#define DK3_GET_NEXT_NIBBLE()
AV_CODEC_ID_ADPCM_IMA_QT
@ AV_CODEC_ID_ADPCM_IMA_QT
Definition: codec_id.h:375
level
uint8_t level
Definition: svq3.c:208
av_clip
#define av_clip
Definition: common.h:100
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
out
FILE * out
Definition: movenc.c:55
AV_CODEC_ID_ADPCM_DTK
@ AV_CODEC_ID_ADPCM_DTK
Definition: codec_id.h:408
ADPCMChannelStatus::step_index
int16_t step_index
Definition: adpcm.h:33
GetByteContext
Definition: bytestream.h:33
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
R3
#define R3
Definition: simple_idct.c:168
zork_index_table
static const int8_t zork_index_table[8]
Definition: adpcm.c:233
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
ff_adpcm_AdaptationTable
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:54
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
int64_t
long long int64_t
Definition: coverity.c:34
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:250
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:421
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:83
AV_CODEC_ID_ADPCM_IMA_CUNNING
@ AV_CODEC_ID_ADPCM_IMA_CUNNING
Definition: codec_id.h:423
AVPacket::data
uint8_t * data
Definition: packet.h:552
table
static const uint16_t table[]
Definition: prosumer.c:203
AV_CODEC_ID_ADPCM_EA_R3
@ AV_CODEC_ID_ADPCM_EA_R3
Definition: codec_id.h:396
AV_CODEC_ID_ADPCM_AICA
@ AV_CODEC_ID_ADPCM_AICA
Definition: codec_id.h:413
AV_CODEC_ID_ADPCM_IMA_OKI
@ AV_CODEC_ID_ADPCM_IMA_OKI
Definition: codec_id.h:407
adpcm_ima_qt_expand_nibble
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:521
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
R1
#define R1
Definition: simple_idct.c:166
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
AV_CODEC_ID_ADPCM_XMD
@ AV_CODEC_ID_ADPCM_XMD
Definition: codec_id.h:426
adpcm_sanyo_expand4
static int adpcm_sanyo_expand4(ADPCMChannelStatus *c, int bits)
Definition: adpcm.c:892
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:512
AV_CODEC_ID_ADPCM_THP_LE
@ AV_CODEC_ID_ADPCM_THP_LE
Definition: codec_id.h:411
adpcm_sbpro_expand_nibble
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
Definition: adpcm.c:608
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:56
AV_CODEC_ID_ADPCM_CT
@ AV_CODEC_ID_ADPCM_CT
Definition: codec_id.h:387
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1039
GetBitContext
Definition: get_bits.h:109
adpcm_ima_mtf_expand_nibble
static int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:462
adpcm_ima_expand_nibble
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:413
val
static double val(void *priv, double ch)
Definition: aeval.c:77
adpcm_flush
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:2543
update
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
Definition: af_silencedetect.c:78
ff_adpcm_ima_block_sizes
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
AV_CODEC_ID_ADPCM_SBPRO_2
@ AV_CODEC_ID_ADPCM_SBPRO_2
Definition: codec_id.h:392
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
sample_fmts_s16p
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:2587
adpcm_ima_alp_expand_nibble
static int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:439
adpcm_yamaha_expand_nibble
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:628
ADPCMChannelStatus::sample1
int sample1
Definition: adpcm.h:39
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
AV_CODEC_ID_ADPCM_IMA_ACORN
@ AV_CODEC_ID_ADPCM_IMA_ACORN
Definition: codec_id.h:425
adpcm_zork_expand_nibble
static int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:651
adpcm_data.h
s
#define s(width, name)
Definition: cbs_vp9.c:198
offsets
static const int offsets[]
Definition: hevc_pel.c:34
AV_CODEC_ID_ADPCM_AFC
@ AV_CODEC_ID_ADPCM_AFC
Definition: codec_id.h:406
AV_CODEC_ID_ADPCM_IMA_EA_SEAD
@ AV_CODEC_ID_ADPCM_IMA_EA_SEAD
Definition: codec_id.h:398
g
const char * g
Definition: vf_curves.c:128
AV_CODEC_ID_ADPCM_IMA_DK3
@ AV_CODEC_ID_ADPCM_IMA_DK3
Definition: codec_id.h:377
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AV_CODEC_ID_ADPCM_IMA_APC
@ AV_CODEC_ID_ADPCM_IMA_APC
Definition: codec_id.h:404
get_bits_le
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:354
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:318
AV_CODEC_ID_ADPCM_IMA_ISS
@ AV_CODEC_ID_ADPCM_IMA_ISS
Definition: codec_id.h:402
channels
channels
Definition: aptx.h:31
decode.h
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
AV_CODEC_ID_ADPCM_IMA_SMJPEG
@ AV_CODEC_ID_ADPCM_IMA_SMJPEG
Definition: codec_id.h:380
adpcm_ms_expand_nibble
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:547
AV_CODEC_ID_ADPCM_IMA_XBOX
@ AV_CODEC_ID_ADPCM_IMA_XBOX
Definition: codec_id.h:427
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_adpcm_ima_block_samples
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
sample_fmts_s16
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:2585
AV_CODEC_ID_ADPCM_EA_XAS
@ AV_CODEC_ID_ADPCM_EA_XAS
Definition: codec_id.h:400
av_clip_int16
#define av_clip_int16
Definition: common.h:115
NULL
#define NULL
Definition: coverity.c:32
ADPCM_DECODER
#define ADPCM_DECODER(codec, name, sample_fmts, long_name)
Definition: adpcm.c:2611
bits_left
#define bits_left
Definition: bitstream.h:116
av_clip_intp2
#define av_clip_intp2
Definition: common.h:121
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_CODEC_ID_ADPCM_YAMAHA
@ AV_CODEC_ID_ADPCM_YAMAHA
Definition: codec_id.h:389
oki_step_table
static const int16_t oki_step_table[49]
Definition: adpcm.c:217
AV_CODEC_ID_ADPCM_IMA_WS
@ AV_CODEC_ID_ADPCM_IMA_WS
Definition: codec_id.h:379
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:109
AV_CODEC_ID_ADPCM_IMA_EA_EACS
@ AV_CODEC_ID_ADPCM_IMA_EA_EACS
Definition: codec_id.h:399
AV_CODEC_ID_ADPCM_ARGO
@ AV_CODEC_ID_ADPCM_ARGO
Definition: codec_id.h:417
AV_CODEC_ID_ADPCM_IMA_DK4
@ AV_CODEC_ID_ADPCM_IMA_DK4
Definition: codec_id.h:378
AV_CODEC_ID_ADPCM_IMA_AMV
@ AV_CODEC_ID_ADPCM_IMA_AMV
Definition: codec_id.h:394
abs
#define abs(x)
Definition: cuda_runtime.h:35
ea_adpcm_table
static const int16_t ea_adpcm_table[]
Definition: adpcm.c:95
ima_cunning_index_table
static const int8_t ima_cunning_index_table[9]
Definition: adpcm.c:109
exp
int8_t exp
Definition: eval.c:73
ADPCMChannelStatus::sample2
int sample2
Definition: adpcm.h:40
adpcm_sanyo_expand3
static int adpcm_sanyo_expand3(ADPCMChannelStatus *c, int bits)
Definition: adpcm.c:849
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_ADPCM_XA
@ AV_CODEC_ID_ADPCM_XA
Definition: codec_id.h:383
adpcm_ct_expand_nibble
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:587
adpcm.h
adpcm_ima_oki_expand_nibble
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:566
adpcm_decode_frame
static int adpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:1264
AV_CODEC_ID_ADPCM_ZORK
@ AV_CODEC_ID_ADPCM_ZORK
Definition: codec_id.h:419
afc_coeffs
static const int16_t afc_coeffs[2][16]
Definition: adpcm.c:90
adpcm_sanyo_expand5
static int adpcm_sanyo_expand5(ADPCMChannelStatus *c, int bits)
Definition: adpcm.c:951
ADPCMDecodeContext
Definition: adpcm.c:244
ff_adpcm_yamaha_difflookup
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:74
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1635
AVPacket::size
int size
Definition: packet.h:553
byte
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:99
codec_internal.h
shift
static int shift(int a, int b)
Definition: bonk.c:261
AV_CODEC_ID_ADPCM_IMA_RAD
@ AV_CODEC_ID_ADPCM_IMA_RAD
Definition: codec_id.h:409
adpcm_ima_cunning_expand_nibble
static int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:478
AV_CODEC_ID_ADPCM_IMA_ALP
@ AV_CODEC_ID_ADPCM_IMA_ALP
Definition: codec_id.h:421
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
bps
unsigned bps
Definition: movenc.c:1935
ff_adpcm_step_table
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:39
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1031
get_nb_samples
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples (per channel) that will be decoded from the packet.
Definition: adpcm.c:1026
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
sample
#define sample
Definition: flacdsp_template.c:44
R2
#define R2
Definition: simple_idct.c:167
AV_CODEC_ID_ADPCM_SWF
@ AV_CODEC_ID_ADPCM_SWF
Definition: codec_id.h:388
size
int size
Definition: twinvq_data.h:10344
header
static const uint8_t header[24]
Definition: sdr2.c:68
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
xf
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:621
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
av_zero_extend
#define av_zero_extend
Definition: common.h:151
xa_decode
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:686
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:64
adpcm_index_table3
static const int8_t adpcm_index_table3[8]
Definition: adpcm.c:133
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1546
sample_fmts_both
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:2589
AV_CODEC_ID_ADPCM_MTAF
@ AV_CODEC_ID_ADPCM_MTAF
Definition: codec_id.h:415
AV_CODEC_ID_ADPCM_EA_MAXIS_XA
@ AV_CODEC_ID_ADPCM_EA_MAXIS_XA
Definition: codec_id.h:401
ff_adpcm_AdaptCoeff1
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:60
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
ff_adpcm_AdaptCoeff2
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:65
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
adpcm_index_tables
static const int8_t *const adpcm_index_tables[4]
Definition: adpcm.c:143
MT
#define MT(...)
Definition: codec_desc.c:32
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_CODEC_ID_ADPCM_IMA_APM
@ AV_CODEC_ID_ADPCM_IMA_APM
Definition: codec_id.h:420
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
ADPCMDecodeContext::vqa_version
int vqa_version
VQA version.
Definition: adpcm.c:246
AV_CODEC_ID_ADPCM_IMA_DAT4
@ AV_CODEC_ID_ADPCM_IMA_DAT4
Definition: codec_id.h:414
ff_adpcm_argo_expand_nibble
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:832
xa_adpcm_table
static const int8_t xa_adpcm_table[5][2]
Definition: adpcm.c:82
ff_adpcm_index_table
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:30
AV_CODEC_ID_ADPCM_SANYO
@ AV_CODEC_ID_ADPCM_SANYO
Definition: codec_id.h:428
avcodec.h
AV_CODEC_ID_ADPCM_EA
@ AV_CODEC_ID_ADPCM_EA
Definition: codec_id.h:385
AV_CODEC_ID_ADPCM_IMA_MTF
@ AV_CODEC_ID_ADPCM_IMA_MTF
Definition: codec_id.h:422
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
AVCodecContext::block_align
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
Definition: avcodec.h:1057
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:555
flag
#define flag(name)
Definition: cbs_av1.c:495
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
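To make the hcoeff derivation above concrete, here is a minimal, self-contained C sketch. It is only an illustration of the rule stated in the text (hcoeff[0] = 32 minus the sum of the stored outer coefficients, taps mirrored around the centre), not Snow's or FFmpeg's actual code; the helper name build_halfpel_filter and the assumption that the outer coefficients already carry their implied alternating signs are mine.

    #include <stdio.h>

    /* Illustrative sketch only: rebuild the full symmetric half-pel filter
     * from the outer coefficients hcoeff[1], hcoeff[2], ... (signs applied). */
    static void build_halfpel_filter(const int *outer, int htaps, int *taps)
    {
        int half    = htaps / 2;   /* number of distinct coefficients          */
        int hcoeff0 = 32;          /* hcoeff[0] = 32 - hcoeff[1] - hcoeff[2]...*/
        for (int i = 1; i < half; i++)
            hcoeff0 -= outer[i - 1];

        /* taps are mirrored: ... h[2] h[1] h[0] h[0] h[1] h[2] ... */
        for (int i = 0; i < half; i++) {
            int c = i ? outer[i - 1] : hcoeff0;
            taps[half - 1 - i] = c;
            taps[half + i]     = c;
        }
    }

    int main(void)
    {
        /* htaps=6, stored outer coefficients (signs applied): -10, 2
         * => hcoeff[0] = 32 - (-10) - 2 = 40, filter = 2 -10 40 40 -10 2 */
        const int outer[] = { -10, 2 };
        int taps[6];
        build_halfpel_filter(outer, 6, taps);
        for (int i = 0; i < 6; i++)
            printf("%d ", taps[i]);
        printf("\n");
        return 0;
    }

Running this prints 2 -10 40 40 -10 2, matching the htaps=6, hcoeff={40,-10,2} example quoted above.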
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
adpcm_ima_wav_expand_nibble
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:498
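The prototype above is FFmpeg's internal, bps-aware helper. As a rough orientation only, the following self-contained sketch shows the classic textbook IMA ADPCM expansion of a single 4-bit nibble using the canonical step and index tables; the function name ima_expand_nibble and the plain int state are mine, and this is the general technique rather than a copy of adpcm.c's implementation.

    #include <stdint.h>
    #include <stdio.h>

    /* Canonical 89-entry IMA ADPCM step-size table. */
    static const int16_t ima_step_table[89] = {
            7,     8,     9,    10,    11,    12,    13,    14,
           16,    17,    19,    21,    23,    25,    28,    31,
           34,    37,    41,    45,    50,    55,    60,    66,
           73,    80,    88,    97,   107,   118,   130,   143,
          157,   173,   190,   209,   230,   253,   279,   307,
          337,   371,   408,   449,   494,   544,   598,   658,
          724,   796,   876,   963,  1060,  1166,  1282,  1411,
         1552,  1707,  1878,  2066,  2272,  2499,  2749,  3024,
         3327,  3660,  4026,  4428,  4871,  5358,  5894,  6484,
         7132,  7845,  8630,  9493, 10442, 11487, 12635, 13899,
        15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794,
        32767
    };

    /* Canonical index adjustment for the low 3 bits of a 4-bit nibble. */
    static const int8_t ima_index_table[8] = { -1, -1, -1, -1, 2, 4, 6, 8 };

    /* Expand one 4-bit nibble into a 16-bit sample, updating the channel's
     * predictor and step index in place. */
    static int16_t ima_expand_nibble(int *predictor, int *step_index, uint8_t nibble)
    {
        int step = ima_step_table[*step_index];
        int diff = step >> 3;                 /* approximates (nibble&7+0.5)*step/4 */

        if (nibble & 1) diff += step >> 2;
        if (nibble & 2) diff += step >> 1;
        if (nibble & 4) diff += step;
        if (nibble & 8) *predictor -= diff;   /* bit 3 is the sign of the delta     */
        else            *predictor += diff;

        if (*predictor >  32767) *predictor =  32767;
        if (*predictor < -32768) *predictor = -32768;

        *step_index += ima_index_table[nibble & 7];
        if (*step_index < 0)  *step_index = 0;
        if (*step_index > 88) *step_index = 88;

        return (int16_t)*predictor;
    }

    int main(void)
    {
        int predictor = 0, step_index = 0;
        const uint8_t nibbles[] = { 0x3, 0x8, 0xF, 0x0 };
        for (size_t i = 0; i < sizeof(nibbles); i++)
            printf("%d\n", ima_expand_nibble(&predictor, &step_index, nibbles[i]));
        return 0;
    }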
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AV_CODEC_ID_ADPCM_AGM
@ AV_CODEC_ID_ADPCM_AGM
Definition: codec_id.h:416
mtaf_stepsize
static const int16_t mtaf_stepsize[32][16]
Definition: adpcm.c:150
ff_adpcm_yamaha_indexscale
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:69
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:132
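For readers unfamiliar with the operation this prototype names: sign extension takes the low 'bits' bits of a value and reinterprets them as a signed quantity. A small self-contained illustration of the general XOR-mask technique follows; the function name sign_extend_demo is mine, and this is not necessarily how mathops.h implements it.

    #include <stdio.h>

    /* Sign-extend the low 'bits' bits of 'val' (assumes 1 <= bits <= 31). */
    static inline int sign_extend_demo(unsigned val, unsigned bits)
    {
        const unsigned sign = 1u << (bits - 1);       /* sign bit of the field      */
        const unsigned mask = (sign << 1) - 1;        /* low 'bits' bits set        */
        return (int)(((val & mask) ^ sign) - sign);   /* wraps negative if sign set */
    }

    int main(void)
    {
        printf("%d %d\n", sign_extend_demo(0xF, 4), sign_extend_demo(0x7, 4)); /* -1 7 */
        return 0;
    }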
AV_CODEC_ID_ADPCM_EA_R1
@ AV_CODEC_ID_ADPCM_EA_R1
Definition: codec_id.h:395
AV_CODEC_ID_ADPCM_EA_R2
@ AV_CODEC_ID_ADPCM_EA_R2
Definition: codec_id.h:397
temp
else temp
Definition: vf_mcdeint.c:271
samples
the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
AV_CODEC_ID_ADPCM_THP
@ AV_CODEC_ID_ADPCM_THP
Definition: codec_id.h:393
adpcm_index_table2
static const int8_t adpcm_index_table2[4]
Definition: adpcm.c:128
AV_CODEC_ID_ADPCM_SBPRO_4
@ AV_CODEC_ID_ADPCM_SBPRO_4
Definition: codec_id.h:390
adpcm_swf_decode
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:774
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
expand
static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
Definition: af_aiir.c:499
AV_CODEC_ID_ADPCM_IMA_SSI
@ AV_CODEC_ID_ADPCM_IMA_SSI
Definition: codec_id.h:418
adpcm_decode_init
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:252
ADPCMDecodeContext::has_status
int has_status
Status flag.
Definition: adpcm.c:247
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
AV_CODEC_ID_ADPCM_IMA_MOFLEX
@ AV_CODEC_ID_ADPCM_IMA_MOFLEX
Definition: codec_id.h:424
AVPacket
This structure stores compressed data.
Definition: packet.h:529
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AV_CODEC_ID_ADPCM_IMA_WAV
@ AV_CODEC_ID_ADPCM_IMA_WAV
Definition: codec_id.h:376
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
ADPCMChannelStatus::predictor
int predictor
Definition: adpcm.h:32
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
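As a usage illustration for the prototype above: bytestream2_init() attaches a GetByteContext to a raw buffer so that subsequent bounded reads can be made through the bytestream2_* accessors. The helper below is a hypothetical sketch (not taken from adpcm.c) and assumes it is built inside the libavcodec tree so that bytestream.h and libavutil/error.h resolve.

    #include "libavutil/error.h"
    #include "bytestream.h"

    /* Hypothetical example: read a little-endian 16-bit predictor and an
     * 8-bit step index from the start of a buffer. */
    static int read_channel_header(const uint8_t *buf, int buf_size,
                                   int *predictor, int *step_index)
    {
        GetByteContext gb;

        bytestream2_init(&gb, buf, buf_size);
        if (bytestream2_get_bytes_left(&gb) < 3)
            return AVERROR_INVALIDDATA;

        *predictor  = (int16_t) bytestream2_get_le16(&gb); /* sign-extend to int */
        *step_index = bytestream2_get_byte(&gb);
        return 0;
    }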
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
AV_CODEC_ID_ADPCM_4XM
@ AV_CODEC_ID_ADPCM_4XM
Definition: codec_id.h:382
adpcm_agm_expand_nibble
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:369
AV_CODEC_ID_ADPCM_PSX
@ AV_CODEC_ID_ADPCM_PSX
Definition: codec_id.h:412
adpcm_mtaf_expand_nibble
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:642
CASE
#define CASE(codec,...)
Definition: adpcm.c:78
ima_cunning_step_table
static const int16_t ima_cunning_step_table[61]
Definition: adpcm.c:119
ADPCMChannelStatus
Definition: adpcm.h:31
mtf_index_table
static const int8_t mtf_index_table[16]
Definition: adpcm.c:237
channel
channel
Definition: ebur128.h:39
AV_CODEC_ID_ADPCM_SBPRO_3
@ AV_CODEC_ID_ADPCM_SBPRO_3
Definition: codec_id.h:391
ADPCMDecodeContext::status
ADPCMChannelStatus status[14]
Definition: adpcm.c:245
swf_index_tables
static const int8_t swf_index_tables[4][16]
Definition: adpcm.c:226