80 { -1, -1, -1, -1, 2, 4, 6, 8 },
81 { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
95 unsigned int min_channels = 1;
96 unsigned int max_channels = 2;
134 c->status[0].step = c->status[1].step = 511;
195 if ((nibble & 8) == 0)
196 pred = av_clip(pred + (add >> 3), -32767, 32767);
198 pred = av_clip(pred - (add >> 3), -32767, 32767);
205 c->step = av_clip(c->step * 2, 127, 24576);
223 c->step = av_clip(c->step, 127, 24576);
236 step_index = av_clip(step_index, 0, 88);
244 predictor = c->predictor;
245 if (sign) predictor -= diff;
246 else predictor += diff;
248 c->predictor = av_clip_int16(predictor);
249 c->step_index = step_index;
251 return (int16_t)c->predictor;
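The fragment above (lines 236-251) is the core IMA ADPCM update: clamp the step index into the 89-entry step table, apply the nibble's sign and magnitude to the predictor, then clip to 16 bits. A minimal stand-alone sketch of the same idea, using the standard IMA tables; the ImaState type and function name are illustrative, not FFmpeg's:

#include <stdint.h>

/* The standard 89-entry IMA step table (ff_adpcm_step_table). */
static const int16_t step_table[89] = {
        7,     8,     9,    10,    11,    12,    13,    14,    16,    17,
       19,    21,    23,    25,    28,    31,    34,    37,    41,    45,
       50,    55,    60,    66,    73,    80,    88,    97,   107,   118,
      130,   143,   157,   173,   190,   209,   230,   253,   279,   307,
      337,   371,   408,   449,   494,   544,   598,   658,   724,   796,
      876,   963,  1060,  1166,  1282,  1411,  1552,  1707,  1878,  2066,
     2272,  2499,  2749,  3024,  3327,  3660,  4026,  4428,  4871,  5358,
     5894,  6484,  7132,  7845,  8630,  9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};
/* Step-index adjustment per nibble (ff_adpcm_index_table). */
static const int8_t index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8, -1, -1, -1, -1, 2, 4, 6, 8
};

typedef struct { int predictor, step_index; } ImaState;   /* illustrative */

static int16_t ima_expand_nibble(ImaState *s, int nibble)
{
    int step  = step_table[s->step_index];
    int delta = nibble & 7;
    int diff  = ((2 * delta + 1) * step) >> 3;   /* (delta + 0.5) * step / 4 */

    if (nibble & 8) s->predictor -= diff;        /* bit 3 is the sign */
    else            s->predictor += diff;

    /* clip the predictor to int16 and the index into the table */
    if (s->predictor >  32767) s->predictor =  32767;
    if (s->predictor < -32768) s->predictor = -32768;
    s->step_index += index_table[nibble & 15];
    if (s->step_index <  0) s->step_index = 0;
    if (s->step_index > 88) s->step_index = 88;
    return (int16_t)s->predictor;
}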
262 step_index = av_clip(step_index, 0, 88);
264 sign = nibble & (1 << shift);
267 predictor = c->predictor;
268 if (sign) predictor -= diff;
269 else predictor += diff;
271 c->predictor = av_clip_int16(predictor);
272 c->step_index = step_index;
274 return (int16_t)c->predictor;
285 step_index = av_clip(step_index, 0, 88);
293 predictor = c->predictor - diff;
295 predictor = c->predictor + diff;
297 c->predictor = av_clip_int16(predictor);
298 c->step_index = step_index;
307 predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
308 predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
310 c->sample2 = c->sample1;
311 c->sample1 = av_clip_int16(predictor);
313 if (c->idelta < 16) c->idelta = 16;
314 if (c->idelta > INT_MAX/768) {
316 c->idelta = INT_MAX/768;
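Lines 307-316 implement the Microsoft ADPCM recurrence: a two-tap linear predictor whose coefficients are fixed-point scaled by 64, plus the signed 4-bit nibble times the adaptive delta. A self-contained sketch of that step; the adaptation constants are the standard MS ADPCM table, while the MsState type and function name are illustrative:

/* Standard MS ADPCM adaptation table (ff_adpcm_AdaptationTable). */
static const int adapt_table[16] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

typedef struct { int sample1, sample2, coeff1, coeff2, idelta; } MsState;

static int16_t ms_expand_nibble(MsState *c, int nibble)
{
    /* two-tap predictor; coefficients carry 6 fractional bits (scale 64) */
    int predictor = (c->sample1 * c->coeff1 + c->sample2 * c->coeff2) / 64;
    predictor += ((nibble & 8) ? nibble - 16 : nibble) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = predictor >  32767 ?  32767 :
                 predictor < -32768 ? -32768 : predictor;

    /* adapt the quantization step; the decoder above also caps it at
     * INT_MAX/768 to avoid overflow */
    c->idelta = (adapt_table[nibble & 15] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;
    return (int16_t)c->sample1;
}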
328 step_index = av_clip(step_index, 0, 48);
333 predictor = c->predictor;
334 if (sign) predictor -= diff;
335 else predictor += diff;
337 c->predictor = av_clip_intp2(predictor, 11);
338 c->step_index = step_index;
340 return c->predictor * 16;
355 c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
356 c->predictor = av_clip_int16(c->predictor);
359 c->step = av_clip(new_step, 511, 32767);
361 return (int16_t)c->predictor;
368 sign = nibble & (1<<(size-1));
373 c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384, 16256);
378 else if (delta == 0 && c->step > 0)
381 return (int16_t)c->predictor;
392 c->predictor = av_clip_int16(c->predictor);
394 c->step = av_clip(c->step, 127, 24576);
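Lines 392-394 belong to the Yamaha/SMAF update, whose whole expand step fits in a few lines: the difference is looked up per nibble, and the step size adapts multiplicatively while being kept in [127, 24576]. A stand-alone sketch using the standard Yamaha tables (ff_adpcm_yamaha_indexscale / ff_adpcm_yamaha_difflookup); the YamahaState type and function name are illustrative:

static const int16_t yamaha_indexscale[16] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    230, 230, 230, 230, 307, 409, 512, 614
};
static const int8_t yamaha_difflookup[16] = {
     1,  3,  5,  7,  9,  11,  13,  15,
    -1, -3, -5, -7, -9, -11, -13, -15
};

typedef struct { int predictor, step; } YamahaState;   /* illustrative */

static int16_t yamaha_expand_nibble(YamahaState *c, unsigned nibble)
{
    if (!c->step)                       /* first call: default state */
        c->step = 127;
    c->predictor += (c->step * yamaha_difflookup[nibble & 15]) / 8;
    if (c->predictor >  32767) c->predictor =  32767;
    if (c->predictor < -32768) c->predictor = -32768;
    c->step = (c->step * yamaha_indexscale[nibble & 15]) >> 8;
    if (c->step < 127)   c->step = 127;     /* same clamp as line 394 */
    if (c->step > 24576) c->step = 24576;
    return (int16_t)c->predictor;
}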
401 c->predictor = av_clip_int16(c->predictor);
403 c->step = av_clip_uintp2(c->step, 5);
416 out0 += sample_offset;
420 out1 += sample_offset;
443 s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
445 s_1 = av_clip_int16(s);
474 s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
476 s_1 = av_clip_int16(s);
500 int k0, signmask, nb_bits, count;
501 int size = buf_size*8;
509 k0 = 1 << (nb_bits-2);
510 signmask = 1 << (nb_bits-1);
536 if (delta & signmask)
537 c->status[i].predictor -= vpdiff;
539 c->status[i].predictor += vpdiff;
543 c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
544 c->status[i].predictor = av_clip_int16(c->status[i].predictor);
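The k0/signmask variables above (lines 500-544) handle IMA WAV streams whose codes are wider than 4 bits: signmask selects the sign bit of an nb_bits-wide code, and the magnitude bits below k0 accumulate shifted copies of the step. A sketch of the classic generalization of the IMA difference term, under the assumption that it matches the bit-serial formulation used here (function name illustrative):

/* Generic-width IMA difference: for an nb_bits-wide code, each set
 * magnitude bit adds step >> i, plus a rounding term of step >> (nb_bits-1).
 * With nb_bits == 4 this reduces to the familiar 4-bit IMA formula. */
static int ima_vpdiff(int delta, int step, int nb_bits)
{
    int k0     = 1 << (nb_bits - 2);        /* top magnitude bit          */
    int vpdiff = step >> (nb_bits - 1);     /* the "+0.5" rounding term   */
    int k;
    for (k = k0; k > 0; k >>= 1, step >>= 1)
        if (delta & k)
            vpdiff += step;                 /* step >> i for bit i        */
    return vpdiff;
}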
564 int buf_size, int *coded_samples, int *approx_nb_samples)
569 int has_coded_samples = 0;
573 *approx_nb_samples = 0;
581 if (buf_size < 76 * ch)
586 if (buf_size < 34 * ch)
598 nb_samples = buf_size * 2 / ch;
615 return (buf_size - header_size) * 2 / ch;
620 has_coded_samples = 1;
621 *coded_samples = bytestream2_get_le32(gb);
622 *coded_samples -= *coded_samples % 28;
623 nb_samples = (buf_size - 12) / 30 * 28;
626 has_coded_samples = 1;
627 *coded_samples = bytestream2_get_le32(gb);
628 nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
631 nb_samples = (buf_size - ch) / ch * 2;
638 has_coded_samples = 1;
641 header_size = 4 + 9 * ch;
642 *coded_samples = bytestream2_get_le32(gb);
645 header_size = 4 + 5 * ch;
646 *coded_samples = bytestream2_get_le32(gb);
649 header_size = 4 + 5 * ch;
650 *coded_samples = bytestream2_get_be32(gb);
653 *coded_samples -= *coded_samples % 28;
654 nb_samples = (buf_size - header_size) * 2 / ch;
655 nb_samples -= nb_samples % 28;
656 *approx_nb_samples = 1;
661 nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
666 if (buf_size < 4 * ch)
668 nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
673 nb_samples = (buf_size - 4 * ch) * 2 / ch;
681 if (buf_size < 4 * ch)
683 nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
689 nb_samples = (buf_size - 6 * ch) * 2 / ch;
694 nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
700 int samples_per_byte;
706 if (!s->status[0].step_index) {
712 nb_samples += buf_size * samples_per_byte / ch;
717 int buf_bits = buf_size * 8 - 2;
718 int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
719 int block_hdr_size = 22 * ch;
720 int block_size = block_hdr_size + nbits * ch * 4095;
721 int nblocks = buf_bits / block_size;
722 int bits_left = buf_bits - nblocks * block_size;
723 nb_samples = nblocks * 4096;
724 if (bits_left >= block_hdr_size)
725 nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
731 nb_samples = buf_size * 14 / (8 * ch);
734 has_coded_samples = 1;
737 bytestream2_get_le32(gb) :
738 bytestream2_get_be32(gb);
739 buf_size -= 8 + 36 * ch;
741 nb_samples = buf_size / 8 * 14;
742 if (buf_size % 8 > 1)
743 nb_samples += (buf_size % 8 - 1) * 2;
744 *approx_nb_samples = 1;
747 nb_samples = buf_size / (9 * ch) * 16;
750 nb_samples = (buf_size / 128) * 224 / ch;
754 nb_samples = buf_size / (16 * ch) * 28;
759 if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
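Most branches of get_nb_samples() above follow the same shape: subtract a fixed per-channel header from buf_size, then convert the remaining bytes to samples at the codec's bits per sample (two samples per byte for 4-bit codecs). A sketch of that common pattern; the function name and the header size are hypothetical:

/* Common sample-count pattern for a 4-bit codec (sketch): */
static int nb_samples_4bit(int buf_size, int ch, int header_per_ch)
{
    int payload = buf_size - header_per_ch * ch;   /* bytes after headers */
    if (payload < 0)
        return 0;
    return payload * 2 / ch;   /* two 4-bit samples per byte, split per channel */
}
/* e.g. a 2048-byte stereo packet with an 8-byte header per channel:
 *   (2048 - 16) * 2 / 2 = 2032 samples per channel. */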
766 int *got_frame_ptr, AVPacket *avpkt)
770 int buf_size = avpkt->size;
778 int nb_samples, coded_samples, approx_nb_samples, ret;
782 nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
783 if (nb_samples <= 0) {
789 frame->nb_samples = nb_samples;
793 samples_p = (int16_t **)frame->extended_data;
798 if (!approx_nb_samples && coded_samples != nb_samples)
800 frame->nb_samples = nb_samples = coded_samples;
816 predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
817 step_index = predictor & 0x7F;
840 for (m = 0; m < 64; m += 2) {
841 int byte = bytestream2_get_byteu(&gb);
849 cs = &(c->status[i]);
866 for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
871 samples = &samples_p[i][1 + n * samples_per_block];
872 for (j = 0; j < block_size; j++) {
874 (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
879 for (m = 0; m < samples_per_block; m++) {
887 for (n = 0; n < (nb_samples - 1) / 8; n++) {
891 for (m = 0; m < 8; m += 2) {
892 int v = bytestream2_get_byteu(&gb);
902 c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
905 c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
906 if (c->status[i].step_index > 88u) {
908 i, c->status[i].step_index);
916 for (n = nb_samples >> 1; n > 0; n--) {
917 int v = bytestream2_get_byteu(&gb);
925 c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
927 c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
929 for (n = 0; n < nb_samples >> (1 - st); n++) {
930 int v = bytestream2_get_byteu(&gb);
939 block_predictor = bytestream2_get_byteu(&gb);
940 if (block_predictor > 6) {
948 block_predictor = bytestream2_get_byteu(&gb);
949 if (block_predictor > 6) {
957 c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
959 c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
962 c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
963 if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
964 c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
965 if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
968 if (st) *samples++ = c->status[1].sample2;
970 if (st) *samples++ = c->status[1].sample1;
971 for (n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
972 int byte = bytestream2_get_byteu(&gb);
981 c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
982 c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
987 for (n = 0; n < nb_samples; n += 2) {
988 int v = bytestream2_get_byteu(&gb);
992 for (n = 0; n < nb_samples; n += 2) {
993 int v = bytestream2_get_byteu(&gb);
1010 for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1011 int v = bytestream2_get_byteu(&gb);
1020 int decode_top_nibble_next = 0;
1025 c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1026 c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1027 c->status[0].step_index = bytestream2_get_byteu(&gb);
1028 c->status[1].step_index = bytestream2_get_byteu(&gb);
1029 if (c->status[0].step_index > 88u || c->status[1].step_index > 88u) {
1031 c->status[0].step_index, c->status[1].step_index);
1035 diff_channel = c->status[1].predictor;
1038 #define DK3_GET_NEXT_NIBBLE() \
1039 if (decode_top_nibble_next) { \
1040 nibble = last_byte >> 4; \
1041 decode_top_nibble_next = 0; \
1043 last_byte = bytestream2_get_byteu(&gb); \
1044 nibble = last_byte & 0x0F; \
1045 decode_top_nibble_next = 1; \
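DK3_GET_NEXT_NIBBLE() above pulls 4-bit codes from the packet low nibble first, reading a fresh byte on every other invocation. The same logic written out as a plain function (a sketch; the function and parameter names are illustrative):

static int dk3_next_nibble(const uint8_t **p, int *last_byte, int *top_next)
{
    int nibble;
    if (*top_next) {                    /* high half of the previous byte */
        nibble    = *last_byte >> 4;
        *top_next = 0;
    } else {                            /* fetch a new byte, use low half */
        *last_byte = *(*p)++;
        nibble     = *last_byte & 0x0F;
        *top_next  = 1;
    }
    return nibble;
}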
1048 while (samples < samples_end) {
1062 diff_channel = (diff_channel + c->status[1].predictor) / 2;
1063 *samples++ = c->status[0].predictor + c->status[1].predictor;
1064 *samples++ = c->status[0].predictor - c->status[1].predictor;
1071 diff_channel = (diff_channel + c->status[1].predictor) / 2;
1072 *samples++ = c->status[0].predictor + c->status[1].predictor;
1073 *samples++ = c->status[0].predictor - c->status[1].predictor;
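The repeated output statements above reconstruct stereo from DK3's mid/side representation: status[0] carries the sum (mid) stream, status[1] the difference (side) stream, smoothed by averaging consecutive side predictions. Equivalently (sketch; function name illustrative):

static void dk3_output_pair(int sum, int diff, int16_t *out)
{
    out[0] = (int16_t)(sum + diff);   /* left  */
    out[1] = (int16_t)(sum - diff);   /* right */
}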
1092 for (n = nb_samples >> (1 - st); n > 0; n--) {
1094 int v = bytestream2_get_byteu(&gb);
1112 for (n = 0; n < nb_samples; n += 2) {
1113 int v = bytestream2_get_byteu(&gb);
1121 int v = bytestream2_get_byteu(&gb);
1128 int v = bytestream2_get_byteu(&gb);
1144 for (n = 0; n < nb_samples / 2; n++) {
1147 byte[0] = bytestream2_get_byteu(&gb);
1149 byte[1] = bytestream2_get_byteu(&gb);
1159 if (c->vqa_version == 3) {
1161 int16_t *smp = samples_p[channel];
1163 for (n = nb_samples / 2; n > 0; n--) {
1164 int v = bytestream2_get_byteu(&gb);
1170 for (n = nb_samples / 2; n > 0; n--) {
1172 int v = bytestream2_get_byteu(&gb);
1183 int16_t *out0 = samples_p[0];
1184 int16_t *out1 = samples_p[1];
1185 int samples_per_block = 28 * (3 - avctx->channels) * 4;
1186 int sample_offset = 0;
1187 int bytes_remaining;
1190 &c->status[0], &c->status[1],
1191 avctx->channels, sample_offset)) < 0)
1194 sample_offset += samples_per_block;
1199 if (bytes_remaining > 0) {
1205 for (i=0; i<=st; i++) {
1206 c->status[i].step_index = bytestream2_get_le32u(&gb);
1207 if (c->status[i].step_index > 88u) {
1209 i, c->status[i].step_index);
1213 for (i=0; i<=st; i++) {
1214 c->status[i].predictor = bytestream2_get_le32u(&gb);
1215 if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1219 for (n = nb_samples >> (1 - st); n > 0; n--) {
1220 int byte = bytestream2_get_byteu(&gb);
1226 for (n = nb_samples >> (1 - st); n > 0; n--) {
1227 int byte = bytestream2_get_byteu(&gb);
1234 int previous_left_sample, previous_right_sample;
1235 int current_left_sample, current_right_sample;
1236 int next_left_sample, next_right_sample;
1237 int coeff1l, coeff2l, coeff1r, coeff2r;
1238 int shift_left, shift_right;
1246 current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1247 previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1248 current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1249 previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1251 for (count1 = 0; count1 < nb_samples / 28; count1++) {
1252 int byte = bytestream2_get_byteu(&gb);
1258 byte = bytestream2_get_byteu(&gb);
1259 shift_left = 20 - (byte >> 4);
1260 shift_right = 20 - (byte & 0x0F);
1262 for (count2 = 0; count2 < 28; count2++) {
1263 byte = bytestream2_get_byteu(&gb);
1264 next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1265 next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1267 next_left_sample = (next_left_sample +
1268 (current_left_sample * coeff1l) +
1269 (previous_left_sample * coeff2l) + 0x80) >> 8;
1270 next_right_sample = (next_right_sample +
1271 (current_right_sample * coeff1r) +
1272 (previous_right_sample * coeff2r) + 0x80) >> 8;
1274 previous_left_sample = current_left_sample;
1275 current_left_sample = av_clip_int16(next_left_sample);
1276 previous_right_sample = current_right_sample;
1277 current_right_sample = av_clip_int16(next_right_sample);
1278 *samples++ = current_left_sample;
1279 *samples++ = current_right_sample;
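Each pass of the loop above (lines 1264-1279) expands one 4-bit code per channel: scale by the per-block shift, run it through a two-tap predictor with 0x80 rounding before the >>8 normalization, clip, and rotate the history. A stand-alone sketch of one such step; the function and parameter names are illustrative, and the coefficient pairs come from ea_adpcm_table:

static int ea_expand(int code4, int shift, int *cur, int *prev,
                     int coeff1, int coeff2)
{
    /* sign-extend the 4-bit code and apply the per-block scale */
    int s = ((code4 & 8) ? code4 - 16 : code4) * (1 << shift);
    /* two-tap predictor, 8 fractional bits, rounded with 0x80 */
    s = (s + *cur * coeff1 + *prev * coeff2 + 0x80) >> 8;
    if (s >  32767) s =  32767;
    if (s < -32768) s = -32768;
    *prev = *cur;                       /* rotate the history samples */
    *cur  = s;
    return s;
}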
1292 int byte = bytestream2_get_byteu(&gb);
1297 for (count1 = 0; count1 < nb_samples / 2; count1++) {
1300 byte[0] = bytestream2_get_byteu(&gb);
1301 if (st) byte[1] = bytestream2_get_byteu(&gb);
1302 for (i = 4; i >= 0; i -= 4) {
1325 int previous_sample, current_sample, next_sample;
1334 offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1335 bytestream2_get_le32(&gb)) +
1340 samplesC = samples_p[channel];
1343 current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1344 previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1346 current_sample = c->status[channel].predictor;
1347 previous_sample = c->status[channel].prev_sample;
1350 for (count1 = 0; count1 < nb_samples / 28; count1++) {
1351 int byte = bytestream2_get_byte(&gb);
1353 current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1354 previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1356 for (count2=0; count2<28; count2++)
1357 *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1361 shift = 20 - (byte & 0x0F);
1363 for (count2=0; count2<28; count2++) {
1367 byte = bytestream2_get_byte(&gb);
1371 next_sample += (current_sample * coeff1) +
1372 (previous_sample * coeff2);
1373 next_sample = av_clip_int16(next_sample >> 8);
1375 previous_sample = current_sample;
1376 current_sample = next_sample;
1377 *samplesC++ = current_sample;
1383 } else if (count != count1) {
1389 c->status[channel].predictor = current_sample;
1390 c->status[channel].prev_sample = previous_sample;
1402 for (n = 0; n < 4; n++, s += 32) {
1413 for (m=2; m<32; m+=2) {
1415 for (n = 0; n < 4; n++, s += 32) {
1417 int byte = bytestream2_get_byteu(&gb);
1421 s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1425 s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1431 c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1432 c->status[0].step_index = bytestream2_get_byteu(&gb);
1434 if (c->status[0].step_index > 88u) {
1436 c->status[0].step_index);
1440 for (n = nb_samples >> (1 - st); n > 0; n--) {
1441 int v = bytestream2_get_byteu(&gb);
1449 c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1450 c->status[i].step_index = bytestream2_get_byteu(&gb);
1452 if (c->status[i].step_index > 88u) {
1454 c->status[i].step_index);
1459 for (n = nb_samples >> (1 - st); n > 0; n--) {
1460 int v = bytestream2_get_byteu(&gb);
1467 for (n = nb_samples >> (1 - st); n > 0; n--) {
1468 int v = bytestream2_get_byteu(&gb);
1476 if (!c->status[0].step_index) {
1478 *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1480 *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1481 c->status[0].step_index = 1;
1485 for (n = nb_samples >> (1 - st); n > 0; n--) {
1486 int byte = bytestream2_get_byteu(&gb);
1493 for (n = (nb_samples<<st) / 3; n > 0; n--) {
1494 int byte = bytestream2_get_byteu(&gb);
1498 (byte >> 2) & 0x07, 3, 0);
1503 for (n = nb_samples >> (2 - st); n > 0; n--) {
1504 int byte = bytestream2_get_byteu(&gb);
1508 (byte >> 4) & 0x03, 2, 2);
1510 (byte >> 2) & 0x03, 2, 2);
1521 for (n = nb_samples >> (1 - st); n > 0; n--) {
1522 int v = bytestream2_get_byteu(&gb);
1528 if (!c->has_status) {
1535 for (n = nb_samples >> 1; n > 0; n--) {
1536 int v = bytestream2_get_byteu(&gb);
1544 int samples_per_block;
1548 samples_per_block = avctx->extradata[0] / 16;
1549 blocks = nb_samples / avctx->extradata[0];
1551 samples_per_block = nb_samples / 16;
1555 for (m = 0; m < blocks; m++) {
1557 int prev1 = c->status[channel].sample1;
1558 int prev2 = c->status[channel].sample2;
1562 for (i = 0; i < samples_per_block; i++) {
1563 int byte = bytestream2_get_byteu(&gb);
1564 int scale = 1 << (byte >> 4);
1565 int index = byte & 0xf;
1570 for (n = 0; n < 16; n++) {
1576 byte = bytestream2_get_byteu(&gb);
1580 sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1582 *samples = av_clip_int16(sampledat);
1588 c->status[channel].sample1 = prev1;
1589 c->status[channel].sample2 = prev2;
1601 #define THP_GET16(g) \
1603 avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1604 bytestream2_get_le16u(&(g)) : \
1605 bytestream2_get_be16u(&(g)), 16)
1616 for (n = 0; n < 16; n++)
1620 for (n = 0; n < 16; n++)
1623 if (!c->has_status) {
1639 for (i = 0; i < (nb_samples + 13) / 14; i++) {
1640 int byte = bytestream2_get_byteu(&gb);
1641 int index = (byte >> 4) & 7;
1642 unsigned int exp = byte & 0x0F;
1647 for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1653 byte = bytestream2_get_byteu(&gb);
1657 sampledat = ((c->status[ch].sample1 * factor1
1658 + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1659 *samples = av_clip_int16(sampledat);
1660 c->status[ch].sample2 = c->status[ch].sample1;
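Lines 1657-1660 are the THP/GameCube-DSP recurrence: two history samples filtered with 11 fractional bits, plus the 4-bit residual scaled by the frame's exponent. A stand-alone sketch of one sample (function and parameter names are illustrative; f1/f2 are the per-frame coefficients from the coefficient table):

static int16_t thp_sample(int nib4, unsigned exp, int f1, int f2,
                          int *hist1, int *hist2)
{
    int residual = (nib4 & 8) ? nib4 - 16 : nib4;   /* sign-extend nibble */
    int s = ((*hist1 * f1 + *hist2 * f2) >> 11) + residual * (1 << exp);
    if (s >  32767) s =  32767;
    if (s < -32768) s = -32768;
    *hist2 = *hist1;                    /* rotate history, newest first */
    *hist1 = s;
    return (int16_t)s;
}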
1672 for (i = 0; i < nb_samples / 28; i++) {
1676 header = bytestream2_get_byteu(&gb);
1680 for (n = 0; n < 28; n++) {
1685 prev = (c->status[channel].sample1 * 0x3c);
1688 prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1691 prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1697 prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1699 byte = bytestream2_get_byteu(&gb);
1705 sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1706 *samples++ = av_clip_int16(sampledat >> 6);
1708 c->status[channel].sample1 = sampledat;
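The filter pairs selected above (0x3c; 0x73/0x34; 0x62/0x37) are the familiar XA-family predictors in Q6 fixed point, i.e. 0.9375, 1.796875/-0.8125 and 1.53125/-0.859375; prev is rounded back down with (prev + 0x20) >> 6 before being added to the shifted residual. A sketch of the predictor selection (function name illustrative):

/* XA-style predictor select, coefficients in Q6 fixed point (sketch): */
static int xa_predict_q6(int mode, int sample1, int sample2)
{
    switch (mode) {
    case 1:  return sample1 * 0x3c;                   /*  0.9375            */
    case 2:  return sample1 * 0x73 - sample2 * 0x34;  /*  1.7969, -0.8125   */
    case 3:  return sample1 * 0x62 - sample2 * 0x37;  /*  1.5313, -0.8594   */
    default: return 0;                                /* no prediction      */
    }
}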
1720 for (i = 0; i < nb_samples / 28; i++) {
1723 filter = bytestream2_get_byteu(&gb);
1728 flag = bytestream2_get_byteu(&gb);
1731 for (n = 0; n < 28; n++) {
1738 byte = bytestream2_get_byteu(&gb);
1742 scale = scale * (1 << 12);
1787 #define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
1788 AVCodec ff_ ## name_ ## _decoder = { \
1790 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
1791 .type = AVMEDIA_TYPE_AUDIO, \
1793 .priv_data_size = sizeof(ADPCMDecodeContext), \
1794 .init = adpcm_decode_init, \
1795 .decode = adpcm_decode_frame, \
1796 .flush = adpcm_flush, \
1797 .capabilities = AV_CODEC_CAP_DR1, \
1798 .sample_fmts = sample_fmts_, \
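Each supported codec is then registered by expanding ADPCM_DECODER once per decoder at the end of the file. One representative expansion, matching the macro's parameter order (id, sample formats, short name, long name):

ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p,
              adpcm_ima_qt, "ADPCM IMA QuickTime");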